Diffstat (limited to 'third_party/abseil_cpp/absl/base/internal')
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/atomic_hook.h  200
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/atomic_hook_test.cc  97
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/atomic_hook_test_helper.cc  32
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/atomic_hook_test_helper.h  34
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/bits.h  218
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/bits_test.cc  97
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/cmake_thread_test.cc  22
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/cycleclock.cc  107
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/cycleclock.h  94
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/direct_mmap.h  166
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/endian.h  266
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/endian_test.cc  263
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/errno_saver.h  43
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/errno_saver_test.cc  45
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/exception_safety_testing.cc  79
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/exception_safety_testing.h  1101
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/exception_testing.h  42
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/exponential_biased.cc  93
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/exponential_biased.h  130
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/exponential_biased_test.cc  199
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/fast_type_id.h  48
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/fast_type_id_test.cc  123
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/hide_ptr.h  51
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/identity.h  37
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/inline_variable.h  107
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/inline_variable_testing.h  46
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/invoke.h  187
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/low_level_alloc.cc  620
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/low_level_alloc.h  126
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/low_level_alloc_test.cc  162
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/low_level_scheduling.h  107
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/per_thread_tls.h  52
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/periodic_sampler.cc  53
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/periodic_sampler.h  211
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/periodic_sampler_benchmark.cc  79
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/periodic_sampler_test.cc  177
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/pretty_function.h  33
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/raw_logging.cc  240
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/raw_logging.h  183
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/scheduling_mode.h  58
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/scoped_set_env.cc  81
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/scoped_set_env.h  45
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/scoped_set_env_test.cc  99
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/spinlock.cc  220
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/spinlock.h  230
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/spinlock_akaros.inc  35
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/spinlock_benchmark.cc  52
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/spinlock_linux.inc  74
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/spinlock_posix.inc  46
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/spinlock_wait.cc  81
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/spinlock_wait.h  93
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/spinlock_win32.inc  37
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/strerror.cc  75
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/strerror.h  39
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/strerror_benchmark.cc  38
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/strerror_test.cc  86
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/sysinfo.cc  425
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/sysinfo.h  66
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/sysinfo_test.cc  100
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/thread_annotations.h  271
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/thread_identity.cc  152
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/thread_identity.h  259
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/thread_identity_benchmark.cc  38
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/thread_identity_test.cc  129
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/throw_delegate.cc  108
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/throw_delegate.h  75
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/tsan_mutex_interface.h  66
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/unaligned_access.h  158
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/unique_small_name_test.cc  77
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/unscaledcycleclock.cc  140
-rw-r--r--  third_party/abseil_cpp/absl/base/internal/unscaledcycleclock.h  124
71 files changed, 9547 insertions, 0 deletions
diff --git a/third_party/abseil_cpp/absl/base/internal/atomic_hook.h b/third_party/abseil_cpp/absl/base/internal/atomic_hook.h
new file mode 100644
index 000000000000..ae21cd7fe50a
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/atomic_hook.h
@@ -0,0 +1,200 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
+#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
+
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <utility>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 0
+#else
+#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 1
+#endif
+
+#if defined(_MSC_VER)
+#define ABSL_HAVE_WORKING_ATOMIC_POINTER 0
+#else
+#define ABSL_HAVE_WORKING_ATOMIC_POINTER 1
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+template <typename T>
+class AtomicHook;
+
+// To workaround AtomicHook not being constant-initializable on some platforms,
+// prefer to annotate instances with `ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES`
+// instead of `ABSL_CONST_INIT`.
+#if ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+#define ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_CONST_INIT
+#else
+#define ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+#endif
+
+// `AtomicHook` is a helper class, templatized on a raw function pointer type,
+// for implementing Abseil customization hooks.  It is a callable object that
+// dispatches to the registered hook.  Objects of type `AtomicHook` must have
+// static or thread storage duration.
+//
+// A default constructed object performs a no-op (and returns a default
+// constructed object) if no hook has been registered.
+//
+// Hooks can be pre-registered via constant initialization, for example:
+//
+// ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static AtomicHook<void(*)()>
+//     my_hook(DefaultAction);
+//
+// and then changed at runtime via a call to `Store()`.
+//
+// Reads and writes guarantee memory_order_acquire/memory_order_release
+// semantics.
+template <typename ReturnType, typename... Args>
+class AtomicHook<ReturnType (*)(Args...)> {
+ public:
+  using FnPtr = ReturnType (*)(Args...);
+
+  // Constructs an object that by default performs a no-op (and
+  // returns a default constructed object) when no hook has been registered.
+  constexpr AtomicHook() : AtomicHook(DummyFunction) {}
+
+  // Constructs an object that by default dispatches to/returns the
+  // pre-registered default_fn when no hook has been registered at runtime.
+#if ABSL_HAVE_WORKING_ATOMIC_POINTER && ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+  explicit constexpr AtomicHook(FnPtr default_fn)
+      : hook_(default_fn), default_fn_(default_fn) {}
+#elif ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+  explicit constexpr AtomicHook(FnPtr default_fn)
+      : hook_(kUninitialized), default_fn_(default_fn) {}
+#else
+  // As of January 2020, on all known versions of MSVC this constructor runs in
+  // the global constructor sequence.  If `Store()` is called by a dynamic
+  // initializer, we want to preserve the value, even if this constructor runs
+  // after the call to `Store()`.  If not, `hook_` will be
+  // zero-initialized by the linker and we have no need to set it.
+  // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html
+  explicit constexpr AtomicHook(FnPtr default_fn)
+      : /* hook_(deliberately omitted), */ default_fn_(default_fn) {
+    static_assert(kUninitialized == 0, "here we rely on zero-initialization");
+  }
+#endif
+
+  // Stores the provided function pointer as the value for this hook.
+  //
+  // This is intended to be called once.  Multiple calls are legal only if the
+  // same function pointer is provided for each call.  The store is implemented
+  // as a memory_order_release operation, and read accesses are implemented as
+  // memory_order_acquire.
+  void Store(FnPtr fn) {
+    bool success = DoStore(fn);
+    static_cast<void>(success);
+    assert(success);
+  }
+
+  // Invokes the registered callback.  If no callback has yet been registered, a
+  // default-constructed object of the appropriate type is returned instead.
+  template <typename... CallArgs>
+  ReturnType operator()(CallArgs&&... args) const {
+    return DoLoad()(std::forward<CallArgs>(args)...);
+  }
+
+  // Returns the registered callback, or nullptr if none has been registered.
+  // Useful if client code needs to conditionalize behavior based on whether a
+  // callback was registered.
+  //
+  // Note that atomic_hook.Load()() and atomic_hook() have different semantics:
+  // operator()() will perform a no-op if no callback was registered, while
+  // Load()() will dereference a null function pointer.  Prefer operator()() to
+  // Load()() unless you must conditionalize behavior on whether a hook was
+  // registered.
+  FnPtr Load() const {
+    FnPtr ptr = DoLoad();
+    return (ptr == DummyFunction) ? nullptr : ptr;
+  }
+
+ private:
+  static ReturnType DummyFunction(Args...) {
+    return ReturnType();
+  }
+
+  // Current versions of MSVC (as of September 2017) have a broken
+  // implementation of std::atomic<T*>:  Its constructor attempts to do the
+  // equivalent of a reinterpret_cast in a constexpr context, which is not
+  // allowed.
+  //
+  // This causes an issue when building with LLVM under Windows.  To avoid this,
+  // we use a less-efficient, intptr_t-based implementation on Windows.
+#if ABSL_HAVE_WORKING_ATOMIC_POINTER
+  // Return the stored value, or DummyFunction if no value has been stored.
+  FnPtr DoLoad() const { return hook_.load(std::memory_order_acquire); }
+
+  // Store the given value.  Returns false if a different value was already
+  // stored to this object.
+  bool DoStore(FnPtr fn) {
+    assert(fn);
+    FnPtr expected = default_fn_;
+    const bool store_succeeded = hook_.compare_exchange_strong(
+        expected, fn, std::memory_order_acq_rel, std::memory_order_acquire);
+    const bool same_value_already_stored = (expected == fn);
+    return store_succeeded || same_value_already_stored;
+  }
+
+  std::atomic<FnPtr> hook_;
+#else  // !ABSL_HAVE_WORKING_ATOMIC_POINTER
+  // Use a sentinel value unlikely to be the address of an actual function.
+  static constexpr intptr_t kUninitialized = 0;
+
+  static_assert(sizeof(intptr_t) >= sizeof(FnPtr),
+                "intptr_t can't contain a function pointer");
+
+  FnPtr DoLoad() const {
+    const intptr_t value = hook_.load(std::memory_order_acquire);
+    if (value == kUninitialized) {
+      return default_fn_;
+    }
+    return reinterpret_cast<FnPtr>(value);
+  }
+
+  bool DoStore(FnPtr fn) {
+    assert(fn);
+    const auto value = reinterpret_cast<intptr_t>(fn);
+    intptr_t expected = kUninitialized;
+    const bool store_succeeded = hook_.compare_exchange_strong(
+        expected, value, std::memory_order_acq_rel, std::memory_order_acquire);
+    const bool same_value_already_stored = (expected == value);
+    return store_succeeded || same_value_already_stored;
+  }
+
+  std::atomic<intptr_t> hook_;
+#endif
+
+  const FnPtr default_fn_;
+};
+
+#undef ABSL_HAVE_WORKING_ATOMIC_POINTER
+#undef ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
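
For reference, a minimal sketch of the registration pattern described in the atomic_hook.h comments above. The names LogFn, DefaultLog, CustomLog, and my_hook are hypothetical, not part of this patch:

    #include "absl/base/internal/atomic_hook.h"

    namespace {

    using LogFn = void (*)(const char*);

    // Pre-registered default; dispatched until a hook is stored.
    void DefaultLog(const char*) {}

    // Static storage duration, constant-initialized where the platform
    // supports it (see ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES above).
    ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
    absl::base_internal::AtomicHook<LogFn> my_hook(DefaultLog);

    void CustomLog(const char* msg) { /* handle msg */ }

    }  // namespace

    int main() {
      my_hook("ignored");        // dispatches to DefaultLog
      my_hook.Store(CustomLog);  // release store; repeating with the same fn is legal
      my_hook("handled");        // acquire load, then calls CustomLog
    }
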
diff --git a/third_party/abseil_cpp/absl/base/internal/atomic_hook_test.cc b/third_party/abseil_cpp/absl/base/internal/atomic_hook_test.cc
new file mode 100644
index 000000000000..e577a8fd930b
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/atomic_hook_test.cc
@@ -0,0 +1,97 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/atomic_hook.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/internal/atomic_hook_test_helper.h"
+
+namespace {
+
+using ::testing::Eq;
+
+int value = 0;
+void TestHook(int x) { value = x; }
+
+TEST(AtomicHookTest, NoDefaultFunction) {
+  ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook<
+      void (*)(int)>
+      hook;
+  value = 0;
+
+  // Test the default DummyFunction.
+  EXPECT_TRUE(hook.Load() == nullptr);
+  EXPECT_EQ(value, 0);
+  hook(1);
+  EXPECT_EQ(value, 0);
+
+  // Test a stored hook.
+  hook.Store(TestHook);
+  EXPECT_TRUE(hook.Load() == TestHook);
+  EXPECT_EQ(value, 0);
+  hook(1);
+  EXPECT_EQ(value, 1);
+
+  // Calling Store() with the same hook should not crash.
+  hook.Store(TestHook);
+  EXPECT_TRUE(hook.Load() == TestHook);
+  EXPECT_EQ(value, 1);
+  hook(2);
+  EXPECT_EQ(value, 2);
+}
+
+TEST(AtomicHookTest, WithDefaultFunction) {
+  // Set the default value to TestHook at compile-time.
+  ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook<
+      void (*)(int)>
+      hook(TestHook);
+  value = 0;
+
+  // Test the default value is TestHook.
+  EXPECT_TRUE(hook.Load() == TestHook);
+  EXPECT_EQ(value, 0);
+  hook(1);
+  EXPECT_EQ(value, 1);
+
+  // Calling Store() with the same hook should not crash.
+  hook.Store(TestHook);
+  EXPECT_TRUE(hook.Load() == TestHook);
+  EXPECT_EQ(value, 1);
+  hook(2);
+  EXPECT_EQ(value, 2);
+}
+
+ABSL_CONST_INIT int override_func_calls = 0;
+void OverrideFunc() { override_func_calls++; }
+static struct OverrideInstaller {
+  OverrideInstaller() { absl::atomic_hook_internal::func.Store(OverrideFunc); }
+} override_installer;
+
+TEST(AtomicHookTest, DynamicInitFromAnotherTU) {
+  // MSVC 14.2 doesn't do constexpr static init correctly; in particular it
+  // tends to sequence static init (i.e. defaults) of `AtomicHook` objects
+  // after their dynamic init (i.e. overrides), overwriting whatever value was
+  // written during dynamic init.  This regression test validates the fix.
+  // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html
+  EXPECT_THAT(absl::atomic_hook_internal::default_func_calls, Eq(0));
+  EXPECT_THAT(override_func_calls, Eq(0));
+  absl::atomic_hook_internal::func();
+  EXPECT_THAT(absl::atomic_hook_internal::default_func_calls, Eq(0));
+  EXPECT_THAT(override_func_calls, Eq(1));
+  EXPECT_THAT(absl::atomic_hook_internal::func.Load(), Eq(OverrideFunc));
+}
+
+}  // namespace
diff --git a/third_party/abseil_cpp/absl/base/internal/atomic_hook_test_helper.cc b/third_party/abseil_cpp/absl/base/internal/atomic_hook_test_helper.cc
new file mode 100644
index 000000000000..537d47cd2d2f
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/atomic_hook_test_helper.cc
@@ -0,0 +1,32 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/atomic_hook_test_helper.h"
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/atomic_hook.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace atomic_hook_internal {
+
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<VoidF>
+    func(DefaultFunc);
+ABSL_CONST_INIT int default_func_calls = 0;
+void DefaultFunc() { default_func_calls++; }
+void RegisterFunc(VoidF f) { func.Store(f); }
+
+}  // namespace atomic_hook_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/base/internal/atomic_hook_test_helper.h b/third_party/abseil_cpp/absl/base/internal/atomic_hook_test_helper.h
new file mode 100644
index 000000000000..3e72b4977d2f
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/atomic_hook_test_helper.h
@@ -0,0 +1,34 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_
+#define ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_
+
+#include "absl/base/internal/atomic_hook.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace atomic_hook_internal {
+
+using VoidF = void (*)();
+extern absl::base_internal::AtomicHook<VoidF> func;
+extern int default_func_calls;
+void DefaultFunc();
+void RegisterFunc(VoidF func);
+
+}  // namespace atomic_hook_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/bits.h b/third_party/abseil_cpp/absl/base/internal/bits.h
new file mode 100644
index 000000000000..14c51d8b3013
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/bits.h
@@ -0,0 +1,218 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_BITS_H_
+#define ABSL_BASE_INTERNAL_BITS_H_
+
+// This file contains bitwise ops which are implementation details of various
+// absl libraries.
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+
+// Clang on Windows has __builtin_clzll; otherwise we need to use the
+// Windows intrinsic functions.
+#if defined(_MSC_VER) && !defined(__clang__)
+#include <intrin.h>
+#if defined(_M_X64)
+#pragma intrinsic(_BitScanReverse64)
+#pragma intrinsic(_BitScanForward64)
+#endif
+#pragma intrinsic(_BitScanReverse)
+#pragma intrinsic(_BitScanForward)
+#endif
+
+#include "absl/base/attributes.h"
+
+#if defined(_MSC_VER) && !defined(__clang__)
+// We can achieve something similar to attribute((always_inline)) with MSVC by
+// using the __forceinline keyword; however, this is not perfect.  MSVC is
+// much less aggressive about inlining, even with the __forceinline keyword.
+#define ABSL_BASE_INTERNAL_FORCEINLINE __forceinline
+#else
+// Use default attribute inline.
+#define ABSL_BASE_INTERNAL_FORCEINLINE inline ABSL_ATTRIBUTE_ALWAYS_INLINE
+#endif
+
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64Slow(uint64_t n) {
+  int zeroes = 60;
+  if (n >> 32) {
+    zeroes -= 32;
+    n >>= 32;
+  }
+  if (n >> 16) {
+    zeroes -= 16;
+    n >>= 16;
+  }
+  if (n >> 8) {
+    zeroes -= 8;
+    n >>= 8;
+  }
+  if (n >> 4) {
+    zeroes -= 4;
+    n >>= 4;
+  }
+  return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
+}
+
+ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64(uint64_t n) {
+#if defined(_MSC_VER) && !defined(__clang__) && defined(_M_X64)
+  // MSVC does not have __builtin_clzll. Use _BitScanReverse64.
+  unsigned long result = 0;  // NOLINT(runtime/int)
+  if (_BitScanReverse64(&result, n)) {
+    return 63 - result;
+  }
+  return 64;
+#elif defined(_MSC_VER) && !defined(__clang__)
+  // MSVC does not have __builtin_clzll. Compose two calls to _BitScanReverse.
+  unsigned long result = 0;  // NOLINT(runtime/int)
+  if ((n >> 32) && _BitScanReverse(&result, n >> 32)) {
+    return 31 - result;
+  }
+  if (_BitScanReverse(&result, n)) {
+    return 63 - result;
+  }
+  return 64;
+#elif defined(__GNUC__) || defined(__clang__)
+  // Use __builtin_clzll, which uses the following instructions:
+  //  x86: bsr
+  //  ARM64: clz
+  //  PPC: cntlzd
+  static_assert(sizeof(unsigned long long) == sizeof(n),  // NOLINT(runtime/int)
+                "__builtin_clzll does not take 64-bit arg");
+
+  // Handle 0 as a special case because __builtin_clzll(0) is undefined.
+  if (n == 0) {
+    return 64;
+  }
+  return __builtin_clzll(n);
+#else
+  return CountLeadingZeros64Slow(n);
+#endif
+}
+
+ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32Slow(uint64_t n) {
+  int zeroes = 28;
+  if (n >> 16) {
+    zeroes -= 16;
+    n >>= 16;
+  }
+  if (n >> 8) {
+    zeroes -= 8;
+    n >>= 8;
+  }
+  if (n >> 4) {
+    zeroes -= 4;
+    n >>= 4;
+  }
+  return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
+}
+
+ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32(uint32_t n) {
+#if defined(_MSC_VER) && !defined(__clang__)
+  unsigned long result = 0;  // NOLINT(runtime/int)
+  if (_BitScanReverse(&result, n)) {
+    return 31 - result;
+  }
+  return 32;
+#elif defined(__GNUC__) || defined(__clang__)
+  // Use __builtin_clz, which uses the following instructions:
+  //  x86: bsr
+  //  ARM64: clz
+  //  PPC: cntlzw
+  static_assert(sizeof(int) == sizeof(n),
+                "__builtin_clz does not take 32-bit arg");
+
+  // Handle 0 as a special case because __builtin_clz(0) is undefined.
+  if (n == 0) {
+    return 32;
+  }
+  return __builtin_clz(n);
+#else
+  return CountLeadingZeros32Slow(n);
+#endif
+}
+
+ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64Slow(uint64_t n) {
+  int c = 63;
+  n &= ~n + 1;
+  if (n & 0x00000000FFFFFFFF) c -= 32;
+  if (n & 0x0000FFFF0000FFFF) c -= 16;
+  if (n & 0x00FF00FF00FF00FF) c -= 8;
+  if (n & 0x0F0F0F0F0F0F0F0F) c -= 4;
+  if (n & 0x3333333333333333) c -= 2;
+  if (n & 0x5555555555555555) c -= 1;
+  return c;
+}
+
+ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64(uint64_t n) {
+#if defined(_MSC_VER) && !defined(__clang__) && defined(_M_X64)
+  unsigned long result = 0;  // NOLINT(runtime/int)
+  _BitScanForward64(&result, n);
+  return result;
+#elif defined(_MSC_VER) && !defined(__clang__)
+  unsigned long result = 0;  // NOLINT(runtime/int)
+  if (static_cast<uint32_t>(n) == 0) {
+    _BitScanForward(&result, n >> 32);
+    return result + 32;
+  }
+  _BitScanForward(&result, n);
+  return result;
+#elif defined(__GNUC__) || defined(__clang__)
+  static_assert(sizeof(unsigned long long) == sizeof(n),  // NOLINT(runtime/int)
+                "__builtin_ctzll does not take 64-bit arg");
+  return __builtin_ctzll(n);
+#else
+  return CountTrailingZerosNonZero64Slow(n);
+#endif
+}
+
+ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32Slow(uint32_t n) {
+  int c = 31;
+  n &= ~n + 1;
+  if (n & 0x0000FFFF) c -= 16;
+  if (n & 0x00FF00FF) c -= 8;
+  if (n & 0x0F0F0F0F) c -= 4;
+  if (n & 0x33333333) c -= 2;
+  if (n & 0x55555555) c -= 1;
+  return c;
+}
+
+ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32(uint32_t n) {
+#if defined(_MSC_VER) && !defined(__clang__)
+  unsigned long result = 0;  // NOLINT(runtime/int)
+  _BitScanForward(&result, n);
+  return result;
+#elif defined(__GNUC__) || defined(__clang__)
+  static_assert(sizeof(int) == sizeof(n),
+                "__builtin_ctz does not take 32-bit arg");
+  return __builtin_ctz(n);
+#else
+  return CountTrailingZerosNonZero32Slow(n);
+#endif
+}
+
+#undef ABSL_BASE_INTERNAL_FORCEINLINE
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_BITS_H_
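
A few spot checks of the contracts above (a sketch, not part of the patch; note the NonZero variants mirror the underlying builtins and are undefined for n == 0):

    #include <cassert>
    #include <cstdint>

    #include "absl/base/internal/bits.h"

    int main() {
      using absl::base_internal::CountLeadingZeros64;
      using absl::base_internal::CountTrailingZerosNonZero64;

      assert(CountLeadingZeros64(0) == 64);  // zero is handled explicitly
      assert(CountLeadingZeros64(1) == 63);  // only bit 0 set
      // floor(log2(n)) for n > 0 falls out of the leading-zero count:
      assert(63 - CountLeadingZeros64(uint64_t{40}) == 5);  // 40 = 0b101000
      // Trailing zeros give the index of the lowest set bit:
      assert(CountTrailingZerosNonZero64(uint64_t{1} << 20) == 20);
      return 0;
    }
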
diff --git a/third_party/abseil_cpp/absl/base/internal/bits_test.cc b/third_party/abseil_cpp/absl/base/internal/bits_test.cc
new file mode 100644
index 000000000000..7855fa629724
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/bits_test.cc
@@ -0,0 +1,97 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/bits.h"
+
+#include "gtest/gtest.h"
+
+namespace {
+
+int CLZ64(uint64_t n) {
+  int fast = absl::base_internal::CountLeadingZeros64(n);
+  int slow = absl::base_internal::CountLeadingZeros64Slow(n);
+  EXPECT_EQ(fast, slow) << n;
+  return fast;
+}
+
+TEST(BitsTest, CountLeadingZeros64) {
+  EXPECT_EQ(64, CLZ64(uint64_t{}));
+  EXPECT_EQ(0, CLZ64(~uint64_t{}));
+
+  for (int index = 0; index < 64; index++) {
+    uint64_t x = static_cast<uint64_t>(1) << index;
+    const auto cnt = 63 - index;
+    ASSERT_EQ(cnt, CLZ64(x)) << index;
+    ASSERT_EQ(cnt, CLZ64(x + x - 1)) << index;
+  }
+}
+
+int CLZ32(uint32_t n) {
+  int fast = absl::base_internal::CountLeadingZeros32(n);
+  int slow = absl::base_internal::CountLeadingZeros32Slow(n);
+  EXPECT_EQ(fast, slow) << n;
+  return fast;
+}
+
+TEST(BitsTest, CountLeadingZeros32) {
+  EXPECT_EQ(32, CLZ32(uint32_t{}));
+  EXPECT_EQ(0, CLZ32(~uint32_t{}));
+
+  for (int index = 0; index < 32; index++) {
+    uint32_t x = static_cast<uint32_t>(1) << index;
+    const auto cnt = 31 - index;
+    ASSERT_EQ(cnt, CLZ32(x)) << index;
+    ASSERT_EQ(cnt, CLZ32(x + x - 1)) << index;
+    ASSERT_EQ(CLZ64(x), CLZ32(x) + 32);
+  }
+}
+
+int CTZ64(uint64_t n) {
+  int fast = absl::base_internal::CountTrailingZerosNonZero64(n);
+  int slow = absl::base_internal::CountTrailingZerosNonZero64Slow(n);
+  EXPECT_EQ(fast, slow) << n;
+  return fast;
+}
+
+TEST(BitsTest, CountTrailingZerosNonZero64) {
+  EXPECT_EQ(0, CTZ64(~uint64_t{}));
+
+  for (int index = 0; index < 64; index++) {
+    uint64_t x = static_cast<uint64_t>(1) << index;
+    const auto cnt = index;
+    ASSERT_EQ(cnt, CTZ64(x)) << index;
+    ASSERT_EQ(cnt, CTZ64(~(x - 1))) << index;
+  }
+}
+
+int CTZ32(uint32_t n) {
+  int fast = absl::base_internal::CountTrailingZerosNonZero32(n);
+  int slow = absl::base_internal::CountTrailingZerosNonZero32Slow(n);
+  EXPECT_EQ(fast, slow) << n;
+  return fast;
+}
+
+TEST(BitsTest, CountTrailingZerosNonZero32) {
+  EXPECT_EQ(0, CTZ32(~uint32_t{}));
+
+  for (int index = 0; index < 32; index++) {
+    uint32_t x = static_cast<uint32_t>(1) << index;
+    const auto cnt = index;
+    ASSERT_EQ(cnt, CTZ32(x)) << index;
+    ASSERT_EQ(cnt, CTZ32(~(x - 1))) << index;
+  }
+}
+
+
+}  // namespace
diff --git a/third_party/abseil_cpp/absl/base/internal/cmake_thread_test.cc b/third_party/abseil_cpp/absl/base/internal/cmake_thread_test.cc
new file mode 100644
index 000000000000..f70bb24eb7b4
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/cmake_thread_test.cc
@@ -0,0 +1,22 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <iostream>
+#include "absl/base/internal/thread_identity.h"
+
+int main() {
+  auto* tid = absl::base_internal::CurrentThreadIdentityIfPresent();
+  // Make sure the above call can't be optimized out
+  std::cout << (void*)tid << std::endl;
+}
diff --git a/third_party/abseil_cpp/absl/base/internal/cycleclock.cc b/third_party/abseil_cpp/absl/base/internal/cycleclock.cc
new file mode 100644
index 000000000000..0e65005b8914
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/cycleclock.cc
@@ -0,0 +1,107 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The implementation of CycleClock::Frequency.
+//
+// NOTE: only i386 and x86_64 have been well tested.
+// PPC, sparc, alpha, and ia64 are based on
+//    http://peter.kuscsik.com/wordpress/?p=14
+// with modifications by m3b.  See also
+//    https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
+
+#include "absl/base/internal/cycleclock.h"
+
+#include <atomic>
+#include <chrono>  // NOLINT(build/c++11)
+
+#include "absl/base/internal/unscaledcycleclock.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+namespace {
+
+#ifdef NDEBUG
+#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+// Not debug mode and the UnscaledCycleClock frequency is the CPU
+// frequency.  Scale the CycleClock to prevent overflow if someone
+// tries to represent the time as cycles since the Unix epoch.
+static constexpr int32_t kShift = 1;
+#else
+// Not debug mode and the UnscaledCycleClock isn't operating at the
+// raw CPU frequency. There is no need to do any scaling, so don't
+// needlessly sacrifice precision.
+static constexpr int32_t kShift = 0;
+#endif
+#else
+// In debug mode use a different shift to discourage depending on a
+// particular shift value.
+static constexpr int32_t kShift = 2;
+#endif
+
+static constexpr double kFrequencyScale = 1.0 / (1 << kShift);
+static std::atomic<CycleClockSourceFunc> cycle_clock_source;
+
+CycleClockSourceFunc LoadCycleClockSource() {
+  // Optimize for the common case (no callback) by first doing a relaxed load;
+  // this is significantly faster on non-x86 platforms.
+  if (cycle_clock_source.load(std::memory_order_relaxed) == nullptr) {
+    return nullptr;
+  }
+  // This corresponds to the store(std::memory_order_release) in
+  // CycleClockSource::Register, and makes sure that any updates made prior to
+  // registering the callback are visible to this thread before the callback is
+  // invoked.
+  return cycle_clock_source.load(std::memory_order_acquire);
+}
+
+}  // namespace
+
+int64_t CycleClock::Now() {
+  auto fn = LoadCycleClockSource();
+  if (fn == nullptr) {
+    return base_internal::UnscaledCycleClock::Now() >> kShift;
+  }
+  return fn() >> kShift;
+}
+
+double CycleClock::Frequency() {
+  return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency();
+}
+
+void CycleClockSource::Register(CycleClockSourceFunc source) {
+  // Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource.
+  cycle_clock_source.store(source, std::memory_order_release);
+}
+
+#else
+
+int64_t CycleClock::Now() {
+  return std::chrono::duration_cast<std::chrono::nanoseconds>(
+             std::chrono::steady_clock::now().time_since_epoch())
+      .count();
+}
+
+double CycleClock::Frequency() {
+  return 1e9;
+}
+
+#endif
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
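
The relaxed-then-acquire sequence in LoadCycleClockSource is the subtle part of this file; a stripped-down sketch of the same pattern, with hypothetical names:

    #include <atomic>
    #include <cstdint>

    using SourceFn = int64_t (*)();
    std::atomic<SourceFn> g_source{nullptr};  // hypothetical registration slot

    SourceFn LoadSource() {
      // Fast path: a relaxed load suffices to observe "no callback", and is
      // cheaper than an acquire load on weakly ordered architectures.
      if (g_source.load(std::memory_order_relaxed) == nullptr) return nullptr;
      // Slow path: re-load with acquire so writes sequenced before the matching
      // release store in the registration path are visible to the caller.
      return g_source.load(std::memory_order_acquire);
    }

    int64_t FakeSource() { return 42; }

    int main() {
      g_source.store(FakeSource, std::memory_order_release);  // "Register"
      SourceFn fn = LoadSource();
      return (fn != nullptr && fn() == 42) ? 0 : 1;
    }
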
diff --git a/third_party/abseil_cpp/absl/base/internal/cycleclock.h b/third_party/abseil_cpp/absl/base/internal/cycleclock.h
new file mode 100644
index 000000000000..a18b58444560
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/cycleclock.h
@@ -0,0 +1,94 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// -----------------------------------------------------------------------------
+// File: cycleclock.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a `CycleClock`, which yields the value and frequency
+// of a cycle counter that increments at a rate that is approximately constant.
+//
+// NOTE:
+//
+// The cycle counter frequency is not necessarily related to the core clock
+// frequency and should not be treated as such. That is, `CycleClock` cycles are
+// not necessarily "CPU cycles" and code should not rely on that behavior, even
+// if experimentally observed.
+//
+// An arbitrary offset may have been added to the counter at power on.
+//
+// On some platforms, the rate and offset of the counter may differ
+// slightly when read from different CPUs of a multiprocessor. Usually,
+// we try to ensure that the operating system adjusts values periodically
+// so that values agree approximately.   If you need stronger guarantees,
+// consider using alternate interfaces.
+//
+// The CPU is not required to maintain the ordering of a cycle counter read
+// with respect to surrounding instructions.
+
+#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+#define ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// -----------------------------------------------------------------------------
+// CycleClock
+// -----------------------------------------------------------------------------
+class CycleClock {
+ public:
+  // CycleClock::Now()
+  //
+  // Returns the value of a cycle counter that counts at a rate that is
+  // approximately constant.
+  static int64_t Now();
+
+  // CycleClock::Frequency()
+  //
+  // Returns the amount by which `CycleClock::Now()` increases per second. Note
+  // that this value may not necessarily match the core CPU clock frequency.
+  static double Frequency();
+
+ private:
+  CycleClock() = delete;  // no instances
+  CycleClock(const CycleClock&) = delete;
+  CycleClock& operator=(const CycleClock&) = delete;
+};
+
+using CycleClockSourceFunc = int64_t (*)();
+
+class CycleClockSource {
+ private:
+  // CycleClockSource::Register()
+  //
+  // Register a function that provides an alternate source for the unscaled CPU
+  // cycle count value. The source function must be async signal safe, must not
+  // call CycleClock::Now(), and must have a frequency that matches that of the
+  // unscaled clock used by CycleClock. A nullptr value resets CycleClock to use
+  // the default source.
+  static void Register(CycleClockSourceFunc source);
+};
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_CYCLECLOCK_H_
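
For context, measuring an interval with this interface reduces to dividing a tick delta by Frequency(); a sketch:

    #include <cstdint>
    #include <cstdio>

    #include "absl/base/internal/cycleclock.h"

    int main() {
      using absl::base_internal::CycleClock;
      const int64_t start = CycleClock::Now();
      // ... code under measurement ...
      const int64_t end = CycleClock::Now();
      // Frequency() is ticks per second, so the elapsed wall time is:
      const double seconds =
          static_cast<double>(end - start) / CycleClock::Frequency();
      std::printf("elapsed: %g s\n", seconds);
    }
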
diff --git a/third_party/abseil_cpp/absl/base/internal/direct_mmap.h b/third_party/abseil_cpp/absl/base/internal/direct_mmap.h
new file mode 100644
index 000000000000..16accf096604
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/direct_mmap.h
@@ -0,0 +1,166 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Functions for directly invoking mmap() via syscall, avoiding the case where
+// mmap() has been locally overridden.
+
+#ifndef ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
+#define ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
+
+#include "absl/base/config.h"
+
+#if ABSL_HAVE_MMAP
+
+#include <sys/mman.h>
+
+#ifdef __linux__
+
+#include <sys/types.h>
+#ifdef __BIONIC__
+#include <sys/syscall.h>
+#else
+#include <syscall.h>
+#endif
+
+#include <linux/unistd.h>
+#include <unistd.h>
+#include <cerrno>
+#include <cstdarg>
+#include <cstdint>
+
+#ifdef __mips__
+// Include definitions of the ABI currently in use.
+#ifdef __BIONIC__
+// Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the
+// definitions we need.
+#include <asm/sgidefs.h>
+#else
+#include <sgidefs.h>
+#endif  // __BIONIC__
+#endif  // __mips__
+
+// SYS_mmap and SYS_munmap are not defined in Android.
+#ifdef __BIONIC__
+extern "C" void* __mmap2(void*, size_t, int, int, int, size_t);
+#if defined(__NR_mmap) && !defined(SYS_mmap)
+#define SYS_mmap __NR_mmap
+#endif
+#ifndef SYS_munmap
+#define SYS_munmap __NR_munmap
+#endif
+#endif  // __BIONIC__
+
+#if defined(__NR_mmap2) && !defined(SYS_mmap2)
+#define SYS_mmap2 __NR_mmap2
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Platform specific logic extracted from
+// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h
+inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
+                        off64_t offset) noexcept {
+#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \
+    (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) ||                   \
+    (defined(__PPC__) && !defined(__PPC64__)) ||                             \
+    (defined(__riscv) && __riscv_xlen == 32) ||                              \
+    (defined(__s390__) && !defined(__s390x__))
+  // On these architectures, implement mmap with mmap2.
+  static int pagesize = 0;
+  if (pagesize == 0) {
+#if defined(__wasm__) || defined(__asmjs__)
+    pagesize = getpagesize();
+#else
+    pagesize = sysconf(_SC_PAGESIZE);
+#endif
+  }
+  if (offset < 0 || offset % pagesize != 0) {
+    errno = EINVAL;
+    return MAP_FAILED;
+  }
+#ifdef __BIONIC__
+  // SYS_mmap2 has problems on Android API level <= 16.
+  // Work around this by invoking __mmap2() instead.
+  return __mmap2(start, length, prot, flags, fd, offset / pagesize);
+#else
+  return reinterpret_cast<void*>(
+      syscall(SYS_mmap2, start, length, prot, flags, fd,
+              static_cast<off_t>(offset / pagesize)));
+#endif
+#elif defined(__s390x__)
+  // On s390x, mmap() arguments are passed in memory.
+  unsigned long buf[6] = {reinterpret_cast<unsigned long>(start),  // NOLINT
+                          static_cast<unsigned long>(length),      // NOLINT
+                          static_cast<unsigned long>(prot),        // NOLINT
+                          static_cast<unsigned long>(flags),       // NOLINT
+                          static_cast<unsigned long>(fd),          // NOLINT
+                          static_cast<unsigned long>(offset)};     // NOLINT
+  return reinterpret_cast<void*>(syscall(SYS_mmap, buf));
+#elif defined(__x86_64__)
+// The x32 ABI has 32 bit longs, but the syscall interface is 64 bit.
+// We need to explicitly cast to an unsigned 64 bit type to avoid implicit
+// sign extension.  We can't cast pointers directly because those are
+// 32 bits, and gcc will dump ugly warnings about casting from a pointer
+// to an integer of a different size. We also need to make sure __off64_t
+// isn't truncated to 32-bits under x32.
+#define MMAP_SYSCALL_ARG(x) ((uint64_t)(uintptr_t)(x))
+  return reinterpret_cast<void*>(
+      syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length),
+              MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags),
+              MMAP_SYSCALL_ARG(fd), static_cast<uint64_t>(offset)));
+#undef MMAP_SYSCALL_ARG
+#else  // Remaining 64-bit architectures.
+  static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit");
+  return reinterpret_cast<void*>(
+      syscall(SYS_mmap, start, length, prot, flags, fd, offset));
+#endif
+}
+
+inline int DirectMunmap(void* start, size_t length) {
+  return static_cast<int>(syscall(SYS_munmap, start, length));
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#else  // !__linux__
+
+// For non-linux platforms where we have mmap, just dispatch directly to the
+// actual mmap()/munmap() methods.
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
+                        off_t offset) {
+  return mmap(start, length, prot, flags, fd, offset);
+}
+
+inline int DirectMunmap(void* start, size_t length) {
+  return munmap(start, length);
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // __linux__
+
+#endif  // ABSL_HAVE_MMAP
+
+#endif  // ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
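
A sketch of mapping and releasing an anonymous page through these wrappers; the point is that on Linux a locally interposed mmap() is bypassed. Assumes a POSIX host with MAP_ANONYMOUS:

    #include <sys/mman.h>

    #include <cstddef>

    #include "absl/base/internal/direct_mmap.h"

    int main() {
      const size_t kLength = 4096;  // assume one page
      void* p = absl::base_internal::DirectMmap(
          nullptr, kLength, PROT_READ | PROT_WRITE,
          MAP_PRIVATE | MAP_ANONYMOUS, /*fd=*/-1, /*offset=*/0);
      if (p == MAP_FAILED) return 1;
      static_cast<char*>(p)[0] = 42;  // the mapping behaves like any other
      return absl::base_internal::DirectMunmap(p, kLength);
    }
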
diff --git a/third_party/abseil_cpp/absl/base/internal/endian.h b/third_party/abseil_cpp/absl/base/internal/endian.h
new file mode 100644
index 000000000000..9677530e8de3
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/endian.h
@@ -0,0 +1,266 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_
+#define ABSL_BASE_INTERNAL_ENDIAN_H_
+
+// The following guarantees declaration of the byte swap functions
+#ifdef _MSC_VER
+#include <stdlib.h>  // NOLINT(build/include)
+#elif defined(__FreeBSD__)
+#include <sys/endian.h>
+#elif defined(__GLIBC__)
+#include <byteswap.h>  // IWYU pragma: export
+#endif
+
+#include <cstdint>
+#include "absl/base/config.h"
+#include "absl/base/internal/unaligned_access.h"
+#include "absl/base/port.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// Use compiler byte-swapping intrinsics if they are available.  32-bit
+// and 64-bit versions are available in Clang and GCC as of GCC 4.3.0.
+// The 16-bit version is available in Clang and GCC only as of GCC 4.8.0.
+// For simplicity, we enable them all only for GCC 4.8.0 or later.
+#if defined(__clang__) || \
+    (defined(__GNUC__) && \
+     ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5))
+inline uint64_t gbswap_64(uint64_t host_int) {
+  return __builtin_bswap64(host_int);
+}
+inline uint32_t gbswap_32(uint32_t host_int) {
+  return __builtin_bswap32(host_int);
+}
+inline uint16_t gbswap_16(uint16_t host_int) {
+  return __builtin_bswap16(host_int);
+}
+
+#elif defined(_MSC_VER)
+inline uint64_t gbswap_64(uint64_t host_int) {
+  return _byteswap_uint64(host_int);
+}
+inline uint32_t gbswap_32(uint32_t host_int) {
+  return _byteswap_ulong(host_int);
+}
+inline uint16_t gbswap_16(uint16_t host_int) {
+  return _byteswap_ushort(host_int);
+}
+
+#else
+inline uint64_t gbswap_64(uint64_t host_int) {
+#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__)
+  // Adapted from /usr/include/byteswap.h.  Not available on Mac.
+  if (__builtin_constant_p(host_int)) {
+    return __bswap_constant_64(host_int);
+  } else {
+    uint64_t result;
+    __asm__("bswap %0" : "=r"(result) : "0"(host_int));
+    return result;
+  }
+#elif defined(__GLIBC__)
+  return bswap_64(host_int);
+#else
+  return (((host_int & uint64_t{0xFF}) << 56) |
+          ((host_int & uint64_t{0xFF00}) << 40) |
+          ((host_int & uint64_t{0xFF0000}) << 24) |
+          ((host_int & uint64_t{0xFF000000}) << 8) |
+          ((host_int & uint64_t{0xFF00000000}) >> 8) |
+          ((host_int & uint64_t{0xFF0000000000}) >> 24) |
+          ((host_int & uint64_t{0xFF000000000000}) >> 40) |
+          ((host_int & uint64_t{0xFF00000000000000}) >> 56));
+#endif  // bswap_64
+}
+
+inline uint32_t gbswap_32(uint32_t host_int) {
+#if defined(__GLIBC__)
+  return bswap_32(host_int);
+#else
+  return (((host_int & uint32_t{0xFF}) << 24) |
+          ((host_int & uint32_t{0xFF00}) << 8) |
+          ((host_int & uint32_t{0xFF0000}) >> 8) |
+          ((host_int & uint32_t{0xFF000000}) >> 24));
+#endif
+}
+
+inline uint16_t gbswap_16(uint16_t host_int) {
+#if defined(__GLIBC__)
+  return bswap_16(host_int);
+#else
+  return (((host_int & uint16_t{0xFF}) << 8) |
+          ((host_int & uint16_t{0xFF00}) >> 8));
+#endif
+}
+
+#endif  // intrinsics available
+
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+// Definitions for ntohl etc. that don't require us to include
+// netinet/in.h. We wrap gbswap_32 and gbswap_16 in functions rather
+// than just #defining them because in debug mode, gcc doesn't
+// correctly handle the (rather involved) definitions of bswap_32.
+// gcc guarantees that inline functions are as fast as macros, so
+// this isn't a performance hit.
+inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); }
+inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); }
+inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+// These definitions are simpler on big-endian machines
+// These are functions instead of macros to avoid self-assignment warnings
+// on calls such as "i = ghtonl(i);".  This also provides type checking.
+inline uint16_t ghtons(uint16_t x) { return x; }
+inline uint32_t ghtonl(uint32_t x) { return x; }
+inline uint64_t ghtonll(uint64_t x) { return x; }
+
+#else
+#error \
+    "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \
+       "ABSL_IS_LITTLE_ENDIAN must be defined"
+#endif  // byte order
+
+inline uint16_t gntohs(uint16_t x) { return ghtons(x); }
+inline uint32_t gntohl(uint32_t x) { return ghtonl(x); }
+inline uint64_t gntohll(uint64_t x) { return ghtonll(x); }
+
+// Utilities to convert numbers between the current host's native byte
+// order and little-endian byte order
+//
+// Load/Store methods are alignment safe
+namespace little_endian {
+// Conversion functions.
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return x; }
+inline uint16_t ToHost16(uint16_t x) { return x; }
+
+inline uint32_t FromHost32(uint32_t x) { return x; }
+inline uint32_t ToHost32(uint32_t x) { return x; }
+
+inline uint64_t FromHost64(uint64_t x) { return x; }
+inline uint64_t ToHost64(uint64_t x) { return x; }
+
+inline constexpr bool IsLittleEndian() { return true; }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
+inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
+
+inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
+inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
+
+inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
+inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
+
+inline constexpr bool IsLittleEndian() { return false; }
+
+#endif /* ENDIAN */
+
+// Functions to do unaligned loads and stores in little-endian order.
+inline uint16_t Load16(const void *p) {
+  return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
+}
+
+inline void Store16(void *p, uint16_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
+}
+
+inline uint32_t Load32(const void *p) {
+  return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
+}
+
+inline void Store32(void *p, uint32_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
+}
+
+inline uint64_t Load64(const void *p) {
+  return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
+}
+
+inline void Store64(void *p, uint64_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
+}
+
+}  // namespace little_endian
+
+// Utilities to convert numbers between the current host's native byte
+// order and big-endian byte order (same as network byte order)
+//
+// Load/Store methods are alignment safe
+namespace big_endian {
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
+inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
+
+inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
+inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
+
+inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
+inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
+
+inline constexpr bool IsLittleEndian() { return true; }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return x; }
+inline uint16_t ToHost16(uint16_t x) { return x; }
+
+inline uint32_t FromHost32(uint32_t x) { return x; }
+inline uint32_t ToHost32(uint32_t x) { return x; }
+
+inline uint64_t FromHost64(uint64_t x) { return x; }
+inline uint64_t ToHost64(uint64_t x) { return x; }
+
+inline constexpr bool IsLittleEndian() { return false; }
+
+#endif /* ENDIAN */
+
+// Functions to do unaligned loads and stores in big-endian order.
+inline uint16_t Load16(const void *p) {
+  return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
+}
+
+inline void Store16(void *p, uint16_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
+}
+
+inline uint32_t Load32(const void *p) {
+  return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
+}
+
+inline void Store32(void *p, uint32_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
+}
+
+inline uint64_t Load64(const void *p) {
+  return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
+}
+
+inline void Store64(void *p, uint64_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
+}
+
+}  // namespace big_endian
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_ENDIAN_H_
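
The typical use case for the Load/Store helpers above is decoding fixed-width fields from a byte buffer independent of host byte order; a sketch:

    #include <cstdint>
    #include <cstdio>

    #include "absl/base/internal/endian.h"

    int main() {
      // A 32-bit value serialized in big-endian (network) order.
      const unsigned char wire[4] = {0x01, 0x23, 0x45, 0x67};
      // Load32 tolerates unaligned pointers and applies the byte swap
      // only when the host is little-endian.
      const uint32_t v = absl::big_endian::Load32(wire);
      std::printf("%#x\n", v);  // prints 0x1234567 on any host

      unsigned char out[4];
      absl::little_endian::Store32(out, v);  // out = {0x67, 0x45, 0x23, 0x01}
      return 0;
    }
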
diff --git a/third_party/abseil_cpp/absl/base/internal/endian_test.cc b/third_party/abseil_cpp/absl/base/internal/endian_test.cc
new file mode 100644
index 000000000000..a1691b1f82c0
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/endian_test.cc
@@ -0,0 +1,263 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/endian.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <limits>
+#include <random>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace {
+
+const uint64_t kInitialNumber{0x0123456789abcdef};
+const uint64_t k64Value{kInitialNumber};
+const uint32_t k32Value{0x01234567};
+const uint16_t k16Value{0x0123};
+const int kNumValuesToTest = 1000000;
+const int kRandomSeed = 12345;
+
+#if defined(ABSL_IS_BIG_ENDIAN)
+const uint64_t kInitialInNetworkOrder{kInitialNumber};
+const uint64_t k64ValueLE{0xefcdab8967452301};
+const uint32_t k32ValueLE{0x67452301};
+const uint16_t k16ValueLE{0x2301};
+
+const uint64_t k64ValueBE{kInitialNumber};
+const uint32_t k32ValueBE{k32Value};
+const uint16_t k16ValueBE{k16Value};
+#elif defined(ABSL_IS_LITTLE_ENDIAN)
+const uint64_t kInitialInNetworkOrder{0xefcdab8967452301};
+const uint64_t k64ValueLE{kInitialNumber};
+const uint32_t k32ValueLE{k32Value};
+const uint16_t k16ValueLE{k16Value};
+
+const uint64_t k64ValueBE{0xefcdab8967452301};
+const uint32_t k32ValueBE{0x67452301};
+const uint16_t k16ValueBE{0x2301};
+#endif
+
+std::vector<uint16_t> GenerateAllUint16Values() {
+  std::vector<uint16_t> result;
+  result.reserve(size_t{1} << (sizeof(uint16_t) * 8));
+  for (uint32_t i = std::numeric_limits<uint16_t>::min();
+       i <= std::numeric_limits<uint16_t>::max(); ++i) {
+    result.push_back(static_cast<uint16_t>(i));
+  }
+  return result;
+}
+
+template<typename T>
+std::vector<T> GenerateRandomIntegers(size_t num_values_to_test) {
+  std::vector<T> result;
+  result.reserve(num_values_to_test);
+  std::mt19937_64 rng(kRandomSeed);
+  for (size_t i = 0; i < num_values_to_test; ++i) {
+    result.push_back(rng());
+  }
+  return result;
+}
+
+void ManualByteSwap(char* bytes, int length) {
+  if (length == 1)
+    return;
+
+  EXPECT_EQ(0, length % 2);
+  for (int i = 0; i < length / 2; ++i) {
+    int j = (length - 1) - i;
+    using std::swap;
+    swap(bytes[i], bytes[j]);
+  }
+}
+
+template<typename T>
+inline T UnalignedLoad(const char* p) {
+  static_assert(
+      sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8,
+      "Unexpected type size");
+
+  switch (sizeof(T)) {
+    case 1: return *reinterpret_cast<const T*>(p);
+    case 2:
+      return ABSL_INTERNAL_UNALIGNED_LOAD16(p);
+    case 4:
+      return ABSL_INTERNAL_UNALIGNED_LOAD32(p);
+    case 8:
+      return ABSL_INTERNAL_UNALIGNED_LOAD64(p);
+    default:
+      // Suppresses an invalid "not all control paths return a value" warning on MSVC.
+      return {};
+  }
+}
+
+template <typename T, typename ByteSwapper>
+static void GBSwapHelper(const std::vector<T>& host_values_to_test,
+                         const ByteSwapper& byte_swapper) {
+  // Test byte_swapper against a manual byte swap.
+  for (const T host_value : host_values_to_test) {
+
+    char actual_value[sizeof(host_value)];
+    memcpy(actual_value, &host_value, sizeof(host_value));
+    byte_swapper(actual_value);
+
+    char expected_value[sizeof(host_value)];
+    memcpy(expected_value, &host_value, sizeof(host_value));
+    ManualByteSwap(expected_value, sizeof(host_value));
+
+    ASSERT_EQ(0, memcmp(actual_value, expected_value, sizeof(host_value)))
+        << "Swap output for 0x" << std::hex << host_value << " does not match. "
+        << "Expected: 0x" << UnalignedLoad<T>(expected_value) << "; "
+        << "actual: 0x" <<  UnalignedLoad<T>(actual_value);
+  }
+}
+
+void Swap16(char* bytes) {
+  ABSL_INTERNAL_UNALIGNED_STORE16(
+      bytes, gbswap_16(ABSL_INTERNAL_UNALIGNED_LOAD16(bytes)));
+}
+
+void Swap32(char* bytes) {
+  ABSL_INTERNAL_UNALIGNED_STORE32(
+      bytes, gbswap_32(ABSL_INTERNAL_UNALIGNED_LOAD32(bytes)));
+}
+
+void Swap64(char* bytes) {
+  ABSL_INTERNAL_UNALIGNED_STORE64(
+      bytes, gbswap_64(ABSL_INTERNAL_UNALIGNED_LOAD64(bytes)));
+}
+
+TEST(EndianessTest, Uint16) {
+  GBSwapHelper(GenerateAllUint16Values(), &Swap16);
+}
+
+TEST(EndianessTest, Uint32) {
+  GBSwapHelper(GenerateRandomIntegers<uint32_t>(kNumValuesToTest), &Swap32);
+}
+
+TEST(EndianessTest, Uint64) {
+  GBSwapHelper(GenerateRandomIntegers<uint64_t>(kNumValuesToTest), &Swap64);
+}
+
+TEST(EndianessTest, ghtonll_gntohll) {
+  // Test that absl::ghtonl compiles correctly
+  uint32_t test = 0x01234567;
+  EXPECT_EQ(absl::gntohl(absl::ghtonl(test)), test);
+
+  uint64_t comp = absl::ghtonll(kInitialNumber);
+  EXPECT_EQ(comp, kInitialInNetworkOrder);
+  comp = absl::gntohll(kInitialInNetworkOrder);
+  EXPECT_EQ(comp, kInitialNumber);
+
+  // Test that ghtonll and gntohll are each other's inverse functions on a
+  // somewhat assorted batch of numbers. 37 is chosen because it is not
+  // anything particularly nice in base 2.
+  uint64_t value = 1;
+  for (int i = 0; i < 100; ++i) {
+    comp = absl::ghtonll(absl::gntohll(value));
+    EXPECT_EQ(value, comp);
+    comp = absl::gntohll(absl::ghtonll(value));
+    EXPECT_EQ(value, comp);
+    value *= 37;
+  }
+}
+
+TEST(EndianessTest, little_endian) {
+  // Check little_endian uint16_t.
+  uint64_t comp = little_endian::FromHost16(k16Value);
+  EXPECT_EQ(comp, k16ValueLE);
+  comp = little_endian::ToHost16(k16ValueLE);
+  EXPECT_EQ(comp, k16Value);
+
+  // Check little_endian uint32_t.
+  comp = little_endian::FromHost32(k32Value);
+  EXPECT_EQ(comp, k32ValueLE);
+  comp = little_endian::ToHost32(k32ValueLE);
+  EXPECT_EQ(comp, k32Value);
+
+  // Check little_endian uint64_t.
+  comp = little_endian::FromHost64(k64Value);
+  EXPECT_EQ(comp, k64ValueLE);
+  comp = little_endian::ToHost64(k64ValueLE);
+  EXPECT_EQ(comp, k64Value);
+
+  // Check little-endian Load and store functions.
+  uint16_t u16Buf;
+  uint32_t u32Buf;
+  uint64_t u64Buf;
+
+  little_endian::Store16(&u16Buf, k16Value);
+  EXPECT_EQ(u16Buf, k16ValueLE);
+  comp = little_endian::Load16(&u16Buf);
+  EXPECT_EQ(comp, k16Value);
+
+  little_endian::Store32(&u32Buf, k32Value);
+  EXPECT_EQ(u32Buf, k32ValueLE);
+  comp = little_endian::Load32(&u32Buf);
+  EXPECT_EQ(comp, k32Value);
+
+  little_endian::Store64(&u64Buf, k64Value);
+  EXPECT_EQ(u64Buf, k64ValueLE);
+  comp = little_endian::Load64(&u64Buf);
+  EXPECT_EQ(comp, k64Value);
+}
+
+TEST(EndianessTest, big_endian) {
+  // Check big-endian Load and store functions.
+  uint16_t u16Buf;
+  uint32_t u32Buf;
+  uint64_t u64Buf;
+
+  unsigned char buffer[10];
+  big_endian::Store16(&u16Buf, k16Value);
+  EXPECT_EQ(u16Buf, k16ValueBE);
+  uint64_t comp = big_endian::Load16(&u16Buf);
+  EXPECT_EQ(comp, k16Value);
+
+  big_endian::Store32(&u32Buf, k32Value);
+  EXPECT_EQ(u32Buf, k32ValueBE);
+  comp = big_endian::Load32(&u32Buf);
+  EXPECT_EQ(comp, k32Value);
+
+  big_endian::Store64(&u64Buf, k64Value);
+  EXPECT_EQ(u64Buf, k64ValueBE);
+  comp = big_endian::Load64(&u64Buf);
+  EXPECT_EQ(comp, k64Value);
+
+  // Check the unaligned load and store functions; each load verifies the
+  // bytes written by the corresponding unaligned store.
+  big_endian::Store16(buffer + 1, k16Value);
+  comp = big_endian::Load16(buffer + 1);
+  EXPECT_EQ(comp, k16Value);
+
+  big_endian::Store32(buffer + 1, k32Value);
+  comp = big_endian::Load32(buffer + 1);
+  EXPECT_EQ(comp, k32Value);
+
+  big_endian::Store64(buffer + 1, k64Value);
+  comp = big_endian::Load64(buffer + 1);
+  EXPECT_EQ(comp, k64Value);
+}
+
+}  // namespace
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/base/internal/errno_saver.h b/third_party/abseil_cpp/absl/base/internal/errno_saver.h
new file mode 100644
index 000000000000..251de510fc93
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/errno_saver.h
@@ -0,0 +1,43 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_ERRNO_SAVER_H_
+#define ABSL_BASE_INTERNAL_ERRNO_SAVER_H_
+
+#include <cerrno>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// `ErrnoSaver` captures the value of `errno` upon construction and restores it
+// upon destruction.  It is used in low-level code and must be super fast.  Do
+// not add instrumentation, even in debug modes.
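+//
+// A minimal usage sketch (`BestEffortLog` is a hypothetical function that may
+// clobber `errno`):
+//
+//   void LogWithErrnoPreserved(const char* msg) {
+//     absl::base_internal::ErrnoSaver saver;  // snapshots errno here
+//     BestEffortLog(msg);                     // may overwrite errno
+//   }  // saver's destructor restores the snapshot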
+class ErrnoSaver {
+ public:
+  ErrnoSaver() : saved_errno_(errno) {}
+  ~ErrnoSaver() { errno = saved_errno_; }
+  int operator()() const { return saved_errno_; }
+
+ private:
+  const int saved_errno_;
+};
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_ERRNO_SAVER_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/errno_saver_test.cc b/third_party/abseil_cpp/absl/base/internal/errno_saver_test.cc
new file mode 100644
index 000000000000..e9b742c588b0
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/errno_saver_test.cc
@@ -0,0 +1,45 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/errno_saver.h"
+
+#include <cerrno>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/internal/strerror.h"
+
+namespace {
+using ::testing::Eq;
+
+struct ErrnoPrinter {
+  int no;
+};
+std::ostream &operator<<(std::ostream &os, ErrnoPrinter ep) {
+  return os << absl::base_internal::StrError(ep.no) << " [" << ep.no << "]";
+}
+bool operator==(ErrnoPrinter one, ErrnoPrinter two) { return one.no == two.no; }
+
+TEST(ErrnoSaverTest, Works) {
+  errno = EDOM;
+  {
+    absl::base_internal::ErrnoSaver errno_saver;
+    EXPECT_THAT(ErrnoPrinter{errno}, Eq(ErrnoPrinter{EDOM}));
+    errno = ERANGE;
+    EXPECT_THAT(ErrnoPrinter{errno}, Eq(ErrnoPrinter{ERANGE}));
+    EXPECT_THAT(ErrnoPrinter{errno_saver()}, Eq(ErrnoPrinter{EDOM}));
+  }
+  EXPECT_THAT(ErrnoPrinter{errno}, Eq(ErrnoPrinter{EDOM}));
+}
+}  // namespace
diff --git a/third_party/abseil_cpp/absl/base/internal/exception_safety_testing.cc b/third_party/abseil_cpp/absl/base/internal/exception_safety_testing.cc
new file mode 100644
index 000000000000..6ccac41864b1
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/exception_safety_testing.cc
@@ -0,0 +1,79 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/exception_safety_testing.h"
+
+#ifdef ABSL_HAVE_EXCEPTIONS
+
+#include "gtest/gtest.h"
+#include "absl/meta/type_traits.h"
+
+namespace testing {
+
+exceptions_internal::NoThrowTag nothrow_ctor;
+
+exceptions_internal::StrongGuaranteeTagType strong_guarantee;
+
+exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester() {
+  return {};
+}
+
+namespace exceptions_internal {
+
+int countdown = -1;
+
+ConstructorTracker* ConstructorTracker::current_tracker_instance_ = nullptr;
+
+void MaybeThrow(absl::string_view msg, bool throw_bad_alloc) {
+  if (countdown-- == 0) {
+    if (throw_bad_alloc) throw TestBadAllocException(msg);
+    throw TestException(msg);
+  }
+}
+
+testing::AssertionResult FailureMessage(const TestException& e,
+                                        int countdown) noexcept {
+  return testing::AssertionFailure() << "Exception thrown from " << e.what();
+}
+
+std::string GetSpecString(TypeSpec spec) {
+  std::string out;
+  absl::string_view sep;
+  const auto append = [&](absl::string_view s) {
+    absl::StrAppend(&out, sep, s);
+    sep = " | ";
+  };
+  if (static_cast<bool>(TypeSpec::kNoThrowCopy & spec)) {
+    append("kNoThrowCopy");
+  }
+  if (static_cast<bool>(TypeSpec::kNoThrowMove & spec)) {
+    append("kNoThrowMove");
+  }
+  if (static_cast<bool>(TypeSpec::kNoThrowNew & spec)) {
+    append("kNoThrowNew");
+  }
+  return out;
+}
+
+std::string GetSpecString(AllocSpec spec) {
+  return static_cast<bool>(AllocSpec::kNoThrowAllocate & spec)
+             ? "kNoThrowAllocate"
+             : "";
+}
+
+}  // namespace exceptions_internal
+
+}  // namespace testing
+
+#endif  // ABSL_HAVE_EXCEPTIONS
diff --git a/third_party/abseil_cpp/absl/base/internal/exception_safety_testing.h b/third_party/abseil_cpp/absl/base/internal/exception_safety_testing.h
new file mode 100644
index 000000000000..6ba89d05dfca
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/exception_safety_testing.h
@@ -0,0 +1,1101 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Utilities for testing exception-safety
+
+#ifndef ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
+#define ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
+
+#include "absl/base/config.h"
+
+#ifdef ABSL_HAVE_EXCEPTIONS
+
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <functional>
+#include <initializer_list>
+#include <iosfwd>
+#include <limits>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <unordered_map>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/pretty_function.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/string_view.h"
+#include "absl/strings/substitute.h"
+#include "absl/utility/utility.h"
+
+namespace testing {
+
+enum class TypeSpec;
+enum class AllocSpec;
+
+constexpr TypeSpec operator|(TypeSpec a, TypeSpec b) {
+  using T = absl::underlying_type_t<TypeSpec>;
+  return static_cast<TypeSpec>(static_cast<T>(a) | static_cast<T>(b));
+}
+
+constexpr TypeSpec operator&(TypeSpec a, TypeSpec b) {
+  using T = absl::underlying_type_t<TypeSpec>;
+  return static_cast<TypeSpec>(static_cast<T>(a) & static_cast<T>(b));
+}
+
+constexpr AllocSpec operator|(AllocSpec a, AllocSpec b) {
+  using T = absl::underlying_type_t<AllocSpec>;
+  return static_cast<AllocSpec>(static_cast<T>(a) | static_cast<T>(b));
+}
+
+constexpr AllocSpec operator&(AllocSpec a, AllocSpec b) {
+  using T = absl::underlying_type_t<AllocSpec>;
+  return static_cast<AllocSpec>(static_cast<T>(a) & static_cast<T>(b));
+}
+
+namespace exceptions_internal {
+
+std::string GetSpecString(TypeSpec);
+std::string GetSpecString(AllocSpec);
+
+struct NoThrowTag {};
+struct StrongGuaranteeTagType {};
+
+// A simple exception class.  We throw this so that test code can catch
+// exceptions specifically thrown by ThrowingValue.
+class TestException {
+ public:
+  explicit TestException(absl::string_view msg) : msg_(msg) {}
+  virtual ~TestException() {}
+  virtual const char* what() const noexcept { return msg_.c_str(); }
+
+ private:
+  std::string msg_;
+};
+
+// TestBadAllocException exists because allocation functions must throw an
+// exception which can be caught by a handler of std::bad_alloc.  We use a child
+// class of std::bad_alloc so we can customise the error message, and also
+// derive from TestException so we don't accidentally end up catching an actual
+// bad_alloc exception in TestExceptionSafety.
+class TestBadAllocException : public std::bad_alloc, public TestException {
+ public:
+  explicit TestBadAllocException(absl::string_view msg) : TestException(msg) {}
+  using TestException::what;
+};
+
+extern int countdown;
+
+// Allows the countdown variable to be set manually (defaulting to the initial
+// value of 0)
+inline void SetCountdown(int i = 0) { countdown = i; }
+// Sets the countdown to the terminal value -1
+inline void UnsetCountdown() { SetCountdown(-1); }
+
+void MaybeThrow(absl::string_view msg, bool throw_bad_alloc = false);
+
+testing::AssertionResult FailureMessage(const TestException& e,
+                                        int countdown) noexcept;
+
+struct TrackedAddress {
+  bool is_alive;
+  std::string description;
+};
+
+// Inspects the constructions and destructions of anything inheriting from
+// TrackedObject. This allows us to safely "leak" TrackedObjects, as
+// ConstructorTracker will report any objects that are still alive when its
+// destructor runs.
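+//
+// A usage sketch (`LeakyType` is a hypothetical TrackedObject subclass):
+//
+//   {
+//     ConstructorTracker tracker(/*count=*/0);
+//     auto* leaked = new LeakyType();  // never deleted
+//   }  // tracker's destructor reports the still-alive LeakyType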
+class ConstructorTracker {
+ public:
+  explicit ConstructorTracker(int count) : countdown_(count) {
+    assert(current_tracker_instance_ == nullptr);
+    current_tracker_instance_ = this;
+  }
+
+  ~ConstructorTracker() {
+    assert(current_tracker_instance_ == this);
+    current_tracker_instance_ = nullptr;
+
+    for (auto& it : address_map_) {
+      void* address = it.first;
+      TrackedAddress& tracked_address = it.second;
+      if (tracked_address.is_alive) {
+        ADD_FAILURE() << ErrorMessage(address, tracked_address.description,
+                                      countdown_, "Object was not destroyed.");
+      }
+    }
+  }
+
+  static void ObjectConstructed(void* address, std::string description) {
+    if (!CurrentlyTracking()) return;
+
+    TrackedAddress& tracked_address =
+        current_tracker_instance_->address_map_[address];
+    if (tracked_address.is_alive) {
+      ADD_FAILURE() << ErrorMessage(
+          address, tracked_address.description,
+          current_tracker_instance_->countdown_,
+          "Object was re-constructed. Current object was constructed by " +
+              description);
+    }
+    tracked_address = {true, std::move(description)};
+  }
+
+  static void ObjectDestructed(void* address) {
+    if (!CurrentlyTracking()) return;
+
+    auto it = current_tracker_instance_->address_map_.find(address);
+    // Not tracked. Ignore.
+    if (it == current_tracker_instance_->address_map_.end()) return;
+
+    TrackedAddress& tracked_address = it->second;
+    if (!tracked_address.is_alive) {
+      ADD_FAILURE() << ErrorMessage(address, tracked_address.description,
+                                    current_tracker_instance_->countdown_,
+                                    "Object was re-destroyed.");
+    }
+    tracked_address.is_alive = false;
+  }
+
+ private:
+  static bool CurrentlyTracking() {
+    return current_tracker_instance_ != nullptr;
+  }
+
+  static std::string ErrorMessage(void* address,
+                                  const std::string& address_description,
+                                  int countdown,
+                                  const std::string& error_description) {
+    return absl::Substitute(
+        "With coundtown at $0:\n"
+        "  $1\n"
+        "  Object originally constructed by $2\n"
+        "  Object address: $3\n",
+        countdown, error_description, address_description, address);
+  }
+
+  std::unordered_map<void*, TrackedAddress> address_map_;
+  int countdown_;
+
+  static ConstructorTracker* current_tracker_instance_;
+};
+
+class TrackedObject {
+ public:
+  TrackedObject(const TrackedObject&) = delete;
+  TrackedObject(TrackedObject&&) = delete;
+
+ protected:
+  explicit TrackedObject(std::string description) {
+    ConstructorTracker::ObjectConstructed(this, std::move(description));
+  }
+
+  ~TrackedObject() noexcept { ConstructorTracker::ObjectDestructed(this); }
+};
+}  // namespace exceptions_internal
+
+extern exceptions_internal::NoThrowTag nothrow_ctor;
+
+extern exceptions_internal::StrongGuaranteeTagType strong_guarantee;
+
+// A test class which is convertible to bool.  The conversion can be
+// instrumented to throw at a controlled time.
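+//
+// For example (a sketch): `if (ThrowingBool(value)) { ... }` may throw a
+// TestException from the conversion to bool when the countdown elapses.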
+class ThrowingBool {
+ public:
+  ThrowingBool(bool b) noexcept : b_(b) {}  // NOLINT(runtime/explicit)
+  operator bool() const {                   // NOLINT
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return b_;
+  }
+
+ private:
+  bool b_;
+};
+
+/*
+ * Configuration enum for the ThrowingValue type that defines behavior for the
+ * lifetime of the instance. Use testing::nothrow_ctor to prevent the integer
+ * constructor from throwing.
+ *
+ * kEverythingThrows: Every operation can throw an exception
+ * kNoThrowCopy: Copy construction and copy assignment will not throw
+ * kNoThrowMove: Move construction and move assignment will not throw
+ * kNoThrowNew: Overloaded operators new and new[] will not throw
+ */
+enum class TypeSpec {
+  kEverythingThrows = 0,
+  kNoThrowCopy = 1,
+  kNoThrowMove = 1 << 1,
+  kNoThrowNew = 1 << 2,
+};
+
+/*
+ * A testing class instrumented to throw an exception at a controlled time.
+ *
+ * ThrowingValue implements a slightly relaxed version of the Regular concept --
+ * that is, it's a value type with the expected semantics.  It also implements
+ * arithmetic operations.  It doesn't implement member and pointer operators
+ * like operator-> or operator[].
+ *
+ * ThrowingValue can be instrumented to have certain operations be noexcept by
+ * using compile-time bitfield template arguments.  That is, to make a
+ * ThrowingValue which has noexcept move construction/assignment and noexcept
+ * copy construction/assignment, use the following:
+ *   ThrowingValue<TypeSpec::kNoThrowMove | TypeSpec::kNoThrowCopy>
+ *       my_thrwr{val};
+ */
+template <TypeSpec Spec = TypeSpec::kEverythingThrows>
+class ThrowingValue : private exceptions_internal::TrackedObject {
+  static constexpr bool IsSpecified(TypeSpec spec) {
+    return static_cast<bool>(Spec & spec);
+  }
+
+  static constexpr int kDefaultValue = 0;
+  static constexpr int kBadValue = 938550620;
+
+ public:
+  ThrowingValue() : TrackedObject(GetInstanceString(kDefaultValue)) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ = kDefaultValue;
+  }
+
+  ThrowingValue(const ThrowingValue& other) noexcept(
+      IsSpecified(TypeSpec::kNoThrowCopy))
+      : TrackedObject(GetInstanceString(other.dummy_)) {
+    if (!IsSpecified(TypeSpec::kNoThrowCopy)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    }
+    dummy_ = other.dummy_;
+  }
+
+  ThrowingValue(ThrowingValue&& other) noexcept(
+      IsSpecified(TypeSpec::kNoThrowMove))
+      : TrackedObject(GetInstanceString(other.dummy_)) {
+    if (!IsSpecified(TypeSpec::kNoThrowMove)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    }
+    dummy_ = other.dummy_;
+  }
+
+  explicit ThrowingValue(int i) : TrackedObject(GetInstanceString(i)) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ = i;
+  }
+
+  ThrowingValue(int i, exceptions_internal::NoThrowTag) noexcept
+      : TrackedObject(GetInstanceString(i)), dummy_(i) {}
+
+  // absl expects nothrow destructors
+  ~ThrowingValue() noexcept = default;
+
+  ThrowingValue& operator=(const ThrowingValue& other) noexcept(
+      IsSpecified(TypeSpec::kNoThrowCopy)) {
+    dummy_ = kBadValue;
+    if (!IsSpecified(TypeSpec::kNoThrowCopy)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    }
+    dummy_ = other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator=(ThrowingValue&& other) noexcept(
+      IsSpecified(TypeSpec::kNoThrowMove)) {
+    dummy_ = kBadValue;
+    if (!IsSpecified(TypeSpec::kNoThrowMove)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    }
+    dummy_ = other.dummy_;
+    return *this;
+  }
+
+  // Arithmetic Operators
+  ThrowingValue operator+(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ + other.dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator+() const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator-(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ - other.dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator-() const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(-dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue& operator++() {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    ++dummy_;
+    return *this;
+  }
+
+  ThrowingValue operator++(int) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    auto out = ThrowingValue(dummy_, nothrow_ctor);
+    ++dummy_;
+    return out;
+  }
+
+  ThrowingValue& operator--() {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    --dummy_;
+    return *this;
+  }
+
+  ThrowingValue operator--(int) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    auto out = ThrowingValue(dummy_, nothrow_ctor);
+    --dummy_;
+    return out;
+  }
+
+  ThrowingValue operator*(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ * other.dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator/(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ / other.dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator%(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ % other.dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator<<(int shift) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ << shift, nothrow_ctor);
+  }
+
+  ThrowingValue operator>>(int shift) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ >> shift, nothrow_ctor);
+  }
+
+  // Comparison Operators
+  // NOTE: We use `ThrowingBool` instead of `bool` because most STL
+  // types/containers require T to be convertible to bool.
+  friend ThrowingBool operator==(const ThrowingValue& a,
+                                 const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ == b.dummy_;
+  }
+  friend ThrowingBool operator!=(const ThrowingValue& a,
+                                 const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ != b.dummy_;
+  }
+  friend ThrowingBool operator<(const ThrowingValue& a,
+                                const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ < b.dummy_;
+  }
+  friend ThrowingBool operator<=(const ThrowingValue& a,
+                                 const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ <= b.dummy_;
+  }
+  friend ThrowingBool operator>(const ThrowingValue& a,
+                                const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ > b.dummy_;
+  }
+  friend ThrowingBool operator>=(const ThrowingValue& a,
+                                 const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ >= b.dummy_;
+  }
+
+  // Logical Operators
+  ThrowingBool operator!() const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return !dummy_;
+  }
+
+  ThrowingBool operator&&(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return dummy_ && other.dummy_;
+  }
+
+  ThrowingBool operator||(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return dummy_ || other.dummy_;
+  }
+
+  // Bitwise Logical Operators
+  ThrowingValue operator~() const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(~dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator&(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ & other.dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator|(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ | other.dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator^(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ ^ other.dummy_, nothrow_ctor);
+  }
+
+  // Compound Assignment operators
+  ThrowingValue& operator+=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ += other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator-=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ -= other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator*=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ *= other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator/=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ /= other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator%=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ %= other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator&=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ &= other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator|=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ |= other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator^=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ ^= other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator<<=(int shift) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ <<= shift;
+    return *this;
+  }
+
+  ThrowingValue& operator>>=(int shift) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ >>= shift;
+    return *this;
+  }
+
+  // Pointer operators
+  void operator&() const = delete;  // NOLINT(runtime/operator)
+
+  // Stream operators
+  friend std::ostream& operator<<(std::ostream& os, const ThrowingValue& tv) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return os << GetInstanceString(tv.dummy_);
+  }
+
+  friend std::istream& operator>>(std::istream& is, const ThrowingValue&) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return is;
+  }
+
+  // Memory management operators
+  // Args... allows us to overload regular and placement new in one shot.
+  template <typename... Args>
+  static void* operator new(size_t s, Args&&... args) noexcept(
+      IsSpecified(TypeSpec::kNoThrowNew)) {
+    if (!IsSpecified(TypeSpec::kNoThrowNew)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
+    }
+    return ::operator new(s, std::forward<Args>(args)...);
+  }
+
+  template <typename... Args>
+  static void* operator new[](size_t s, Args&&... args) noexcept(
+      IsSpecified(TypeSpec::kNoThrowNew)) {
+    if (!IsSpecified(TypeSpec::kNoThrowNew)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
+    }
+    return ::operator new[](s, std::forward<Args>(args)...);
+  }
+
+  // Abseil doesn't support throwing overloaded operator delete.  These are
+  // provided so a throwing operator-new can clean up after itself.
+  //
+  // We provide both regular and templated operator delete because if only the
+  // templated version is provided, as we did with operator new, the compiler
+  // has no way of knowing which overload of operator delete to call. See
+  // https://en.cppreference.com/w/cpp/memory/new/operator_delete and
+  // https://en.cppreference.com/w/cpp/language/delete for the gory details.
+  void operator delete(void* p) noexcept { ::operator delete(p); }
+
+  template <typename... Args>
+  void operator delete(void* p, Args&&... args) noexcept {
+    ::operator delete(p, std::forward<Args>(args)...);
+  }
+
+  void operator delete[](void* p) noexcept { return ::operator delete[](p); }
+
+  template <typename... Args>
+  void operator delete[](void* p, Args&&... args) noexcept {
+    return ::operator delete[](p, std::forward<Args>(args)...);
+  }
+
+  // Non-standard access to the actual contained value.  No need for this to
+  // throw.
+  int& Get() noexcept { return dummy_; }
+  const int& Get() const noexcept { return dummy_; }
+
+ private:
+  static std::string GetInstanceString(int dummy) {
+    return absl::StrCat("ThrowingValue<",
+                        exceptions_internal::GetSpecString(Spec), ">(", dummy,
+                        ")");
+  }
+
+  int dummy_;
+};
+// Although unrelated to exceptions, we explicitly delete the comma operator to
+// make sure it isn't used on user-supplied types.
+template <TypeSpec Spec, typename T>
+void operator,(const ThrowingValue<Spec>&, T&&) = delete;
+template <TypeSpec Spec, typename T>
+void operator,(T&&, const ThrowingValue<Spec>&) = delete;
+
+/*
+ * Configuration enum for the ThrowingAllocator type that defines behavior for
+ * the lifetime of the instance.
+ *
+ * kEverythingThrows: Calls to the member functions may throw
+ * kNoThrowAllocate: Calls to the member functions will not throw
+ */
+enum class AllocSpec {
+  kEverythingThrows = 0,
+  kNoThrowAllocate = 1,
+};
+
+/*
+ * An allocator type which is instrumented to throw at a controlled time, or not
+ * to throw, using AllocSpec. The supported settings are the default, in which
+ * every member function that a conforming allocator is allowed to throw from
+ * may throw, and kNoThrowAllocate, in which nothing throws, in line with the
+ * ABSL_ALLOCATOR_NOTHROW configuration macro.
+ */
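+//
+// A minimal usage sketch (see TestThrowingCtor below for the test driver):
+//
+//   using Alloc = ThrowingAllocator<int>;
+//   TestThrowingCtor<std::vector<int, Alloc>>(/*count=*/5, /*value=*/0,
+//                                             Alloc{});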
+template <typename T, AllocSpec Spec = AllocSpec::kEverythingThrows>
+class ThrowingAllocator : private exceptions_internal::TrackedObject {
+  static constexpr bool IsSpecified(AllocSpec spec) {
+    return static_cast<bool>(Spec & spec);
+  }
+
+ public:
+  using pointer = T*;
+  using const_pointer = const T*;
+  using reference = T&;
+  using const_reference = const T&;
+  using void_pointer = void*;
+  using const_void_pointer = const void*;
+  using value_type = T;
+  using size_type = size_t;
+  using difference_type = ptrdiff_t;
+
+  using is_nothrow =
+      std::integral_constant<bool, Spec == AllocSpec::kNoThrowAllocate>;
+  using propagate_on_container_copy_assignment = std::true_type;
+  using propagate_on_container_move_assignment = std::true_type;
+  using propagate_on_container_swap = std::true_type;
+  using is_always_equal = std::false_type;
+
+  ThrowingAllocator() : TrackedObject(GetInstanceString(next_id_)) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ = std::make_shared<const int>(next_id_++);
+  }
+
+  template <typename U>
+  ThrowingAllocator(const ThrowingAllocator<U, Spec>& other) noexcept  // NOLINT
+      : TrackedObject(GetInstanceString(*other.State())),
+        dummy_(other.State()) {}
+
+  // According to C++11 standard [17.6.3.5], Table 28, the move/copy ctors of
+  // allocator shall not exit via an exception, thus they are marked noexcept.
+  ThrowingAllocator(const ThrowingAllocator& other) noexcept
+      : TrackedObject(GetInstanceString(*other.State())),
+        dummy_(other.State()) {}
+
+  template <typename U>
+  ThrowingAllocator(ThrowingAllocator<U, Spec>&& other) noexcept  // NOLINT
+      : TrackedObject(GetInstanceString(*other.State())),
+        dummy_(std::move(other.State())) {}
+
+  ThrowingAllocator(ThrowingAllocator&& other) noexcept
+      : TrackedObject(GetInstanceString(*other.State())),
+        dummy_(std::move(other.State())) {}
+
+  ~ThrowingAllocator() noexcept = default;
+
+  ThrowingAllocator& operator=(const ThrowingAllocator& other) noexcept {
+    dummy_ = other.State();
+    return *this;
+  }
+
+  template <typename U>
+  ThrowingAllocator& operator=(
+      const ThrowingAllocator<U, Spec>& other) noexcept {
+    dummy_ = other.State();
+    return *this;
+  }
+
+  template <typename U>
+  ThrowingAllocator& operator=(ThrowingAllocator<U, Spec>&& other) noexcept {
+    dummy_ = std::move(other.State());
+    return *this;
+  }
+
+  template <typename U>
+  struct rebind {
+    using other = ThrowingAllocator<U, Spec>;
+  };
+
+  pointer allocate(size_type n) noexcept(
+      IsSpecified(AllocSpec::kNoThrowAllocate)) {
+    ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+    return static_cast<pointer>(::operator new(n * sizeof(T)));
+  }
+
+  pointer allocate(size_type n, const_void_pointer) noexcept(
+      IsSpecified(AllocSpec::kNoThrowAllocate)) {
+    return allocate(n);
+  }
+
+  void deallocate(pointer ptr, size_type) noexcept {
+    ReadState();
+    ::operator delete(static_cast<void*>(ptr));
+  }
+
+  template <typename U, typename... Args>
+  void construct(U* ptr, Args&&... args) noexcept(
+      IsSpecified(AllocSpec::kNoThrowAllocate)) {
+    ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+    ::new (static_cast<void*>(ptr)) U(std::forward<Args>(args)...);
+  }
+
+  template <typename U>
+  void destroy(U* p) noexcept {
+    ReadState();
+    p->~U();
+  }
+
+  size_type max_size() const noexcept {
+    return (std::numeric_limits<difference_type>::max)() / sizeof(value_type);
+  }
+
+  ThrowingAllocator select_on_container_copy_construction() noexcept(
+      IsSpecified(AllocSpec::kNoThrowAllocate)) {
+    auto& out = *this;
+    ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+    return out;
+  }
+
+  template <typename U>
+  bool operator==(const ThrowingAllocator<U, Spec>& other) const noexcept {
+    return dummy_ == other.dummy_;
+  }
+
+  template <typename U>
+  bool operator!=(const ThrowingAllocator<U, Spec>& other) const noexcept {
+    return dummy_ != other.dummy_;
+  }
+
+  template <typename, AllocSpec>
+  friend class ThrowingAllocator;
+
+ private:
+  static std::string GetInstanceString(int dummy) {
+    return absl::StrCat("ThrowingAllocator<",
+                        exceptions_internal::GetSpecString(Spec), ">(", dummy,
+                        ")");
+  }
+
+  const std::shared_ptr<const int>& State() const { return dummy_; }
+  std::shared_ptr<const int>& State() { return dummy_; }
+
+  void ReadState() {
+    // We know that this will never be true, but the compiler doesn't, so this
+    // should safely force a read of the value.
+    if (*dummy_ < 0) std::abort();
+  }
+
+  void ReadStateAndMaybeThrow(absl::string_view msg) const {
+    if (!IsSpecified(AllocSpec::kNoThrowAllocate)) {
+      exceptions_internal::MaybeThrow(
+          absl::Substitute("Allocator id $0 threw from $1", *dummy_, msg));
+    }
+  }
+
+  static int next_id_;
+  std::shared_ptr<const int> dummy_;
+};
+
+template <typename T, AllocSpec Spec>
+int ThrowingAllocator<T, Spec>::next_id_ = 0;
+
+// Tests for resource leaks by attempting to construct a T using args
+// repeatedly until successful, using the countdown method. Side effects of
+// each failed attempt can then be checked for resource leaks.
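+//
+// For example (a sketch):
+//
+//   TestThrowingCtor<std::vector<ThrowingValue<>>>(
+//       /*count=*/5, ThrowingValue<>(1, nothrow_ctor));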
+template <typename T, typename... Args>
+void TestThrowingCtor(Args&&... args) {
+  struct Cleanup {
+    ~Cleanup() { exceptions_internal::UnsetCountdown(); }
+  } c;
+  for (int count = 0;; ++count) {
+    exceptions_internal::ConstructorTracker ct(count);
+    exceptions_internal::SetCountdown(count);
+    try {
+      T temp(std::forward<Args>(args)...);
+      static_cast<void>(temp);
+      break;
+    } catch (const exceptions_internal::TestException&) {
+    }
+  }
+}
+
+// Tests the nothrow guarantee of the provided nullary operation. If an
+// exception is thrown, the result will be AssertionFailure(). Otherwise, it
+// will be AssertionSuccess().
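+//
+// For example (a sketch):
+//
+//   ThrowingValue<TypeSpec::kNoThrowMove> a(1, nothrow_ctor);
+//   ThrowingValue<TypeSpec::kNoThrowMove> b(2, nothrow_ctor);
+//   EXPECT_TRUE(TestNothrowOp([&]() { a = std::move(b); }));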
+template <typename Operation>
+testing::AssertionResult TestNothrowOp(const Operation& operation) {
+  struct Cleanup {
+    Cleanup() { exceptions_internal::SetCountdown(); }
+    ~Cleanup() { exceptions_internal::UnsetCountdown(); }
+  } c;
+  try {
+    operation();
+    return testing::AssertionSuccess();
+  } catch (const exceptions_internal::TestException&) {
+    return testing::AssertionFailure()
+           << "TestException thrown during call to operation() when nothrow "
+              "guarantee was expected.";
+  } catch (...) {
+    return testing::AssertionFailure()
+           << "Unknown exception thrown during call to operation() when "
+              "nothrow guarantee was expected.";
+  }
+}
+
+namespace exceptions_internal {
+
+// Dummy struct for ExceptionSafetyTestBuilder<> partial state.
+struct UninitializedT {};
+
+template <typename T>
+class DefaultFactory {
+ public:
+  explicit DefaultFactory(const T& t) : t_(t) {}
+  std::unique_ptr<T> operator()() const { return absl::make_unique<T>(t_); }
+
+ private:
+  T t_;
+};
+
+template <size_t LazyContractsCount, typename LazyFactory,
+          typename LazyOperation>
+using EnableIfTestable = typename absl::enable_if_t<
+    LazyContractsCount != 0 &&
+    !std::is_same<LazyFactory, UninitializedT>::value &&
+    !std::is_same<LazyOperation, UninitializedT>::value>;
+
+template <typename Factory = UninitializedT,
+          typename Operation = UninitializedT, typename... Contracts>
+class ExceptionSafetyTestBuilder;
+
+}  // namespace exceptions_internal
+
+/*
+ * Constructs an empty ExceptionSafetyTestBuilder. All
+ * ExceptionSafetyTestBuilder objects are immutable and all With[thing] mutation
+ * methods return new instances of ExceptionSafetyTestBuilder.
+ *
+ * In order to test a T for exception safety, a factory for that T, a testable
+ * operation, and at least one contract callback returning an assertion
+ * result must be applied using the respective methods.
+ */
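+//
+// For example (a sketch using the ThrowingValue type above):
+//
+//   auto tester = testing::MakeExceptionSafetyTester()
+//                     .WithInitialValue(ThrowingValue<>(42))
+//                     .WithContracts(testing::strong_guarantee);
+//   EXPECT_TRUE(tester.Test(
+//       [](ThrowingValue<>* tv) { *tv += ThrowingValue<>(1, nothrow_ctor); }));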
+exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester();
+
+namespace exceptions_internal {
+template <typename T>
+struct IsUniquePtr : std::false_type {};
+
+template <typename T, typename D>
+struct IsUniquePtr<std::unique_ptr<T, D>> : std::true_type {};
+
+template <typename Factory>
+struct FactoryPtrTypeHelper {
+  using type = decltype(std::declval<const Factory&>()());
+
+  static_assert(IsUniquePtr<type>::value, "Factories must return a unique_ptr");
+};
+
+template <typename Factory>
+using FactoryPtrType = typename FactoryPtrTypeHelper<Factory>::type;
+
+template <typename Factory>
+using FactoryElementType = typename FactoryPtrType<Factory>::element_type;
+
+template <typename T>
+class ExceptionSafetyTest {
+  using Factory = std::function<std::unique_ptr<T>()>;
+  using Operation = std::function<void(T*)>;
+  using Contract = std::function<AssertionResult(T*)>;
+
+ public:
+  template <typename... Contracts>
+  explicit ExceptionSafetyTest(const Factory& f, const Operation& op,
+                               const Contracts&... contracts)
+      : factory_(f), operation_(op), contracts_{WrapContract(contracts)...} {}
+
+  AssertionResult Test() const {
+    for (int count = 0;; ++count) {
+      exceptions_internal::ConstructorTracker ct(count);
+
+      for (const auto& contract : contracts_) {
+        auto t_ptr = factory_();
+        try {
+          SetCountdown(count);
+          operation_(t_ptr.get());
+          // Unset for the case that the operation throws no exceptions, which
+          // would leave the countdown set and break the *next* exception safety
+          // test after this one.
+          UnsetCountdown();
+          return AssertionSuccess();
+        } catch (const exceptions_internal::TestException& e) {
+          if (!contract(t_ptr.get())) {
+            return AssertionFailure() << e.what() << " failed contract check";
+          }
+        }
+      }
+    }
+  }
+
+ private:
+  template <typename ContractFn>
+  Contract WrapContract(const ContractFn& contract) {
+    return [contract](T* t_ptr) { return AssertionResult(contract(t_ptr)); };
+  }
+
+  Contract WrapContract(StrongGuaranteeTagType) {
+    return [this](T* t_ptr) { return AssertionResult(*factory_() == *t_ptr); };
+  }
+
+  Factory factory_;
+  Operation operation_;
+  std::vector<Contract> contracts_;
+};
+
+/*
+ * Builds a tester object that tests if performing an operation on a T follows
+ * exception safety guarantees. Verification is done via contract assertion
+ * callbacks applied to T instances post-throw.
+ *
+ * Template parameters for ExceptionSafetyTestBuilder:
+ *
+ * - Factory: The factory object (passed in via tester.WithFactory(...) or
+ *   tester.WithInitialValue(...)) must be invocable with the signature
+ *   `std::unique_ptr<T> operator()() const` where T is the type being tested.
+ *   It is used for reliably creating identical T instances to test on.
+ *
+ * - Operation: The operation object (passed in via tester.WithOperation(...)
+ *   or tester.Test(...)) must be invocable with the signature
+ *   `void operator()(T*) const` where T is the type being tested. It is used
+ *   for performing steps on a T instance that may throw and that need to be
+ *   checked for exception safety. Each call to the operation will receive a
+ *   fresh T instance so it's free to modify and destroy the T instances as it
+ *   pleases.
+ *
+ * - Contracts...: The contract assertion callback objects (passed in via
+ *   tester.WithContracts(...)) must be invocable with the signature
+ *   `testing::AssertionResult operator()(T*) const` where T is the type being
+ *   tested. Contract assertion callbacks are provided T instances post-throw.
+ *   They must return testing::AssertionSuccess when the type contracts of the
+ *   provided T instance hold. If the type contracts of the T instance do not
+ *   hold, they must return testing::AssertionFailure. Execution order of
+ *   Contracts... is unspecified. They will each individually get a fresh T
+ *   instance so they are free to modify and destroy the T instances as they
+ *   please.
+ */
+template <typename Factory, typename Operation, typename... Contracts>
+class ExceptionSafetyTestBuilder {
+ public:
+  /*
+   * Returns a new ExceptionSafetyTestBuilder with an included T factory based
+   * on the provided T instance. The existing factory will not be included in
+   * the newly created tester instance. The created factory returns a new T
+   * instance by copy-constructing the provided const T& t.
+   *
+   * Preconditions for tester.WithInitialValue(const T& t):
+   *
+   * - The const T& t object must be copy-constructible where T is the type
+   *   being tested. For non-copy-constructible objects, use the method
+   *   tester.WithFactory(...).
+   */
+  template <typename T>
+  ExceptionSafetyTestBuilder<DefaultFactory<T>, Operation, Contracts...>
+  WithInitialValue(const T& t) const {
+    return WithFactory(DefaultFactory<T>(t));
+  }
+
+  /*
+   * Returns a new ExceptionSafetyTestBuilder with the provided T factory
+   * included. The existing factory will not be included in the newly-created
+   * tester instance. This method is intended for use with types lacking a copy
+   * constructor. Types that can be copy-constructed should instead use the
+   * method tester.WithInitialValue(...).
+   */
+  template <typename NewFactory>
+  ExceptionSafetyTestBuilder<absl::decay_t<NewFactory>, Operation, Contracts...>
+  WithFactory(const NewFactory& new_factory) const {
+    return {new_factory, operation_, contracts_};
+  }
+
+  /*
+   * Returns a new ExceptionSafetyTestBuilder with the provided testable
+   * operation included. The existing operation will not be included in the
+   * newly created tester.
+   */
+  template <typename NewOperation>
+  ExceptionSafetyTestBuilder<Factory, absl::decay_t<NewOperation>, Contracts...>
+  WithOperation(const NewOperation& new_operation) const {
+    return {factory_, new_operation, contracts_};
+  }
+
+  /*
+   * Returns a new ExceptionSafetyTestBuilder with the provided MoreContracts...
+   * combined with the Contracts... that were already included in the instance
+   * on which the method was called. Contracts... cannot be removed or replaced
+   * once added to an ExceptionSafetyTestBuilder instance. A fresh object must
+   * be created in order to get an empty Contracts... list.
+   *
+   * In addition to passing in custom contract assertion callbacks, this method
+   * accepts `testing::strong_guarantee` as an argument which checks T instances
+   * post-throw against freshly created T instances via operator== to verify
+   * that any state changes made during the execution of the operation were
+   * properly rolled back.
+   */
+  template <typename... MoreContracts>
+  ExceptionSafetyTestBuilder<Factory, Operation, Contracts...,
+                             absl::decay_t<MoreContracts>...>
+  WithContracts(const MoreContracts&... more_contracts) const {
+    return {
+        factory_, operation_,
+        std::tuple_cat(contracts_, std::tuple<absl::decay_t<MoreContracts>...>(
+                                       more_contracts...))};
+  }
+
+  /*
+   * Returns a testing::AssertionResult that is the reduced result of the
+   * exception safety algorithm. The algorithm short circuits and returns
+   * AssertionFailure after the first contract callback returns an
+   * AssertionFailure. Otherwise, if all contract callbacks return an
+   * AssertionSuccess, the reduced result is AssertionSuccess.
+   *
+   * The passed-in testable operation will not be saved in a new tester instance
+   * nor will it modify/replace the existing tester instance. This is useful
+   * when each operation being tested is unique and does not need to be reused.
+   *
+   * Preconditions for tester.Test(const NewOperation& new_operation):
+   *
+   * - May only be called after at least one contract assertion callback and a
+   *   factory or initial value have been provided.
+   */
+  template <
+      typename NewOperation,
+      typename = EnableIfTestable<sizeof...(Contracts), Factory, NewOperation>>
+  testing::AssertionResult Test(const NewOperation& new_operation) const {
+    return TestImpl(new_operation, absl::index_sequence_for<Contracts...>());
+  }
+
+  /*
+   * Returns a testing::AssertionResult that is the reduced result of the
+   * exception safety algorithm. The algorithm short circuits and returns
+   * AssertionFailure after the first contract callback returns an
+   * AssertionFailure. Otherwise, if all contract callbacks return an
+   * AssertionSuccess, the reduced result is AssertionSuccess.
+   *
+   * Preconditions for tester.Test():
+   *
+   * - May only be called after at least one contract assertion callback, a
+   *   factory or initial value and a testable operation have been provided.
+   */
+  template <
+      typename LazyOperation = Operation,
+      typename = EnableIfTestable<sizeof...(Contracts), Factory, LazyOperation>>
+  testing::AssertionResult Test() const {
+    return Test(operation_);
+  }
+
+ private:
+  template <typename, typename, typename...>
+  friend class ExceptionSafetyTestBuilder;
+
+  friend ExceptionSafetyTestBuilder<> testing::MakeExceptionSafetyTester();
+
+  ExceptionSafetyTestBuilder() {}
+
+  ExceptionSafetyTestBuilder(const Factory& f, const Operation& o,
+                             const std::tuple<Contracts...>& i)
+      : factory_(f), operation_(o), contracts_(i) {}
+
+  template <typename SelectedOperation, size_t... Indices>
+  testing::AssertionResult TestImpl(SelectedOperation selected_operation,
+                                    absl::index_sequence<Indices...>) const {
+    return ExceptionSafetyTest<FactoryElementType<Factory>>(
+               factory_, selected_operation, std::get<Indices>(contracts_)...)
+        .Test();
+  }
+
+  Factory factory_;
+  Operation operation_;
+  std::tuple<Contracts...> contracts_;
+};
+
+}  // namespace exceptions_internal
+
+}  // namespace testing
+
+#endif  // ABSL_HAVE_EXCEPTIONS
+
+#endif  // ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/exception_testing.h b/third_party/abseil_cpp/absl/base/internal/exception_testing.h
new file mode 100644
index 000000000000..01b546557117
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/exception_testing.h
@@ -0,0 +1,42 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Testing utilities for ABSL types which throw exceptions.
+
+#ifndef ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
+#define ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+// ABSL_BASE_INTERNAL_EXPECT_FAIL tests either for a specified thrown exception
+// if exceptions are enabled, or for death with a specified text in the error
+// message.
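+//
+// For example (a sketch; `CheckedGet` is a hypothetical function that throws
+// std::out_of_range when exceptions are enabled and asserts otherwise):
+//
+//   ABSL_BASE_INTERNAL_EXPECT_FAIL(CheckedGet(v, 100), std::out_of_range,
+//                                  "index out of range");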
+#ifdef ABSL_HAVE_EXCEPTIONS
+
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
+  EXPECT_THROW(expr, exception_t)
+
+#elif defined(__ANDROID__)
+// Android asserts do not log anywhere that gtest can currently inspect.
+// So we expect exit, but cannot match the message.
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
+  EXPECT_DEATH(expr, ".*")
+#else
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
+  EXPECT_DEATH_IF_SUPPORTED(expr, text)
+
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/exponential_biased.cc b/third_party/abseil_cpp/absl/base/internal/exponential_biased.cc
new file mode 100644
index 000000000000..1b30c061e3bd
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/exponential_biased.cc
@@ -0,0 +1,93 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/exponential_biased.h"
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <atomic>
+#include <cmath>
+#include <limits>
+
+#include "absl/base/attributes.h"
+#include "absl/base/optimization.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// The algorithm generates a random number between 0 and 1 and applies the
+// inverse cumulative distribution function for an exponential. Specifically:
+// Let m be the inverse of the sample period, then the probability
+// density function is m*exp(-mx), so the CDF is
+// p = 1 - exp(-mx), so
+// q = 1 - p = exp(-mx)
+// log_e(q) = -mx
+// -log_e(q)/m = x
+// log_2(q) * (-log_e(2) * 1/m) = x
+// In the code, q is actually in the range 1 to 2**26, hence the -26 below
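+//
+// For example (a numeric sketch): with mean = 1000 the expression below
+// computes x = -ln(q / 2**26) * 1000, so q = 2**26 yields x = 0 while q = 1
+// yields x = 26 * ln(2) * 1000, roughly 18022, the largest skip this mean can
+// produce.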
+int64_t ExponentialBiased::GetSkipCount(int64_t mean) {
+  if (ABSL_PREDICT_FALSE(!initialized_)) {
+    Initialize();
+  }
+
+  uint64_t rng = NextRandom(rng_);
+  rng_ = rng;
+
+  // Take the top 26 bits as the random number
+  // (This plus the 1<<58 sampling bound give a max possible step of
+  // 5194297183973780480 bytes.)
+  // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
+  // under piii debug for some binaries.
+  double q = static_cast<uint32_t>(rng >> (kPrngNumBits - 26)) + 1.0;
+  // Put the computed p-value through the CDF of a geometric.
+  double interval = bias_ + (std::log2(q) - 26) * (-std::log(2.0) * mean);
+  // Very large values of interval overflow int64_t. To avoid that, we will
+  // cheat and clamp any huge values to (int64_t max)/2. This is a potential
+  // source of bias, but the mean would need to be such a large value that it's
+  // not likely to come up. For example, with a mean of 1e18, the probability of
+  // hitting this condition is about 1/1000. For a mean of 1e17, standard
+  // calculators claim that this event won't happen.
+  if (interval > static_cast<double>(std::numeric_limits<int64_t>::max() / 2)) {
+    // Assume huge values are bias neutral, retain bias for next call.
+    return std::numeric_limits<int64_t>::max() / 2;
+  }
+  double value = std::round(interval);
+  bias_ = interval - value;
+  return value;
+}
+
+int64_t ExponentialBiased::GetStride(int64_t mean) {
+  return GetSkipCount(mean - 1) + 1;
+}
+
+void ExponentialBiased::Initialize() {
+  // We don't get well distributed numbers from `this` so we call NextRandom() a
+  // bunch to mush the bits around. We use a global_rand to handle the case
+  // where the same thread (by memory address) gets created and destroyed
+  // repeatedly.
+  ABSL_CONST_INIT static std::atomic<uint32_t> global_rand(0);
+  uint64_t r = reinterpret_cast<uint64_t>(this) +
+               global_rand.fetch_add(1, std::memory_order_relaxed);
+  for (int i = 0; i < 20; ++i) {
+    r = NextRandom(r);
+  }
+  rng_ = r;
+  initialized_ = true;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/base/internal/exponential_biased.h b/third_party/abseil_cpp/absl/base/internal/exponential_biased.h
new file mode 100644
index 000000000000..94f79a3378db
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/exponential_biased.h
@@ -0,0 +1,130 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_EXPONENTIAL_BIASED_H_
+#define ABSL_BASE_INTERNAL_EXPONENTIAL_BIASED_H_
+
+#include <stdint.h>
+
+#include "absl/base/config.h"
+#include "absl/base/macros.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// ExponentialBiased provides a small and fast random number generator for a
+// rounded exponential distribution. This generator manages very little state,
+// and imposes no synchronization overhead. This makes it useful in specialized
+// scenarios requiring minimal overhead, such as stride-based periodic sampling.
+//
+// ExponentialBiased provides two closely related functions, GetSkipCount() and
+// GetStride(), both returning a rounded integer defining a number of events
+// required before some event with a given mean probability occurs.
+//
+// The distribution is useful for generating a random wait time or periodic
+// event with a given mean probability. If an action is supposed to happen on
+// average once every 'N' events, we can draw a random 'stride' that counts
+// down until the event should happen. For example, to sample one in every
+// 1000 'Frobber' calls, our code could look like this:
+//
+//   Frobber::Frobber() {
+//     stride_ = exponential_biased_.GetStride(1000);
+//   }
+//
+//   void Frobber::Frob(int arg) {
+//     if (--stride_ == 0) {
+//       SampleFrob(arg);
+//       stride_ = exponential_biased_.GetStride(1000);
+//     }
+//     ...
+//   }
+//
+// The rounding of the return value creates a bias, especially for smaller
+// means, where the fractional part is not evenly distributed. We correct
+// this bias by tracking the fraction we rounded up or down on each iteration,
+// effectively tracking the distance between the cumulative value and the
+// rounded cumulative value. For example, given a mean of 2:
+//
+//   raw = 1.63076, cumulative = 1.63076, rounded = 2, bias = -0.36923
+//   raw = 0.14624, cumulative = 1.77701, rounded = 2, bias =  0.14624
+//   raw = 4.93194, cumulative = 6.70895, rounded = 7, bias = -0.06805
+//   raw = 0.24206, cumulative = 6.95101, rounded = 7, bias =  0.24206
+//   etc...
+//
+// Adjusting for the rounding bias is straightforward:
+//
+//    double value = bias_ + exponential_distribution(mean)();
+//    double rounded_value = std::round(value);
+//    bias_ = value - rounded_value;
+//    return rounded_value;
+//
+// This class is thread-compatible.
+class ExponentialBiased {
+ public:
+  // The number of significant random bits produced by NextRandom.
+  static constexpr int kPrngNumBits = 48;
+
+  // `GetSkipCount()` returns the number of events to skip before some chosen
+  // event happens. For example, randomly tossing a coin, we will on average
+  // throw heads once before we get tails. We can simulate random coin tosses
+  // using GetSkipCount() as:
+  //
+  //   ExponentialBiased eb;
+  //   for (...) {
+  //     int number_of_heads_before_tail = eb.GetSkipCount(1);
+  //     for (int flips = 0; flips < number_of_heads_before_tail; ++flips) {
+  //       printf("head...");
+  //     }
+  //     printf("tail\n");
+  //   }
+  //
+  int64_t GetSkipCount(int64_t mean);
+
+  // GetStride() returns the number of events required for a specific event to
+  // happen. See the class comments for a usage example. `GetStride()` is
+  // equivalent to `GetSkipCount(mean - 1) + 1`. When to use `GetStride()` or
+  // `GetSkipCount()` depends mostly on what best fits the use case.
+  int64_t GetStride(int64_t mean);
+
+  // Computes a random number in the range [0, (1 << kPrngNumBits) - 1].
+  //
+  // This is public to enable testing.
+  static uint64_t NextRandom(uint64_t rnd);
+
+ private:
+  void Initialize();
+
+  uint64_t rng_{0};
+  double bias_{0};
+  bool initialized_{false};
+};
+
+// Returns the next prng value.
+// pRNG is: aX+b mod c with a = 0x5DEECE66D, b = 0xB, c = 1<<48
+// These are the parameters of the lrand48 family of generators.
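+// As a quick worked example, starting from rnd = 1:
+//   NextRandom(1) = (0x5DEECE66D * 1 + 0xB) & ((1 << 48) - 1) = 0x5DEECE678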
+inline uint64_t ExponentialBiased::NextRandom(uint64_t rnd) {
+  const uint64_t prng_mult = uint64_t{0x5DEECE66D};
+  const uint64_t prng_add = 0xB;
+  const uint64_t prng_mod_power = 48;
+  const uint64_t prng_mod_mask =
+      ~((~static_cast<uint64_t>(0)) << prng_mod_power);
+  return (prng_mult * rnd + prng_add) & prng_mod_mask;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_EXPONENTIAL_BIASED_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/exponential_biased_test.cc b/third_party/abseil_cpp/absl/base/internal/exponential_biased_test.cc
new file mode 100644
index 000000000000..90a482d2a9d5
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/exponential_biased_test.cc
@@ -0,0 +1,199 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/exponential_biased.h"
+
+#include <stddef.h>
+
+#include <cmath>
+#include <cstdint>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/strings/str_cat.h"
+
+using ::testing::Ge;
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+MATCHER_P2(IsBetween, a, b,
+           absl::StrCat(std::string(negation ? "isn't" : "is"), " between ", a,
+                        " and ", b)) {
+  return a <= arg && arg <= b;
+}
+
+// Tests of the quality of the random numbers generated
+// This uses the Anderson Darling test for uniformity.
+// See "Evaluating the Anderson-Darling Distribution" by Marsaglia
+// for details.
+
+// Shortcut version of ADinf(z), z>0 (from Marsaglia)
+// This returns the p-value for the Anderson-Darling statistic in
+// the limit as n -> infinity. For finite n, apply the error fix below.
+double AndersonDarlingInf(double z) {
+  if (z < 2) {
+    return exp(-1.2337141 / z) / sqrt(z) *
+           (2.00012 +
+            (0.247105 -
+             (0.0649821 - (0.0347962 - (0.011672 - 0.00168691 * z) * z) * z) *
+                 z) *
+                z);
+  }
+  return exp(
+      -exp(1.0776 -
+           (2.30695 -
+            (0.43424 - (0.082433 - (0.008056 - 0.0003146 * z) * z) * z) * z) *
+               z));
+}
+
+// Corrects the approximation error in AndersonDarlingInf for small values of n
+// Add this to AndersonDarlingInf to get a better approximation
+// (from Marsaglia)
+double AndersonDarlingErrFix(int n, double x) {
+  if (x > 0.8) {
+    return (-130.2137 +
+            (745.2337 -
+             (1705.091 - (1950.646 - (1116.360 - 255.7844 * x) * x) * x) * x) *
+                x) /
+           n;
+  }
+  double cutoff = 0.01265 + 0.1757 / n;
+  if (x < cutoff) {
+    double t = x / cutoff;
+    t = sqrt(t) * (1 - t) * (49 * t - 102);
+    return t * (0.0037 / (n * n) + 0.00078 / n + 0.00006) / n;
+  } else {
+    double t = (x - cutoff) / (0.8 - cutoff);
+    t = -0.00022633 +
+        (6.54034 - (14.6538 - (14.458 - (8.259 - 1.91864 * t) * t) * t) * t) *
+            t;
+    return t * (0.04213 + 0.01365 / n) / n;
+  }
+}
+
+// Returns the AndersonDarling p-value given n and the value of the statistic
+double AndersonDarlingPValue(int n, double z) {
+  double ad = AndersonDarlingInf(z);
+  double errfix = AndersonDarlingErrFix(n, ad);
+  return ad + errfix;
+}
+
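+// Computes the Anderson-Darling statistic for a sorted sample
+// u_0 <= ... <= u_{n-1} drawn from (0,1):
+//   A^2 = -n - (1/n) * sum_{i=0}^{n-1} (2i+1) * ln(u_i * (1 - u_{n-1-i}))
+// Note the precedence below: -n - 1 / n * ad_sum parses as
+// -n - (1/n) * ad_sum, matching the formula.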
+double AndersonDarlingStatistic(const std::vector<double>& random_sample) {
+  int n = random_sample.size();
+  double ad_sum = 0;
+  for (int i = 0; i < n; i++) {
+    ad_sum += (2 * i + 1) *
+              std::log(random_sample[i] * (1 - random_sample[n - 1 - i]));
+  }
+  double ad_statistic = -n - 1 / static_cast<double>(n) * ad_sum;
+  return ad_statistic;
+}
+
+// Tests if the array of doubles is uniformly distributed.
+// Returns the p-value of the Anderson Darling Statistic
+// for the given set of sorted random doubles
+// See "Evaluating the Anderson-Darling Distribution" by
+// Marsaglia and Marsaglia for details.
+double AndersonDarlingTest(const std::vector<double>& random_sample) {
+  double ad_statistic = AndersonDarlingStatistic(random_sample);
+  double p = AndersonDarlingPValue(random_sample.size(), ad_statistic);
+  return p;
+}
+
+TEST(ExponentialBiasedTest, CoinTossDemoWithGetSkipCount) {
+  ExponentialBiased eb;
+  for (int runs = 0; runs < 10; ++runs) {
+    for (int flips = eb.GetSkipCount(1); flips > 0; --flips) {
+      printf("head...");
+    }
+    printf("tail\n");
+  }
+  int heads = 0;
+  for (int i = 0; i < 10000000; i += 1 + eb.GetSkipCount(1)) {
+    ++heads;
+  }
+  printf("Heads = %d (%f%%)\n", heads, 100.0 * heads / 10000000);
+}
+
+TEST(ExponentialBiasedTest, SampleDemoWithStride) {
+  ExponentialBiased eb;
+  int stride = eb.GetStride(10);
+  int samples = 0;
+  for (int i = 0; i < 10000000; ++i) {
+    if (--stride == 0) {
+      ++samples;
+      stride = eb.GetStride(10);
+    }
+  }
+  printf("Samples = %d (%f%%)\n", samples, 100.0 * samples / 10000000);
+}
+
+// Testing that NextRandom generates uniform random numbers. Applies the
+// Anderson-Darling test for uniformity
+TEST(ExponentialBiasedTest, TestNextRandom) {
+  for (auto n : std::vector<int>({
+           10,  // Check short-range correlation
+           100, 1000,
+           10000  // Make sure there's no systematic error
+       })) {
+    uint64_t x = 1;
+    // This assumes that the prng returns 48-bit numbers
+    uint64_t max_prng_value = static_cast<uint64_t>(1) << 48;
+    // Initialize.
+    for (int i = 1; i <= 20; i++) {
+      x = ExponentialBiased::NextRandom(x);
+    }
+    std::vector<uint64_t> int_random_sample(n);
+    // Collect samples
+    for (int i = 0; i < n; i++) {
+      int_random_sample[i] = x;
+      x = ExponentialBiased::NextRandom(x);
+    }
+    // First sort them...
+    std::sort(int_random_sample.begin(), int_random_sample.end());
+    std::vector<double> random_sample(n);
+    // Convert them to uniform randoms (in the range [0,1))
+    for (int i = 0; i < n; i++) {
+      random_sample[i] =
+          static_cast<double>(int_random_sample[i]) / max_prng_value;
+    }
+    // Now compute the Anderson-Darling statistic
+    double ad_pvalue = AndersonDarlingTest(random_sample);
+    EXPECT_GT(std::min(ad_pvalue, 1 - ad_pvalue), 0.0001)
+        << "prng is not uniform: n = " << n << " p = " << ad_pvalue;
+  }
+}
+
+// The generator needs to be available as a thread_local and as a static
+// variable.
+TEST(ExponentialBiasedTest, InitializationModes) {
+  ABSL_CONST_INIT static ExponentialBiased eb_static;
+  EXPECT_THAT(eb_static.GetSkipCount(2), Ge(0));
+
+#if ABSL_HAVE_THREAD_LOCAL
+  thread_local ExponentialBiased eb_thread;
+  EXPECT_THAT(eb_thread.GetSkipCount(2), Ge(0));
+#endif
+
+  ExponentialBiased eb_stack;
+  EXPECT_THAT(eb_stack.GetSkipCount(2), Ge(0));
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/base/internal/fast_type_id.h b/third_party/abseil_cpp/absl/base/internal/fast_type_id.h
new file mode 100644
index 000000000000..3db59e83745b
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/fast_type_id.h
@@ -0,0 +1,48 @@
+//
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
+#define ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+template <typename Type>
+struct FastTypeTag {
+  constexpr static char dummy_var = 0;
+};
+
+template <typename Type>
+constexpr char FastTypeTag<Type>::dummy_var;
+
+// FastTypeId<Type>() evaluates at compile/link-time to a unique pointer for the
+// passed-in type. These are meant to be a good match for use as keys in maps
+// or for direct comparisons.
+using FastTypeIdType = const void*;
+
+template <typename Type>
+constexpr inline FastTypeIdType FastTypeId() {
+  return &FastTypeTag<Type>::dummy_var;
+}
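+
+// A minimal usage sketch (illustrative, mirroring the tests for this header):
+//   assert(FastTypeId<int>() == FastTypeId<int>());
+//   assert(FastTypeId<int>() != FastTypeId<long>());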
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/fast_type_id_test.cc b/third_party/abseil_cpp/absl/base/internal/fast_type_id_test.cc
new file mode 100644
index 000000000000..16f3c1458bdf
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/fast_type_id_test.cc
@@ -0,0 +1,123 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/fast_type_id.h"
+
+#include <cstdint>
+#include <map>
+#include <vector>
+
+#include "gtest/gtest.h"
+
+namespace {
+namespace bi = absl::base_internal;
+
+// NOLINTNEXTLINE
+#define PRIM_TYPES(A)   \
+  A(bool)               \
+  A(short)              \
+  A(unsigned short)     \
+  A(int)                \
+  A(unsigned int)       \
+  A(long)               \
+  A(unsigned long)      \
+  A(long long)          \
+  A(unsigned long long) \
+  A(float)              \
+  A(double)             \
+  A(long double)
+
+TEST(FastTypeIdTest, PrimitiveTypes) {
+  bi::FastTypeIdType type_ids[] = {
+#define A(T) bi::FastTypeId<T>(),
+    PRIM_TYPES(A)
+#undef A
+#define A(T) bi::FastTypeId<const T>(),
+    PRIM_TYPES(A)
+#undef A
+#define A(T) bi::FastTypeId<volatile T>(),
+    PRIM_TYPES(A)
+#undef A
+#define A(T) bi::FastTypeId<const volatile T>(),
+    PRIM_TYPES(A)
+#undef A
+  };
+  size_t total_type_ids = sizeof(type_ids) / sizeof(bi::FastTypeIdType);
+
+  for (size_t i = 0; i < total_type_ids; ++i) {
+    EXPECT_EQ(type_ids[i], type_ids[i]);
+    for (size_t j = 0; j < i; ++j) {
+      EXPECT_NE(type_ids[i], type_ids[j]);
+    }
+  }
+}
+
+#define FIXED_WIDTH_TYPES(A) \
+  A(int8_t)                  \
+  A(uint8_t)                 \
+  A(int16_t)                 \
+  A(uint16_t)                \
+  A(int32_t)                 \
+  A(uint32_t)                \
+  A(int64_t)                 \
+  A(uint64_t)
+
+TEST(FastTypeIdTest, FixedWidthTypes) {
+  bi::FastTypeIdType type_ids[] = {
+#define A(T) bi::FastTypeId<T>(),
+    FIXED_WIDTH_TYPES(A)
+#undef A
+#define A(T) bi::FastTypeId<const T>(),
+    FIXED_WIDTH_TYPES(A)
+#undef A
+#define A(T) bi::FastTypeId<volatile T>(),
+    FIXED_WIDTH_TYPES(A)
+#undef A
+#define A(T) bi::FastTypeId<const volatile T>(),
+    FIXED_WIDTH_TYPES(A)
+#undef A
+  };
+  size_t total_type_ids = sizeof(type_ids) / sizeof(bi::FastTypeIdType);
+
+  for (size_t i = 0; i < total_type_ids; ++i) {
+    EXPECT_EQ(type_ids[i], type_ids[i]);
+    for (size_t j = 0; j < i; ++j) {
+      EXPECT_NE(type_ids[i], type_ids[j]);
+    }
+  }
+}
+
+TEST(FastTypeIdTest, AliasTypes) {
+  using int_alias = int;
+  EXPECT_EQ(bi::FastTypeId<int_alias>(), bi::FastTypeId<int>());
+}
+
+TEST(FastTypeIdTest, TemplateSpecializations) {
+  EXPECT_NE(bi::FastTypeId<std::vector<int>>(),
+            bi::FastTypeId<std::vector<long>>());
+
+  EXPECT_NE((bi::FastTypeId<std::map<int, float>>()),
+            (bi::FastTypeId<std::map<int, double>>()));
+}
+
+struct Base {};
+struct Derived : Base {};
+struct PDerived : private Base {};
+
+TEST(FastTypeIdTest, Inheritance) {
+  EXPECT_NE(bi::FastTypeId<Base>(), bi::FastTypeId<Derived>());
+  EXPECT_NE(bi::FastTypeId<Base>(), bi::FastTypeId<PDerived>());
+}
+
+}  // namespace
diff --git a/third_party/abseil_cpp/absl/base/internal/hide_ptr.h b/third_party/abseil_cpp/absl/base/internal/hide_ptr.h
new file mode 100644
index 000000000000..1dba80909a9b
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/hide_ptr.h
@@ -0,0 +1,51 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_HIDE_PTR_H_
+#define ABSL_BASE_INTERNAL_HIDE_PTR_H_
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Arbitrary value with high bits set. Xor'ing with it is unlikely
+// to map one valid pointer to another valid pointer.
+constexpr uintptr_t HideMask() {
+  return (uintptr_t{0xF03A5F7BU} << (sizeof(uintptr_t) - 4) * 8) | 0xF03A5F7BU;
+}
+
+// Hide a pointer from the leak checker. For internal use only.
+// Differs from absl::IgnoreLeak(ptr) in that absl::IgnoreLeak(ptr) causes ptr
+// and all objects reachable from ptr to be ignored by the leak checker.
+template <class T>
+inline uintptr_t HidePtr(T* ptr) {
+  return reinterpret_cast<uintptr_t>(ptr) ^ HideMask();
+}
+
+// Return a pointer that has been hidden from the leak checker.
+// For internal use only.
+template <class T>
+inline T* UnhidePtr(uintptr_t hidden) {
+  return reinterpret_cast<T*>(hidden ^ HideMask());
+}
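+
+// A minimal usage sketch (illustrative only); the round trip works because
+// x ^ m ^ m == x:
+//   int* p = new int(42);
+//   uintptr_t hidden = HidePtr(p);        // not a pointer the checker scans
+//   int* same = UnhidePtr<int>(hidden);   // same == p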
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_HIDE_PTR_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/identity.h b/third_party/abseil_cpp/absl/base/internal/identity.h
new file mode 100644
index 000000000000..a3154ed7bc59
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/identity.h
@@ -0,0 +1,37 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_IDENTITY_H_
+#define ABSL_BASE_INTERNAL_IDENTITY_H_
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace internal {
+
+template <typename T>
+struct identity {
+  typedef T type;
+};
+
+template <typename T>
+using identity_t = typename identity<T>::type;
+
+}  // namespace internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_IDENTITY_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/inline_variable.h b/third_party/abseil_cpp/absl/base/internal/inline_variable.h
new file mode 100644
index 000000000000..130d8c2476dd
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/inline_variable.h
@@ -0,0 +1,107 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
+#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
+
+#include <type_traits>
+
+#include "absl/base/internal/identity.h"
+
+// File:
+//   This file defines a macro that allows the creation or emulation of C++17
+//   inline variables based on whether or not the feature is supported.
+
+////////////////////////////////////////////////////////////////////////////////
+// Macro: ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init)
+//
+// Description:
+//   Expands to the equivalent of an inline constexpr instance of the specified
+//   `type` and `name`, initialized to the value `init`. If the compiler being
+//   used is detected as supporting actual inline variables as a language
+//   feature, then the macro expands to an actual inline variable definition.
+//
+// Requires:
+//   `type` is a type that is usable in an extern variable declaration.
+//
+// Requires: `name` is a valid identifier
+//
+// Requires:
+//   `init` is an expression that can be used in the following definition:
+//     constexpr type name = init;
+//
+// Usage:
+//
+//   // Equivalent to: `inline constexpr size_t variant_npos = -1;`
+//   ABSL_INTERNAL_INLINE_CONSTEXPR(size_t, variant_npos, -1);
+//
+// Differences in implementation:
+//   For a direct, language-level inline variable, decltype(name) will be the
+//   type that was specified along with const qualification, whereas for
+//   emulated inline variables, decltype(name) may be different (in practice
+//   it will likely be a reference type).
+////////////////////////////////////////////////////////////////////////////////
+
+#ifdef __cpp_inline_variables
+
+// Clang's -Wmissing-variable-declarations option erroneously warned that
+// inline constexpr objects need to be pre-declared. This has now been fixed,
+// but we will need to support this workaround for people building with older
+// versions of clang.
+//
+// Bug: https://bugs.llvm.org/show_bug.cgi?id=35862
+//
+// Note:
+//   identity_t is used here so that the const and name are in the
+//   appropriate place for pointer types, reference types, function pointer
+//   types, etc.
+#if defined(__clang__)
+#define ABSL_INTERNAL_EXTERN_DECL(type, name) \
+  extern const ::absl::internal::identity_t<type> name;
+#else  // Otherwise, just define the macro to do nothing.
+#define ABSL_INTERNAL_EXTERN_DECL(type, name)
+#endif  // defined(__clang__)
+
+// See above comment at top of file for details.
+#define ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) \
+  ABSL_INTERNAL_EXTERN_DECL(type, name)                  \
+  inline constexpr ::absl::internal::identity_t<type> name = init
+
+#else
+
+// See above comment at top of file for details.
+//
+// Note:
+//   identity_t is used here so that the const and name are in the
+//   appropriate place for pointer types, reference types, function pointer
+//   types, etc.
+#define ABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init)                  \
+  template <class /*AbslInternalDummy*/ = void>                               \
+  struct AbslInternalInlineVariableHolder##name {                             \
+    static constexpr ::absl::internal::identity_t<var_type> kInstance = init; \
+  };                                                                          \
+                                                                              \
+  template <class AbslInternalDummy>                                          \
+  constexpr ::absl::internal::identity_t<var_type>                            \
+      AbslInternalInlineVariableHolder##name<AbslInternalDummy>::kInstance;   \
+                                                                              \
+  static constexpr const ::absl::internal::identity_t<var_type>&              \
+      name = /* NOLINT */                                                     \
+      AbslInternalInlineVariableHolder##name<>::kInstance;                    \
+  static_assert(sizeof(void (*)(decltype(name))) != 0,                        \
+                "Silence unused variable warnings.")
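+
+// For illustration, under the emulation above,
+// ABSL_INTERNAL_INLINE_CONSTEXPR(int, kFoo, 5) expands roughly to:
+//
+//   template <class = void>
+//   struct AbslInternalInlineVariableHolderkFoo {
+//     static constexpr int kInstance = 5;
+//   };
+//   static constexpr const int& kFoo =
+//       AbslInternalInlineVariableHolderkFoo<>::kInstance;
+//
+// Every translation unit that includes the header then refers to the single
+// kInstance object defined by the class template.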
+
+#endif  // __cpp_inline_variables
+
+#endif  // ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/inline_variable_testing.h b/third_party/abseil_cpp/absl/base/internal/inline_variable_testing.h
new file mode 100644
index 000000000000..3856b9f80f20
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/inline_variable_testing.h
@@ -0,0 +1,46 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INLINE_VARIABLE_TESTING_H_
+#define ABSL_BASE_INLINE_VARIABLE_TESTING_H_
+
+#include "absl/base/internal/inline_variable.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace inline_variable_testing_internal {
+
+struct Foo {
+  int value = 5;
+};
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, inline_variable_foo, {});
+ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, other_inline_variable_foo, {});
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(int, inline_variable_int, 5);
+ABSL_INTERNAL_INLINE_CONSTEXPR(int, other_inline_variable_int, 5);
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(void(*)(), inline_variable_fun_ptr, nullptr);
+
+const Foo& get_foo_a();
+const Foo& get_foo_b();
+
+const int& get_int_a();
+const int& get_int_b();
+
+}  // namespace inline_variable_testing_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INLINE_VARIABLE_TESTING_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/invoke.h b/third_party/abseil_cpp/absl/base/internal/invoke.h
new file mode 100644
index 000000000000..c4eceebd7cda
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/invoke.h
@@ -0,0 +1,187 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// absl::base_internal::Invoke(f, args...) is an implementation of
+// INVOKE(f, args...) from section [func.require] of the C++ standard.
+//
+// [func.require]
+// Define INVOKE (f, t1, t2, ..., tN) as follows:
+// 1. (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
+//    and t1 is an object of type T or a reference to an object of type T or a
+//    reference to an object of a type derived from T;
+// 2. ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
+//    class T and t1 is not one of the types described in the previous item;
+// 3. t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
+//    an object of type T or a reference to an object of type T or a reference
+//    to an object of a type derived from T;
+// 4. (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
+//    is not one of the types described in the previous item;
+// 5. f(t1, t2, ..., tN) in all other cases.
+//
+// The implementation is SFINAE-friendly: substitution failure within Invoke()
+// isn't an error.
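+//
+// For example (an illustrative sketch of a few of the clauses):
+//   struct S { int val; int Get() const { return val; } };
+//   S s{7};
+//   base_internal::Invoke(&S::Get, s);    // clause 1: (s.*f)()  -> 7
+//   base_internal::Invoke(&S::val, &s);   // clause 4: (*t1).*f  -> 7
+//   base_internal::Invoke([](int x) { return x + 1; }, 6);  // clause 5 -> 7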
+
+#ifndef ABSL_BASE_INTERNAL_INVOKE_H_
+#define ABSL_BASE_INTERNAL_INVOKE_H_
+
+#include <algorithm>
+#include <type_traits>
+#include <utility>
+
+#include "absl/meta/type_traits.h"
+
+// The following code is internal implementation detail.  See the comment at the
+// top of this file for the API documentation.
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// The five classes below each implement one of the clauses from the definition
+// of INVOKE. The inner class template Accept<F, Args...> checks whether the
+// clause is applicable; static function template Invoke(f, args...) does the
+// invocation.
+//
+// By separating the clause selection logic from invocation we make sure that
+// Invoke() does exactly what the standard says.
+
+template <typename Derived>
+struct StrippedAccept {
+  template <typename... Args>
+  struct Accept : Derived::template AcceptImpl<typename std::remove_cv<
+                      typename std::remove_reference<Args>::type>::type...> {};
+};
+
+// (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
+// and t1 is an object of type T or a reference to an object of type T or a
+// reference to an object of a type derived from T.
+struct MemFunAndRef : StrippedAccept<MemFunAndRef> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename MemFunType, typename C, typename Obj, typename... Args>
+  struct AcceptImpl<MemFunType C::*, Obj, Args...>
+      : std::integral_constant<bool, std::is_base_of<C, Obj>::value &&
+                                         absl::is_function<MemFunType>::value> {
+  };
+
+  template <typename MemFun, typename Obj, typename... Args>
+  static decltype((std::declval<Obj>().*
+                   std::declval<MemFun>())(std::declval<Args>()...))
+  Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) {
+    return (std::forward<Obj>(obj).*
+            std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
+  }
+};
+
+// ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
+// class T and t1 is not one of the types described in the previous item.
+struct MemFunAndPtr : StrippedAccept<MemFunAndPtr> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename MemFunType, typename C, typename Ptr, typename... Args>
+  struct AcceptImpl<MemFunType C::*, Ptr, Args...>
+      : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value &&
+                                         absl::is_function<MemFunType>::value> {
+  };
+
+  template <typename MemFun, typename Ptr, typename... Args>
+  static decltype(((*std::declval<Ptr>()).*
+                   std::declval<MemFun>())(std::declval<Args>()...))
+  Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... args) {
+    return ((*std::forward<Ptr>(ptr)).*
+            std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
+  }
+};
+
+// t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
+// an object of type T or a reference to an object of type T or a reference
+// to an object of a type derived from T.
+struct DataMemAndRef : StrippedAccept<DataMemAndRef> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename R, typename C, typename Obj>
+  struct AcceptImpl<R C::*, Obj>
+      : std::integral_constant<bool, std::is_base_of<C, Obj>::value &&
+                                         !absl::is_function<R>::value> {};
+
+  template <typename DataMem, typename Ref>
+  static decltype(std::declval<Ref>().*std::declval<DataMem>()) Invoke(
+      DataMem&& data_mem, Ref&& ref) {
+    return std::forward<Ref>(ref).*std::forward<DataMem>(data_mem);
+  }
+};
+
+// (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
+// is not one of the types described in the previous item.
+struct DataMemAndPtr : StrippedAccept<DataMemAndPtr> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename R, typename C, typename Ptr>
+  struct AcceptImpl<R C::*, Ptr>
+      : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value &&
+                                         !absl::is_function<R>::value> {};
+
+  template <typename DataMem, typename Ptr>
+  static decltype((*std::declval<Ptr>()).*std::declval<DataMem>()) Invoke(
+      DataMem&& data_mem, Ptr&& ptr) {
+    return (*std::forward<Ptr>(ptr)).*std::forward<DataMem>(data_mem);
+  }
+};
+
+// f(t1, t2, ..., tN) in all other cases.
+struct Callable {
+  // Callable doesn't have Accept because it's the last clause that gets picked
+  // when none of the previous clauses are applicable.
+  template <typename F, typename... Args>
+  static decltype(std::declval<F>()(std::declval<Args>()...)) Invoke(
+      F&& f, Args&&... args) {
+    return std::forward<F>(f)(std::forward<Args>(args)...);
+  }
+};
+
+// Resolves to the first matching clause.
+template <typename... Args>
+struct Invoker {
+  typedef typename std::conditional<
+      MemFunAndRef::Accept<Args...>::value, MemFunAndRef,
+      typename std::conditional<
+          MemFunAndPtr::Accept<Args...>::value, MemFunAndPtr,
+          typename std::conditional<
+              DataMemAndRef::Accept<Args...>::value, DataMemAndRef,
+              typename std::conditional<DataMemAndPtr::Accept<Args...>::value,
+                                        DataMemAndPtr, Callable>::type>::type>::
+          type>::type type;
+};
+
+// The result type of Invoke<F, Args...>.
+template <typename F, typename... Args>
+using InvokeT = decltype(Invoker<F, Args...>::type::Invoke(
+    std::declval<F>(), std::declval<Args>()...));
+
+// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section
+// [func.require] of the C++ standard.
+template <typename F, typename... Args>
+InvokeT<F, Args...> Invoke(F&& f, Args&&... args) {
+  return Invoker<F, Args...>::type::Invoke(std::forward<F>(f),
+                                           std::forward<Args>(args)...);
+}
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_INVOKE_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/low_level_alloc.cc b/third_party/abseil_cpp/absl/base/internal/low_level_alloc.cc
new file mode 100644
index 000000000000..1bf94438d6d0
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/low_level_alloc.cc
@@ -0,0 +1,620 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A low-level allocator that can be used by other low-level
+// modules without introducing dependency cycles.
+// This allocator is slow and wasteful of memory;
+// it should not be used when performance is key.
+
+#include "absl/base/internal/low_level_alloc.h"
+
+#include <type_traits>
+
+#include "absl/base/call_once.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/direct_mmap.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/macros.h"
+#include "absl/base/thread_annotations.h"
+
+// LowLevelAlloc requires that the platform support low-level
+// allocation of virtual memory. Platforms lacking this cannot use
+// LowLevelAlloc.
+#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
+
+#ifndef _WIN32
+#include <pthread.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#else
+#include <windows.h>
+#endif
+
+#include <string.h>
+#include <algorithm>
+#include <atomic>
+#include <cerrno>
+#include <cstddef>
+#include <new>                   // for placement-new
+
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+
+// MAP_ANONYMOUS
+#if defined(__APPLE__)
+// For mmap, Linux defines both MAP_ANONYMOUS and MAP_ANON and says MAP_ANON is
+// deprecated. On Darwin, MAP_ANON is all there is.
+#if !defined MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif  // !MAP_ANONYMOUS
+#endif  // __APPLE__
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// A first-fit allocator with amortized logarithmic free() time.
+
+// ---------------------------------------------------------------------------
+static const int kMaxLevel = 30;
+
+namespace {
+// This struct describes one allocated block, or one free block.
+struct AllocList {
+  struct Header {
+    // Size of entire region, including this field. Must be
+    // first. Valid in both allocated and unallocated blocks.
+    uintptr_t size;
+
+    // kMagicAllocated or kMagicUnallocated xor this.
+    uintptr_t magic;
+
+    // Pointer to parent arena.
+    LowLevelAlloc::Arena *arena;
+
+    // Aligns regions to 0 mod 2*sizeof(void*).
+    void *dummy_for_alignment;
+  } header;
+
+  // Next two fields: in unallocated blocks: freelist skiplist data
+  //                  in allocated blocks: overlaps with client data
+
+  // Levels in skiplist used.
+  int levels;
+
+  // Actually has levels elements. The AllocList node may not have room
+  // for all kMaxLevel entries. See max_fit in LLA_SkiplistLevels().
+  AllocList *next[kMaxLevel];
+};
+}  // namespace
+
+// ---------------------------------------------------------------------------
+// A trivial skiplist implementation.  This is used to keep the freelist
+// in address order while taking only logarithmic time per insert and delete.
+
+// An integer approximation of log2(size/base)
+// Requires size >= base.
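+// For example, IntLog2(40, 16) == 2: the loop halves 40 -> 20 -> 10,
+// incrementing the result once per halving while the value exceeds the base.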
+static int IntLog2(size_t size, size_t base) {
+  int result = 0;
+  for (size_t i = size; i > base; i >>= 1) {  // i == floor(size/2**result)
+    result++;
+  }
+  //    floor(size / 2**result) <= base < floor(size / 2**(result-1))
+  // =>     log2(size/(base+1)) <= result < 1+log2(size/base)
+  // => result ~= log2(size/base)
+  return result;
+}
+
+// Return a random integer n:  p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
+static int Random(uint32_t *state) {
+  uint32_t r = *state;
+  int result = 1;
+  while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
+    result++;
+  }
+  *state = r;
+  return result;
+}
+
+// Return a number of skiplist levels for a node of size bytes, where
+// base is the minimum node size.  Compute level=log2(size / base)+n
+// where n is 1 if random is false and otherwise a random number generated with
+// the standard distribution for a skiplist:  See Random() above.
+// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
+// term, so first-fit searches touch fewer nodes.  "level" is clipped so
+// level<kMaxLevel and next[level-1] will fit in the node.
+// 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel
+static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
+  // max_fit is the maximum number of levels that will fit in a node for the
+  // given size.   We can't return more than max_fit, no matter what the
+  // random number generator says.
+  size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
+  int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
+  if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
+  if (level > kMaxLevel - 1) level = kMaxLevel - 1;
+  ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
+  return level;
+}
+
+// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
+// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
+// points to the last element at level i in the AllocList less than *e, or is
+// head if no such element exists.
+static AllocList *LLA_SkiplistSearch(AllocList *head,
+                                     AllocList *e, AllocList **prev) {
+  AllocList *p = head;
+  for (int level = head->levels - 1; level >= 0; level--) {
+    for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
+    }
+    prev[level] = p;
+  }
+  return (head->levels == 0) ? nullptr : prev[0]->next[0];
+}
+
+// Insert element *e into AllocList *head.  Set prev[] as LLA_SkiplistSearch.
+// Requires that e->levels be previously set by the caller (using
+// LLA_SkiplistLevels())
+static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
+                               AllocList **prev) {
+  LLA_SkiplistSearch(head, e, prev);
+  for (; head->levels < e->levels; head->levels++) {  // extend prev pointers
+    prev[head->levels] = head;                        // to all *e's levels
+  }
+  for (int i = 0; i != e->levels; i++) {  // add element to list
+    e->next[i] = prev[i]->next[i];
+    prev[i]->next[i] = e;
+  }
+}
+
+// Remove element *e from AllocList *head.  Set prev[] as LLA_SkiplistSearch().
+// Requires that e->levels be previously set by the caller (using
+// LLA_SkiplistLevels())
+static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
+                               AllocList **prev) {
+  AllocList *found = LLA_SkiplistSearch(head, e, prev);
+  ABSL_RAW_CHECK(e == found, "element not in freelist");
+  for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
+    prev[i]->next[i] = e->next[i];
+  }
+  while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
+    head->levels--;   // reduce head->levels if level unused
+  }
+}
+
+// ---------------------------------------------------------------------------
+// Arena implementation
+
+// Metadata for an LowLevelAlloc arena instance.
+struct LowLevelAlloc::Arena {
+  // Constructs an arena with the given LowLevelAlloc flags.
+  explicit Arena(uint32_t flags_value);
+
+  base_internal::SpinLock mu;
+  // Head of free list, sorted by address
+  AllocList freelist ABSL_GUARDED_BY(mu);
+  // Count of allocated blocks
+  int32_t allocation_count ABSL_GUARDED_BY(mu);
+  // flags passed to NewArena
+  const uint32_t flags;
+  // Result of sysconf(_SC_PAGESIZE)
+  const size_t pagesize;
+  // Lowest power of two >= max(16, sizeof(AllocList))
+  const size_t round_up;
+  // Smallest allocation block size
+  const size_t min_size;
+  // PRNG state
+  uint32_t random ABSL_GUARDED_BY(mu);
+};
+
+namespace {
+// Static storage space for the lazily-constructed, default global arena
+// instances.  We require this space because the whole point of LowLevelAlloc
+// is to avoid relying on malloc/new.
+alignas(LowLevelAlloc::Arena) unsigned char default_arena_storage[sizeof(
+    LowLevelAlloc::Arena)];
+alignas(LowLevelAlloc::Arena) unsigned char unhooked_arena_storage[sizeof(
+    LowLevelAlloc::Arena)];
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+alignas(
+    LowLevelAlloc::Arena) unsigned char unhooked_async_sig_safe_arena_storage
+    [sizeof(LowLevelAlloc::Arena)];
+#endif
+
+// We must use LowLevelCallOnce here to construct the global arenas, rather than
+// using function-level statics, to avoid recursively invoking the scheduler.
+absl::once_flag create_globals_once;
+
+void CreateGlobalArenas() {
+  new (&default_arena_storage)
+      LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook);
+  new (&unhooked_arena_storage) LowLevelAlloc::Arena(0);
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+  new (&unhooked_async_sig_safe_arena_storage)
+      LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe);
+#endif
+}
+
+// Returns a global arena that does not call into hooks.  Used by NewArena()
+// when kCallMallocHook is not set.
+LowLevelAlloc::Arena* UnhookedArena() {
+  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+  return reinterpret_cast<LowLevelAlloc::Arena*>(&unhooked_arena_storage);
+}
+
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+// Returns a global arena that is async-signal safe.  Used by NewArena() when
+// kAsyncSignalSafe is set.
+LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {
+  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+  return reinterpret_cast<LowLevelAlloc::Arena *>(
+      &unhooked_async_sig_safe_arena_storage);
+}
+#endif
+
+}  // namespace
+
+// Returns the default arena, as used by LowLevelAlloc::Alloc() and friends.
+LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
+  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+  return reinterpret_cast<LowLevelAlloc::Arena*>(&default_arena_storage);
+}
+
+// magic numbers to identify allocated and unallocated blocks
+static const uintptr_t kMagicAllocated = 0x4c833e95U;
+static const uintptr_t kMagicUnallocated = ~kMagicAllocated;
+
+namespace {
+class ABSL_SCOPED_LOCKABLE ArenaLock {
+ public:
+  explicit ArenaLock(LowLevelAlloc::Arena *arena)
+      ABSL_EXCLUSIVE_LOCK_FUNCTION(arena->mu)
+      : arena_(arena) {
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+      sigset_t all;
+      sigfillset(&all);
+      mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0;
+    }
+#endif
+    arena_->mu.Lock();
+  }
+  ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
+  void Leave() ABSL_UNLOCK_FUNCTION() {
+    arena_->mu.Unlock();
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    if (mask_valid_) {
+      const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
+      if (err != 0) {
+        ABSL_RAW_LOG(FATAL, "pthread_sigmask failed: %d", err);
+      }
+    }
+#endif
+    left_ = true;
+  }
+
+ private:
+  bool left_ = false;  // whether left region
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+  bool mask_valid_ = false;
+  sigset_t mask_;  // old mask of blocked signals
+#endif
+  LowLevelAlloc::Arena *arena_;
+  ArenaLock(const ArenaLock &) = delete;
+  ArenaLock &operator=(const ArenaLock &) = delete;
+};
+}  // namespace
+
+// create an appropriate magic number for an object at "ptr"
+// "magic" should be kMagicAllocated or kMagicUnallocated
+inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) {
+  return magic ^ reinterpret_cast<uintptr_t>(ptr);
+}
+
+namespace {
+size_t GetPageSize() {
+#ifdef _WIN32
+  SYSTEM_INFO system_info;
+  GetSystemInfo(&system_info);
+  return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
+#elif defined(__wasm__) || defined(__asmjs__)
+  return getpagesize();
+#else
+  return sysconf(_SC_PAGESIZE);
+#endif
+}
+
+size_t RoundedUpBlockSize() {
+  // Round up block sizes to a power of two close to the header size.
+  size_t round_up = 16;
+  while (round_up < sizeof(AllocList::Header)) {
+    round_up += round_up;
+  }
+  return round_up;
+}
+
+}  // namespace
+
+LowLevelAlloc::Arena::Arena(uint32_t flags_value)
+    : mu(base_internal::SCHEDULE_KERNEL_ONLY),
+      allocation_count(0),
+      flags(flags_value),
+      pagesize(GetPageSize()),
+      round_up(RoundedUpBlockSize()),
+      min_size(2 * round_up),
+      random(0) {
+  freelist.header.size = 0;
+  freelist.header.magic =
+      Magic(kMagicUnallocated, &freelist.header);
+  freelist.header.arena = this;
+  freelist.levels = 0;
+  memset(freelist.next, 0, sizeof(freelist.next));
+}
+
+// L < meta_data_arena->mu
+LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32_t flags) {
+  Arena *meta_data_arena = DefaultArena();
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+  if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+    meta_data_arena = UnhookedAsyncSigSafeArena();
+  } else  // NOLINT(readability/braces)
+#endif
+      if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
+    meta_data_arena = UnhookedArena();
+  }
+  Arena *result =
+    new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(flags);
+  return result;
+}
+
+// L < arena->mu, L < arena->arena->mu
+bool LowLevelAlloc::DeleteArena(Arena *arena) {
+  ABSL_RAW_CHECK(
+      arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(),
+      "may not delete default arena");
+  ArenaLock section(arena);
+  if (arena->allocation_count != 0) {
+    section.Leave();
+    return false;
+  }
+  while (arena->freelist.next[0] != nullptr) {
+    AllocList *region = arena->freelist.next[0];
+    size_t size = region->header.size;
+    arena->freelist.next[0] = region->next[0];
+    ABSL_RAW_CHECK(
+        region->header.magic == Magic(kMagicUnallocated, &region->header),
+        "bad magic number in DeleteArena()");
+    ABSL_RAW_CHECK(region->header.arena == arena,
+                   "bad arena pointer in DeleteArena()");
+    ABSL_RAW_CHECK(size % arena->pagesize == 0,
+                   "empty arena has non-page-aligned block size");
+    ABSL_RAW_CHECK(reinterpret_cast<uintptr_t>(region) % arena->pagesize == 0,
+                   "empty arena has non-page-aligned block");
+    int munmap_result;
+#ifdef _WIN32
+    munmap_result = VirtualFree(region, 0, MEM_RELEASE);
+    ABSL_RAW_CHECK(munmap_result != 0,
+                   "LowLevelAlloc::DeleteArena: VirtualFree failed");
+#else
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
+      munmap_result = munmap(region, size);
+    } else {
+      munmap_result = base_internal::DirectMunmap(region, size);
+    }
+#else
+    munmap_result = munmap(region, size);
+#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    if (munmap_result != 0) {
+      ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d",
+                   errno);
+    }
+#endif  // _WIN32
+  }
+  section.Leave();
+  arena->~Arena();
+  Free(arena);
+  return true;
+}
+
+// ---------------------------------------------------------------------------
+
+// Addition, checking for overflow.  The intent is to die if an external client
+// manages to push through a request that would cause arithmetic to fail.
+static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) {
+  uintptr_t sum = a + b;
+  ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow");
+  return sum;
+}
+
+// Return value rounded up to next multiple of align.
+// align must be a power of two.
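+// For example, RoundUp(37, 16) == 48 and RoundUp(48, 16) == 48.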
+static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {
+  return CheckedAdd(addr, align - 1) & ~(align - 1);
+}
+
+// Equivalent to "return prev->next[i]" but with sanity checking
+// that the freelist is in the correct order, that it
+// consists of regions marked "unallocated", and that no two regions
+// are adjacent in memory (they should have been coalesced).
+// L >= arena->mu
+static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
+  ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()");
+  AllocList *next = prev->next[i];
+  if (next != nullptr) {
+    ABSL_RAW_CHECK(
+        next->header.magic == Magic(kMagicUnallocated, &next->header),
+        "bad magic number in Next()");
+    ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()");
+    if (prev != &arena->freelist) {
+      ABSL_RAW_CHECK(prev < next, "unordered freelist");
+      ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
+                         reinterpret_cast<char *>(next),
+                     "malformed freelist");
+    }
+  }
+  return next;
+}
+
+// Coalesce list item "a" with its successor if they are adjacent.
+static void Coalesce(AllocList *a) {
+  AllocList *n = a->next[0];
+  if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size ==
+                          reinterpret_cast<char *>(n)) {
+    LowLevelAlloc::Arena *arena = a->header.arena;
+    a->header.size += n->header.size;
+    n->header.magic = 0;
+    n->header.arena = nullptr;
+    AllocList *prev[kMaxLevel];
+    LLA_SkiplistDelete(&arena->freelist, n, prev);
+    LLA_SkiplistDelete(&arena->freelist, a, prev);
+    a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size,
+                                   &arena->random);
+    LLA_SkiplistInsert(&arena->freelist, a, prev);
+  }
+}
+
+// Adds block at location "v" to the free list
+// L >= arena->mu
+static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
+  AllocList *f = reinterpret_cast<AllocList *>(
+                        reinterpret_cast<char *>(v) - sizeof (f->header));
+  ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
+                 "bad magic number in AddToFreelist()");
+  ABSL_RAW_CHECK(f->header.arena == arena,
+                 "bad arena pointer in AddToFreelist()");
+  f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size,
+                                 &arena->random);
+  AllocList *prev[kMaxLevel];
+  LLA_SkiplistInsert(&arena->freelist, f, prev);
+  f->header.magic = Magic(kMagicUnallocated, &f->header);
+  Coalesce(f);                  // maybe coalesce with successor
+  Coalesce(prev[0]);            // maybe coalesce with predecessor
+}
+
+// Frees storage allocated by LowLevelAlloc::Alloc().
+// L < arena->mu
+void LowLevelAlloc::Free(void *v) {
+  if (v != nullptr) {
+    AllocList *f = reinterpret_cast<AllocList *>(
+                        reinterpret_cast<char *>(v) - sizeof (f->header));
+    LowLevelAlloc::Arena *arena = f->header.arena;
+    ArenaLock section(arena);
+    AddToFreelist(v, arena);
+    ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
+    arena->allocation_count--;
+    section.Leave();
+  }
+}
+
+// Allocates and returns a block of at least request bytes, to be freed with
+// Free().
+// L < arena->mu
+static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
+  void *result = nullptr;
+  if (request != 0) {
+    AllocList *s;       // will point to region that satisfies request
+    ArenaLock section(arena);
+    // round up with header
+    size_t req_rnd = RoundUp(CheckedAdd(request, sizeof (s->header)),
+                             arena->round_up);
+    for (;;) {      // loop until we find a suitable region
+      // find the minimum levels that a block of this size must have
+      int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
+      if (i < arena->freelist.levels) {   // potential blocks exist
+        AllocList *before = &arena->freelist;  // predecessor of s
+        while ((s = Next(i, before, arena)) != nullptr &&
+               s->header.size < req_rnd) {
+          before = s;
+        }
+        if (s != nullptr) {       // we found a region
+          break;
+        }
+      }
+      // we unlock before mmap() both because mmap() may call a callback hook,
+      // and because it may be slow.
+      arena->mu.Unlock();
+      // mmap generous chunks (16 pages; 64K with 4K pages) to decrease
+      // the chances/impact of fragmentation:
+      size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
+      void *new_pages;
+#ifdef _WIN32
+      new_pages = VirtualAlloc(0, new_pages_size,
+                               MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+      ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
+#else
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+        new_pages = base_internal::DirectMmap(nullptr, new_pages_size,
+            PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+      } else {
+        new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
+                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+      }
+#else
+      new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
+                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+      if (new_pages == MAP_FAILED) {
+        ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
+      }
+
+#endif  // _WIN32
+      arena->mu.Lock();
+      s = reinterpret_cast<AllocList *>(new_pages);
+      s->header.size = new_pages_size;
+      // Pretend the block is allocated; call AddToFreelist() to free it.
+      s->header.magic = Magic(kMagicAllocated, &s->header);
+      s->header.arena = arena;
+      AddToFreelist(&s->levels, arena);  // insert new region into free list
+    }
+    AllocList *prev[kMaxLevel];
+    LLA_SkiplistDelete(&arena->freelist, s, prev);    // remove from free list
+    // s points to the first free region that's big enough
+    if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
+      // big enough to split
+      AllocList *n = reinterpret_cast<AllocList *>
+                        (req_rnd + reinterpret_cast<char *>(s));
+      n->header.size = s->header.size - req_rnd;
+      n->header.magic = Magic(kMagicAllocated, &n->header);
+      n->header.arena = arena;
+      s->header.size = req_rnd;
+      AddToFreelist(&n->levels, arena);
+    }
+    s->header.magic = Magic(kMagicAllocated, &s->header);
+    ABSL_RAW_CHECK(s->header.arena == arena, "");
+    arena->allocation_count++;
+    section.Leave();
+    result = &s->levels;
+  }
+  ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
+  return result;
+}
+
+void *LowLevelAlloc::Alloc(size_t request) {
+  void *result = DoAllocWithArena(request, DefaultArena());
+  return result;
+}
+
+void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
+  ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
+  void *result = DoAllocWithArena(request, arena);
+  return result;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING
diff --git a/third_party/abseil_cpp/absl/base/internal/low_level_alloc.h b/third_party/abseil_cpp/absl/base/internal/low_level_alloc.h
new file mode 100644
index 000000000000..db91951c825f
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/low_level_alloc.h
@@ -0,0 +1,126 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
+#define ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
+
+// A simple thread-safe memory allocator that does not depend on
+// mutexes or thread-specific data.  It is intended to be used
+// sparingly, and only when malloc() would introduce an unwanted
+// dependency, such as inside the heap-checker or the Mutex
+// implementation.
+
+// IWYU pragma: private, include "base/low_level_alloc.h"
+
+#include <sys/types.h>
+
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+// LowLevelAlloc requires that the platform support low-level
+// allocation of virtual memory. Platforms lacking this cannot use
+// LowLevelAlloc.
+#ifdef ABSL_LOW_LEVEL_ALLOC_MISSING
+#error ABSL_LOW_LEVEL_ALLOC_MISSING cannot be directly set
+#elif !defined(ABSL_HAVE_MMAP) && !defined(_WIN32)
+#define ABSL_LOW_LEVEL_ALLOC_MISSING 1
+#endif
+
+// Using LowLevelAlloc with kAsyncSignalSafe isn't supported on Windows or
+// asm.js / WebAssembly.
+// See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html
+// for more information.
+#ifdef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+#error ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING cannot be directly set
+#elif defined(_WIN32) || defined(__asmjs__) || defined(__wasm__)
+#define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1
+#endif
+
+#include <cstddef>
+
+#include "absl/base/port.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+class LowLevelAlloc {
+ public:
+  struct Arena;       // an arena from which memory may be allocated
+
+  // Returns a pointer to a block of at least "request" bytes
+  // that have been newly allocated from the specific arena.
+  // For Alloc() calls, the DefaultArena() is used.
+  // Returns 0 if passed request==0.
+  // Does not return 0 under other circumstances; it crashes if memory
+  // is not available.
+  static void *Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+  static void *AllocWithArena(size_t request, Arena *arena)
+      ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+  // Deallocates a region of memory that was previously allocated with
+  // Alloc().   Does nothing if passed 0.   "s" must be either 0,
+  // or must have been returned from a call to Alloc() and not yet passed to
+  // Free() since that call to Alloc().  The space is returned to the arena
+  // from which it was allocated.
+  static void Free(void *s) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+  // The ABSL_ATTRIBUTE_SECTION(malloc_hook) annotations on Alloc* and Free
+  // put all callers of MallocHook::Invoke* in this module into a special
+  // section, so that MallocHook::GetCallerStackTrace can function accurately.
+
+  // Create a new arena.
+  // The root metadata for the new arena is allocated in the
+  // meta_data_arena; the DefaultArena() can be passed for meta_data_arena.
+  // These values may be OR'd into flags:
+  enum {
+    // Report calls to Alloc() and Free() via the MallocHook interface.
+    // Set in the DefaultArena.
+    kCallMallocHook = 0x0001,
+
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    // Make calls to Alloc(), Free() be async-signal-safe. Not set in
+    // DefaultArena(). Not supported on all platforms.
+    kAsyncSignalSafe = 0x0002,
+#endif
+  };
+  // Construct a new arena.  The allocation of the underlying metadata honors
+  // the provided flags.  For example, the call NewArena(kAsyncSignalSafe)
+  // is itself async-signal-safe and generates an arena that provides
+  // async-signal-safe Alloc/Free.
+  static Arena *NewArena(int32_t flags);
+
+  // Destroys an arena allocated by NewArena and returns true,
+  // provided no allocated blocks remain in the arena.
+  // If allocated blocks remain in the arena, does nothing and
+  // returns false.
+  // It is illegal to attempt to destroy the DefaultArena().
+  static bool DeleteArena(Arena *arena);
+
+  // The default arena that always exists.
+  static Arena *DefaultArena();
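+
+  // A minimal usage sketch (illustrative only; the size and flags here are
+  // arbitrary):
+  //
+  //   LowLevelAlloc::Arena *arena = LowLevelAlloc::NewArena(0);
+  //   void *p = LowLevelAlloc::AllocWithArena(64, arena);
+  //   LowLevelAlloc::Free(p);              // returns the block to its arena
+  //   LowLevelAlloc::DeleteArena(arena);   // succeeds; no blocks remain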
+
+ private:
+  LowLevelAlloc();      // no instances
+};
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/low_level_alloc_test.cc b/third_party/abseil_cpp/absl/base/internal/low_level_alloc_test.cc
new file mode 100644
index 000000000000..2f2eaffa032e
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/low_level_alloc_test.cc
@@ -0,0 +1,162 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/low_level_alloc.h"
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <thread>  // NOLINT(build/c++11)
+#include <unordered_map>
+#include <utility>
+
+#include "absl/container/node_hash_map.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+// This test doesn't use gtest since it needs to test that everything
+// works before main().
+#define TEST_ASSERT(x)                                           \
+  if (!(x)) {                                                    \
+    printf("TEST_ASSERT(%s) FAILED ON LINE %d\n", #x, __LINE__); \
+    abort();                                                     \
+  }
+
+// a block of memory obtained from the allocator
+struct BlockDesc {
+  char *ptr;      // pointer to memory
+  int len;        // number of bytes
+  int fill;       // filled with data starting with this
+};
+
+// Check that the pattern placed in the block d
+// by RandomizeBlockDesc is still there.
+static void CheckBlockDesc(const BlockDesc &d) {
+  for (int i = 0; i != d.len; i++) {
+    TEST_ASSERT((d.ptr[i] & 0xff) == ((d.fill + i) & 0xff));
+  }
+}
+
+// Fill the block "*d" with a pattern
+// starting with a random byte.
+static void RandomizeBlockDesc(BlockDesc *d) {
+  d->fill = rand() & 0xff;
+  for (int i = 0; i != d->len; i++) {
+    d->ptr[i] = (d->fill + i) & 0xff;
+  }
+}
+
+// Used to indicate to the malloc hooks that
+// this call is from LowLevelAlloc.
+static bool using_low_level_alloc = false;
+
+// n times, toss a coin, and based on the outcome
+// either allocate a new block or deallocate an old block.
+// New blocks are placed in a std::unordered_map with a random key
+// and initialized with RandomizeBlockDesc().
+// If keys conflict, the older block is freed.
+// Old blocks are always checked with CheckBlockDesc()
+// before being freed.  At the end of the run,
+// all remaining allocated blocks are freed.
+// If use_new_arena is true, use a fresh arena, and then delete it.
+// If call_malloc_hook is true and use_new_arena is true,
+// allocations and deallocations are reported via the MallocHook
+// interface.
+static void Test(bool use_new_arena, bool call_malloc_hook, int n) {
+  typedef absl::node_hash_map<int, BlockDesc> AllocMap;
+  AllocMap allocated;
+  AllocMap::iterator it;
+  BlockDesc block_desc;
+  int rnd;
+  LowLevelAlloc::Arena *arena = nullptr;
+  if (use_new_arena) {
+    int32_t flags = call_malloc_hook ? LowLevelAlloc::kCallMallocHook : 0;
+    arena = LowLevelAlloc::NewArena(flags);
+  }
+  for (int i = 0; i != n; i++) {
+    if (i != 0 && i % 10000 == 0) {
+      printf(".");
+      fflush(stdout);
+    }
+
+    switch (rand() & 1) {      // toss a coin
+    case 0:     // coin came up heads: add a block
+      using_low_level_alloc = true;
+      block_desc.len = rand() & 0x3fff;
+      block_desc.ptr =
+        reinterpret_cast<char *>(
+                        arena == nullptr
+                        ? LowLevelAlloc::Alloc(block_desc.len)
+                        : LowLevelAlloc::AllocWithArena(block_desc.len, arena));
+      using_low_level_alloc = false;
+      RandomizeBlockDesc(&block_desc);
+      rnd = rand();
+      it = allocated.find(rnd);
+      if (it != allocated.end()) {
+        CheckBlockDesc(it->second);
+        using_low_level_alloc = true;
+        LowLevelAlloc::Free(it->second.ptr);
+        using_low_level_alloc = false;
+        it->second = block_desc;
+      } else {
+        allocated[rnd] = block_desc;
+      }
+      break;
+    case 1:     // coin came up tails: remove a block
+      it = allocated.begin();
+      if (it != allocated.end()) {
+        CheckBlockDesc(it->second);
+        using_low_level_alloc = true;
+        LowLevelAlloc::Free(it->second.ptr);
+        using_low_level_alloc = false;
+        allocated.erase(it);
+      }
+      break;
+    }
+  }
+  // remove all remaining blocks
+  while ((it = allocated.begin()) != allocated.end()) {
+    CheckBlockDesc(it->second);
+    using_low_level_alloc = true;
+    LowLevelAlloc::Free(it->second.ptr);
+    using_low_level_alloc = false;
+    allocated.erase(it);
+  }
+  if (use_new_arena) {
+    TEST_ASSERT(LowLevelAlloc::DeleteArena(arena));
+  }
+}
+
+// LowLevelAlloc is designed to be safe to call before main().
+static struct BeforeMain {
+  BeforeMain() {
+    Test(false, false, 50000);
+    Test(true, false, 50000);
+    Test(true, true, 50000);
+  }
+} before_main;
+
+}  // namespace
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+int main(int argc, char *argv[]) {
+  // The actual test runs in the global constructor of `before_main`.
+  printf("PASS\n");
+  return 0;
+}
diff --git a/third_party/abseil_cpp/absl/base/internal/low_level_scheduling.h b/third_party/abseil_cpp/absl/base/internal/low_level_scheduling.h
new file mode 100644
index 000000000000..961cc981b864
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/low_level_scheduling.h
@@ -0,0 +1,107 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Core interfaces and definitions used by low-level interfaces such as
+// SpinLock.
+
+#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+#define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/macros.h"
+
+// The following two declarations exist so SchedulingGuard may friend them with
+// the appropriate language linkage.  These callbacks allow libc internals, such
+// as function-level statics, to schedule cooperatively when locking.
+extern "C" bool __google_disable_rescheduling(void);
+extern "C" void __google_enable_rescheduling(bool disable_result);
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+class SchedulingHelper;  // To allow use of SchedulingGuard.
+class SpinLock;          // To allow use of SchedulingGuard.
+
+// SchedulingGuard
+// Provides guard semantics that may be used to disable cooperative rescheduling
+// of the calling thread within specific program blocks.  This is used to
+// protect resources (e.g. low-level SpinLocks or Domain code) that cooperative
+// scheduling depends on.
+//
+// Domain implementations capable of rescheduling in reaction to involuntary
+// kernel thread actions (e.g. blocking due to a pagefault or syscall) must
+// guarantee that an annotated thread is not allowed to (cooperatively)
+// reschedule until the annotated region is complete.
+//
+// It is an error to attempt to use a cooperatively scheduled resource (e.g.
+// Mutex) within a rescheduling-disabled region.
+//
+// All methods are async-signal safe.
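+//
+// A guarded region looks roughly as follows (illustrative only; ScopedDisable
+// is private and reachable only from the friend classes below):
+//
+//   SchedulingGuard::ScopedDisable disable_rescheduling;
+//   // ... touch the resource that cooperative scheduling depends on ...
+//   // rescheduling is re-enabled when disable_rescheduling is destroyed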
+class SchedulingGuard {
+ public:
+  // Returns true iff the calling thread may be cooperatively rescheduled.
+  static bool ReschedulingIsAllowed();
+
+ private:
+  // Disable cooperative rescheduling of the calling thread.  It may still
+  // initiate scheduling operations (e.g. wake-ups), however, it may not itself
+  // reschedule.  Nestable.  The returned result is opaque, clients should not
+  // attempt to interpret it.
+  // REQUIRES: Result must be passed to a pairing EnableScheduling().
+  static bool DisableRescheduling();
+
+  // Marks the end of a rescheduling disabled region, previously started by
+  // DisableRescheduling().
+  // REQUIRES: Pairs with innermost call (and result) of DisableRescheduling().
+  static void EnableRescheduling(bool disable_result);
+
+  // A scoped helper for {Disable, Enable}Rescheduling().
+  // REQUIRES: destructor must run in same thread as constructor.
+  struct ScopedDisable {
+    ScopedDisable() { disabled = SchedulingGuard::DisableRescheduling(); }
+    ~ScopedDisable() { SchedulingGuard::EnableRescheduling(disabled); }
+
+    bool disabled;
+  };
+
+  // Access to SchedulingGuard is explicitly white-listed.
+  friend class SchedulingHelper;
+  friend class SpinLock;
+
+  SchedulingGuard(const SchedulingGuard&) = delete;
+  SchedulingGuard& operator=(const SchedulingGuard&) = delete;
+};
+
+//------------------------------------------------------------------------------
+// End of public interfaces.
+//------------------------------------------------------------------------------
+
+inline bool SchedulingGuard::ReschedulingIsAllowed() {
+  return false;
+}
+
+inline bool SchedulingGuard::DisableRescheduling() {
+  return false;
+}
+
+inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) {
+  return;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/per_thread_tls.h b/third_party/abseil_cpp/absl/base/internal/per_thread_tls.h
new file mode 100644
index 000000000000..cf5e97a0470e
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/per_thread_tls.h
@@ -0,0 +1,52 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
+#define ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
+
+// This header defines two macros:
+//
+// If the platform supports thread-local storage:
+//
+// * ABSL_PER_THREAD_TLS_KEYWORD is the C keyword needed to declare a
+//   thread-local variable
+// * ABSL_PER_THREAD_TLS is 1
+//
+// Otherwise:
+//
+// * ABSL_PER_THREAD_TLS_KEYWORD is empty
+// * ABSL_PER_THREAD_TLS is 0
+//
+// Microsoft C supports thread-local storage.
+// GCC supports it if the appropriate version of glibc is available,
+// which the programmer can indicate by defining ABSL_HAVE_TLS.
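+//
+// Usage sketch (illustrative; the variable name is hypothetical):
+//
+//   #if ABSL_PER_THREAD_TLS
+//   ABSL_PER_THREAD_TLS_KEYWORD int per_thread_counter = 0;
+//   #endif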
+
+#include "absl/base/port.h"  // For ABSL_HAVE_TLS
+
+#if defined(ABSL_PER_THREAD_TLS)
+#error ABSL_PER_THREAD_TLS cannot be directly set
+#elif defined(ABSL_PER_THREAD_TLS_KEYWORD)
+#error ABSL_PER_THREAD_TLS_KEYWORD cannot be directly set
+#elif defined(ABSL_HAVE_TLS)
+#define ABSL_PER_THREAD_TLS_KEYWORD __thread
+#define ABSL_PER_THREAD_TLS 1
+#elif defined(_MSC_VER)
+#define ABSL_PER_THREAD_TLS_KEYWORD __declspec(thread)
+#define ABSL_PER_THREAD_TLS 1
+#else
+#define ABSL_PER_THREAD_TLS_KEYWORD
+#define ABSL_PER_THREAD_TLS 0
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/periodic_sampler.cc b/third_party/abseil_cpp/absl/base/internal/periodic_sampler.cc
new file mode 100644
index 000000000000..520dabbaa006
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/periodic_sampler.cc
@@ -0,0 +1,53 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/periodic_sampler.h"
+
+#include <atomic>
+
+#include "absl/base/internal/exponential_biased.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+int64_t PeriodicSamplerBase::GetExponentialBiased(int period) noexcept {
+  return rng_.GetStride(period);
+}
+
+bool PeriodicSamplerBase::SubtleConfirmSample() noexcept {
+  int current_period = period();
+
+  // Deal with the period cases 0 (always off) and 1 (always on).
+  if (ABSL_PREDICT_FALSE(current_period < 2)) {
+    stride_ = 0;
+    return current_period == 1;
+  }
+
+  // Check if this is the first call to Sample()
+  if (ABSL_PREDICT_FALSE(stride_ == 1)) {
+    stride_ = static_cast<uint64_t>(-GetExponentialBiased(current_period));
+    if (static_cast<int64_t>(stride_) < -1) {
+      ++stride_;
+      return false;
+    }
+  }
+
+  stride_ = static_cast<uint64_t>(-GetExponentialBiased(current_period));
+  return true;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/base/internal/periodic_sampler.h b/third_party/abseil_cpp/absl/base/internal/periodic_sampler.h
new file mode 100644
index 000000000000..f8a86796b117
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/periodic_sampler.h
@@ -0,0 +1,211 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PERIODIC_SAMPLER_H_
+#define ABSL_BASE_INTERNAL_PERIODIC_SAMPLER_H_
+
+#include <stdint.h>
+
+#include <atomic>
+
+#include "absl/base/internal/exponential_biased.h"
+#include "absl/base/optimization.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// PeriodicSamplerBase provides the basic period sampler implementation.
+//
+// This is the base class for the templated PeriodicSampler class, which holds
+// a global std::atomic value identified by a user-defined tag, such that
+// each specific PeriodicSampler implementation holds its own global period.
+//
+// PeriodicSamplerBase is thread-compatible except where stated otherwise.
+class PeriodicSamplerBase {
+ public:
+  // PeriodicSamplerBase is trivial / copyable / movable / destructible.
+  PeriodicSamplerBase() = default;
+  PeriodicSamplerBase(PeriodicSamplerBase&&) = default;
+  PeriodicSamplerBase(const PeriodicSamplerBase&) = default;
+
+  // Returns true roughly once every `period` calls. This is established by a
+  // randomly picked `stride` that is counted down on each call to `Sample`.
+  // This stride is picked such that the probability of `Sample()` returning
+  // true is 1 in `period`.
+  inline bool Sample() noexcept;
+
+  // The below methods are intended for optimized use cases where the
+  // size of the inlined fast path code is highly important. Applications
+  // should use the `Sample()` method unless they have proof that their
+  // specific use case requires the optimizations offered by these methods.
+  //
+  // An example of such a use case is SwissTable sampling. All sampling checks
+  // are in inlined SwissTable methods, and the number of call sites is huge.
+  // In this case, the inlined code size added to each translation unit calling
+  // SwissTable methods is non-trivial.
+  //
+  // The `SubtleMaybeSample()` function may spuriously return true even if the
+  // call should not be sampled. Applications MUST match each call to
+  // `SubtleMaybeSample()` that returns true with a `SubtleConfirmSample()`
+  // call, and use the result of the latter as the sampling decision.
+  // In other words: the code should logically be equivalent to:
+  //
+  //    if (SubtleMaybeSample() && SubtleConfirmSample()) {
+  //      // Sample this call
+  //    }
+  //
+  // In the 'inline-size' optimized case, the `SubtleConfirmSample()` call can
+  // be placed out of line. For example, the typical use case looks as follows:
+  //
+  //   // --- frobber.h -----------
+  //   void FrobberSampled();
+  //
+  //   inline void FrobberImpl() {
+  //     // ...
+  //   }
+  //
+  //   inline void Frobber() {
+  //     if (ABSL_PREDICT_FALSE(sampler.SubtleMaybeSample())) {
+  //       FrobberSampled();
+  //     } else {
+  //       FrobberImpl();
+  //     }
+  //   }
+  //
+  //   // --- frobber.cc -----------
+  //   void FrobberSampled() {
+  //     if (!sampler.SubtleConfirmSample()) {
+  //       // Spurious false positive
+  //       FrobberImpl();
+  //       return;
+  //     }
+  //
+  //     // Sampled execution
+  //     // ...
+  //   }
+  inline bool SubtleMaybeSample() noexcept;
+  bool SubtleConfirmSample() noexcept;
+
+ protected:
+  // We explicitly don't use a virtual destructor as this class is never
+  // virtually destroyed, and it keeps the class trivial, which avoids TLS
+  // prologue and epilogue code for our TLS instances.
+  ~PeriodicSamplerBase() = default;
+
+  // Returns the next stride for our sampler.
+  // This function is virtual for testing purposes only.
+  virtual int64_t GetExponentialBiased(int period) noexcept;
+
+ private:
+  // Returns the current period of this sampler. Thread-safe.
+  virtual int period() const noexcept = 0;
+
+  // Keep and increment stride_ as an unsigned integer, but compare the value
+  // to zero cast as a signed int. Clang and MSVC do not generate optimal code
+  // if we use a signed type for the combined increment and sign comparison.
+  //
+  // Of the three alternative options below, all compilers generate the best
+  // code with the unsigned increment <---> signed int comparison (Option 3).
+  //
+  // Option 1:
+  //   int64_t stride_;
+  //   if (ABSL_PREDICT_TRUE(++stride_ < 0)) { ... }
+  //
+  //   GCC   x64 (OK) : https://gcc.godbolt.org/z/R5MzzA
+  //   GCC   ppc (OK) : https://gcc.godbolt.org/z/z7NZAt
+  //   Clang x64 (BAD): https://gcc.godbolt.org/z/t4gPsd
+  //   ICC   x64 (OK) : https://gcc.godbolt.org/z/rE6s8W
+  //   MSVC  x64 (OK) : https://gcc.godbolt.org/z/ARMXqS
+  //
+  // Option 2:
+  //   int64_t stride_ = 0;
+  //   if (ABSL_PREDICT_TRUE(--stride_ >= 0)) { ... }
+  //
+  //   GCC   x64 (OK) : https://gcc.godbolt.org/z/jSQxYK
+  //   GCC   ppc (OK) : https://gcc.godbolt.org/z/VJdYaA
+  //   Clang x64 (BAD): https://gcc.godbolt.org/z/Xm4NjX
+  //   ICC   x64 (OK) : https://gcc.godbolt.org/z/4snaFd
+  //   MSVC  x64 (BAD): https://gcc.godbolt.org/z/BgnEKE
+  //
+  // Option 3:
+  //   uint64_t stride_;
+  //   if (ABSL_PREDICT_TRUE(static_cast<int64_t>(++stride_) < 0)) { ... }
+  //
+  //   GCC   x64 (OK) : https://gcc.godbolt.org/z/bFbfPy
+  //   GCC   ppc (OK) : https://gcc.godbolt.org/z/S9KkUE
+  //   Clang x64 (OK) : https://gcc.godbolt.org/z/UYzRb4
+  //   ICC   x64 (OK) : https://gcc.godbolt.org/z/ptTNfD
+  //   MSVC  x64 (OK) : https://gcc.godbolt.org/z/76j4-5
+  uint64_t stride_ = 0;
+  ExponentialBiased rng_;
+};
+
+inline bool PeriodicSamplerBase::SubtleMaybeSample() noexcept {
+  // See comments on `stride_` for the unsigned increment / signed compare.
+  if (ABSL_PREDICT_TRUE(static_cast<int64_t>(++stride_) < 0)) {
+    return false;
+  }
+  return true;
+}
+
+inline bool PeriodicSamplerBase::Sample() noexcept {
+  return ABSL_PREDICT_FALSE(SubtleMaybeSample()) ? SubtleConfirmSample()
+                                                 : false;
+}
+
+// PeriodicSampler is a concrete periodic sampler implementation.
+// The user-provided Tag identifies the implementation, and is required to
+// isolate the global state of this instance from other instances.
+//
+// Typical use case:
+//
+//   struct HashTablezTag {};
+//   thread_local PeriodicSampler<HashTablezTag> sampler;
+//
+//   void HashTableSamplingLogic(...) {
+//     if (sampler.Sample()) {
+//       HashTableSlowSamplePath(...);
+//     }
+//   }
+//
+template <typename Tag, int default_period = 0>
+class PeriodicSampler final : public PeriodicSamplerBase {
+ public:
+  ~PeriodicSampler() = default;
+
+  int period() const noexcept final {
+    return period_.load(std::memory_order_relaxed);
+  }
+
+  // Sets the global period for this sampler. Thread-safe.
+  // Setting a period of 0 disables the sampler, i.e., every call to Sample()
+  // will return false. Setting a period of 1 puts the sampler in 'always on'
+  // mode, i.e., every call to Sample() returns true.
+  static void SetGlobalPeriod(int period) {
+    period_.store(period, std::memory_order_relaxed);
+  }
+
+ private:
+  static std::atomic<int> period_;
+};
+
+template <typename Tag, int default_period>
+std::atomic<int> PeriodicSampler<Tag, default_period>::period_(default_period);
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_PERIODIC_SAMPLER_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/periodic_sampler_benchmark.cc b/third_party/abseil_cpp/absl/base/internal/periodic_sampler_benchmark.cc
new file mode 100644
index 000000000000..5ad469ce79c2
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/periodic_sampler_benchmark.cc
@@ -0,0 +1,79 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+#include "absl/base/internal/periodic_sampler.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+template <typename Sampler>
+void BM_Sample(Sampler* sampler, benchmark::State& state) {
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(sampler);
+    benchmark::DoNotOptimize(sampler->Sample());
+  }
+}
+
+template <typename Sampler>
+void BM_SampleMinimumInlined(Sampler* sampler, benchmark::State& state) {
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(sampler);
+    if (ABSL_PREDICT_FALSE(sampler->SubtleMaybeSample())) {
+      benchmark::DoNotOptimize(sampler->SubtleConfirmSample());
+    }
+  }
+}
+
+void BM_PeriodicSampler_TinySample(benchmark::State& state) {
+  struct Tag {};
+  PeriodicSampler<Tag, 10> sampler;
+  BM_Sample(&sampler, state);
+}
+BENCHMARK(BM_PeriodicSampler_TinySample);
+
+void BM_PeriodicSampler_ShortSample(benchmark::State& state) {
+  struct Tag {};
+  PeriodicSampler<Tag, 1024> sampler;
+  BM_Sample(&sampler, state);
+}
+BENCHMARK(BM_PeriodicSampler_ShortSample);
+
+void BM_PeriodicSampler_LongSample(benchmark::State& state) {
+  struct Tag {};
+  PeriodicSampler<Tag, 1024 * 1024> sampler;
+  BM_Sample(&sampler, state);
+}
+BENCHMARK(BM_PeriodicSampler_LongSample);
+
+void BM_PeriodicSampler_LongSampleMinimumInlined(benchmark::State& state) {
+  struct Tag {};
+  PeriodicSampler<Tag, 1024 * 1024> sampler;
+  BM_SampleMinimumInlined(&sampler, state);
+}
+BENCHMARK(BM_PeriodicSampler_LongSampleMinimumInlined);
+
+void BM_PeriodicSampler_Disabled(benchmark::State& state) {
+  struct Tag {};
+  PeriodicSampler<Tag, 0> sampler;
+  BM_Sample(&sampler, state);
+}
+BENCHMARK(BM_PeriodicSampler_Disabled);
+
+}  // namespace
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/base/internal/periodic_sampler_test.cc b/third_party/abseil_cpp/absl/base/internal/periodic_sampler_test.cc
new file mode 100644
index 000000000000..3b301e37ab98
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/periodic_sampler_test.cc
@@ -0,0 +1,177 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/periodic_sampler.h"
+
+#include <thread>  // NOLINT(build/c++11)
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/macros.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+using testing::Eq;
+using testing::Return;
+using testing::StrictMock;
+
+class MockPeriodicSampler : public PeriodicSamplerBase {
+ public:
+  virtual ~MockPeriodicSampler() = default;
+
+  MOCK_METHOD(int, period, (), (const, noexcept));
+  MOCK_METHOD(int64_t, GetExponentialBiased, (int), (noexcept));
+};
+
+TEST(PeriodicSamplerBaseTest, Sample) {
+  StrictMock<MockPeriodicSampler> sampler;
+
+  EXPECT_CALL(sampler, period()).Times(3).WillRepeatedly(Return(16));
+  EXPECT_CALL(sampler, GetExponentialBiased(16))
+      .WillOnce(Return(2))
+      .WillOnce(Return(3))
+      .WillOnce(Return(4));
+
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_TRUE(sampler.Sample());
+
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_TRUE(sampler.Sample());
+
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_FALSE(sampler.Sample());
+}
+
+TEST(PeriodicSamplerBaseTest, ImmediatelySample) {
+  StrictMock<MockPeriodicSampler> sampler;
+
+  EXPECT_CALL(sampler, period()).Times(2).WillRepeatedly(Return(16));
+  EXPECT_CALL(sampler, GetExponentialBiased(16))
+      .WillOnce(Return(1))
+      .WillOnce(Return(2))
+      .WillOnce(Return(3));
+
+  EXPECT_TRUE(sampler.Sample());
+
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_TRUE(sampler.Sample());
+
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_FALSE(sampler.Sample());
+}
+
+TEST(PeriodicSamplerBaseTest, Disabled) {
+  StrictMock<MockPeriodicSampler> sampler;
+
+  EXPECT_CALL(sampler, period()).Times(3).WillRepeatedly(Return(0));
+
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_FALSE(sampler.Sample());
+}
+
+TEST(PeriodicSamplerBaseTest, AlwaysOn) {
+  StrictMock<MockPeriodicSampler> sampler;
+
+  EXPECT_CALL(sampler, period()).Times(3).WillRepeatedly(Return(1));
+
+  EXPECT_TRUE(sampler.Sample());
+  EXPECT_TRUE(sampler.Sample());
+  EXPECT_TRUE(sampler.Sample());
+}
+
+TEST(PeriodicSamplerBaseTest, Disable) {
+  StrictMock<MockPeriodicSampler> sampler;
+
+  EXPECT_CALL(sampler, period()).WillOnce(Return(16));
+  EXPECT_CALL(sampler, GetExponentialBiased(16)).WillOnce(Return(3));
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_FALSE(sampler.Sample());
+
+  EXPECT_CALL(sampler, period()).Times(2).WillRepeatedly(Return(0));
+
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_FALSE(sampler.Sample());
+}
+
+TEST(PeriodicSamplerBaseTest, Enable) {
+  StrictMock<MockPeriodicSampler> sampler;
+
+  EXPECT_CALL(sampler, period()).WillOnce(Return(0));
+  EXPECT_FALSE(sampler.Sample());
+
+  EXPECT_CALL(sampler, period()).Times(2).WillRepeatedly(Return(16));
+  EXPECT_CALL(sampler, GetExponentialBiased(16))
+      .Times(2)
+      .WillRepeatedly(Return(3));
+
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_TRUE(sampler.Sample());
+
+  EXPECT_FALSE(sampler.Sample());
+  EXPECT_FALSE(sampler.Sample());
+}
+
+TEST(PeriodicSamplerTest, ConstructConstInit) {
+  struct Tag {};
+  ABSL_CONST_INIT static PeriodicSampler<Tag> sampler;
+  (void)sampler;
+}
+
+TEST(PeriodicSamplerTest, DefaultPeriod0) {
+  struct Tag {};
+  PeriodicSampler<Tag> sampler;
+  EXPECT_THAT(sampler.period(), Eq(0));
+}
+
+TEST(PeriodicSamplerTest, DefaultPeriod) {
+  struct Tag {};
+  PeriodicSampler<Tag, 100> sampler;
+  EXPECT_THAT(sampler.period(), Eq(100));
+}
+
+TEST(PeriodicSamplerTest, SetGlobalPeriod) {
+  struct Tag1 {};
+  struct Tag2 {};
+  PeriodicSampler<Tag1, 25> sampler1;
+  PeriodicSampler<Tag2, 50> sampler2;
+
+  EXPECT_THAT(sampler1.period(), Eq(25));
+  EXPECT_THAT(sampler2.period(), Eq(50));
+
+  std::thread thread([] {
+    PeriodicSampler<Tag1, 25> sampler1;
+    PeriodicSampler<Tag2, 50> sampler2;
+    EXPECT_THAT(sampler1.period(), Eq(25));
+    EXPECT_THAT(sampler2.period(), Eq(50));
+    sampler1.SetGlobalPeriod(10);
+    sampler2.SetGlobalPeriod(20);
+  });
+  thread.join();
+
+  EXPECT_THAT(sampler1.period(), Eq(10));
+  EXPECT_THAT(sampler2.period(), Eq(20));
+}
+
+}  // namespace
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/base/internal/pretty_function.h b/third_party/abseil_cpp/absl/base/internal/pretty_function.h
new file mode 100644
index 000000000000..35d51676dc2e
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/pretty_function.h
@@ -0,0 +1,33 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+#define ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+
+// ABSL_PRETTY_FUNCTION
+//
+// In C++11, __func__ gives the undecorated name of the current function.  That
+// is, "main", not "int main()".  Various compilers give extra macros to get the
+// decorated function name, including return type and arguments, to
+// differentiate between overload sets.  ABSL_PRETTY_FUNCTION is a portable
+// version of these macros which forwards to the correct macro on each compiler.
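+//
+// For example (illustrative):
+//
+//   void Greet(int) { puts(ABSL_PRETTY_FUNCTION); }
+//
+// prints "void Greet(int)" under GCC, whereas __func__ would give only
+// "Greet".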
+#if defined(_MSC_VER)
+#define ABSL_PRETTY_FUNCTION __FUNCSIG__
+#elif defined(__GNUC__)
+#define ABSL_PRETTY_FUNCTION __PRETTY_FUNCTION__
+#else
+#error "Unsupported compiler"
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/raw_logging.cc b/third_party/abseil_cpp/absl/base/internal/raw_logging.cc
new file mode 100644
index 000000000000..40cea5506172
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/raw_logging.cc
@@ -0,0 +1,240 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/raw_logging.h"
+
+#include <stddef.h>
+#include <cstdarg>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/log_severity.h"
+
+// We know how to perform low-level writes to stderr in POSIX and Windows.  For
+// these platforms, we define the token ABSL_LOW_LEVEL_WRITE_SUPPORTED.
+// Much of raw_logging.cc becomes a no-op when we can't output messages,
+// although a FATAL ABSL_RAW_LOG message will still abort the process.
+
+// ABSL_HAVE_POSIX_WRITE is defined when the platform provides posix write()
+// (as from unistd.h)
+//
+// This preprocessor token is also defined in raw_io.cc.  If you need to copy
+// this, consider moving both to config.h instead.
+#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
+    defined(__Fuchsia__) || defined(__native_client__) || \
+    defined(__EMSCRIPTEN__) || defined(__ASYLO__)
+
+#include <unistd.h>
+
+#define ABSL_HAVE_POSIX_WRITE 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_POSIX_WRITE
+#endif
+
+// ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall
+//   syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len);
+// for low level operations that want to avoid libc.
+#if (defined(__linux__) || defined(__FreeBSD__)) && !defined(__ANDROID__)
+#include <sys/syscall.h>
+#define ABSL_HAVE_SYSCALL_WRITE 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_SYSCALL_WRITE
+#endif
+
+#ifdef _WIN32
+#include <io.h>
+
+#define ABSL_HAVE_RAW_IO 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_RAW_IO
+#endif
+
+// TODO(gfalcon): We want raw-logging to work on as many platforms as possible.
+// Explicitly #error out when not ABSL_LOW_LEVEL_WRITE_SUPPORTED, except for a
+// whitelisted set of platforms for which we expect not to be able to raw log.
+
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook<
+    absl::raw_logging_internal::LogPrefixHook>
+    log_prefix_hook;
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook<
+    absl::raw_logging_internal::AbortHook>
+    abort_hook;
+
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+static const char kTruncated[] = " ... (message truncated)\n";
+
+// vsnprintf()s the format to the buffer, adjusting *buf and *size to reflect
+// the consumed bytes, and returns whether the message fit without truncation.
+// If truncation occurred, leaves room in the buffer for the message
+// kTruncated[] when possible.
+inline static bool VADoRawLog(char** buf, int* size, const char* format,
+                              va_list ap) ABSL_PRINTF_ATTRIBUTE(3, 0);
+inline static bool VADoRawLog(char** buf, int* size,
+                              const char* format, va_list ap) {
+  int n = vsnprintf(*buf, *size, format, ap);
+  bool result = true;
+  if (n < 0 || n > *size) {
+    result = false;
+    if (static_cast<size_t>(*size) > sizeof(kTruncated)) {
+      n = *size - sizeof(kTruncated);  // room for truncation message
+    } else {
+      n = 0;                           // no room for truncation message
+    }
+  }
+  *size -= n;
+  *buf += n;
+  return result;
+}
+#endif  // ABSL_LOW_LEVEL_WRITE_SUPPORTED
+
+static constexpr int kLogBufSize = 3000;
+
+namespace {
+
+// CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths
+// that invoke malloc() and getenv() that might acquire some locks.
+
+// Helper for RawLog below.
+// *DoRawLog writes to *buf of *size and moves them past the written portion.
+// It returns true iff there was no overflow or error.
+bool DoRawLog(char** buf, int* size, const char* format, ...)
+    ABSL_PRINTF_ATTRIBUTE(3, 4);
+bool DoRawLog(char** buf, int* size, const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  int n = vsnprintf(*buf, *size, format, ap);
+  va_end(ap);
+  if (n < 0 || n > *size) return false;
+  *size -= n;
+  *buf += n;
+  return true;
+}
+
+void RawLogVA(absl::LogSeverity severity, const char* file, int line,
+              const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(4, 0);
+void RawLogVA(absl::LogSeverity severity, const char* file, int line,
+              const char* format, va_list ap) {
+  char buffer[kLogBufSize];
+  char* buf = buffer;
+  int size = sizeof(buffer);
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  bool enabled = true;
+#else
+  bool enabled = false;
+#endif
+
+#ifdef ABSL_MIN_LOG_LEVEL
+  if (severity < static_cast<absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) &&
+      severity < absl::LogSeverity::kFatal) {
+    enabled = false;
+  }
+#endif
+
+  auto log_prefix_hook_ptr = log_prefix_hook.Load();
+  if (log_prefix_hook_ptr) {
+    enabled = log_prefix_hook_ptr(severity, file, line, &buf, &size);
+  } else {
+    if (enabled) {
+      DoRawLog(&buf, &size, "[%s : %d] RAW: ", file, line);
+    }
+  }
+  const char* const prefix_end = buf;
+
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  if (enabled) {
+    bool no_chop = VADoRawLog(&buf, &size, format, ap);
+    if (no_chop) {
+      DoRawLog(&buf, &size, "\n");
+    } else {
+      DoRawLog(&buf, &size, "%s", kTruncated);
+    }
+    absl::raw_logging_internal::SafeWriteToStderr(buffer, strlen(buffer));
+  }
+#else
+  static_cast<void>(format);
+  static_cast<void>(ap);
+#endif
+
+  // Abort the process after logging a FATAL message, even if the output itself
+  // was suppressed.
+  if (severity == absl::LogSeverity::kFatal) {
+    abort_hook(file, line, buffer, prefix_end, buffer + kLogBufSize);
+    abort();
+  }
+}
+
+}  // namespace
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace raw_logging_internal {
+void SafeWriteToStderr(const char *s, size_t len) {
+#if defined(ABSL_HAVE_SYSCALL_WRITE)
+  syscall(SYS_write, STDERR_FILENO, s, len);
+#elif defined(ABSL_HAVE_POSIX_WRITE)
+  write(STDERR_FILENO, s, len);
+#elif defined(ABSL_HAVE_RAW_IO)
+  _write(/* stderr */ 2, s, len);
+#else
+  // stderr logging unsupported on this platform
+  (void) s;
+  (void) len;
+#endif
+}
+
+void RawLog(absl::LogSeverity severity, const char* file, int line,
+            const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5);
+void RawLog(absl::LogSeverity severity, const char* file, int line,
+            const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  RawLogVA(severity, file, line, format, ap);
+  va_end(ap);
+}
+
+// Non-formatting version of RawLog().
+//
+// TODO(gfalcon): When string_view no longer depends on base, change this
+// interface to take its message as a string_view instead.
+static void DefaultInternalLog(absl::LogSeverity severity, const char* file,
+                               int line, const std::string& message) {
+  RawLog(severity, file, line, "%s", message.c_str());
+}
+
+bool RawLoggingFullySupported() {
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  return true;
+#else  // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  return false;
+#endif  // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
+}
+
+ABSL_DLL ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+    absl::base_internal::AtomicHook<InternalLogFunction>
+        internal_log_function(DefaultInternalLog);
+
+void RegisterInternalLogFunction(InternalLogFunction func) {
+  internal_log_function.Store(func);
+}
+
+}  // namespace raw_logging_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/base/internal/raw_logging.h b/third_party/abseil_cpp/absl/base/internal/raw_logging.h
new file mode 100644
index 000000000000..418d6c856feb
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/raw_logging.h
@@ -0,0 +1,183 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Thread-safe logging routines that do not allocate any memory or
+// acquire any locks, and can therefore be used by low-level memory
+// allocation, synchronization, and signal-handling code.
+
+#ifndef ABSL_BASE_INTERNAL_RAW_LOGGING_H_
+#define ABSL_BASE_INTERNAL_RAW_LOGGING_H_
+
+#include <string>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/log_severity.h"
+#include "absl/base/macros.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+
+// This is similar to LOG(severity) << format..., but
+// * it is to be used ONLY by low-level modules that can't use normal LOG()
+// * it is designed to be a low-level logger that does not allocate any
+//   memory and does not need any locks, hence:
+// * it logs straight and ONLY to STDERR w/o buffering
+// * it uses an explicit printf-format and arguments list
+// * it will silently chop off really long message strings
+// Usage example:
+//   ABSL_RAW_LOG(ERROR, "Failed foo with %i: %s", status, error);
+// This will print an almost standard log line like this to stderr only:
+//   E0821 211317 file.cc:123] RAW: Failed foo with 22: bad_file
+
+#define ABSL_RAW_LOG(severity, ...)                                            \
+  do {                                                                         \
+    constexpr const char* absl_raw_logging_internal_basename =                 \
+        ::absl::raw_logging_internal::Basename(__FILE__,                       \
+                                               sizeof(__FILE__) - 1);          \
+    ::absl::raw_logging_internal::RawLog(ABSL_RAW_LOGGING_INTERNAL_##severity, \
+                                         absl_raw_logging_internal_basename,   \
+                                         __LINE__, __VA_ARGS__);               \
+  } while (0)
+
+// Similar to CHECK(condition) << message, but for low-level modules:
+// we use only ABSL_RAW_LOG that does not allocate memory.
+// We do not want to provide args list here to encourage this usage:
+//   if (!cond)  ABSL_RAW_LOG(FATAL, "foo ...", hard_to_compute_args);
+// so that the args are not computed when not needed.
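+// Usage example (illustrative):
+//   ABSL_RAW_CHECK(fd != -1, "open() failed");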
+#define ABSL_RAW_CHECK(condition, message)                             \
+  do {                                                                 \
+    if (ABSL_PREDICT_FALSE(!(condition))) {                            \
+      ABSL_RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \
+    }                                                                  \
+  } while (0)
+
+// ABSL_INTERNAL_LOG and ABSL_INTERNAL_CHECK work like the RAW variants above,
+// except that if the richer log library is linked into the binary, we dispatch
+// to that instead.  This is potentially useful for internal logging and
+// assertions, where we are using RAW_LOG neither for its async-signal-safety
+// nor for its non-allocating nature, but rather because raw logging has very
+// few other dependencies.
+//
+// The API is a subset of the above: each macro only takes two arguments.  Use
+// StrCat if you need to build a richer message.
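+//
+// Usage example (illustrative; assumes absl::StrCat is available):
+//   ABSL_INTERNAL_LOG(ERROR, absl::StrCat("Failed to load ", path));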
+#define ABSL_INTERNAL_LOG(severity, message)                                \
+  do {                                                                      \
+    ::absl::raw_logging_internal::internal_log_function(                    \
+        ABSL_RAW_LOGGING_INTERNAL_##severity, __FILE__, __LINE__, message); \
+  } while (0)
+
+#define ABSL_INTERNAL_CHECK(condition, message)                    \
+  do {                                                             \
+    if (ABSL_PREDICT_FALSE(!(condition))) {                        \
+      std::string death_message = "Check " #condition " failed: "; \
+      death_message += std::string(message);                       \
+      ABSL_INTERNAL_LOG(FATAL, death_message);                     \
+    }                                                              \
+  } while (0)
+
+#define ABSL_RAW_LOGGING_INTERNAL_INFO ::absl::LogSeverity::kInfo
+#define ABSL_RAW_LOGGING_INTERNAL_WARNING ::absl::LogSeverity::kWarning
+#define ABSL_RAW_LOGGING_INTERNAL_ERROR ::absl::LogSeverity::kError
+#define ABSL_RAW_LOGGING_INTERNAL_FATAL ::absl::LogSeverity::kFatal
+#define ABSL_RAW_LOGGING_INTERNAL_LEVEL(severity) \
+  ::absl::NormalizeLogSeverity(severity)
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace raw_logging_internal {
+
+// Helper function to implement ABSL_RAW_LOG
+// Logs format... at "severity" level, reporting it
+// as called from file:line.
+// This does not allocate memory or acquire locks.
+void RawLog(absl::LogSeverity severity, const char* file, int line,
+            const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5);
+
+// Writes the provided buffer directly to stderr, in a safe, low-level manner.
+//
+// In POSIX this means calling write(), which is async-signal safe and does
+// not malloc.  If the platform supports the SYS_write syscall, we invoke that
+// directly to side-step any libc interception.
+void SafeWriteToStderr(const char *s, size_t len);
+
+// Compile-time function to get the "base" filename, that is, the part of
+// a filename after the last "/" or "\" path separator.  The search starts at
+// the end of the string; the second parameter is the length of the string.
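+// For example, Basename("dir/file.cc", 11) returns a pointer to "file.cc".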
+constexpr const char* Basename(const char* fname, int offset) {
+  return offset == 0 || fname[offset - 1] == '/' || fname[offset - 1] == '\\'
+             ? fname + offset
+             : Basename(fname, offset - 1);
+}
+
+// For testing only.
+// Returns true if raw logging is fully supported. When it is not
+// fully supported, no messages will be emitted, but a log at FATAL
+// severity will cause an abort.
+//
+// TODO(gfalcon): Come up with a better name for this method.
+bool RawLoggingFullySupported();
+
+// Function type for a raw_logging customization hook for suppressing messages
+// by severity, and for writing custom prefixes on non-suppressed messages.
+//
+// The installed hook is called for every raw log invocation.  The message will
+// be logged to stderr only if the hook returns true.  FATAL errors will cause
+// the process to abort, even if writing to stderr is suppressed.  The hook is
+// also provided with an output buffer, where it can write a custom log message
+// prefix.
+//
+// The raw_logging system does not allocate memory or grab locks.  User-provided
+// hooks must avoid these operations, and must not throw exceptions.
+//
+// 'severity' is the severity level of the message being written.
+// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
+// was located.
+// 'buffer' and 'buf_size' are pointers to the buffer and buffer size.  If the
+// hook writes a prefix, it must increment *buffer and decrement *buf_size
+// accordingly.
+using LogPrefixHook = bool (*)(absl::LogSeverity severity, const char* file,
+                               int line, char** buffer, int* buf_size);
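+//
+// A hypothetical hook sketch (illustrative only):
+//
+//   bool MyPrefixHook(absl::LogSeverity severity, const char* file, int line,
+//                     char** buf, int* buf_size) {
+//     // Write no prefix; log only WARNING and above to stderr.
+//     return severity >= absl::LogSeverity::kWarning;
+//   }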
+
+// Function type for a raw_logging customization hook called to abort a process
+// when a FATAL message is logged.  If the provided AbortHook() returns, the
+// logging system will call abort().
+//
+// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
+// was located.
+// The NUL-terminated logged message lives in the buffer between 'buf_start'
+// and 'buf_end'.  'prefix_end' points to the first non-prefix character of the
+// buffer (as written by the LogPrefixHook).
+using AbortHook = void (*)(const char* file, int line, const char* buf_start,
+                           const char* prefix_end, const char* buf_end);
+
+// Internal logging function for ABSL_INTERNAL_LOG to dispatch to.
+//
+// TODO(gfalcon): When string_view no longer depends on base, change this
+// interface to take its message as a string_view instead.
+using InternalLogFunction = void (*)(absl::LogSeverity severity,
+                                     const char* file, int line,
+                                     const std::string& message);
+
+ABSL_DLL ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES extern base_internal::AtomicHook<
+    InternalLogFunction>
+    internal_log_function;
+
+void RegisterInternalLogFunction(InternalLogFunction func);
+
+}  // namespace raw_logging_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_RAW_LOGGING_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/scheduling_mode.h b/third_party/abseil_cpp/absl/base/internal/scheduling_mode.h
new file mode 100644
index 000000000000..8be5ab6dd3c0
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/scheduling_mode.h
@@ -0,0 +1,58 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Core interfaces and definitions used by low-level interfaces such as
+// SpinLock.
+
+#ifndef ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+#define ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Used to describe how a thread may be scheduled.  Typically associated with
+// the declaration of a resource supporting synchronized access.
+//
+// SCHEDULE_COOPERATIVE_AND_KERNEL:
+// Specifies that when waiting, a cooperative thread (e.g. a Fiber) may
+// reschedule (using base::scheduling semantics), allowing other cooperative
+// threads to proceed.
+//
+// SCHEDULE_KERNEL_ONLY: (Also described as "non-cooperative")
+// Specifies that no cooperative scheduling semantics may be used, even if the
+// current thread is itself cooperatively scheduled.  This means that
+// cooperative threads will NOT allow other cooperative threads to execute in
+// their place while waiting for a resource of this type.  Host operating system
+// semantics (e.g. a futex) may still be used.
+//
+// When optional, clients should strongly prefer SCHEDULE_COOPERATIVE_AND_KERNEL
+// by default.  SCHEDULE_KERNEL_ONLY should only be used for resources on which
+// base::scheduling (e.g. the implementation of a Scheduler) may depend.
+//
+// NOTE: Cooperative resources may not be nested below non-cooperative ones.
+// This means that it is invalid to acquire a SCHEDULE_COOPERATIVE_AND_KERNEL
+// resource if a SCHEDULE_KERNEL_ONLY resource is already held.
+enum SchedulingMode {
+  SCHEDULE_KERNEL_ONLY = 0,         // Allow scheduling only the host OS.
+  SCHEDULE_COOPERATIVE_AND_KERNEL,  // Also allow cooperative scheduling.
+};
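+
+// For illustration (a sketch; SpinLock itself is declared in spinlock.h): a
+// lock that the scheduler implementation may depend on must be declared
+// non-cooperative, while ordinary clients should use the cooperative mode:
+//
+//   absl::base_internal::SpinLock scheduler_lock(
+//       absl::base_internal::SCHEDULE_KERNEL_ONLY);
+//   absl::base_internal::SpinLock client_lock(
+//       absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);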
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/scoped_set_env.cc b/third_party/abseil_cpp/absl/base/internal/scoped_set_env.cc
new file mode 100644
index 000000000000..8a934cb51191
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/scoped_set_env.cc
@@ -0,0 +1,81 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/scoped_set_env.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+#include <cstdlib>
+
+#include "absl/base/internal/raw_logging.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+namespace {
+
+#ifdef _WIN32
+const int kMaxEnvVarValueSize = 1024;
+#endif
+
+void SetEnvVar(const char* name, const char* value) {
+#ifdef _WIN32
+  SetEnvironmentVariableA(name, value);
+#else
+  if (value == nullptr) {
+    ::unsetenv(name);
+  } else {
+    ::setenv(name, value, 1);
+  }
+#endif
+}
+
+}  // namespace
+
+ScopedSetEnv::ScopedSetEnv(const char* var_name, const char* new_value)
+    : var_name_(var_name), was_unset_(false) {
+#ifdef _WIN32
+  char buf[kMaxEnvVarValueSize];
+  auto get_res = GetEnvironmentVariableA(var_name_.c_str(), buf, sizeof(buf));
+  ABSL_INTERNAL_CHECK(get_res < sizeof(buf), "value exceeds buffer size");
+
+  if (get_res == 0) {
+    was_unset_ = (GetLastError() == ERROR_ENVVAR_NOT_FOUND);
+  } else {
+    old_value_.assign(buf, get_res);
+  }
+
+  SetEnvironmentVariableA(var_name_.c_str(), new_value);
+#else
+  const char* val = ::getenv(var_name_.c_str());
+  if (val == nullptr) {
+    was_unset_ = true;
+  } else {
+    old_value_ = val;
+  }
+#endif
+
+  SetEnvVar(var_name_.c_str(), new_value);
+}
+
+ScopedSetEnv::~ScopedSetEnv() {
+  SetEnvVar(var_name_.c_str(), was_unset_ ? nullptr : old_value_.c_str());
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/base/internal/scoped_set_env.h b/third_party/abseil_cpp/absl/base/internal/scoped_set_env.h
new file mode 100644
index 000000000000..19ec7b5d8a04
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/scoped_set_env.h
@@ -0,0 +1,45 @@
+//
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
+#define ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
+
+#include <string>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+class ScopedSetEnv {
+ public:
+  ScopedSetEnv(const char* var_name, const char* new_value);
+  ~ScopedSetEnv();
+
+ private:
+  std::string var_name_;
+  std::string old_value_;
+
+  // True if the environment variable was initially not set.
+  bool was_unset_;
+};
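+
+// Typical usage (a sketch; MY_TEST_VAR is a hypothetical variable name): set
+// a variable for the duration of a scope and let the destructor restore the
+// prior state, whether that was a different value or unset:
+//
+//   {
+//     ScopedSetEnv scoped_set("MY_TEST_VAR", "value");
+//     // Code here observes MY_TEST_VAR == "value".
+//   }
+//   // MY_TEST_VAR is restored to its original value, or unset again.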
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/scoped_set_env_test.cc b/third_party/abseil_cpp/absl/base/internal/scoped_set_env_test.cc
new file mode 100644
index 000000000000..5cbad246c647
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/scoped_set_env_test.cc
@@ -0,0 +1,99 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/scoped_set_env.h"
+
+namespace {
+
+using absl::base_internal::ScopedSetEnv;
+
+std::string GetEnvVar(const char* name) {
+#ifdef _WIN32
+  char buf[1024];
+  auto get_res = GetEnvironmentVariableA(name, buf, sizeof(buf));
+  if (get_res >= sizeof(buf)) {
+    return "TOO_BIG";
+  }
+
+  if (get_res == 0) {
+    return "UNSET";
+  }
+
+  return std::string(buf, get_res);
+#else
+  const char* val = ::getenv(name);
+  if (val == nullptr) {
+    return "UNSET";
+  }
+
+  return val;
+#endif
+}
+
+TEST(ScopedSetEnvTest, SetNonExistingVarToString) {
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+
+  {
+    ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value");
+
+    EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
+  }
+
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+}
+
+TEST(ScopedSetEnvTest, SetNonExistingVarToNull) {
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+
+  {
+    ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", nullptr);
+
+    EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+  }
+
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+}
+
+TEST(ScopedSetEnvTest, SetExistingVarToString) {
+  ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value");
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
+
+  {
+    ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "new_value");
+
+    EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "new_value");
+  }
+
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
+}
+
+TEST(ScopedSetEnvTest, SetExistingVarToNull) {
+  ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value");
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
+
+  {
+    ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", nullptr);
+
+    EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+  }
+
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
+}
+
+}  // namespace
diff --git a/third_party/abseil_cpp/absl/base/internal/spinlock.cc b/third_party/abseil_cpp/absl/base/internal/spinlock.cc
new file mode 100644
index 000000000000..a7d44f3eb091
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/spinlock.cc
@@ -0,0 +1,220 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/spinlock.h"
+
+#include <algorithm>
+#include <atomic>
+#include <limits>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/internal/cycleclock.h"
+#include "absl/base/internal/spinlock_wait.h"
+#include "absl/base/internal/sysinfo.h" /* For NumCPUs() */
+#include "absl/base/call_once.h"
+
+// Description of lock-word:
+//  31..00: [............................3][2][1][0]
+//
+//     [0]: kSpinLockHeld
+//     [1]: kSpinLockCooperative
+//     [2]: kSpinLockDisabledScheduling
+// [31..3]: ONLY kSpinLockSleeper OR
+//          Wait time in cycles >> PROFILE_TIMESTAMP_SHIFT
+//
+// Detailed descriptions:
+//
+// Bit [0]: The lock is considered held iff kSpinLockHeld is set.
+//
+// Bit [1]: Eligible waiters (e.g. Fibers) may co-operatively reschedule when
+//          contended iff kSpinLockCooperative is set.
+//
+// Bit [2]: This bit is mutually exclusive with bit [1].  It is used only by a
+//          non-cooperative lock.  When set, indicates that scheduling was
+//          successfully disabled when the lock was acquired.  May be unset,
+//          even if non-cooperative, if a ThreadIdentity did not yet exist at
+//          time of acquisition.
+//
+// Bit [3]: If this is the only upper bit ([31..3]) set, then this lock was
+//          acquired without contention; however, at least one waiter exists.
+//
+//          Otherwise, bits [31..3] represent the time spent by the current lock
+//          holder to acquire the lock.  There may be outstanding waiter(s).
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static base_internal::AtomicHook<void (*)(
+    const void *lock, int64_t wait_cycles)>
+    submit_profile_data;
+
+void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
+                                         int64_t wait_cycles)) {
+  submit_profile_data.Store(fn);
+}
+
+// Static member variable definitions.
+constexpr uint32_t SpinLock::kSpinLockHeld;
+constexpr uint32_t SpinLock::kSpinLockCooperative;
+constexpr uint32_t SpinLock::kSpinLockDisabledScheduling;
+constexpr uint32_t SpinLock::kSpinLockSleeper;
+constexpr uint32_t SpinLock::kWaitTimeMask;
+
+// Uncommon constructors.
+SpinLock::SpinLock(base_internal::SchedulingMode mode)
+    : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
+  ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
+}
+
+// Monitor the lock to see if its value changes within some time period
+// (adaptive_spin_count loop iterations). The last value read from the lock
+// is returned from the method.
+uint32_t SpinLock::SpinLoop() {
+  // We are already in the slow path of SpinLock; initialize the
+  // adaptive_spin_count here.
+  ABSL_CONST_INIT static absl::once_flag init_adaptive_spin_count;
+  ABSL_CONST_INIT static int adaptive_spin_count = 0;
+  base_internal::LowLevelCallOnce(&init_adaptive_spin_count, []() {
+    adaptive_spin_count = base_internal::NumCPUs() > 1 ? 1000 : 1;
+  });
+
+  int c = adaptive_spin_count;
+  uint32_t lock_value;
+  do {
+    lock_value = lockword_.load(std::memory_order_relaxed);
+  } while ((lock_value & kSpinLockHeld) != 0 && --c > 0);
+  return lock_value;
+}
+
+void SpinLock::SlowLock() {
+  uint32_t lock_value = SpinLoop();
+  lock_value = TryLockInternal(lock_value, 0);
+  if ((lock_value & kSpinLockHeld) == 0) {
+    return;
+  }
+
+  base_internal::SchedulingMode scheduling_mode;
+  if ((lock_value & kSpinLockCooperative) != 0) {
+    scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+  } else {
+    scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY;
+  }
+
+  // The lock was not obtained initially, so this thread needs to wait for
+  // it.  Record the current timestamp in the local variable wait_start_time
+  // so the total wait time can be stored in the lockword once this thread
+  // obtains the lock.
+  int64_t wait_start_time = CycleClock::Now();
+  uint32_t wait_cycles = 0;
+  int lock_wait_call_count = 0;
+  while ((lock_value & kSpinLockHeld) != 0) {
+    // If the lock is currently held, but not marked as having a sleeper, mark
+    // it as having a sleeper.
+    if ((lock_value & kWaitTimeMask) == 0) {
+      // Here, just "mark" that the thread is going to sleep.  Don't store the
+      // lock wait time in the lock as that will cause the current lock
+      // owner to think it experienced contention.
+      if (lockword_.compare_exchange_strong(
+              lock_value, lock_value | kSpinLockSleeper,
+              std::memory_order_relaxed, std::memory_order_relaxed)) {
+        // Successfully transitioned to kSpinLockSleeper.  Pass
+        // kSpinLockSleeper to the SpinLockWait routine to properly indicate
+        // the last lock_value observed.
+        lock_value |= kSpinLockSleeper;
+      } else if ((lock_value & kSpinLockHeld) == 0) {
+        // Lock is free again, so try and acquire it before sleeping.  The
+        // new lock state will be the number of cycles this thread waited if
+        // this thread obtains the lock.
+        lock_value = TryLockInternal(lock_value, wait_cycles);
+        continue;   // Skip the delay at the end of the loop.
+      }
+    }
+
+    // SpinLockDelay() calls into fiber scheduler, we need to see
+    // synchronization there to avoid false positives.
+    ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
+    // Wait for an OS specific delay.
+    base_internal::SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count,
+                                 scheduling_mode);
+    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
+    // Spin again after returning from the wait routine to give this thread
+    // some chance of obtaining the lock.
+    lock_value = SpinLoop();
+    wait_cycles = EncodeWaitCycles(wait_start_time, CycleClock::Now());
+    lock_value = TryLockInternal(lock_value, wait_cycles);
+  }
+}
+
+void SpinLock::SlowUnlock(uint32_t lock_value) {
+  base_internal::SpinLockWake(&lockword_,
+                              false);  // wake waiter if necessary
+
+  // If our acquisition was contended, collect contentionz profile info.  We
+  // reserve a unitary wait time to represent that a waiter exists without our
+  // own acquisition having been contended.
+  if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) {
+    const uint64_t wait_cycles = DecodeWaitCycles(lock_value);
+    ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
+    submit_profile_data(this, wait_cycles);
+    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
+  }
+}
+
+// We use the upper 29 bits of the lock word to store the time spent waiting to
+// acquire this lock.  This is reported by contentionz profiling.  Since the
+// lower bits of the cycle counter wrap very quickly on high-frequency
+// processors we divide to reduce the granularity to 2^kProfileTimestampShift
+// sized units.  On a 4 GHz machine this will lose track of wait times greater
+// than (2^29/4 GHz)*128 =~ 17.2 seconds.  Such waits should be extremely rare.
+static constexpr int kProfileTimestampShift = 7;
+
+// We currently reserve the lower 3 bits.
+static constexpr int kLockwordReservedShift = 3;
+
+uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
+                                    int64_t wait_end_time) {
+  static const int64_t kMaxWaitTime =
+      std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
+  int64_t scaled_wait_time =
+      (wait_end_time - wait_start_time) >> kProfileTimestampShift;
+
+  // Return a representation of the time spent waiting that can be stored in
+  // the lock word's upper bits.
+  uint32_t clamped = static_cast<uint32_t>(
+      std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift);
+
+  if (clamped == 0) {
+    return kSpinLockSleeper;  // Just wake waiters, but don't record contention.
+  }
+  // Bump up value if necessary to avoid returning kSpinLockSleeper.
+  const uint32_t kMinWaitTime =
+      kSpinLockSleeper + (1 << kLockwordReservedShift);
+  if (clamped == kSpinLockSleeper) {
+    return kMinWaitTime;
+  }
+  return clamped;
+}
+
+uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
+  // Cast to uint32_t first to ensure bits [63:32] are cleared.
+  const uint64_t scaled_wait_time =
+      static_cast<uint32_t>(lock_value & kWaitTimeMask);
+  return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/base/internal/spinlock.h b/third_party/abseil_cpp/absl/base/internal/spinlock.h
new file mode 100644
index 000000000000..2222398b16b4
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/spinlock.h
@@ -0,0 +1,230 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+//  Most users requiring mutual exclusion should use Mutex.
+//  SpinLock is provided for use in three situations:
+//   - for use in code that Mutex itself depends on
+//   - to get a faster fast-path release under low contention (without an
+//     atomic read-modify-write).  In return, SpinLock has worse behavior under
+//     contention, which is why Mutex is preferred in most situations.
+//   - for async signal safety (see below)
+
+// SpinLock is async signal safe.  If a spinlock is used within a signal
+// handler, all code that acquires the lock must ensure that the signal cannot
+// arrive while the lock is held.  Typically, this is done by blocking
+// the signal.
+
+#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
+#define ABSL_BASE_INTERNAL_SPINLOCK_H_
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <atomic>
+
+#include "absl/base/attributes.h"
+#include "absl/base/const_init.h"
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/low_level_scheduling.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/internal/tsan_mutex_interface.h"
+#include "absl/base/macros.h"
+#include "absl/base/port.h"
+#include "absl/base/thread_annotations.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+class ABSL_LOCKABLE SpinLock {
+ public:
+  SpinLock() : lockword_(kSpinLockCooperative) {
+    ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
+  }
+
+  // Constructors that allow non-cooperative spinlocks to be created for use
+  // inside thread schedulers.  Normal clients should not use these.
+  explicit SpinLock(base_internal::SchedulingMode mode);
+
+  // Constructor for global SpinLock instances.  See absl/base/const_init.h.
+  constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode)
+      : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {}
+
+  ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
+
+  // Acquire this SpinLock.
+  inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
+    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+    if (!TryLockImpl()) {
+      SlowLock();
+    }
+    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+  }
+
+  // Try to acquire this SpinLock without blocking and return true if the
+  // acquisition was successful.  If the lock was not acquired, false is
+  // returned.  If this SpinLock is free at the time of the call, TryLock
+  // will return true with high probability.
+  inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+    ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
+    bool res = TryLockImpl();
+    ABSL_TSAN_MUTEX_POST_LOCK(
+        this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
+        0);
+    return res;
+  }
+
+  // Release this SpinLock, which must be held by the calling thread.
+  inline void Unlock() ABSL_UNLOCK_FUNCTION() {
+    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
+    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
+    lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
+                                    std::memory_order_release);
+
+    if ((lock_value & kSpinLockDisabledScheduling) != 0) {
+      base_internal::SchedulingGuard::EnableRescheduling(true);
+    }
+    if ((lock_value & kWaitTimeMask) != 0) {
+      // Collect contentionz profile info, and speed the wakeup of any waiter.
+      // The wait_cycles value indicates how long this thread spent waiting
+      // for the lock.
+      SlowUnlock(lock_value);
+    }
+    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
+  }
+
+  // Determine if the lock is held.  When the lock is held by the invoking
+  // thread, true will always be returned. Intended to be used as
+  // CHECK(lock.IsHeld()).
+  inline bool IsHeld() const {
+    return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
+  }
+
+ protected:
+  // These should not be exported except for testing.
+
+  // Store number of cycles between wait_start_time and wait_end_time in a
+  // lock value.
+  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
+                                   int64_t wait_end_time);
+
+  // Extract number of wait cycles in a lock value.
+  static uint64_t DecodeWaitCycles(uint32_t lock_value);
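+
+  // Worked example (assuming kProfileTimestampShift == 7 and
+  // kLockwordReservedShift == 3, the constants defined in spinlock.cc): a
+  // wait of 1,000,000 cycles encodes as (1000000 >> 7) << 3 == 62496 and
+  // decodes back to 62496 << (7 - 3) == 999,936 cycles; the remainder (64
+  // cycles) is lost to the 2^7-cycle granularity.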
+
+  // Provide access to protected method above.  Use for testing only.
+  friend struct SpinLockTest;
+
+ private:
+  // lockword_ is used to store the following:
+  //
+  // bit[0] encodes whether a lock is being held.
+  // bit[1] encodes whether a lock uses cooperative scheduling.
+  // bit[2] encodes whether a lock disables scheduling.
+  // bit[3:31] encodes the time a lock spent waiting as a 29-bit unsigned int.
+  static constexpr uint32_t kSpinLockHeld = 1;
+  static constexpr uint32_t kSpinLockCooperative = 2;
+  static constexpr uint32_t kSpinLockDisabledScheduling = 4;
+  static constexpr uint32_t kSpinLockSleeper = 8;
+  // Includes kSpinLockSleeper.
+  static constexpr uint32_t kWaitTimeMask =
+      ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);
+
+  // Returns true if the provided scheduling mode is cooperative.
+  static constexpr bool IsCooperative(
+      base_internal::SchedulingMode scheduling_mode) {
+    return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+  }
+
+  uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
+  void SlowLock() ABSL_ATTRIBUTE_COLD;
+  void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
+  uint32_t SpinLoop();
+
+  inline bool TryLockImpl() {
+    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
+    return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
+  }
+
+  std::atomic<uint32_t> lockword_;
+
+  SpinLock(const SpinLock&) = delete;
+  SpinLock& operator=(const SpinLock&) = delete;
+};
+
+// Corresponding locker object that arranges to acquire a spinlock for
+// the duration of a C++ scope.
+class ABSL_SCOPED_LOCKABLE SpinLockHolder {
+ public:
+  inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
+      : lock_(l) {
+    l->Lock();
+  }
+  inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); }
+
+  SpinLockHolder(const SpinLockHolder&) = delete;
+  SpinLockHolder& operator=(const SpinLockHolder&) = delete;
+
+ private:
+  SpinLock* lock_;
+};
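+
+// Example (a sketch; counter_lock and counter are hypothetical):
+//
+//   ABSL_CONST_INIT static absl::base_internal::SpinLock counter_lock(
+//       absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);
+//   static int64_t counter = 0;
+//
+//   void Increment() {
+//     absl::base_internal::SpinLockHolder holder(&counter_lock);
+//     ++counter;
+//   }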
+
+// Register a hook for profiling support.
+//
+// The function pointer registered here will be called whenever a spinlock is
+// contended.  The callback is given an opaque handle to the contended spinlock
+// and the number of wait cycles.  This is thread-safe, but only a single
+// profiler can be registered.  It is an error to call this function multiple
+// times with different arguments.
+void RegisterSpinLockProfiler(void (*fn)(const void* lock,
+                                         int64_t wait_cycles));
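+
+// A sketch of a profiler callback (MyProfiler and RecordContention are
+// hypothetical).  Because it runs on the unlock path of a contended lock, it
+// should be cheap and must not re-enter the lock being profiled:
+//
+//   void MyProfiler(const void* lock, int64_t wait_cycles) {
+//     RecordContention(lock, wait_cycles);
+//   }
+//   ...
+//   absl::base_internal::RegisterSpinLockProfiler(&MyProfiler);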
+
+//------------------------------------------------------------------------------
+// Public interface ends here.
+//------------------------------------------------------------------------------
+
+// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
+// Otherwise, returns the last observed value of lockword_.
+inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
+                                          uint32_t wait_cycles) {
+  if ((lock_value & kSpinLockHeld) != 0) {
+    return lock_value;
+  }
+
+  uint32_t sched_disabled_bit = 0;
+  if ((lock_value & kSpinLockCooperative) == 0) {
+    // For non-cooperative locks we must make sure we mark ourselves as
+    // non-reschedulable before we attempt to CompareAndSwap.
+    if (base_internal::SchedulingGuard::DisableRescheduling()) {
+      sched_disabled_bit = kSpinLockDisabledScheduling;
+    }
+  }
+
+  if (!lockword_.compare_exchange_strong(
+          lock_value,
+          kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
+          std::memory_order_acquire, std::memory_order_relaxed)) {
+    base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
+  }
+
+  return lock_value;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SPINLOCK_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/spinlock_akaros.inc b/third_party/abseil_cpp/absl/base/internal/spinlock_akaros.inc
new file mode 100644
index 000000000000..bc468940fc5d
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/spinlock_akaros.inc
@@ -0,0 +1,35 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is an Akaros-specific part of spinlock_wait.cc
+
+#include <atomic>
+
+#include "absl/base/internal/scheduling_mode.h"
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */,
+    int /* loop */, absl::base_internal::SchedulingMode /* mode */) {
+  // In Akaros, one must take care not to call anything that could cause a
+  // malloc(), a blocking system call, or a uthread_yield() while holding a
+  // spinlock. Our callers assume we will not call into libraries or other
+  // arbitrary code.
+}
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+}  // extern "C"
diff --git a/third_party/abseil_cpp/absl/base/internal/spinlock_benchmark.cc b/third_party/abseil_cpp/absl/base/internal/spinlock_benchmark.cc
new file mode 100644
index 000000000000..0451c65f9559
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/spinlock_benchmark.cc
@@ -0,0 +1,52 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// See also //absl/synchronization:mutex_benchmark for a comparison of SpinLock
+// and Mutex performance under varying levels of contention.
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/synchronization/internal/create_thread_identity.h"
+#include "benchmark/benchmark.h"
+
+namespace {
+
+template <absl::base_internal::SchedulingMode scheduling_mode>
+static void BM_SpinLock(benchmark::State& state) {
+  // Ensure a ThreadIdentity is installed.
+  ABSL_INTERNAL_CHECK(
+      absl::synchronization_internal::GetOrCreateCurrentThreadIdentity() !=
+          nullptr,
+      "GetOrCreateCurrentThreadIdentity() failed");
+
+  static auto* spinlock = new absl::base_internal::SpinLock(scheduling_mode);
+  for (auto _ : state) {
+    absl::base_internal::SpinLockHolder holder(spinlock);
+  }
+}
+
+BENCHMARK_TEMPLATE(BM_SpinLock,
+                   absl::base_internal::SCHEDULE_KERNEL_ONLY)
+    ->UseRealTime()
+    ->Threads(1)
+    ->ThreadPerCpu();
+
+BENCHMARK_TEMPLATE(BM_SpinLock,
+                   absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL)
+    ->UseRealTime()
+    ->Threads(1)
+    ->ThreadPerCpu();
+
+}  // namespace
diff --git a/third_party/abseil_cpp/absl/base/internal/spinlock_linux.inc b/third_party/abseil_cpp/absl/base/internal/spinlock_linux.inc
new file mode 100644
index 000000000000..e31c6ed477c0
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/spinlock_linux.inc
@@ -0,0 +1,74 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Linux-specific part of spinlock_wait.cc
+
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include <atomic>
+#include <climits>
+#include <cstdint>
+#include <ctime>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/errno_saver.h"
+
+// The SpinLock lockword is `std::atomic<uint32_t>`. Here we assert that
+// `std::atomic<uint32_t>` is bitwise equivalent to the `int` expected
+// by SYS_futex. We also assume that reads/writes done to the lockword
+// by SYS_futex have rational semantics with regard to the
+// std::atomic<> API. C++ provides no guarantees of these assumptions,
+// but they are believed to hold in practice.
+static_assert(sizeof(std::atomic<uint32_t>) == sizeof(int),
+              "SpinLock lockword has the wrong size for a futex");
+
+// Some Android headers are missing these definitions even though they
+// support these futex operations.
+#ifdef __BIONIC__
+#ifndef SYS_futex
+#define SYS_futex __NR_futex
+#endif
+#ifndef FUTEX_PRIVATE_FLAG
+#define FUTEX_PRIVATE_FLAG 128
+#endif
+#endif
+
+#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
+#define SYS_futex_time64 __NR_futex_time64
+#endif
+
+#if defined(SYS_futex_time64) && !defined(SYS_futex)
+#define SYS_futex SYS_futex_time64
+#endif
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
+    std::atomic<uint32_t> *w, uint32_t value, int loop,
+    absl::base_internal::SchedulingMode) {
+  absl::base_internal::ErrnoSaver errno_saver;
+  struct timespec tm;
+  tm.tv_sec = 0;
+  tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop);
+  syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm);
+}
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(std::atomic<uint32_t> *w,
+                                                  bool all) {
+  syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ? INT_MAX : 1, 0);
+}
+
+}  // extern "C"
diff --git a/third_party/abseil_cpp/absl/base/internal/spinlock_posix.inc b/third_party/abseil_cpp/absl/base/internal/spinlock_posix.inc
new file mode 100644
index 000000000000..fcd21b151b6e
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/spinlock_posix.inc
@@ -0,0 +1,46 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Posix-specific part of spinlock_wait.cc
+
+#include <sched.h>
+
+#include <atomic>
+#include <ctime>
+
+#include "absl/base/internal/errno_saver.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/port.h"
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
+    absl::base_internal::SchedulingMode /* mode */) {
+  absl::base_internal::ErrnoSaver errno_saver;
+  if (loop == 0) {
+  } else if (loop == 1) {
+    sched_yield();
+  } else {
+    struct timespec tm;
+    tm.tv_sec = 0;
+    tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop);
+    nanosleep(&tm, nullptr);
+  }
+}
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+}  // extern "C"
diff --git a/third_party/abseil_cpp/absl/base/internal/spinlock_wait.cc b/third_party/abseil_cpp/absl/base/internal/spinlock_wait.cc
new file mode 100644
index 000000000000..fa824be1c008
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/spinlock_wait.cc
@@ -0,0 +1,81 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The OS-specific header included below must provide two calls:
+// AbslInternalSpinLockDelay() and AbslInternalSpinLockWake().
+// See spinlock_wait.h for the specs.
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/internal/spinlock_wait.h"
+
+#if defined(_WIN32)
+#include "absl/base/internal/spinlock_win32.inc"
+#elif defined(__linux__)
+#include "absl/base/internal/spinlock_linux.inc"
+#elif defined(__akaros__)
+#include "absl/base/internal/spinlock_akaros.inc"
+#else
+#include "absl/base/internal/spinlock_posix.inc"
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// See spinlock_wait.h for spec.
+uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
+                      const SpinLockWaitTransition trans[],
+                      base_internal::SchedulingMode scheduling_mode) {
+  int loop = 0;
+  for (;;) {
+    uint32_t v = w->load(std::memory_order_acquire);
+    int i;
+    for (i = 0; i != n && v != trans[i].from; i++) {
+    }
+    if (i == n) {
+      SpinLockDelay(w, v, ++loop, scheduling_mode);  // no matching transition
+    } else if (trans[i].to == v ||                   // null transition
+               w->compare_exchange_strong(v, trans[i].to,
+                                          std::memory_order_acquire,
+                                          std::memory_order_relaxed)) {
+      if (trans[i].done) return v;
+    }
+  }
+}
+
+static std::atomic<uint64_t> delay_rand;
+
+// Return a suggested delay in nanoseconds for iteration number "loop"
+int SpinLockSuggestedDelayNS(int loop) {
+  // Weak pseudo-random number generator to get some spread between threads
+  // when many are spinning.
+  uint64_t r = delay_rand.load(std::memory_order_relaxed);
+  r = 0x5deece66dLL * r + 0xb;   // numbers from nrand48()
+  delay_rand.store(r, std::memory_order_relaxed);
+
+  if (loop < 0 || loop > 32) {   // limit loop to 0..32
+    loop = 32;
+  }
+  const int kMinDelay = 128 << 10;  // 128us
+  // Double delay every 8 iterations, up to 16x (2ms).
+  int delay = kMinDelay << (loop / 8);
+  // Randomize into the delay..2*delay range, for a resulting 128us..4ms range.
+  return delay | ((delay - 1) & static_cast<int>(r));
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/base/internal/spinlock_wait.h b/third_party/abseil_cpp/absl/base/internal/spinlock_wait.h
new file mode 100644
index 000000000000..169bc749fbc5
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/spinlock_wait.h
@@ -0,0 +1,93 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
+#define ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
+
+// Operations to make atomic transitions on a word, and to allow
+// waiting for those transitions to become possible.
+
+#include <stdint.h>
+#include <atomic>
+
+#include "absl/base/internal/scheduling_mode.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// SpinLockWait() waits until it can perform one of several transitions from
+// "from" to "to".  It returns when it performs a transition where done==true.
+struct SpinLockWaitTransition {
+  uint32_t from;
+  uint32_t to;
+  bool done;
+};
+
+// Wait until *w can transition from trans[i].from to trans[i].to for some i
+// satisfying 0<=i<n && trans[i].done, atomically make the transition,
+// then return the old value of *w.   Make any other atomic transitions
+// where !trans[i].done, but continue waiting.
+uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
+                      const SpinLockWaitTransition trans[],
+                      SchedulingMode scheduling_mode);
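+
+// For example (a sketch; kIdle and kBusy are hypothetical lockword values),
+// to wait until *w can transition from kIdle to kBusy and claim it:
+//
+//   constexpr SpinLockWaitTransition trans[] = {{kIdle, kBusy, true}};
+//   uint32_t old = SpinLockWait(w, 1, trans, SCHEDULE_KERNEL_ONLY);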
+
+// If possible, wake some thread that has called SpinLockDelay(w, ...). If
+// "all" is true, wake all such threads.  This call is a hint, and on some
+// systems it may be a no-op; threads calling SpinLockDelay() will always wake
+// eventually even if SpinLockWake() is never called.
+void SpinLockWake(std::atomic<uint32_t> *w, bool all);
+
+// Wait for an appropriate spin delay on iteration "loop" of a
+// spin loop on location *w, whose previously observed value was "value".
+// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
+// or may wait for a delay that can be truncated by a call to SpinLockWake(w).
+// In all cases, it must return in bounded time even if SpinLockWake() is not
+// called.
+void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop,
+                   base_internal::SchedulingMode scheduling_mode);
+
+// Helper used by AbslInternalSpinLockDelay.
+// Returns a suggested delay in nanoseconds for iteration number "loop".
+int SpinLockSuggestedDelayNS(int loop);
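+
+// For instance, with the implementation in spinlock_wait.cc the minimum delay
+// is 128 << 10 ns (~131us) and doubles every 8 iterations, so loop == 16
+// suggests a delay randomized within roughly [524us, 1048us).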
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+// In some build configurations we pass --detect-odr-violations to the
+// gold linker.  This causes it to flag weak symbol overrides as ODR
+// violations.  Because ODR only applies to C++ and not C,
+// --detect-odr-violations ignores symbols not mangled with C++ names.
+// By changing our extension points to be extern "C", we dodge this
+// check.
+extern "C" {
+void AbslInternalSpinLockWake(std::atomic<uint32_t> *w, bool all);
+void AbslInternalSpinLockDelay(
+    std::atomic<uint32_t> *w, uint32_t value, int loop,
+    absl::base_internal::SchedulingMode scheduling_mode);
+}
+
+inline void absl::base_internal::SpinLockWake(std::atomic<uint32_t> *w,
+                                              bool all) {
+  AbslInternalSpinLockWake(w, all);
+}
+
+inline void absl::base_internal::SpinLockDelay(
+    std::atomic<uint32_t> *w, uint32_t value, int loop,
+    absl::base_internal::SchedulingMode scheduling_mode) {
+  AbslInternalSpinLockDelay(w, value, loop, scheduling_mode);
+}
+
+#endif  // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/spinlock_win32.inc b/third_party/abseil_cpp/absl/base/internal/spinlock_win32.inc
new file mode 100644
index 000000000000..78654b5b5966
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/spinlock_win32.inc
@@ -0,0 +1,37 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Win32-specific part of spinlock_wait.cc
+
+#include <windows.h>
+#include <atomic>
+#include "absl/base/internal/scheduling_mode.h"
+
+extern "C" {
+
+void AbslInternalSpinLockDelay(std::atomic<uint32_t>* /* lock_word */,
+                               uint32_t /* value */, int loop,
+                               absl::base_internal::SchedulingMode /* mode */) {
+  if (loop == 0) {
+  } else if (loop == 1) {
+    Sleep(0);
+  } else {
+    Sleep(absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000);
+  }
+}
+
+void AbslInternalSpinLockWake(std::atomic<uint32_t>* /* lock_word */,
+                              bool /* all */) {}
+
+}  // extern "C"
diff --git a/third_party/abseil_cpp/absl/base/internal/strerror.cc b/third_party/abseil_cpp/absl/base/internal/strerror.cc
new file mode 100644
index 000000000000..af181513cde9
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/strerror.cc
@@ -0,0 +1,75 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/strerror.h"
+
+#include <cerrno>
+#include <cstddef>
+#include <cstdio>
+#include <cstring>
+#include <string>
+#include <type_traits>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/errno_saver.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+const char* StrErrorAdaptor(int errnum, char* buf, size_t buflen) {
+#if defined(_WIN32)
+  int rc = strerror_s(buf, buflen, errnum);
+  buf[buflen - 1] = '\0';  // guarantee NUL termination
+  if (rc == 0 && strncmp(buf, "Unknown error", buflen) == 0) *buf = '\0';
+  return buf;
+#else
+#if defined(__GLIBC__) || defined(__APPLE__)
+  // Use the BSD sys_errlist API provided by GNU glibc and others to
+  // avoid any need to copy the message into the local buffer first.
+  if (0 <= errnum && errnum < sys_nerr) {
+    if (const char* p = sys_errlist[errnum]) {
+      return p;
+    }
+  }
+#endif
+  // The type of `ret` is platform-specific; both of these branches must compile
+  // either way but only one will execute on any given platform:
+  auto ret = strerror_r(errnum, buf, buflen);
+  if (std::is_same<decltype(ret), int>::value) {
+    // XSI `strerror_r`; `ret` is `int`:
+    if (ret) *buf = '\0';
+    return buf;
+  } else {
+    // GNU `strerror_r`; `ret` is `char *`:
+    return reinterpret_cast<const char*>(ret);
+  }
+#endif
+}
+}  // namespace
+
+std::string StrError(int errnum) {
+  absl::base_internal::ErrnoSaver errno_saver;
+  char buf[100];
+  const char* str = StrErrorAdaptor(errnum, buf, sizeof buf);
+  if (*str == '\0') {
+    snprintf(buf, sizeof buf, "Unknown error %d", errnum);
+    str = buf;
+  }
+  return str;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/base/internal/strerror.h b/third_party/abseil_cpp/absl/base/internal/strerror.h
new file mode 100644
index 000000000000..350097366eed
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/strerror.h
@@ -0,0 +1,39 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_STRERROR_H_
+#define ABSL_BASE_INTERNAL_STRERROR_H_
+
+#include <string>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// A portable and thread-safe alternative to C89's `strerror`.
+//
+// The C89 specification of `strerror` is not suitable for use in a
+// multi-threaded application as the returned string may be changed by calls to
+// `strerror` from another thread.  The many non-stdlib alternatives differ
+// enough in their names, availability, and semantics to justify this wrapper
+// around them.  `errno` will not be modified by a call to `absl::StrError`.
+std::string StrError(int errnum);
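+
+// Example (a sketch; the exact text is platform-dependent):
+//
+//   std::string msg = absl::base_internal::StrError(ENOENT);
+//   // msg is typically "No such file or directory".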
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_STRERROR_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/strerror_benchmark.cc b/third_party/abseil_cpp/absl/base/internal/strerror_benchmark.cc
new file mode 100644
index 000000000000..d8ca86b95beb
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/strerror_benchmark.cc
@@ -0,0 +1,38 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cerrno>
+#include <cstdio>
+#include <string>
+
+#include "absl/base/internal/strerror.h"
+#include "benchmark/benchmark.h"
+
+namespace {
+#if defined(__GLIBC__) || defined(__APPLE__)
+void BM_SysErrList(benchmark::State& state) {
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(std::string(sys_errlist[ERANGE]));
+  }
+}
+BENCHMARK(BM_SysErrList);
+#endif
+
+void BM_AbslStrError(benchmark::State& state) {
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(absl::base_internal::StrError(ERANGE));
+  }
+}
+BENCHMARK(BM_AbslStrError);
+}  // namespace
diff --git a/third_party/abseil_cpp/absl/base/internal/strerror_test.cc b/third_party/abseil_cpp/absl/base/internal/strerror_test.cc
new file mode 100644
index 000000000000..a53da97f92c9
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/strerror_test.cc
@@ -0,0 +1,86 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/strerror.h"
+
+#include <atomic>
+#include <cerrno>
+#include <cstdio>
+#include <cstring>
+#include <string>
+#include <thread>  // NOLINT(build/c++11)
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/strings/match.h"
+
+namespace {
+using ::testing::AnyOf;
+using ::testing::Eq;
+
+TEST(StrErrorTest, ValidErrorCode) {
+  errno = ERANGE;
+  EXPECT_THAT(absl::base_internal::StrError(EDOM), Eq(strerror(EDOM)));
+  EXPECT_THAT(errno, Eq(ERANGE));
+}
+
+TEST(StrErrorTest, InvalidErrorCode) {
+  errno = ERANGE;
+  EXPECT_THAT(absl::base_internal::StrError(-1),
+              AnyOf(Eq("No error information"), Eq("Unknown error -1")));
+  EXPECT_THAT(errno, Eq(ERANGE));
+}
+
+TEST(StrErrorTest, MultipleThreads) {
+  // In this test, we will start up 100 threads and have each one call
+  // StrError 1000 times, each time with a different errnum.  We
+  // expect that StrError(errnum) will return a string equal to the
+  // one returned by strerror(errnum), if the code is known.  Since
+  // strerror is known to be thread-hostile, collect all the expected
+  // strings up front.
+  const int kNumCodes = 1000;
+  std::vector<std::string> expected_strings(kNumCodes);
+  for (int i = 0; i < kNumCodes; ++i) {
+    expected_strings[i] = strerror(i);
+  }
+
+  std::atomic_int counter(0);
+  auto thread_fun = [&]() {
+    for (int i = 0; i < kNumCodes; ++i) {
+      ++counter;
+      errno = ERANGE;
+      const std::string value = absl::base_internal::StrError(i);
+      // Only the GNU implementation is guaranteed to provide the
+      // string "Unknown error nnn". POSIX doesn't say anything.
+      if (!absl::StartsWith(value, "Unknown error ")) {
+        EXPECT_THAT(absl::base_internal::StrError(i), Eq(expected_strings[i]));
+      }
+      EXPECT_THAT(errno, Eq(ERANGE));
+    }
+  };
+
+  const int kNumThreads = 100;
+  std::vector<std::thread> threads;
+  for (int i = 0; i < kNumThreads; ++i) {
+    threads.push_back(std::thread(thread_fun));
+  }
+  for (auto& thread : threads) {
+    thread.join();
+  }
+
+  EXPECT_THAT(counter, Eq(kNumThreads * kNumCodes));
+}
+
+}  // namespace
diff --git a/third_party/abseil_cpp/absl/base/internal/sysinfo.cc b/third_party/abseil_cpp/absl/base/internal/sysinfo.cc
new file mode 100644
index 000000000000..6c69683faf67
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/sysinfo.cc
@@ -0,0 +1,425 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/sysinfo.h"
+
+#include "absl/base/attributes.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <fcntl.h>
+#include <pthread.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#ifdef __linux__
+#include <sys/syscall.h>
+#endif
+
+#if defined(__APPLE__) || defined(__FreeBSD__)
+#include <sys/sysctl.h>
+#endif
+
+#if defined(__myriad2__)
+#include <rtems.h>
+#endif
+
+#include <string.h>
+#include <cassert>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <ctime>
+#include <limits>
+#include <thread>  // NOLINT(build/c++11)
+#include <utility>
+#include <vector>
+
+#include "absl/base/call_once.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/internal/unscaledcycleclock.h"
+#include "absl/base/thread_annotations.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+static int GetNumCPUs() {
+#if defined(__myriad2__)
+  return 1;
+#else
+  // Other possibilities:
+  //  - Read /sys/devices/system/cpu/online and use cpumask_parse()
+  //  - sysconf(_SC_NPROCESSORS_ONLN)
+  return std::thread::hardware_concurrency();
+#endif
+}
+
+#if defined(_WIN32)
+
+static double GetNominalCPUFrequency() {
+#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \
+    !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
+  // UWP apps don't have access to the registry and currently don't provide an
+  // API informing about CPU nominal frequency.
+  return 1.0;
+#else
+#pragma comment(lib, "advapi32.lib")  // For Reg* functions.
+  HKEY key;
+  // Use the Reg* functions rather than the SH functions because shlwapi.dll
+  // pulls in gdi32.dll which makes process destruction much more costly.
+  if (RegOpenKeyExA(HKEY_LOCAL_MACHINE,
+                    "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", 0,
+                    KEY_READ, &key) == ERROR_SUCCESS) {
+    DWORD type = 0;
+    DWORD data = 0;
+    DWORD data_size = sizeof(data);
+    auto result = RegQueryValueExA(key, "~MHz", 0, &type,
+                                   reinterpret_cast<LPBYTE>(&data), &data_size);
+    RegCloseKey(key);
+    if (result == ERROR_SUCCESS && type == REG_DWORD &&
+        data_size == sizeof(data)) {
+      return data * 1e6;  // Value is MHz.
+    }
+  }
+  return 1.0;
+#endif  // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP
+}
+
+#elif defined(CTL_HW) && defined(HW_CPU_FREQ)
+
+static double GetNominalCPUFrequency() {
+  unsigned freq;
+  size_t size = sizeof(freq);
+  int mib[2] = {CTL_HW, HW_CPU_FREQ};
+  if (sysctl(mib, 2, &freq, &size, nullptr, 0) == 0) {
+    return static_cast<double>(freq);
+  }
+  return 1.0;
+}
+
+#else
+
+// Helper function for reading a long from a file. Returns true on success,
+// in which case the memory location pointed to by value is set to the value
+// read.
+static bool ReadLongFromFile(const char *file, long *value) {
+  bool ret = false;
+  int fd = open(file, O_RDONLY);
+  if (fd != -1) {
+    char line[1024];
+    char *err;
+    memset(line, '\0', sizeof(line));
+    int len = read(fd, line, sizeof(line) - 1);
+    if (len <= 0) {
+      ret = false;
+    } else {
+      const long temp_value = strtol(line, &err, 10);
+      if (line[0] != '\0' && (*err == '\n' || *err == '\0')) {
+        *value = temp_value;
+        ret = true;
+      }
+    }
+    close(fd);
+  }
+  return ret;
+}
+
+#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
+
+// Reads a monotonic time source and returns a value in
+// nanoseconds. The returned value uses an arbitrary epoch, not the
+// Unix epoch.
+static int64_t ReadMonotonicClockNanos() {
+  struct timespec t;
+#ifdef CLOCK_MONOTONIC_RAW
+  int rc = clock_gettime(CLOCK_MONOTONIC_RAW, &t);
+#else
+  int rc = clock_gettime(CLOCK_MONOTONIC, &t);
+#endif
+  if (rc != 0) {
+    perror("clock_gettime() failed");
+    abort();
+  }
+  return int64_t{t.tv_sec} * 1000000000 + t.tv_nsec;
+}
+
+class UnscaledCycleClockWrapperForInitializeFrequency {
+ public:
+  static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); }
+};
+
+struct TimeTscPair {
+  int64_t time;  // From ReadMonotonicClockNanos().
+  int64_t tsc;   // From UnscaledCycleClock::Now().
+};
+
+// Returns a pair of values (monotonic kernel time, TSC ticks) that
+// approximately correspond to each other.  This is accomplished by
+// doing several reads and picking the reading with the lowest
+// latency.  This approach is used to minimize the probability that
+// our thread was preempted between clock reads.
+static TimeTscPair GetTimeTscPair() {
+  int64_t best_latency = std::numeric_limits<int64_t>::max();
+  TimeTscPair best;
+  for (int i = 0; i < 10; ++i) {
+    int64_t t0 = ReadMonotonicClockNanos();
+    int64_t tsc = UnscaledCycleClockWrapperForInitializeFrequency::Now();
+    int64_t t1 = ReadMonotonicClockNanos();
+    int64_t latency = t1 - t0;
+    if (latency < best_latency) {
+      best_latency = latency;
+      best.time = t0;
+      best.tsc = tsc;
+    }
+  }
+  return best;
+}
+
+// Measures and returns the TSC frequency by taking a pair of
+// measurements approximately `sleep_nanoseconds` apart.
+static double MeasureTscFrequencyWithSleep(int sleep_nanoseconds) {
+  auto t0 = GetTimeTscPair();
+  struct timespec ts;
+  ts.tv_sec = 0;
+  ts.tv_nsec = sleep_nanoseconds;
+  while (nanosleep(&ts, &ts) != 0 && errno == EINTR) {}
+  auto t1 = GetTimeTscPair();
+  double elapsed_ticks = t1.tsc - t0.tsc;
+  double elapsed_time = (t1.time - t0.time) * 1e-9;
+  return elapsed_ticks / elapsed_time;
+}
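+
+// For example (illustrative numbers, not a real measurement): if the TSC
+// advances by 3,000,000 ticks while the monotonic clock advances by 0.001 s,
+// the estimated frequency is 3,000,000 / 0.001 = 3.0e9 ticks/s, i.e. 3 GHz.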
+
+// Measures and returns the TSC frequency by calling
+// MeasureTscFrequencyWithSleep(), doubling the sleep interval until the
+// frequency measurement stabilizes.
+static double MeasureTscFrequency() {
+  double last_measurement = -1.0;
+  int sleep_nanoseconds = 1000000;  // 1 millisecond.
+  for (int i = 0; i < 8; ++i) {
+    double measurement = MeasureTscFrequencyWithSleep(sleep_nanoseconds);
+    if (measurement * 0.99 < last_measurement &&
+        last_measurement < measurement * 1.01) {
+      // Use the current measurement if it is within 1% of the
+      // previous measurement.
+      return measurement;
+    }
+    last_measurement = measurement;
+    sleep_nanoseconds *= 2;
+  }
+  return last_measurement;
+}
+
+#endif  // ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+
+static double GetNominalCPUFrequency() {
+  long freq = 0;
+
+  // Google's production kernel has a patch to export the TSC
+  // frequency through sysfs. If the kernel is exporting the TSC
+  // frequency use that. There are issues where cpuinfo_max_freq
+  // cannot be relied on because the BIOS may be exporting an invalid
+  // p-state (on x86) or p-states may be used to put the processor in
+  // a new mode (turbo mode). Essentially, those frequencies cannot
+  // always be relied upon. The same reasons apply to /proc/cpuinfo as
+  // well.
+  if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)) {
+    return freq * 1e3;  // Value is kHz.
+  }
+
+#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
+  // On these platforms, the TSC frequency is the nominal CPU
+  // frequency.  But without having the kernel export it directly
+  // through /sys/devices/system/cpu/cpu0/tsc_freq_khz, there is no
+  // other way to reliably get the TSC frequency, so we have to
+  // measure it ourselves.  Some CPUs abuse cpuinfo_max_freq by
+  // exporting "fake" frequencies for implementing new features. For
+  // example, Intel's turbo mode is enabled by exposing a p-state
+  // value with a higher frequency than that of the real TSC
+  // rate. Because of this, we prefer to measure the TSC rate
+  // ourselves on i386 and x86-64.
+  return MeasureTscFrequency();
+#else
+
+  // If CPU scaling is in effect, we want to use the *maximum*
+  // frequency, not whatever CPU speed some random processor happens
+  // to be using now.
+  if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
+                       &freq)) {
+    return freq * 1e3;  // Value is kHz.
+  }
+
+  return 1.0;
+#endif  // !ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+}
+
+#endif
+
+ABSL_CONST_INIT static once_flag init_num_cpus_once;
+ABSL_CONST_INIT static int num_cpus = 0;
+
+// NumCPUs() may be called before main() and before malloc is properly
+// initialized; therefore, this must not allocate memory.
+int NumCPUs() {
+  base_internal::LowLevelCallOnce(
+      &init_num_cpus_once, []() { num_cpus = GetNumCPUs(); });
+  return num_cpus;
+}
+
+// A default frequency of 0.0 might be dangerous if it is used in division.
+ABSL_CONST_INIT static once_flag init_nominal_cpu_frequency_once;
+ABSL_CONST_INIT static double nominal_cpu_frequency = 1.0;
+
+// NominalCPUFrequency() may be called before main() and before malloc is
+// properly initialized; therefore, this must not allocate memory.
+double NominalCPUFrequency() {
+  base_internal::LowLevelCallOnce(
+      &init_nominal_cpu_frequency_once,
+      []() { nominal_cpu_frequency = GetNominalCPUFrequency(); });
+  return nominal_cpu_frequency;
+}
+
+#if defined(_WIN32)
+
+pid_t GetTID() {
+  return pid_t{GetCurrentThreadId()};
+}
+
+#elif defined(__linux__)
+
+#ifndef SYS_gettid
+#define SYS_gettid __NR_gettid
+#endif
+
+pid_t GetTID() {
+  return syscall(SYS_gettid);
+}
+
+#elif defined(__akaros__)
+
+pid_t GetTID() {
+  // Akaros has a concept of "vcore context", which is the state the program
+  // is forced into when we need to make a user-level scheduling decision, or
+  // run a signal handler.  This is analogous to the interrupt context that a
+  // CPU might enter if it encounters some kind of exception.
+  //
+  // There is no current thread context in vcore context, but we need to give
+  // a reasonable answer if asked for a thread ID (e.g., in a signal handler).
+  // Thread 0 always exists, so if we are in vcore context, we return that.
+  //
+  // Otherwise, we know (since we are using pthreads) that the uthread struct
+  // current_uthread is pointing to is the first element of a
+  // struct pthread_tcb, so we extract and return the thread ID from that.
+  //
+  // TODO(dcross): Akaros anticipates moving the thread ID to the uthread
+  // structure at some point. We should modify this code to remove the cast
+  // when that happens.
+  if (in_vcore_context())
+    return 0;
+  return reinterpret_cast<struct pthread_tcb *>(current_uthread)->id;
+}
+
+#elif defined(__myriad2__)
+
+pid_t GetTID() {
+  uint32_t tid;
+  rtems_task_ident(RTEMS_SELF, 0, &tid);
+  return tid;
+}
+
+#else
+
+// Fallback implementation of GetTID using pthread_getspecific.
+ABSL_CONST_INIT static once_flag tid_once;
+ABSL_CONST_INIT static pthread_key_t tid_key;
+ABSL_CONST_INIT static absl::base_internal::SpinLock tid_lock(
+    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+
+// We set a bit per thread in this array to indicate that an ID is in
+// use. ID 0 is unused because it is the default value returned by
+// pthread_getspecific().
+ABSL_CONST_INIT static std::vector<uint32_t> *tid_array
+    ABSL_GUARDED_BY(tid_lock) = nullptr;
+static constexpr int kBitsPerWord = 32;  // tid_array is uint32_t.
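+//
+// A thread's ID maps into the bit array as (word, bit) = (tid / 32, tid % 32);
+// for example (illustrative), TID 37 is tracked by bit 5 of (*tid_array)[1].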
+
+// Returns the TID to tid_array.
+static void FreeTID(void *v) {
+  intptr_t tid = reinterpret_cast<intptr_t>(v);
+  int word = tid / kBitsPerWord;
+  uint32_t mask = ~(1u << (tid % kBitsPerWord));
+  absl::base_internal::SpinLockHolder lock(&tid_lock);
+  assert(0 <= word && static_cast<size_t>(word) < tid_array->size());
+  (*tid_array)[word] &= mask;
+}
+
+static void InitGetTID() {
+  if (pthread_key_create(&tid_key, FreeTID) != 0) {
+    // The logging system calls GetTID() so it can't be used here.
+    perror("pthread_key_create failed");
+    abort();
+  }
+
+  // Initialize tid_array.
+  absl::base_internal::SpinLockHolder lock(&tid_lock);
+  tid_array = new std::vector<uint32_t>(1);
+  (*tid_array)[0] = 1;  // ID 0 is never allocated.
+}
+
+// Return a per-thread small integer ID from pthread's thread-specific data.
+pid_t GetTID() {
+  absl::call_once(tid_once, InitGetTID);
+
+  intptr_t tid = reinterpret_cast<intptr_t>(pthread_getspecific(tid_key));
+  if (tid != 0) {
+    return tid;
+  }
+
+  int bit;  // tid_array[word] = 1u << bit;
+  size_t word;
+  {
+    // Search for the first unused ID.
+    absl::base_internal::SpinLockHolder lock(&tid_lock);
+    // First search for a word in the array that is not all ones.
+    word = 0;
+    while (word < tid_array->size() && ~(*tid_array)[word] == 0) {
+      ++word;
+    }
+    if (word == tid_array->size()) {
+      tid_array->push_back(0);  // No space left, add kBitsPerWord more IDs.
+    }
+    // Search for a zero bit in the word.
+    bit = 0;
+    while (bit < kBitsPerWord && (((*tid_array)[word] >> bit) & 1) != 0) {
+      ++bit;
+    }
+    tid = (word * kBitsPerWord) + bit;
+    (*tid_array)[word] |= 1u << bit;  // Mark the TID as allocated.
+  }
+
+  if (pthread_setspecific(tid_key, reinterpret_cast<void *>(tid)) != 0) {
+    perror("pthread_setspecific failed");
+    abort();
+  }
+
+  return static_cast<pid_t>(tid);
+}
+
+#endif
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/base/internal/sysinfo.h b/third_party/abseil_cpp/absl/base/internal/sysinfo.h
new file mode 100644
index 000000000000..7246d5dd95c3
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/sysinfo.h
@@ -0,0 +1,66 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file includes routines to find out characteristics
+// of the machine a program is running on.  It is undoubtedly
+// system-dependent.
+
+// Functions listed here that accept a pid_t as an argument act on the
+// current process if the pid_t argument is 0.
+// All functions here are thread-hostile due to file caching unless
+// commented otherwise.
+
+#ifndef ABSL_BASE_INTERNAL_SYSINFO_H_
+#define ABSL_BASE_INTERNAL_SYSINFO_H_
+
+#ifndef _WIN32
+#include <sys/types.h>
+#endif
+
+#include <cstdint>
+
+#include "absl/base/port.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Nominal core processor cycles per second of each processor.  This is _not_
+// necessarily the frequency of the CycleClock counter (see cycleclock.h).
+// Thread-safe.
+double NominalCPUFrequency();
+
+// Number of logical processors (hyperthreads) in system. Thread-safe.
+int NumCPUs();
+
+// Return the thread id of the current thread, as told by the system.
+// No two currently-live threads implemented by the OS shall have the same ID.
+// Thread ids of exited threads may be reused.   Multiple user-level threads
+// may have the same thread ID if multiplexed on the same OS thread.
+//
+// On Linux, you may send a signal to the resulting ID with kill().  However,
+// it is recommended for portability that you use pthread_kill() instead.
+#ifdef _WIN32
+// On Windows, process ids and thread ids share the same type: the return
+// types of GetProcessId() and GetThreadId() are both DWORD, an unsigned
+// 32-bit type.
+using pid_t = uint32_t;
+#endif
+pid_t GetTID();
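+//
+// Example (illustrative usage; not part of the original header):
+//
+//   pid_t tid = absl::base_internal::GetTID();  // ID of the calling thread.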
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SYSINFO_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/sysinfo_test.cc b/third_party/abseil_cpp/absl/base/internal/sysinfo_test.cc
new file mode 100644
index 000000000000..fa8b88b1dc07
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/sysinfo_test.cc
@@ -0,0 +1,100 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/sysinfo.h"
+
+#ifndef _WIN32
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#include <thread>  // NOLINT(build/c++11)
+#include <unordered_set>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/synchronization/barrier.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+TEST(SysinfoTest, NumCPUs) {
+  EXPECT_NE(NumCPUs(), 0)
+      << "NumCPUs() should not have the default value of 0";
+}
+
+TEST(SysinfoTest, NominalCPUFrequency) {
+#if !(defined(__aarch64__) && defined(__linux__)) && !defined(__EMSCRIPTEN__)
+  EXPECT_GE(NominalCPUFrequency(), 1000.0)
+      << "NominalCPUFrequency() did not return a reasonable value";
+#else
+  // Aarch64 cannot read the CPU frequency from sysfs, so we get back 1.0.
+  // Emscripten does not have a sysfs to read from at all.
+  EXPECT_EQ(NominalCPUFrequency(), 1.0)
+      << "CPU frequency detection was fixed! Please update unittest.";
+#endif
+}
+
+TEST(SysinfoTest, GetTID) {
+  EXPECT_EQ(GetTID(), GetTID());  // Basic compile and equality test.
+#ifdef __native_client__
+  // Native Client has a race condition bug that leads to memory
+  // exhaustion when repeatedly creating and joining threads.
+  // https://bugs.chromium.org/p/nativeclient/issues/detail?id=1027
+  return;
+#endif
+  // Test that TIDs are unique to each thread.
+  // Uses a few loops to exercise implementations that reallocate IDs.
+  for (int i = 0; i < 10; ++i) {
+    constexpr int kNumThreads = 10;
+    Barrier all_threads_done(kNumThreads);
+    std::vector<std::thread> threads;
+
+    Mutex mutex;
+    std::unordered_set<pid_t> tids;
+
+    for (int j = 0; j < kNumThreads; ++j) {
+      threads.push_back(std::thread([&]() {
+        pid_t id = GetTID();
+        {
+          MutexLock lock(&mutex);
+          ASSERT_TRUE(tids.find(id) == tids.end());
+          tids.insert(id);
+        }
+        // We can't simply join the threads here. The threads need to
+        // be alive otherwise the TID might have been reallocated to
+        // another live thread.
+        all_threads_done.Block();
+      }));
+    }
+    for (auto& thread : threads) {
+      thread.join();
+    }
+  }
+}
+
+#ifdef __linux__
+TEST(SysinfoTest, LinuxGetTID) {
+  // On Linux, for the main thread, GetTID()==getpid() is guaranteed by the API.
+  EXPECT_EQ(GetTID(), getpid());
+}
+#endif
+
+}  // namespace
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/base/internal/thread_annotations.h b/third_party/abseil_cpp/absl/base/internal/thread_annotations.h
new file mode 100644
index 000000000000..4dab6a9c150a
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/thread_annotations.h
@@ -0,0 +1,271 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: thread_annotations.h
+// -----------------------------------------------------------------------------
+//
+// WARNING: This is a backwards compatible header and it will be removed after
+// the migration to prefixed thread annotations is finished; please include
+// "absl/base/thread_annotations.h".
+//
+// This header file contains macro definitions for thread safety annotations
+// that allow developers to document the locking policies of multi-threaded
+// code. The annotations can also help program analysis tools to identify
+// potential thread safety issues.
+//
+// These annotations are implemented using compiler attributes. Using the macros
+// defined here instead of raw attributes allows for portability and future
+// compatibility.
+//
+// When referring to mutexes in the arguments of the attributes, you should
+// use variable names or more complex expressions (e.g. my_object->mutex_)
+// that evaluate to a concrete mutex object whenever possible. If the mutex
+// you want to refer to is not in scope, you may use a member pointer
+// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object.
+
+#ifndef ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
+#define ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
+
+#if defined(__clang__)
+#define THREAD_ANNOTATION_ATTRIBUTE__(x)   __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE__(x)   // no-op
+#endif
+
+// GUARDED_BY()
+//
+// Documents if a shared field or global variable needs to be protected by a
+// mutex. GUARDED_BY() allows the user to specify a particular mutex that
+// should be held when accessing the annotated variable.
+//
+// Although this annotation (and PT_GUARDED_BY, below) cannot be applied to
+// local variables, a local variable and its associated mutex can often be
+// combined into a small class or struct, thereby allowing the annotation.
+//
+// Example:
+//
+//   class Foo {
+//     Mutex mu_;
+//     int p1_ GUARDED_BY(mu_);
+//     ...
+//   };
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+
+// PT_GUARDED_BY()
+//
+// Documents if the memory location pointed to by a pointer should be guarded
+// by a mutex when dereferencing the pointer.
+//
+// Example:
+//   class Foo {
+//     Mutex mu_;
+//     int *p1_ PT_GUARDED_BY(mu_);
+//     ...
+//   };
+//
+// Note that a pointer variable to a shared memory location could itself be a
+// shared variable.
+//
+// Example:
+//
+//   // `q_`, guarded by `mu1_`, points to a shared memory location that is
+//   // guarded by `mu2_`:
+//   int *q_ GUARDED_BY(mu1_) PT_GUARDED_BY(mu2_);
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
+
+// ACQUIRED_AFTER() / ACQUIRED_BEFORE()
+//
+// Documents the acquisition order between locks that can be held
+// simultaneously by a thread. For any two locks that need to be annotated
+// to establish an acquisition order, only one of them needs the annotation.
+// (i.e. You don't have to annotate both locks with both ACQUIRED_AFTER
+// and ACQUIRED_BEFORE.)
+//
+// As with GUARDED_BY, this is only applicable to mutexes that are shared
+// fields or global variables.
+//
+// Example:
+//
+//   Mutex m1_;
+//   Mutex m2_ ACQUIRED_AFTER(m1_);
+#define ACQUIRED_AFTER(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
+
+#define ACQUIRED_BEFORE(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
+
+// EXCLUSIVE_LOCKS_REQUIRED() / SHARED_LOCKS_REQUIRED()
+//
+// Documents a function that expects a mutex to be held prior to entry.
+// The mutex is expected to be held both on entry to, and exit from, the
+// function.
+//
+// An exclusive lock allows read-write access to the guarded data member(s), and
+// only one thread can acquire a lock exclusively at any one time. A shared lock
+// allows read-only access, and any number of threads can acquire a shared lock
+// concurrently.
+//
+// Generally, non-const methods should be annotated with
+// EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with
+// SHARED_LOCKS_REQUIRED.
+//
+// Example:
+//
+//   Mutex mu1, mu2;
+//   int a GUARDED_BY(mu1);
+//   int b GUARDED_BY(mu2);
+//
+//   void foo() EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... }
+//   void bar() const SHARED_LOCKS_REQUIRED(mu1, mu2) { ... }
+#define EXCLUSIVE_LOCKS_REQUIRED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
+
+#define SHARED_LOCKS_REQUIRED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
+
+// LOCKS_EXCLUDED()
+//
+// Documents the locks acquired in the body of the function. These locks
+// cannot be held when calling this function (as Abseil's `Mutex` locks are
+// non-reentrant).
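+//
+// Example (illustrative; this example is not in the original header):
+//
+//   Mutex mu;
+//   int count GUARDED_BY(mu);
+//
+//   void Incr() LOCKS_EXCLUDED(mu) {  // Must be called without `mu` held.
+//     MutexLock lock(&mu);
+//     count++;
+//   }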
+#define LOCKS_EXCLUDED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
+
+// LOCK_RETURNED()
+//
+// Documents a function that returns a mutex without acquiring it.  For example,
+// a public getter method that returns a pointer to a private mutex should
+// be annotated with LOCK_RETURNED.
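+//
+// Example (illustrative; not in the original header):
+//
+//   class Foo {
+//    public:
+//     Mutex* mu() LOCK_RETURNED(mu_) { return &mu_; }
+//
+//    private:
+//     Mutex mu_;
+//   };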
+#define LOCK_RETURNED(x) \
+  THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
+
+// LOCKABLE
+//
+// Documents if a class/type is a lockable type (such as the `Mutex` class).
+#define LOCKABLE \
+  THREAD_ANNOTATION_ATTRIBUTE__(lockable)
+
+// SCOPED_LOCKABLE
+//
+// Documents if a class does RAII locking (such as the `MutexLock` class).
+// The constructor should use `EXCLUSIVE_LOCK_FUNCTION()` to specify the mutex
+// that is acquired, and the destructor should use `UNLOCK_FUNCTION()` with no
+// arguments; the analysis will assume that the destructor unlocks whatever the
+// constructor locked.
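+//
+// Example (an illustrative sketch of an RAII lock type; not in the original
+// header):
+//
+//   class SCOPED_LOCKABLE MyMutexLock {
+//    public:
+//     explicit MyMutexLock(Mutex* mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
+//       mu_->Lock();
+//     }
+//     ~MyMutexLock() UNLOCK_FUNCTION() { mu_->Unlock(); }
+//
+//    private:
+//     Mutex* const mu_;
+//   };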
+#define SCOPED_LOCKABLE \
+  THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+
+// EXCLUSIVE_LOCK_FUNCTION()
+//
+// Documents functions that acquire a lock in the body of a function, and do
+// not release it.
+#define EXCLUSIVE_LOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
+
+// SHARED_LOCK_FUNCTION()
+//
+// Documents functions that acquire a shared (reader) lock in the body of a
+// function, and do not release it.
+#define SHARED_LOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
+
+// UNLOCK_FUNCTION()
+//
+// Documents functions that expect a lock to be held on entry to the function,
+// and release it in the body of the function.
+#define UNLOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
+
+// EXCLUSIVE_TRYLOCK_FUNCTION() / SHARED_TRYLOCK_FUNCTION()
+//
+// Documents functions that try to acquire a lock, and return success or failure
+// (or a non-boolean value that can be interpreted as a boolean).
+// The first argument should be `true` for functions that return `true` on
+// success, or `false` for functions that return `false` on success. The second
+// argument specifies the mutex that is locked on success. If unspecified, this
+// mutex is assumed to be `this`.
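+//
+// Example (illustrative; not in the original header):
+//
+//   class LOCKABLE MyMutex {
+//    public:
+//     bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true);  // true on success.
+//     void Unlock() UNLOCK_FUNCTION();
+//   };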
+#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
+
+#define SHARED_TRYLOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
+
+// ASSERT_EXCLUSIVE_LOCK() / ASSERT_SHARED_LOCK()
+//
+// Documents functions that dynamically check to see if a lock is held, and fail
+// if it is not held.
+#define ASSERT_EXCLUSIVE_LOCK(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__))
+
+#define ASSERT_SHARED_LOCK(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__))
+
+// NO_THREAD_SAFETY_ANALYSIS
+//
+// Turns off thread safety checking within the body of a particular function.
+// This annotation is used to mark functions that are known to be correct, but
+// the locking behavior is more complicated than the analyzer can handle.
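+//
+// Example (an illustrative sketch; not in the original header):
+//
+//   void LockAndHandOff() NO_THREAD_SAFETY_ANALYSIS {
+//     mu_.Lock();  // Deliberately released elsewhere; the analyzer cannot
+//                  // follow this hand-off pattern, so checking is disabled.
+//   }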
+#define NO_THREAD_SAFETY_ANALYSIS \
+  THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
+
+//------------------------------------------------------------------------------
+// Tool-Supplied Annotations
+//------------------------------------------------------------------------------
+
+// TS_UNCHECKED should be placed around lock expressions that are not valid
+// C++ syntax, but which are present for documentation purposes.  These
+// annotations will be ignored by the analysis.
+#define TS_UNCHECKED(x) ""
+
+// TS_FIXME is used to mark lock expressions that are not valid C++ syntax.
+// It is used by automated tools to mark and disable invalid expressions.
+// The annotation should either be fixed, or changed to TS_UNCHECKED.
+#define TS_FIXME(x) ""
+
+// Like NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body of
+// a particular function.  However, this attribute is used to mark functions
+// that are incorrect and need to be fixed.  It is used by automated tools to
+// avoid breaking the build when the analysis is updated.
+// Code owners are expected to eventually fix the routine.
+#define NO_THREAD_SAFETY_ANALYSIS_FIXME  NO_THREAD_SAFETY_ANALYSIS
+
+// Similar to NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a GUARDED_BY
+// annotation that needs to be fixed, because it is producing a thread safety
+// warning.  It disables the GUARDED_BY.
+#define GUARDED_BY_FIXME(x)
+
+// Disables warnings for a single read operation.  This can be used to avoid
+// warnings when it is known that the read is not actually involved in a race,
+// but the compiler cannot confirm that.
+#define TS_UNCHECKED_READ(x) thread_safety_analysis::ts_unchecked_read(x)
+
+
+namespace thread_safety_analysis {
+
+// Takes a reference to a guarded data member, and returns an unguarded
+// reference.
+template <typename T>
+inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS {
+  return v;
+}
+
+template <typename T>
+inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS {
+  return v;
+}
+
+}  // namespace thread_safety_analysis
+
+#endif  // ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/thread_identity.cc b/third_party/abseil_cpp/absl/base/internal/thread_identity.cc
new file mode 100644
index 000000000000..d63a04ae91d5
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/thread_identity.cc
@@ -0,0 +1,152 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/thread_identity.h"
+
+#ifndef _WIN32
+#include <pthread.h>
+#include <signal.h>
+#endif
+
+#include <atomic>
+#include <cassert>
+#include <memory>
+
+#include "absl/base/call_once.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+#if ABSL_THREAD_IDENTITY_MODE != ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+namespace {
+// Used to coordinate one-time creation of our pthread_key.
+absl::once_flag init_thread_identity_key_once;
+pthread_key_t thread_identity_pthread_key;
+std::atomic<bool> pthread_key_initialized(false);
+
+void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) {
+  pthread_key_create(&thread_identity_pthread_key, reclaimer);
+  pthread_key_initialized.store(true, std::memory_order_release);
+}
+}  // namespace
+#endif
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+// The actual TLS storage for a thread's currently associated ThreadIdentity.
+// This is referenced by inline accessors in the header.
+// "protected" visibility ensures that if multiple instances of Abseil code
+// exist within a process (via dlopen() or similar), references to
+// thread_identity_ptr from each instance of the code will refer to
+// *different* instances of this ptr.
+#ifdef __GNUC__
+__attribute__((visibility("protected")))
+#endif  // __GNUC__
+#if ABSL_PER_THREAD_TLS
+// Prefer __thread to thread_local as benchmarks indicate it is a bit faster.
+ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr;
+#elif defined(ABSL_HAVE_THREAD_LOCAL)
+thread_local ThreadIdentity* thread_identity_ptr = nullptr;
+#endif  // ABSL_PER_THREAD_TLS
+#endif  // TLS or CPP11
+
+void SetCurrentThreadIdentity(
+    ThreadIdentity* identity, ThreadIdentityReclaimerFunction reclaimer) {
+  assert(CurrentThreadIdentityIfPresent() == nullptr);
+  // Associate our destructor.
+  // NOTE: This call to pthread_setspecific is currently the only immovable
+  // barrier to CurrentThreadIdentity() always being async signal safe.
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+  // NOTE: Not async-safe.  But can be open-coded.
+  absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
+                  reclaimer);
+
+#if defined(__EMSCRIPTEN__) || defined(__MINGW32__)
+  // Emscripten and MinGW pthread implementations do not support signals.
+  // See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html
+  // for more information.
+  pthread_setspecific(thread_identity_pthread_key,
+                      reinterpret_cast<void*>(identity));
+#else
+  // We must mask signals around the call to setspecific as with current glibc,
+  // a concurrent getspecific (needed for GetCurrentThreadIdentityIfPresent())
+  // may zero our value.
+  //
+  // While not officially async-signal safe, getspecific within a signal handler
+  // is otherwise OK.
+  sigset_t all_signals;
+  sigset_t curr_signals;
+  sigfillset(&all_signals);
+  pthread_sigmask(SIG_SETMASK, &all_signals, &curr_signals);
+  pthread_setspecific(thread_identity_pthread_key,
+                      reinterpret_cast<void*>(identity));
+  pthread_sigmask(SIG_SETMASK, &curr_signals, nullptr);
+#endif  // !__EMSCRIPTEN__ && !__MINGW32__
+
+#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS
+  // NOTE: Not async-safe.  But can be open-coded.
+  absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
+                  reclaimer);
+  pthread_setspecific(thread_identity_pthread_key,
+                      reinterpret_cast<void*>(identity));
+  thread_identity_ptr = identity;
+#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+  thread_local std::unique_ptr<ThreadIdentity, ThreadIdentityReclaimerFunction>
+      holder(identity, reclaimer);
+  thread_identity_ptr = identity;
+#else
+#error Unimplemented ABSL_THREAD_IDENTITY_MODE
+#endif
+}
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+
+// Please see the comment on `CurrentThreadIdentityIfPresent` in
+// thread_identity.h. Because DLLs cannot expose thread_local variables in
+// headers, we opt for the correct-but-slower option of placing the definition
+// of this function only in a translation unit inside DLL.
+#if defined(ABSL_BUILD_DLL) || defined(ABSL_CONSUME_DLL)
+ThreadIdentity* CurrentThreadIdentityIfPresent() { return thread_identity_ptr; }
+#endif
+#endif
+
+void ClearCurrentThreadIdentity() {
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+  thread_identity_ptr = nullptr;
+#elif ABSL_THREAD_IDENTITY_MODE == \
+      ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+  // pthread_setspecific is expected to clear the value on thread destruction.
+  assert(CurrentThreadIdentityIfPresent() == nullptr);
+#endif
+}
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+ThreadIdentity* CurrentThreadIdentityIfPresent() {
+  bool initialized = pthread_key_initialized.load(std::memory_order_acquire);
+  if (!initialized) {
+    return nullptr;
+  }
+  return reinterpret_cast<ThreadIdentity*>(
+      pthread_getspecific(thread_identity_pthread_key));
+}
+#endif
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/base/internal/thread_identity.h b/third_party/abseil_cpp/absl/base/internal/thread_identity.h
new file mode 100644
index 000000000000..ceb109b41c6a
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/thread_identity.h
@@ -0,0 +1,259 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Each active thread has a ThreadIdentity that may represent the thread in
+// interfaces at various levels.  ThreadIdentity objects are never deallocated.
+// When a thread terminates, its ThreadIdentity object may be reused for a
+// thread created later.
+
+#ifndef ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+#define ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+
+#ifndef _WIN32
+#include <pthread.h>
+// Defines __GOOGLE_GRTE_VERSION__ (via glibc-specific features.h) when
+// supported.
+#include <unistd.h>
+#endif
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/per_thread_tls.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+struct SynchLocksHeld;
+struct SynchWaitParams;
+
+namespace base_internal {
+
+class SpinLock;
+struct ThreadIdentity;
+
+// Used by the implementation of absl::Mutex and absl::CondVar.
+struct PerThreadSynch {
+  // The internal representation of absl::Mutex and absl::CondVar rely
+  // on the alignment of PerThreadSynch. Both store the address of the
+  // PerThreadSynch in the high-order bits of their internal state,
+  // which means the low kLowZeroBits of the address of PerThreadSynch
+  // must be zero.
+  static constexpr int kLowZeroBits = 8;
+  static constexpr int kAlignment = 1 << kLowZeroBits;
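+  // (With kLowZeroBits == 8, every PerThreadSynch is 256-byte aligned, so the
+  // low 8 bits of its address are guaranteed to be zero and can carry state.)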
+
+  // Returns the associated ThreadIdentity.
+  // This can be implemented as a cast because we guarantee
+  // PerThreadSynch is the first element of ThreadIdentity.
+  ThreadIdentity* thread_identity() {
+    return reinterpret_cast<ThreadIdentity*>(this);
+  }
+
+  PerThreadSynch *next;  // Circular waiter queue; initialized to 0.
+  PerThreadSynch *skip;  // If non-zero, all entries in Mutex queue
+                         // up to and including "skip" have same
+                         // condition as this, and will be woken later
+  bool may_skip;         // if false while on mutex queue, a mutex unlocker
+                         // is using this PerThreadSynch as a terminator.  Its
+                         // skip field must not be filled in because the loop
+                         // might then skip over the terminator.
+
+  // The wait parameters of the current wait.  waitp is null if the
+  // thread is not waiting. Transitions from null to non-null must
+  // occur before the enqueue commit point (state = kQueued in
+  // Enqueue() and CondVarEnqueue()). Transitions from non-null to
+  // null must occur after the wait is finished (state = kAvailable in
+  // Mutex::Block() and CondVar::WaitCommon()). This field may be
+  // changed only by the thread that this PerThreadSynch describes.  A
+  // special case is Fer(), which calls Enqueue() on another thread,
+  // but with an identical SynchWaitParams pointer, thus leaving the
+  // pointer unchanged.
+  SynchWaitParams *waitp;
+
+  bool suppress_fatal_errors;  // If true, try to proceed even in the face of
+                               // broken invariants.  This is used within fatal
+                               // signal handlers to improve the chances of
+                               // debug logging information being output
+                               // successfully.
+
+  intptr_t readers;     // Number of readers in mutex.
+  int priority;         // Priority of thread (updated every so often).
+
+  // When priority will next be read (cycles).
+  int64_t next_priority_read_cycles;
+
+  // State values:
+  //   kAvailable: This PerThreadSynch is available.
+  //   kQueued: This PerThreadSynch is unavailable, it's currently queued on a
+  //            Mutex or CondVar waiter list.
+  //
+  // Transitions from kQueued to kAvailable require a release
+  // barrier. This is needed as a waiter may use "state" to
+  // independently observe that it's no longer queued.
+  //
+  // Transitions from kAvailable to kQueued require no barrier, they
+  // are externally ordered by the Mutex.
+  enum State {
+    kAvailable,
+    kQueued
+  };
+  std::atomic<State> state;
+
+  bool maybe_unlocking;  // Valid at head of Mutex waiter queue;
+                         // true if UnlockSlow could be searching
+                         // for a waiter to wake.  Used for an optimization
+                         // in Enqueue().  true is always a valid value.
+                         // Can be reset to false when the unlocker or any
+                         // writer releases the lock, or a reader fully releases
+                         // the lock.  It may not be set to false by a reader
+                         // that decrements the count to non-zero.
+                         // protected by mutex spinlock
+
+  bool wake;  // This thread is to be woken from a Mutex.
+
+  // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
+  // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
+  //
+  // The value of "x->cond_waiter" is meaningless if "x" is not on a
+  // Mutex waiter list.
+  bool cond_waiter;
+
+  // Locks held; used during deadlock detection.
+  // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
+  SynchLocksHeld *all_locks;
+};
+
+struct ThreadIdentity {
+  // Must be the first member.  The Mutex implementation requires that
+  // the PerThreadSynch object associated with each thread is
+  // PerThreadSynch::kAlignment aligned.  We provide this alignment on
+  // ThreadIdentity itself.
+  PerThreadSynch per_thread_synch;
+
+  // Private: Reserved for absl::synchronization_internal::Waiter.
+  struct WaiterState {
+    char data[128];
+  } waiter_state;
+
+  // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
+  std::atomic<int>* blocked_count_ptr;
+
+  // The following variables are mostly read/written just by the
+  // thread itself.  The only exception is that these are read by
+  // a ticker thread as a hint.
+  std::atomic<int> ticker;      // Tick counter, incremented once per second.
+  std::atomic<int> wait_start;  // Ticker value when thread started waiting.
+  std::atomic<bool> is_idle;    // Has thread become idle yet?
+
+  ThreadIdentity* next;
+};
+
+// Returns the ThreadIdentity object representing the calling thread; guaranteed
+// to be unique for its lifetime.  The returned object will remain valid for the
+// program's lifetime, although it may be re-assigned to a subsequent thread.
+// If one does not exist, returns nullptr instead.
+//
+// Does not malloc(*), and is async-signal safe.
+// [*] Technically pthread_setspecific() does malloc on first use; however this
+// is handled internally within tcmalloc's initialization already.
+//
+// New ThreadIdentity objects can be constructed and associated with a thread
+// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
+ThreadIdentity* CurrentThreadIdentityIfPresent();
+
+using ThreadIdentityReclaimerFunction = void (*)(void*);
+
+// Sets the current thread identity to the given value.  'reclaimer' is a
+// pointer to the global function for cleaning up instances on thread
+// destruction.
+void SetCurrentThreadIdentity(ThreadIdentity* identity,
+                              ThreadIdentityReclaimerFunction reclaimer);
+
+// Removes the currently associated ThreadIdentity from the running thread.
+// This must be called from inside the ThreadIdentityReclaimerFunction, and only
+// from that function.
+void ClearCurrentThreadIdentity();
+
+// May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE=<mode
+// index>
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS
+#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE
+#error ABSL_THREAD_IDENTITY_MODE cannot be directly set
+#elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE
+#elif defined(_WIN32) && !defined(__MINGW32__)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
+    (__GOOGLE_GRTE_VERSION__ >= 20140228L)
+// Support for async-safe TLS was specifically added in GRTEv4.  It's not
+// present in the upstream eglibc.
+// Note:  Current default for production systems.
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS
+#else
+#define ABSL_THREAD_IDENTITY_MODE \
+  ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#endif
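+
+// For example (illustrative), building with
+// -DABSL_FORCE_THREAD_IDENTITY_MODE=2 forces
+// ABSL_THREAD_IDENTITY_MODE_USE_CPP11 (mode index 2, per the definitions
+// above) regardless of the platform detection.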
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+
+#if ABSL_PER_THREAD_TLS
+ABSL_CONST_INIT extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity*
+    thread_identity_ptr;
+#elif defined(ABSL_HAVE_THREAD_LOCAL)
+ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr;
+#else
+#error Thread-local storage not detected on this platform
+#endif
+
+// thread_local variables cannot be in headers exposed by DLLs. However, it is
+// important for performance reasons in general that
+// `CurrentThreadIdentityIfPresent` be inlined. This is not possible across a
+// DLL boundary so, with DLLs, we opt to have the function not be inlined. Note
+// that `CurrentThreadIdentityIfPresent` is declared above so we can exclude
+// this entire inline definition when compiling as a DLL.
+#if !defined(ABSL_BUILD_DLL) && !defined(ABSL_CONSUME_DLL)
+inline ThreadIdentity* CurrentThreadIdentityIfPresent() {
+  return thread_identity_ptr;
+}
+#endif
+
+#elif ABSL_THREAD_IDENTITY_MODE != \
+    ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#error Unknown ABSL_THREAD_IDENTITY_MODE
+#endif
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/thread_identity_benchmark.cc b/third_party/abseil_cpp/absl/base/internal/thread_identity_benchmark.cc
new file mode 100644
index 000000000000..0ae10f2b1e8a
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/thread_identity_benchmark.cc
@@ -0,0 +1,38 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/synchronization/internal/create_thread_identity.h"
+#include "absl/synchronization/internal/per_thread_sem.h"
+
+namespace {
+
+void BM_SafeCurrentThreadIdentity(benchmark::State& state) {
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(
+        absl::synchronization_internal::GetOrCreateCurrentThreadIdentity());
+  }
+}
+BENCHMARK(BM_SafeCurrentThreadIdentity);
+
+void BM_UnsafeCurrentThreadIdentity(benchmark::State& state) {
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(
+        absl::base_internal::CurrentThreadIdentityIfPresent());
+  }
+}
+BENCHMARK(BM_UnsafeCurrentThreadIdentity);
+
+}  // namespace
diff --git a/third_party/abseil_cpp/absl/base/internal/thread_identity_test.cc b/third_party/abseil_cpp/absl/base/internal/thread_identity_test.cc
new file mode 100644
index 000000000000..624d5b96b6ee
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/thread_identity_test.cc
@@ -0,0 +1,129 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/thread_identity.h"
+
+#include <thread>  // NOLINT(build/c++11)
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/macros.h"
+#include "absl/base/thread_annotations.h"
+#include "absl/synchronization/internal/per_thread_sem.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+ABSL_CONST_INIT static absl::base_internal::SpinLock map_lock(
+    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+ABSL_CONST_INIT static int num_identities_reused ABSL_GUARDED_BY(map_lock);
+
+static const void* const kCheckNoIdentity = reinterpret_cast<void*>(1);
+
+static void TestThreadIdentityCurrent(const void* assert_no_identity) {
+  ThreadIdentity* identity;
+
+  // We have to test this conditionally, because if the test framework relies
+  // on Abseil, then some previous action may have already allocated an
+  // identity.
+  if (assert_no_identity == kCheckNoIdentity) {
+    identity = CurrentThreadIdentityIfPresent();
+    EXPECT_TRUE(identity == nullptr);
+  }
+
+  identity = synchronization_internal::GetOrCreateCurrentThreadIdentity();
+  EXPECT_TRUE(identity != nullptr);
+  ThreadIdentity* identity_no_init;
+  identity_no_init = CurrentThreadIdentityIfPresent();
+  EXPECT_TRUE(identity == identity_no_init);
+
+  // Check that per_thread_synch is correctly aligned.
+  EXPECT_EQ(0, reinterpret_cast<intptr_t>(&identity->per_thread_synch) %
+                   PerThreadSynch::kAlignment);
+  EXPECT_EQ(identity, identity->per_thread_synch.thread_identity());
+
+  absl::base_internal::SpinLockHolder l(&map_lock);
+  num_identities_reused++;
+}
+
+TEST(ThreadIdentityTest, BasicIdentityWorks) {
+  // This tests for the main() thread.
+  TestThreadIdentityCurrent(nullptr);
+}
+
+TEST(ThreadIdentityTest, BasicIdentityWorksThreaded) {
+  // Now try the same basic test with multiple threads being created and
+  // destroyed.  This makes sure that:
+  // - New threads are created without a ThreadIdentity.
+  // - We re-allocate ThreadIdentity objects from the free-list.
+  // - If a thread implementation chooses to recycle threads, correct
+  //   re-initialization occurs.
+  static const int kNumLoops = 3;
+  static const int kNumThreads = 400;
+  for (int iter = 0; iter < kNumLoops; iter++) {
+    std::vector<std::thread> threads;
+    for (int i = 0; i < kNumThreads; ++i) {
+      threads.push_back(
+          std::thread(TestThreadIdentityCurrent, kCheckNoIdentity));
+    }
+    for (auto& thread : threads) {
+      thread.join();
+    }
+  }
+
+  // We should have recycled ThreadIdentity objects above; while (external)
+  // library threads allocating their own identities may preclude some
+  // reuse, we should have sufficient repetitions to exclude this.
+  absl::base_internal::SpinLockHolder l(&map_lock);
+  EXPECT_LT(kNumThreads, num_identities_reused);
+}
+
+TEST(ThreadIdentityTest, ReusedThreadIdentityMutexTest) {
+  // This test repeatedly creates and joins a series of threads, each of
+  // which acquires and releases shared Mutex locks. This verifies
+  // Mutex operations work correctly under a reused
+  // ThreadIdentity. Note that the most likely failure mode of this
+  // test is a crash or deadlock.
+  static const int kNumLoops = 10;
+  static const int kNumThreads = 12;
+  static const int kNumMutexes = 3;
+  static const int kNumLockLoops = 5;
+
+  Mutex mutexes[kNumMutexes];
+  for (int iter = 0; iter < kNumLoops; ++iter) {
+    std::vector<std::thread> threads;
+    for (int thread = 0; thread < kNumThreads; ++thread) {
+      threads.push_back(std::thread([&]() {
+        for (int l = 0; l < kNumLockLoops; ++l) {
+          for (int m = 0; m < kNumMutexes; ++m) {
+            MutexLock lock(&mutexes[m]);
+          }
+        }
+      }));
+    }
+    for (auto& thread : threads) {
+      thread.join();
+    }
+  }
+}
+
+}  // namespace
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/base/internal/throw_delegate.cc b/third_party/abseil_cpp/absl/base/internal/throw_delegate.cc
new file mode 100644
index 000000000000..c055f75d9d28
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/throw_delegate.cc
@@ -0,0 +1,108 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/throw_delegate.h"
+
+#include <cstdlib>
+#include <functional>
+#include <new>
+#include <stdexcept>
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+namespace {
+template <typename T>
+[[noreturn]] void Throw(const T& error) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw error;
+#else
+  ABSL_RAW_LOG(FATAL, "%s", error.what());
+  std::abort();
+#endif
+}
+}  // namespace
+
+void ThrowStdLogicError(const std::string& what_arg) {
+  Throw(std::logic_error(what_arg));
+}
+void ThrowStdLogicError(const char* what_arg) {
+  Throw(std::logic_error(what_arg));
+}
+void ThrowStdInvalidArgument(const std::string& what_arg) {
+  Throw(std::invalid_argument(what_arg));
+}
+void ThrowStdInvalidArgument(const char* what_arg) {
+  Throw(std::invalid_argument(what_arg));
+}
+
+void ThrowStdDomainError(const std::string& what_arg) {
+  Throw(std::domain_error(what_arg));
+}
+void ThrowStdDomainError(const char* what_arg) {
+  Throw(std::domain_error(what_arg));
+}
+
+void ThrowStdLengthError(const std::string& what_arg) {
+  Throw(std::length_error(what_arg));
+}
+void ThrowStdLengthError(const char* what_arg) {
+  Throw(std::length_error(what_arg));
+}
+
+void ThrowStdOutOfRange(const std::string& what_arg) {
+  Throw(std::out_of_range(what_arg));
+}
+void ThrowStdOutOfRange(const char* what_arg) {
+  Throw(std::out_of_range(what_arg));
+}
+
+void ThrowStdRuntimeError(const std::string& what_arg) {
+  Throw(std::runtime_error(what_arg));
+}
+void ThrowStdRuntimeError(const char* what_arg) {
+  Throw(std::runtime_error(what_arg));
+}
+
+void ThrowStdRangeError(const std::string& what_arg) {
+  Throw(std::range_error(what_arg));
+}
+void ThrowStdRangeError(const char* what_arg) {
+  Throw(std::range_error(what_arg));
+}
+
+void ThrowStdOverflowError(const std::string& what_arg) {
+  Throw(std::overflow_error(what_arg));
+}
+void ThrowStdOverflowError(const char* what_arg) {
+  Throw(std::overflow_error(what_arg));
+}
+
+void ThrowStdUnderflowError(const std::string& what_arg) {
+  Throw(std::underflow_error(what_arg));
+}
+void ThrowStdUnderflowError(const char* what_arg) {
+  Throw(std::underflow_error(what_arg));
+}
+
+void ThrowStdBadFunctionCall() { Throw(std::bad_function_call()); }
+
+void ThrowStdBadAlloc() { Throw(std::bad_alloc()); }
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/base/internal/throw_delegate.h b/third_party/abseil_cpp/absl/base/internal/throw_delegate.h
new file mode 100644
index 000000000000..075f5272543a
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/throw_delegate.h
@@ -0,0 +1,75 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
+#define ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
+
+#include <string>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Helper functions that allow throwing exceptions consistently from anywhere.
+// The main use case is for header-based libraries (e.g., templates), as they
+// will be built by many different targets with their own compiler options.
+// In particular, this provides a safe way to throw exceptions even if the
+// caller is compiled with -fno-exceptions.  This is intended for implementing
+// things like map<>::at(), which the standard documents as throwing an
+// exception on error.
+//
+// Using other techniques like #if tricks could lead to ODR violations.
+//
+// You shouldn't use it unless you're writing code that you know will be built
+// both with and without exceptions and you need to conform to an interface
+// that uses exceptions.
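+//
+// Illustrative sketch (the container and member names here are hypothetical,
+// not part of this header): a header-only container could delegate its range
+// check like
+//
+//   T& MyFixedArray<T>::at(size_t i) {
+//     if (i >= size_) base_internal::ThrowStdOutOfRange("MyFixedArray::at");
+//     return data_[i];
+//   }
+//
+// The throw itself is compiled once, in throw_delegate.cc, regardless of
+// whether the caller is built with -fno-exceptions.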
+
+[[noreturn]] void ThrowStdLogicError(const std::string& what_arg);
+[[noreturn]] void ThrowStdLogicError(const char* what_arg);
+[[noreturn]] void ThrowStdInvalidArgument(const std::string& what_arg);
+[[noreturn]] void ThrowStdInvalidArgument(const char* what_arg);
+[[noreturn]] void ThrowStdDomainError(const std::string& what_arg);
+[[noreturn]] void ThrowStdDomainError(const char* what_arg);
+[[noreturn]] void ThrowStdLengthError(const std::string& what_arg);
+[[noreturn]] void ThrowStdLengthError(const char* what_arg);
+[[noreturn]] void ThrowStdOutOfRange(const std::string& what_arg);
+[[noreturn]] void ThrowStdOutOfRange(const char* what_arg);
+[[noreturn]] void ThrowStdRuntimeError(const std::string& what_arg);
+[[noreturn]] void ThrowStdRuntimeError(const char* what_arg);
+[[noreturn]] void ThrowStdRangeError(const std::string& what_arg);
+[[noreturn]] void ThrowStdRangeError(const char* what_arg);
+[[noreturn]] void ThrowStdOverflowError(const std::string& what_arg);
+[[noreturn]] void ThrowStdOverflowError(const char* what_arg);
+[[noreturn]] void ThrowStdUnderflowError(const std::string& what_arg);
+[[noreturn]] void ThrowStdUnderflowError(const char* what_arg);
+
+[[noreturn]] void ThrowStdBadFunctionCall();
+[[noreturn]] void ThrowStdBadAlloc();
+
+// ThrowStdBadArrayNewLength() cannot be consistently supported because
+// std::bad_array_new_length is missing in libstdc++ until 4.9.0.
+// https://gcc.gnu.org/onlinedocs/gcc-4.8.3/libstdc++/api/a01379_source.html
+// https://gcc.gnu.org/onlinedocs/gcc-4.9.0/libstdc++/api/a01327_source.html
+// libcxx (as of 3.2) and msvc (as of 2015) both have it.
+// [[noreturn]] void ThrowStdBadArrayNewLength();
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/tsan_mutex_interface.h b/third_party/abseil_cpp/absl/base/internal/tsan_mutex_interface.h
new file mode 100644
index 000000000000..2a510603bc8c
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/tsan_mutex_interface.h
@@ -0,0 +1,66 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is intended solely for spinlock.h.
+// It provides ThreadSanitizer annotations for custom mutexes.
+// See <sanitizer/tsan_interface.h> for meaning of these annotations.
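+//
+// Illustrative sketch (MyMutex is hypothetical): a custom mutex brackets its
+// operations with these macros, e.g.
+//
+//   void MyMutex::Lock() {
+//     ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+//     // ... acquire the lock ...
+//     ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+//   }
+//
+// When the TSan interface is unavailable, the macros expand to nothing.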
+
+#ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
+#define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
+
+// ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+// Macro intended only for internal use.
+//
+// Checks whether LLVM Thread Sanitizer interfaces are available.
+// First made available in LLVM 5.0 (Sep 2017).
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+#error "ABSL_INTERNAL_HAVE_TSAN_INTERFACE cannot be directly set."
+#endif
+
+#if defined(THREAD_SANITIZER) && defined(__has_include)
+#if __has_include(<sanitizer/tsan_interface.h>)
+#define ABSL_INTERNAL_HAVE_TSAN_INTERFACE 1
+#endif
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+#include <sanitizer/tsan_interface.h>
+
+#define ABSL_TSAN_MUTEX_CREATE __tsan_mutex_create
+#define ABSL_TSAN_MUTEX_DESTROY __tsan_mutex_destroy
+#define ABSL_TSAN_MUTEX_PRE_LOCK __tsan_mutex_pre_lock
+#define ABSL_TSAN_MUTEX_POST_LOCK __tsan_mutex_post_lock
+#define ABSL_TSAN_MUTEX_PRE_UNLOCK __tsan_mutex_pre_unlock
+#define ABSL_TSAN_MUTEX_POST_UNLOCK __tsan_mutex_post_unlock
+#define ABSL_TSAN_MUTEX_PRE_SIGNAL __tsan_mutex_pre_signal
+#define ABSL_TSAN_MUTEX_POST_SIGNAL __tsan_mutex_post_signal
+#define ABSL_TSAN_MUTEX_PRE_DIVERT __tsan_mutex_pre_divert
+#define ABSL_TSAN_MUTEX_POST_DIVERT __tsan_mutex_post_divert
+
+#else
+
+#define ABSL_TSAN_MUTEX_CREATE(...)
+#define ABSL_TSAN_MUTEX_DESTROY(...)
+#define ABSL_TSAN_MUTEX_PRE_LOCK(...)
+#define ABSL_TSAN_MUTEX_POST_LOCK(...)
+#define ABSL_TSAN_MUTEX_PRE_UNLOCK(...)
+#define ABSL_TSAN_MUTEX_POST_UNLOCK(...)
+#define ABSL_TSAN_MUTEX_PRE_SIGNAL(...)
+#define ABSL_TSAN_MUTEX_POST_SIGNAL(...)
+#define ABSL_TSAN_MUTEX_PRE_DIVERT(...)
+#define ABSL_TSAN_MUTEX_POST_DIVERT(...)
+
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/unaligned_access.h b/third_party/abseil_cpp/absl/base/internal/unaligned_access.h
new file mode 100644
index 000000000000..6be56c865b3d
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/unaligned_access.h
@@ -0,0 +1,158 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
+#define ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
+
+#include <string.h>
+
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+// unaligned APIs
+
+// Portable handling of unaligned loads, stores, and copies.
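+//
+// For example (illustrative only), reading a 32-bit value from a possibly
+// misaligned position in a byte buffer:
+//
+//   uint32_t v = ABSL_INTERNAL_UNALIGNED_LOAD32(buf + offset);
+//
+// avoids the undefined behavior of *reinterpret_cast<const uint32_t *>(p)
+// when p is not suitably aligned.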
+
+// The unaligned API is C++ only.  The declarations use C++ features
+// (namespaces, inline) which are absent or incompatible in C.
+#if defined(__cplusplus)
+
+#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || \
+    defined(MEMORY_SANITIZER)
+// Suppose we have an unaligned load/store of 4 bytes from address 0x...05.
+// AddressSanitizer will treat it as a 3-byte access to the range 05:07 and
+// will miss a bug if 08 is the first unaddressable byte.
+// ThreadSanitizer will also treat this as a 3-byte access to 05:07 and will
+// miss a race between this access and some other accesses to 08.
+// MemorySanitizer will correctly propagate the shadow on unaligned stores
+// and correctly report bugs on unaligned loads, but it may not properly
+// update and report the origin of the uninitialized memory.
+// For all three tools, replacing an unaligned access with a tool-specific
+// callback solves the problem.
+
+// Make sure uint16_t/uint32_t/uint64_t are defined.
+#include <stdint.h>
+
+extern "C" {
+uint16_t __sanitizer_unaligned_load16(const void *p);
+uint32_t __sanitizer_unaligned_load32(const void *p);
+uint64_t __sanitizer_unaligned_load64(const void *p);
+void __sanitizer_unaligned_store16(void *p, uint16_t v);
+void __sanitizer_unaligned_store32(void *p, uint32_t v);
+void __sanitizer_unaligned_store64(void *p, uint64_t v);
+}  // extern "C"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+inline uint16_t UnalignedLoad16(const void *p) {
+  return __sanitizer_unaligned_load16(p);
+}
+
+inline uint32_t UnalignedLoad32(const void *p) {
+  return __sanitizer_unaligned_load32(p);
+}
+
+inline uint64_t UnalignedLoad64(const void *p) {
+  return __sanitizer_unaligned_load64(p);
+}
+
+inline void UnalignedStore16(void *p, uint16_t v) {
+  __sanitizer_unaligned_store16(p, v);
+}
+
+inline void UnalignedStore32(void *p, uint32_t v) {
+  __sanitizer_unaligned_store32(p, v);
+}
+
+inline void UnalignedStore64(void *p, uint64_t v) {
+  __sanitizer_unaligned_store64(p, v);
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
+  (absl::base_internal::UnalignedLoad16(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
+  (absl::base_internal::UnalignedLoad32(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
+  (absl::base_internal::UnalignedLoad64(_p))
+
+#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
+  (absl::base_internal::UnalignedStore16(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
+  (absl::base_internal::UnalignedStore32(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
+  (absl::base_internal::UnalignedStore64(_p, _val))
+
+#else
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
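+// These functions use the standard memcpy idiom for unaligned access; modern
+// compilers typically lower each fixed-size memcpy to a single (possibly
+// unaligned) load or store instruction.
+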
+inline uint16_t UnalignedLoad16(const void *p) {
+  uint16_t t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline uint32_t UnalignedLoad32(const void *p) {
+  uint32_t t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline uint64_t UnalignedLoad64(const void *p) {
+  uint64_t t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline void UnalignedStore16(void *p, uint16_t v) { memcpy(p, &v, sizeof v); }
+
+inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); }
+
+inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
+  (absl::base_internal::UnalignedLoad16(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
+  (absl::base_internal::UnalignedLoad32(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
+  (absl::base_internal::UnalignedLoad64(_p))
+
+#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
+  (absl::base_internal::UnalignedStore16(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
+  (absl::base_internal::UnalignedStore32(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
+  (absl::base_internal::UnalignedStore64(_p, _val))
+
+#endif
+
+#endif  // defined(__cplusplus), end of unaligned API
+
+#endif  // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/unique_small_name_test.cc b/third_party/abseil_cpp/absl/base/internal/unique_small_name_test.cc
new file mode 100644
index 000000000000..ff8c2b3fb427
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/unique_small_name_test.cc
@@ -0,0 +1,77 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "gtest/gtest.h"
+#include "absl/base/optimization.h"
+#include "absl/strings/string_view.h"
+
+// This test by itself does not do anything fancy, but it serves as a binary
+// that a shell test can query.
+
+namespace {
+
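+// Marks `var` as used and clobbered so the compiler cannot optimize it (or
+// the symbols it references) away; the shell test inspects those symbols in
+// the resulting binary.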
+template <class T>
+void DoNotOptimize(const T& var) {
+#ifdef __GNUC__
+  asm volatile("" : "+m"(const_cast<T&>(var)));
+#else
+  std::cout << (void*)&var;
+#endif
+}
+
+int very_long_int_variable_name ABSL_INTERNAL_UNIQUE_SMALL_NAME() = 0;
+char very_long_str_variable_name[] ABSL_INTERNAL_UNIQUE_SMALL_NAME() = "abc";
+
+TEST(UniqueSmallName, NonAutomaticVar) {
+  EXPECT_EQ(very_long_int_variable_name, 0);
+  EXPECT_EQ(absl::string_view(very_long_str_variable_name), "abc");
+}
+
+int VeryLongFreeFunctionName() ABSL_INTERNAL_UNIQUE_SMALL_NAME();
+
+TEST(UniqueSmallName, FreeFunction) {
+  DoNotOptimize(&VeryLongFreeFunctionName);
+
+  EXPECT_EQ(VeryLongFreeFunctionName(), 456);
+}
+
+int VeryLongFreeFunctionName() { return 456; }
+
+struct VeryLongStructName {
+  explicit VeryLongStructName(int i);
+
+  int VeryLongMethodName() ABSL_INTERNAL_UNIQUE_SMALL_NAME();
+
+  static int VeryLongStaticMethodName() ABSL_INTERNAL_UNIQUE_SMALL_NAME();
+
+ private:
+  int fld;
+};
+
+TEST(UniqueSmallName, Struct) {
+  VeryLongStructName var(10);
+
+  DoNotOptimize(var);
+  DoNotOptimize(&VeryLongStructName::VeryLongMethodName);
+  DoNotOptimize(&VeryLongStructName::VeryLongStaticMethodName);
+
+  EXPECT_EQ(var.VeryLongMethodName(), 10);
+  EXPECT_EQ(VeryLongStructName::VeryLongStaticMethodName(), 123);
+}
+
+VeryLongStructName::VeryLongStructName(int i) : fld(i) {}
+int VeryLongStructName::VeryLongMethodName() { return fld; }
+int VeryLongStructName::VeryLongStaticMethodName() { return 123; }
+
+}  // namespace
diff --git a/third_party/abseil_cpp/absl/base/internal/unscaledcycleclock.cc b/third_party/abseil_cpp/absl/base/internal/unscaledcycleclock.cc
new file mode 100644
index 000000000000..f1e7bbef844a
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/unscaledcycleclock.cc
@@ -0,0 +1,140 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/unscaledcycleclock.h"
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+#if defined(_WIN32)
+#include <intrin.h>
+#endif
+
+#if defined(__powerpc__) || defined(__ppc__)
+#ifdef __GLIBC__
+#include <sys/platform/ppc.h>
+#elif defined(__FreeBSD__)
+#include <sys/sysctl.h>
+#include <sys/types.h>
+#endif
+#endif
+
+#include "absl/base/internal/sysinfo.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+#if defined(__i386__)
+
+int64_t UnscaledCycleClock::Now() {
+  int64_t ret;
+  __asm__ volatile("rdtsc" : "=A"(ret));
+  return ret;
+}
+
+double UnscaledCycleClock::Frequency() {
+  return base_internal::NominalCPUFrequency();
+}
+
+#elif defined(__x86_64__)
+
+int64_t UnscaledCycleClock::Now() {
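+  // rdtsc returns the 64-bit timestamp counter split across edx (high half)
+  // and eax (low half); using uint64_t for both keeps the shift well defined.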
+  uint64_t low, high;
+  __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
+  return (high << 32) | low;
+}
+
+double UnscaledCycleClock::Frequency() {
+  return base_internal::NominalCPUFrequency();
+}
+
+#elif defined(__powerpc__) || defined(__ppc__)
+
+int64_t UnscaledCycleClock::Now() {
+#ifdef __GLIBC__
+  return __ppc_get_timebase();
+#else
+#ifdef __powerpc64__
+  int64_t tbr;
+  asm volatile("mfspr %0, 268" : "=r"(tbr));
+  return tbr;
+#else
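+  // 32-bit PowerPC: read the upper timebase half, then the lower, then the
+  // upper again; retry if the upper half changed in between (the lower half
+  // carried over during the read).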
+  int32_t tbu, tbl, tmp;
+  asm volatile(
+      "0:\n"
+      "mftbu %[hi32]\n"
+      "mftb %[lo32]\n"
+      "mftbu %[tmp]\n"
+      "cmpw %[tmp],%[hi32]\n"
+      "bne 0b\n"
+      : [ hi32 ] "=r"(tbu), [ lo32 ] "=r"(tbl), [ tmp ] "=r"(tmp));
+  return (static_cast<int64_t>(tbu) << 32) | tbl;
+#endif
+#endif
+}
+
+double UnscaledCycleClock::Frequency() {
+#ifdef __GLIBC__
+  return __ppc_get_timebase_freq();
+#elif defined(__FreeBSD__)
+  static once_flag init_timebase_frequency_once;
+  static double timebase_frequency = 0.0;
+  base_internal::LowLevelCallOnce(&init_timebase_frequency_once, [&]() {
+    size_t length = sizeof(timebase_frequency);
+    sysctlbyname("kern.timecounter.tc.timebase.frequency", &timebase_frequency,
+                 &length, nullptr, 0);
+  });
+  return timebase_frequency;
+#else
+#error Must implement UnscaledCycleClock::Frequency()
+#endif
+}
+
+#elif defined(__aarch64__)
+
+// The system timer of ARMv8 runs at a frequency different from the CPU's.
+// That frequency is fixed, typically in the range 1-50 MHz.  It can be
+// read from the CNTFRQ special register.  We assume the OS has set up
+// the virtual timer properly.
+int64_t UnscaledCycleClock::Now() {
+  int64_t virtual_timer_value;
+  asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
+  return virtual_timer_value;
+}
+
+double UnscaledCycleClock::Frequency() {
+  uint64_t aarch64_timer_frequency;
+  asm volatile("mrs %0, cntfrq_el0" : "=r"(aarch64_timer_frequency));
+  return aarch64_timer_frequency;
+}
+
+#elif defined(_M_IX86) || defined(_M_X64)
+
+#pragma intrinsic(__rdtsc)
+
+int64_t UnscaledCycleClock::Now() {
+  return __rdtsc();
+}
+
+double UnscaledCycleClock::Frequency() {
+  return base_internal::NominalCPUFrequency();
+}
+
+#endif
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_USE_UNSCALED_CYCLECLOCK
diff --git a/third_party/abseil_cpp/absl/base/internal/unscaledcycleclock.h b/third_party/abseil_cpp/absl/base/internal/unscaledcycleclock.h
new file mode 100644
index 000000000000..cdce9bf8a83c
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/unscaledcycleclock.h
@@ -0,0 +1,124 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// UnscaledCycleClock
+//    An UnscaledCycleClock yields the value and frequency of a cycle counter
+//    that increments at a rate that is approximately constant.
+//    This class is for internal / whitelisted use only; you should consider
+//    using CycleClock instead.
+//
+// Notes:
+// The cycle counter frequency is not necessarily the core clock frequency.
+// That is, CycleCounter cycles are not necessarily "CPU cycles".
+//
+// An arbitrary offset may have been added to the counter at power on.
+//
+// On some platforms, the rate and offset of the counter may differ
+// slightly when read from different CPUs of a multiprocessor.  Usually,
+// the operating system adjusts the counter values periodically so that
+// readings agree approximately.  If you need stronger guarantees,
+// consider using alternate interfaces.
+//
+// The CPU is not required to maintain the ordering of a cycle counter read
+// with respect to surrounding instructions.
+
+#ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
+#define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
+
+#include <cstdint>
+
+#if defined(__APPLE__)
+#include <TargetConditionals.h>
+#endif
+
+#include "absl/base/port.h"
+
+// The following platforms have an implementation of a hardware counter.
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
+  defined(__powerpc__) || defined(__ppc__) || \
+  defined(_M_IX86) || defined(_M_X64)
+#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1
+#else
+#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0
+#endif
+
+// The following platforms often disable access to the hardware
+// counter (through a sandbox) even if the underlying hardware has a
+// usable counter. The CycleTimer interface also requires a *scaled*
+// CycleClock that runs at at least 1 MHz. We've found some Android
+// ARM64 devices where this is not the case, so we disable it by
+// default on Android ARM64.
+#if defined(__native_client__) ||                      \
+    (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || \
+    (defined(__ANDROID__) && defined(__aarch64__))
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0
+#else
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 1
+#endif
+
+// UnscaledCycleClock is an optional internal feature.
+// Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence.
+// Can be overridden at compile-time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1
+#if !defined(ABSL_USE_UNSCALED_CYCLECLOCK)
+#define ABSL_USE_UNSCALED_CYCLECLOCK               \
+  (ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \
+   ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT)
+#endif
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+// This macro can be used to test if UnscaledCycleClock::Frequency()
+// is NominalCPUFrequency() on a particular platform.
+#if (defined(__i386__) || defined(__x86_64__) || \
+     defined(_M_IX86) || defined(_M_X64))
+#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+class UnscaledCycleClockWrapperForGetCurrentTime;
+}  // namespace time_internal
+
+namespace base_internal {
+class CycleClock;
+class UnscaledCycleClockWrapperForInitializeFrequency;
+
+class UnscaledCycleClock {
+ private:
+  UnscaledCycleClock() = delete;
+
+  // Returns the value of a cycle counter that counts at a rate that is
+  // approximately constant.
+  static int64_t Now();
+
+  // Returns how much UnscaledCycleClock::Now() increases per second.
+  // This is not necessarily the core CPU clock frequency.
+  // It may be the nominal value reported by the kernel, rather than a
+  // measured value.
+  static double Frequency();
+
+  // Whitelisted friends.
+  friend class base_internal::CycleClock;
+  friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime;
+  friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;
+};
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_USE_UNSCALED_CYCLECLOCK
+
+#endif  // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_