Diffstat (limited to 'third_party/abseil_cpp/absl/synchronization')
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/BUILD.bazel | 288
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/CMakeLists.txt | 216
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/barrier.cc | 52
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/barrier.h | 79
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/barrier_test.cc | 75
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/blocking_counter.cc | 57
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/blocking_counter.h | 99
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/blocking_counter_test.cc | 68
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/internal/create_thread_identity.cc | 140
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/internal/create_thread_identity.h | 60
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/internal/futex.h | 154
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/internal/graphcycles.cc | 698
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/internal/graphcycles.h | 141
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/internal/graphcycles_benchmark.cc | 44
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/internal/graphcycles_test.cc | 464
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/internal/kernel_timeout.h | 156
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/internal/per_thread_sem.cc | 106
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/internal/per_thread_sem.h | 115
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/internal/per_thread_sem_test.cc | 181
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/internal/thread_pool.h | 93
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/internal/waiter.cc | 428
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/internal/waiter.h | 155
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/lifetime_test.cc | 181
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/mutex.cc | 2740
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/mutex.h | 1084
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/mutex_benchmark.cc | 224
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/mutex_test.cc | 1706
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/notification.cc | 78
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/notification.h | 123
-rw-r--r--  third_party/abseil_cpp/absl/synchronization/notification_test.cc | 133
30 files changed, 0 insertions, 10138 deletions
diff --git a/third_party/abseil_cpp/absl/synchronization/BUILD.bazel b/third_party/abseil_cpp/absl/synchronization/BUILD.bazel
deleted file mode 100644
index cd4009a15739..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/BUILD.bazel
+++ /dev/null
@@ -1,288 +0,0 @@
-#
-# Copyright 2017 The Abseil Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test")
-load(
-    "//absl:copts/configure_copts.bzl",
-    "ABSL_DEFAULT_COPTS",
-    "ABSL_DEFAULT_LINKOPTS",
-    "ABSL_TEST_COPTS",
-)
-
-package(default_visibility = ["//visibility:public"])
-
-licenses(["notice"])
-
-# Internal data structure for efficiently detecting mutex dependency cycles
-cc_library(
-    name = "graphcycles_internal",
-    srcs = [
-        "internal/graphcycles.cc",
-    ],
-    hdrs = [
-        "internal/graphcycles.h",
-    ],
-    copts = ABSL_DEFAULT_COPTS,
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = [
-        "//absl:__subpackages__",
-    ],
-    deps = [
-        "//absl/base",
-        "//absl/base:base_internal",
-        "//absl/base:config",
-        "//absl/base:core_headers",
-        "//absl/base:malloc_internal",
-        "//absl/base:raw_logging_internal",
-    ],
-)
-
-cc_library(
-    name = "kernel_timeout_internal",
-    hdrs = ["internal/kernel_timeout.h"],
-    copts = ABSL_DEFAULT_COPTS,
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = [
-        "//absl/synchronization:__pkg__",
-    ],
-    deps = [
-        "//absl/base:core_headers",
-        "//absl/base:raw_logging_internal",
-        "//absl/time",
-    ],
-)
-
-cc_library(
-    name = "synchronization",
-    srcs = [
-        "barrier.cc",
-        "blocking_counter.cc",
-        "internal/create_thread_identity.cc",
-        "internal/per_thread_sem.cc",
-        "internal/waiter.cc",
-        "mutex.cc",
-        "notification.cc",
-    ],
-    hdrs = [
-        "barrier.h",
-        "blocking_counter.h",
-        "internal/create_thread_identity.h",
-        "internal/futex.h",
-        "internal/per_thread_sem.h",
-        "internal/waiter.h",
-        "mutex.h",
-        "notification.h",
-    ],
-    copts = ABSL_DEFAULT_COPTS,
-    linkopts = select({
-        "//absl:windows": [],
-        "//absl:wasm": [],
-        "//conditions:default": ["-pthread"],
-    }) + ABSL_DEFAULT_LINKOPTS,
-    deps = [
-        ":graphcycles_internal",
-        ":kernel_timeout_internal",
-        "//absl/base",
-        "//absl/base:atomic_hook",
-        "//absl/base:base_internal",
-        "//absl/base:config",
-        "//absl/base:core_headers",
-        "//absl/base:dynamic_annotations",
-        "//absl/base:malloc_internal",
-        "//absl/base:raw_logging_internal",
-        "//absl/debugging:stacktrace",
-        "//absl/debugging:symbolize",
-        "//absl/time",
-    ],
-)
-
-cc_test(
-    name = "barrier_test",
-    size = "small",
-    srcs = ["barrier_test.cc"],
-    copts = ABSL_TEST_COPTS,
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    deps = [
-        ":synchronization",
-        "//absl/time",
-        "@com_google_googletest//:gtest_main",
-    ],
-)
-
-cc_test(
-    name = "blocking_counter_test",
-    size = "small",
-    srcs = ["blocking_counter_test.cc"],
-    copts = ABSL_TEST_COPTS,
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    deps = [
-        ":synchronization",
-        "//absl/time",
-        "@com_google_googletest//:gtest_main",
-    ],
-)
-
-cc_test(
-    name = "graphcycles_test",
-    size = "medium",
-    srcs = ["internal/graphcycles_test.cc"],
-    copts = ABSL_TEST_COPTS,
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    deps = [
-        ":graphcycles_internal",
-        "//absl/base:core_headers",
-        "//absl/base:raw_logging_internal",
-        "@com_google_googletest//:gtest_main",
-    ],
-)
-
-cc_test(
-    name = "graphcycles_benchmark",
-    srcs = ["internal/graphcycles_benchmark.cc"],
-    copts = ABSL_TEST_COPTS,
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    tags = [
-        "benchmark",
-    ],
-    deps = [
-        ":graphcycles_internal",
-        "//absl/base:raw_logging_internal",
-        "@com_github_google_benchmark//:benchmark_main",
-    ],
-)
-
-cc_library(
-    name = "thread_pool",
-    testonly = 1,
-    hdrs = ["internal/thread_pool.h"],
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = [
-        "//absl:__subpackages__",
-    ],
-    deps = [
-        ":synchronization",
-        "//absl/base:core_headers",
-    ],
-)
-
-cc_test(
-    name = "mutex_test",
-    size = "large",
-    srcs = ["mutex_test.cc"],
-    copts = ABSL_TEST_COPTS,
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    shard_count = 25,
-    deps = [
-        ":synchronization",
-        ":thread_pool",
-        "//absl/base",
-        "//absl/base:config",
-        "//absl/base:core_headers",
-        "//absl/base:raw_logging_internal",
-        "//absl/memory",
-        "//absl/time",
-        "@com_google_googletest//:gtest_main",
-    ],
-)
-
-cc_library(
-    name = "mutex_benchmark_common",
-    testonly = 1,
-    srcs = ["mutex_benchmark.cc"],
-    copts = ABSL_TEST_COPTS,
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = [
-        "//absl/synchronization:__pkg__",
-    ],
-    deps = [
-        ":synchronization",
-        ":thread_pool",
-        "//absl/base",
-        "//absl/base:config",
-        "@com_github_google_benchmark//:benchmark_main",
-    ],
-    alwayslink = 1,
-)
-
-cc_binary(
-    name = "mutex_benchmark",
-    testonly = 1,
-    copts = ABSL_DEFAULT_COPTS,
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = ["//visibility:private"],
-    deps = [
-        ":mutex_benchmark_common",
-    ],
-)
-
-cc_test(
-    name = "notification_test",
-    size = "small",
-    srcs = ["notification_test.cc"],
-    copts = ABSL_TEST_COPTS,
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    deps = [
-        ":synchronization",
-        "//absl/time",
-        "@com_google_googletest//:gtest_main",
-    ],
-)
-
-cc_library(
-    name = "per_thread_sem_test_common",
-    testonly = 1,
-    srcs = ["internal/per_thread_sem_test.cc"],
-    copts = ABSL_TEST_COPTS,
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    deps = [
-        ":synchronization",
-        "//absl/base",
-        "//absl/base:config",
-        "//absl/strings",
-        "//absl/time",
-        "@com_google_googletest//:gtest",
-    ],
-    alwayslink = 1,
-)
-
-cc_test(
-    name = "per_thread_sem_test",
-    size = "medium",
-    copts = ABSL_TEST_COPTS,
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    deps = [
-        ":per_thread_sem_test_common",
-        ":synchronization",
-        "//absl/strings",
-        "//absl/time",
-        "@com_google_googletest//:gtest_main",
-    ],
-)
-
-cc_test(
-    name = "lifetime_test",
-    srcs = [
-        "lifetime_test.cc",
-    ],
-    copts = ABSL_TEST_COPTS,
-    linkopts = ABSL_DEFAULT_LINKOPTS,
-    tags = ["no_test_ios_x86_64"],
-    deps = [
-        ":synchronization",
-        "//absl/base:core_headers",
-        "//absl/base:raw_logging_internal",
-    ],
-)
diff --git a/third_party/abseil_cpp/absl/synchronization/CMakeLists.txt b/third_party/abseil_cpp/absl/synchronization/CMakeLists.txt
deleted file mode 100644
index e633d0bf5312..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/CMakeLists.txt
+++ /dev/null
@@ -1,216 +0,0 @@
-#
-# Copyright 2017 The Abseil Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-absl_cc_library(
-  NAME
-    graphcycles_internal
-  HDRS
-    "internal/graphcycles.h"
-  SRCS
-    "internal/graphcycles.cc"
-  COPTS
-    ${ABSL_DEFAULT_COPTS}
-  DEPS
-    absl::base
-    absl::base_internal
-    absl::config
-    absl::core_headers
-    absl::malloc_internal
-    absl::raw_logging_internal
-)
-
-absl_cc_library(
-  NAME
-    kernel_timeout_internal
-  HDRS
-    "internal/kernel_timeout.h"
-  COPTS
-    ${ABSL_DEFAULT_COPTS}
-  DEPS
-    absl::core_headers
-    absl::raw_logging_internal
-    absl::time
-)
-
-absl_cc_library(
-  NAME
-    synchronization
-  HDRS
-    "barrier.h"
-    "blocking_counter.h"
-    "internal/create_thread_identity.h"
-    "internal/futex.h"
-    "internal/per_thread_sem.h"
-    "internal/waiter.h"
-    "mutex.h"
-    "notification.h"
-  SRCS
-    "barrier.cc"
-    "blocking_counter.cc"
-    "internal/create_thread_identity.cc"
-    "internal/per_thread_sem.cc"
-    "internal/waiter.cc"
-    "notification.cc"
-    "mutex.cc"
-  COPTS
-    ${ABSL_DEFAULT_COPTS}
-  DEPS
-    absl::graphcycles_internal
-    absl::kernel_timeout_internal
-    absl::atomic_hook
-    absl::base
-    absl::base_internal
-    absl::config
-    absl::core_headers
-    absl::dynamic_annotations
-    absl::malloc_internal
-    absl::raw_logging_internal
-    absl::stacktrace
-    absl::symbolize
-    absl::time
-    Threads::Threads
-  PUBLIC
-)
-
-absl_cc_test(
-  NAME
-    barrier_test
-  SRCS
-    "barrier_test.cc"
-  COPTS
-    ${ABSL_TEST_COPTS}
-  DEPS
-    absl::synchronization
-    absl::time
-    gmock_main
-)
-
-absl_cc_test(
-  NAME
-    blocking_counter_test
-  SRCS
-    "blocking_counter_test.cc"
-  COPTS
-    ${ABSL_TEST_COPTS}
-  DEPS
-    absl::synchronization
-    absl::time
-    gmock_main
-)
-
-absl_cc_test(
-  NAME
-    graphcycles_test
-  SRCS
-    "internal/graphcycles_test.cc"
-  COPTS
-    ${ABSL_TEST_COPTS}
-  DEPS
-    absl::graphcycles_internal
-    absl::core_headers
-    absl::raw_logging_internal
-    gmock_main
-)
-
-absl_cc_library(
-  NAME
-    thread_pool
-  HDRS
-    "internal/thread_pool.h"
-  COPTS
-    ${ABSL_DEFAULT_COPTS}
-  DEPS
-    absl::synchronization
-    absl::core_headers
-  TESTONLY
-)
-
-absl_cc_test(
-  NAME
-    mutex_test
-  SRCS
-    "mutex_test.cc"
-  COPTS
-    ${ABSL_TEST_COPTS}
-  DEPS
-    absl::synchronization
-    absl::thread_pool
-    absl::base
-    absl::config
-    absl::core_headers
-    absl::memory
-    absl::raw_logging_internal
-    absl::time
-    gmock_main
-)
-
-absl_cc_test(
-  NAME
-    notification_test
-  SRCS
-    "notification_test.cc"
-  COPTS
-    ${ABSL_TEST_COPTS}
-  DEPS
-    absl::synchronization
-    absl::time
-    gmock_main
-)
-
-absl_cc_library(
-  NAME
-    per_thread_sem_test_common
-  SRCS
-    "internal/per_thread_sem_test.cc"
-  COPTS
-    ${ABSL_TEST_COPTS}
-  DEPS
-    absl::synchronization
-    absl::base
-    absl::config
-    absl::strings
-    absl::time
-    gmock
-  TESTONLY
-)
-
-absl_cc_test(
-  NAME
-    per_thread_sem_test
-  SRCS
-    "internal/per_thread_sem_test.cc"
-  COPTS
-    ${ABSL_TEST_COPTS}
-  DEPS
-    absl::per_thread_sem_test_common
-    absl::synchronization
-    absl::strings
-    absl::time
-    gmock_main
-)
-
-absl_cc_test(
-  NAME
-    lifetime_test
-  SRCS
-    "lifetime_test.cc"
-  COPTS
-    ${ABSL_TEST_COPTS}
-  DEPS
-    absl::synchronization
-    absl::core_headers
-    absl::raw_logging_internal
-)
diff --git a/third_party/abseil_cpp/absl/synchronization/barrier.cc b/third_party/abseil_cpp/absl/synchronization/barrier.cc
deleted file mode 100644
index 0dfd795e7b5f..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/barrier.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/synchronization/barrier.h"
-
-#include "absl/base/internal/raw_logging.h"
-#include "absl/synchronization/mutex.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-// Return whether int *arg is zero.
-static bool IsZero(void *arg) {
-  return 0 == *reinterpret_cast<int *>(arg);
-}
-
-bool Barrier::Block() {
-  MutexLock l(&this->lock_);
-
-  this->num_to_block_--;
-  if (this->num_to_block_ < 0) {
-    ABSL_RAW_LOG(
-        FATAL,
-        "Block() called too many times.  num_to_block_=%d out of total=%d",
-        this->num_to_block_, this->num_to_exit_);
-  }
-
-  this->lock_.Await(Condition(IsZero, &this->num_to_block_));
-
-  // Determine which thread can safely delete this Barrier object
-  this->num_to_exit_--;
-  ABSL_RAW_CHECK(this->num_to_exit_ >= 0, "barrier underflow");
-
-  // If num_to_exit_ == 0 then all other threads in the barrier have
-  // exited the Wait() and have released the Mutex so this thread is
-  // free to delete the barrier.
-  return this->num_to_exit_ == 0;
-}
-
-ABSL_NAMESPACE_END
-}  // namespace absl
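The Block() implementation above shows the Mutex::Await idiom used throughout this package: instead of a condition variable, the caller passes a predicate wrapped in absl::Condition, and Await atomically releases the lock, blocks until the predicate holds, and reacquires the lock before returning. A minimal sketch of that pattern against the public mutex.h API (WaitForZero is an illustrative name, not part of the deleted file):

#include "absl/synchronization/mutex.h"

// Same predicate shape as the file-local IsZero() above: bool(void*),
// suitable for wrapping in an absl::Condition.
static bool IsZero(void* arg) { return *static_cast<int*>(arg) == 0; }

void WaitForZero(absl::Mutex* mu, int* counter) {
  absl::MutexLock l(mu);
  // Await releases *mu while blocked and reacquires it before returning,
  // so *counter is only ever read under the lock.
  mu->Await(absl::Condition(IsZero, counter));
}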
diff --git a/third_party/abseil_cpp/absl/synchronization/barrier.h b/third_party/abseil_cpp/absl/synchronization/barrier.h
deleted file mode 100644
index d8e754406f4b..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/barrier.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// -----------------------------------------------------------------------------
-// barrier.h
-// -----------------------------------------------------------------------------
-
-#ifndef ABSL_SYNCHRONIZATION_BARRIER_H_
-#define ABSL_SYNCHRONIZATION_BARRIER_H_
-
-#include "absl/base/thread_annotations.h"
-#include "absl/synchronization/mutex.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-// Barrier
-//
-// This class creates a barrier which blocks threads until a prespecified
-// threshold of threads (`num_threads`) utilizes the barrier. A thread utilizes
-// the `Barrier` by calling `Block()` on the barrier, which will block that
-// thread; no call to `Block()` will return until `num_threads` threads have
-// called it.
-//
-// Exactly one call to `Block()` will return `true`; the caller receiving
-// `true` is then responsible for destroying the barrier. Because stack
-// allocation would cause the barrier to be deleted when it goes out of
-// scope, barriers should not be stack allocated.
-//
-// Example:
-//
-//   // Main thread creates a `Barrier`:
-//   barrier = new Barrier(num_threads);
-//
-//   // Each participating thread could then call:
-//   if (barrier->Block()) delete barrier;  // Exactly one call to `Block()`
-//                                          // returns `true`; that call
-//                                          // deletes the barrier.
-class Barrier {
- public:
-  // `num_threads` is the number of threads that will participate in the barrier
-  explicit Barrier(int num_threads)
-      : num_to_block_(num_threads), num_to_exit_(num_threads) {}
-
-  Barrier(const Barrier&) = delete;
-  Barrier& operator=(const Barrier&) = delete;
-
-  // Barrier::Block()
-  //
-  // Blocks the current thread, and returns only when the `num_threads`
-  // threshold of threads utilizing this barrier has been reached. `Block()`
-  // returns `true` for precisely one caller, which may then destroy the
-  // barrier.
-  //
-  // Memory ordering: For any threads X and Y, any action taken by X
-  // before X calls `Block()` will be visible to Y after Y returns from
-  // `Block()`.
-  bool Block();
-
- private:
-  Mutex lock_;
-  int num_to_block_ ABSL_GUARDED_BY(lock_);
-  int num_to_exit_ ABSL_GUARDED_BY(lock_);
-};
-
-ABSL_NAMESPACE_END
-}  // namespace absl
-#endif  // ABSL_SYNCHRONIZATION_BARRIER_H_
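A runnable sketch of the usage pattern the class comment above prescribes, with the barrier heap-allocated and deleted by the one caller whose Block() returns true (kNumThreads and the thread bodies are illustrative):

#include <thread>
#include <vector>

#include "absl/synchronization/barrier.h"

int main() {
  constexpr int kNumThreads = 4;
  // Heap-allocate, per the class comment; a stack-allocated barrier would
  // be destroyed when it goes out of scope.
  auto* barrier = new absl::Barrier(kNumThreads);

  std::vector<std::thread> threads;
  for (int i = 0; i < kNumThreads; ++i) {
    threads.emplace_back([barrier] {
      // Exactly one Block() call returns true; that caller deletes.
      if (barrier->Block()) delete barrier;
    });
  }
  for (auto& t : threads) t.join();
}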
diff --git a/third_party/abseil_cpp/absl/synchronization/barrier_test.cc b/third_party/abseil_cpp/absl/synchronization/barrier_test.cc
deleted file mode 100644
index bfc6cb1883b1..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/barrier_test.cc
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/synchronization/barrier.h"
-
-#include <thread>  // NOLINT(build/c++11)
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/synchronization/mutex.h"
-#include "absl/time/clock.h"
-
-
-TEST(Barrier, SanityTest) {
-  constexpr int kNumThreads = 10;
-  absl::Barrier* barrier = new absl::Barrier(kNumThreads);
-
-  absl::Mutex mutex;
-  int counter = 0;  // Guarded by mutex.
-
-  auto thread_func = [&] {
-    if (barrier->Block()) {
-      // This thread is the last thread to reach the barrier so it is
-      // responsible for deleting it.
-      delete barrier;
-    }
-
-    // Increment the counter.
-    absl::MutexLock lock(&mutex);
-    ++counter;
-  };
-
-  // Start (kNumThreads - 1) threads running thread_func.
-  std::vector<std::thread> threads;
-  for (int i = 0; i < kNumThreads - 1; ++i) {
-    threads.push_back(std::thread(thread_func));
-  }
-
-  // Give (kNumThreads - 1) threads a chance to reach the barrier.
-  // This test assumes at least one thread will have run after the
-  // sleep has elapsed. Sleeping in a test is usually bad form, but we
-  // need to make sure that we are testing the barrier instead of some
-  // other synchronization method.
-  absl::SleepFor(absl::Seconds(1));
-
-  // The counter should still be zero since no thread should have
-  // been able to pass the barrier yet.
-  {
-    absl::MutexLock lock(&mutex);
-    EXPECT_EQ(counter, 0);
-  }
-
-  // Start 1 more thread. This should make all threads pass the barrier.
-  threads.push_back(std::thread(thread_func));
-
-  // All threads should now be able to proceed and finish.
-  for (auto& thread : threads) {
-    thread.join();
-  }
-
-  // All threads should now have incremented the counter.
-  absl::MutexLock lock(&mutex);
-  EXPECT_EQ(counter, kNumThreads);
-}
diff --git a/third_party/abseil_cpp/absl/synchronization/blocking_counter.cc b/third_party/abseil_cpp/absl/synchronization/blocking_counter.cc
deleted file mode 100644
index 3cea7aed2441..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/blocking_counter.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/synchronization/blocking_counter.h"
-
-#include "absl/base/internal/raw_logging.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-// Return whether int *arg is zero.
-static bool IsZero(void *arg) {
-  return 0 == *reinterpret_cast<int *>(arg);
-}
-
-bool BlockingCounter::DecrementCount() {
-  MutexLock l(&lock_);
-  count_--;
-  if (count_ < 0) {
-    ABSL_RAW_LOG(
-        FATAL,
-        "BlockingCounter::DecrementCount() called too many times.  count=%d",
-        count_);
-  }
-  return count_ == 0;
-}
-
-void BlockingCounter::Wait() {
-  MutexLock l(&this->lock_);
-  ABSL_RAW_CHECK(count_ >= 0, "BlockingCounter underflow");
-
-  // only one thread may call Wait(). To support more than one thread,
-  // implement a counter num_to_exit, like in the Barrier class.
-  ABSL_RAW_CHECK(num_waiting_ == 0, "multiple threads called Wait()");
-  num_waiting_++;
-
-  this->lock_.Await(Condition(IsZero, &this->count_));
-
-  // At this point, we know that all threads executing DecrementCount have
-  // released the lock, and so will not touch this object again.
-  // Therefore, the thread calling this method is free to delete the object
-  // after we return from this method.
-}
-
-ABSL_NAMESPACE_END
-}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/synchronization/blocking_counter.h b/third_party/abseil_cpp/absl/synchronization/blocking_counter.h
deleted file mode 100644
index 1f53f9f24045..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/blocking_counter.h
+++ /dev/null
@@ -1,99 +0,0 @@
-//
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// -----------------------------------------------------------------------------
-// blocking_counter.h
-// -----------------------------------------------------------------------------
-
-#ifndef ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_
-#define ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_
-
-#include "absl/base/thread_annotations.h"
-#include "absl/synchronization/mutex.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-// BlockingCounter
-//
-// This class allows a thread to block for a pre-specified number of actions.
-// `BlockingCounter` maintains a single non-negative abstract integer "count"
-// with an initial value `initial_count`. A thread can then call `Wait()` on
-// this blocking counter to block until the specified number of events occur;
-// worker threads then call `DecrementCount()` on the counter upon completion of
-// their work. Once the counter's internal "count" reaches zero, the blocked
-// thread unblocks.
-//
-// A `BlockingCounter` requires the following:
-//     - its `initial_count` is non-negative.
-//     - the number of calls to `DecrementCount()` on it is at most
-//       `initial_count`.
-//     - `Wait()` is called at most once on it.
-//
-// Given the above requirements, a `BlockingCounter` provides the following
-// guarantees:
-//     - Once its internal "count" reaches zero, no legal action on the object
-//       can further change the value of "count".
-//     - When `Wait()` returns, it is legal to destroy the `BlockingCounter`.
-//     - When `Wait()` returns, the number of calls to `DecrementCount()` on
-//       this blocking counter exactly equals `initial_count`.
-//
-// Example:
-//     BlockingCounter bcount(N);         // there are N items of work
-//     ... Allow worker threads to start.
-//     ... On completing each work item, workers do:
-//     ... bcount.DecrementCount();      // an item of work has been completed
-//
-//     bcount.Wait();                    // wait for all work to be complete
-//
-class BlockingCounter {
- public:
-  explicit BlockingCounter(int initial_count)
-      : count_(initial_count), num_waiting_(0) {}
-
-  BlockingCounter(const BlockingCounter&) = delete;
-  BlockingCounter& operator=(const BlockingCounter&) = delete;
-
-  // BlockingCounter::DecrementCount()
-  //
-  // Decrements the counter's "count" by one, and returns "count == 0". This
-  // function requires that "count != 0" when it is called.
-  //
-  // Memory ordering: For any threads X and Y, any action taken by X
-  // before it calls `DecrementCount()` is visible to thread Y after
-  // Y's call to `DecrementCount()`, provided Y's call returns `true`.
-  bool DecrementCount();
-
-  // BlockingCounter::Wait()
-  //
-  // Blocks until the counter reaches zero. This function may be called at most
-  // once. On return, `DecrementCount()` will have been called "initial_count"
-  // times and the blocking counter may be destroyed.
-  //
-  // Memory ordering: For any threads X and Y, any action taken by X
-  // before X calls `DecrementCount()` is visible to Y after Y returns
-  // from `Wait()`.
-  void Wait();
-
- private:
-  Mutex lock_;
-  int count_ ABSL_GUARDED_BY(lock_);
-  int num_waiting_ ABSL_GUARDED_BY(lock_);
-};
-
-ABSL_NAMESPACE_END
-}  // namespace absl
-
-#endif  // ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_
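A compact, runnable version of the Example block above (kNumWorkers and the empty work body are illustrative):

#include <thread>
#include <vector>

#include "absl/synchronization/blocking_counter.h"

int main() {
  constexpr int kNumWorkers = 3;
  absl::BlockingCounter done(kNumWorkers);  // N items of work

  std::vector<std::thread> workers;
  for (int i = 0; i < kNumWorkers; ++i) {
    workers.emplace_back([&done] {
      // ... complete one item of work, then:
      done.DecrementCount();  // at most kNumWorkers calls in total
    });
  }

  done.Wait();  // called exactly once; returns when the count reaches zero
  for (auto& w : workers) w.join();
}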
diff --git a/third_party/abseil_cpp/absl/synchronization/blocking_counter_test.cc b/third_party/abseil_cpp/absl/synchronization/blocking_counter_test.cc
deleted file mode 100644
index 2926224af750..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/blocking_counter_test.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/synchronization/blocking_counter.h"
-
-#include <thread>  // NOLINT(build/c++11)
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace {
-
-void PauseAndDecreaseCounter(BlockingCounter* counter, int* done) {
-  absl::SleepFor(absl::Seconds(1));
-  *done = 1;
-  counter->DecrementCount();
-}
-
-TEST(BlockingCounterTest, BasicFunctionality) {
-  // This test verifies that BlockingCounter functions correctly. Starts a
-  // number of threads that just sleep for a second and decrement a counter.
-
-  // Initialize the counter.
-  const int num_workers = 10;
-  BlockingCounter counter(num_workers);
-
-  std::vector<std::thread> workers;
-  std::vector<int> done(num_workers, 0);
-
-  // Start a number of parallel tasks that will just wait for a second and
-  // then decrement the count.
-  workers.reserve(num_workers);
-  for (int k = 0; k < num_workers; k++) {
-    workers.emplace_back(
-        [&counter, &done, k] { PauseAndDecreaseCounter(&counter, &done[k]); });
-  }
-
-  // Wait for the threads to have all finished.
-  counter.Wait();
-
-  // Check that all the workers have completed.
-  for (int k = 0; k < num_workers; k++) {
-    EXPECT_EQ(1, done[k]);
-  }
-
-  for (std::thread& w : workers) {
-    w.join();
-  }
-}
-
-}  // namespace
-ABSL_NAMESPACE_END
-}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/synchronization/internal/create_thread_identity.cc b/third_party/abseil_cpp/absl/synchronization/internal/create_thread_identity.cc
deleted file mode 100644
index 53a71b342b9f..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/internal/create_thread_identity.cc
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdint.h>
-#include <new>
-
-// This file is a no-op if the required LowLevelAlloc support is missing.
-#include "absl/base/internal/low_level_alloc.h"
-#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
-
-#include <string.h>
-
-#include "absl/base/attributes.h"
-#include "absl/base/internal/spinlock.h"
-#include "absl/base/internal/thread_identity.h"
-#include "absl/synchronization/internal/per_thread_sem.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-// ThreadIdentity storage is persistent, so we maintain a free-list of
-// previously released ThreadIdentity objects.
-ABSL_CONST_INIT static base_internal::SpinLock freelist_lock(
-    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
-ABSL_CONST_INIT static base_internal::ThreadIdentity* thread_identity_freelist;
-
-// A per-thread destructor for reclaiming associated ThreadIdentity objects.
-// Since we must preserve their storage we cache them for re-use.
-void ReclaimThreadIdentity(void* v) {
-  base_internal::ThreadIdentity* identity =
-      static_cast<base_internal::ThreadIdentity*>(v);
-
-  // all_locks might have been allocated by the Mutex implementation.
-  // We free it here when we are notified that our thread is dying.
-  if (identity->per_thread_synch.all_locks != nullptr) {
-    base_internal::LowLevelAlloc::Free(identity->per_thread_synch.all_locks);
-  }
-
-  PerThreadSem::Destroy(identity);
-
-  // We must explicitly clear the current thread's identity:
-  // (a) Subsequent (unrelated) per-thread destructors may require an identity.
-  //     We must guarantee a new identity is used in this case (this destructor
-  //     will be reinvoked up to PTHREAD_DESTRUCTOR_ITERATIONS times).
-  // (b) ThreadIdentity implementations may depend on memory that is not
-  //     reinitialized before reuse.  We must allow explicit clearing of the
-  //     association state in this case.
-  base_internal::ClearCurrentThreadIdentity();
-  {
-    base_internal::SpinLockHolder l(&freelist_lock);
-    identity->next = thread_identity_freelist;
-    thread_identity_freelist = identity;
-  }
-}
-
-// Return value rounded up to next multiple of align.
-// Align must be a power of two.
-static intptr_t RoundUp(intptr_t addr, intptr_t align) {
-  return (addr + align - 1) & ~(align - 1);
-}
-
-static void ResetThreadIdentity(base_internal::ThreadIdentity* identity) {
-  base_internal::PerThreadSynch* pts = &identity->per_thread_synch;
-  pts->next = nullptr;
-  pts->skip = nullptr;
-  pts->may_skip = false;
-  pts->waitp = nullptr;
-  pts->suppress_fatal_errors = false;
-  pts->readers = 0;
-  pts->priority = 0;
-  pts->next_priority_read_cycles = 0;
-  pts->state.store(base_internal::PerThreadSynch::State::kAvailable,
-                   std::memory_order_relaxed);
-  pts->maybe_unlocking = false;
-  pts->wake = false;
-  pts->cond_waiter = false;
-  pts->all_locks = nullptr;
-  identity->blocked_count_ptr = nullptr;
-  identity->ticker.store(0, std::memory_order_relaxed);
-  identity->wait_start.store(0, std::memory_order_relaxed);
-  identity->is_idle.store(false, std::memory_order_relaxed);
-  identity->next = nullptr;
-}
-
-static base_internal::ThreadIdentity* NewThreadIdentity() {
-  base_internal::ThreadIdentity* identity = nullptr;
-
-  {
-    // Re-use a previously released object if possible.
-    base_internal::SpinLockHolder l(&freelist_lock);
-    if (thread_identity_freelist) {
-      identity = thread_identity_freelist;  // Take list-head.
-      thread_identity_freelist = thread_identity_freelist->next;
-    }
-  }
-
-  if (identity == nullptr) {
-    // Allocate enough space to align ThreadIdentity to a multiple of
-    // PerThreadSynch::kAlignment. This space is never released (it is
-    // added to a freelist by ReclaimThreadIdentity instead).
-    void* allocation = base_internal::LowLevelAlloc::Alloc(
-        sizeof(*identity) + base_internal::PerThreadSynch::kAlignment - 1);
-    // Round up the address to the required alignment.
-    identity = reinterpret_cast<base_internal::ThreadIdentity*>(
-        RoundUp(reinterpret_cast<intptr_t>(allocation),
-                base_internal::PerThreadSynch::kAlignment));
-  }
-  ResetThreadIdentity(identity);
-
-  return identity;
-}
-
-// Allocates and attaches a ThreadIdentity object for the calling thread.
-// Returns the new identity.
-// REQUIRES: CurrentThreadIdentity(false) == nullptr
-base_internal::ThreadIdentity* CreateThreadIdentity() {
-  base_internal::ThreadIdentity* identity = NewThreadIdentity();
-  PerThreadSem::Init(identity);
-  // Associate the value with the current thread, and attach our destructor.
-  base_internal::SetCurrentThreadIdentity(identity, ReclaimThreadIdentity);
-  return identity;
-}
-
-}  // namespace synchronization_internal
-ABSL_NAMESPACE_END
-}  // namespace absl
-
-#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING
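The RoundUp() helper above is the standard power-of-two alignment trick: adding align - 1 and then masking with ~(align - 1) clears the low bits, rounding the address up. A tiny self-contained check of that arithmetic (the assertions are illustrative):

#include <cassert>
#include <cstdint>

static intptr_t RoundUp(intptr_t addr, intptr_t align) {
  return (addr + align - 1) & ~(align - 1);
}

int main() {
  assert(RoundUp(13, 8) == 16);  // 13 + 7 = 20 = 0b10100; mask -> 0b10000
  assert(RoundUp(16, 8) == 16);  // already-aligned values are unchanged
  assert(RoundUp(17, 8) == 24);
}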
diff --git a/third_party/abseil_cpp/absl/synchronization/internal/create_thread_identity.h b/third_party/abseil_cpp/absl/synchronization/internal/create_thread_identity.h
deleted file mode 100644
index e121f68377b1..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/internal/create_thread_identity.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright 2017 The Abseil Authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Interface for getting the current ThreadIdentity, creating one if necessary.
-// See thread_identity.h.
-//
-// This file is separate from thread_identity.h because creating a new
-// ThreadIdentity requires slightly higher level libraries (per_thread_sem
-// and low_level_alloc) than accessing an existing one.  This separation allows
-// us to have a smaller //absl/base:base.
-
-#ifndef ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
-#define ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
-
-#include "absl/base/internal/thread_identity.h"
-#include "absl/base/port.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-// Allocates and attaches a ThreadIdentity object for the calling thread.
-// For private use only.
-base_internal::ThreadIdentity* CreateThreadIdentity();
-
-// A per-thread destructor for reclaiming associated ThreadIdentity objects.
-// For private use only.
-void ReclaimThreadIdentity(void* v);
-
-// Returns the ThreadIdentity object representing the calling thread; it is
-// guaranteed to be unique for its lifetime.  The returned object will remain
-// valid for the program's lifetime, although it may be re-assigned to a
-// subsequent thread.  If one does not exist for the calling thread, it is
-// allocated now.
-inline base_internal::ThreadIdentity* GetOrCreateCurrentThreadIdentity() {
-  base_internal::ThreadIdentity* identity =
-      base_internal::CurrentThreadIdentityIfPresent();
-  if (ABSL_PREDICT_FALSE(identity == nullptr)) {
-    return CreateThreadIdentity();
-  }
-  return identity;
-}
-
-}  // namespace synchronization_internal
-ABSL_NAMESPACE_END
-}  // namespace absl
-
-#endif  // ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
diff --git a/third_party/abseil_cpp/absl/synchronization/internal/futex.h b/third_party/abseil_cpp/absl/synchronization/internal/futex.h
deleted file mode 100644
index 06fbd6d072d1..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/internal/futex.h
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2020 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#ifndef ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
-#define ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
-
-#include "absl/base/config.h"
-
-#ifdef _WIN32
-#include <windows.h>
-#else
-#include <sys/time.h>
-#include <unistd.h>
-#endif
-
-#ifdef __linux__
-#include <linux/futex.h>
-#include <sys/syscall.h>
-#endif
-
-#include <errno.h>
-#include <stdio.h>
-#include <time.h>
-
-#include <atomic>
-#include <cstdint>
-
-#include "absl/base/optimization.h"
-#include "absl/synchronization/internal/kernel_timeout.h"
-
-#ifdef ABSL_INTERNAL_HAVE_FUTEX
-#error ABSL_INTERNAL_HAVE_FUTEX may not be set on the command line
-#elif defined(__BIONIC__)
-// Bionic supports all the futex operations we need even when some of the futex
-// definitions are missing.
-#define ABSL_INTERNAL_HAVE_FUTEX
-#elif defined(__linux__) && defined(FUTEX_CLOCK_REALTIME)
-// FUTEX_CLOCK_REALTIME requires Linux >= 2.6.28.
-#define ABSL_INTERNAL_HAVE_FUTEX
-#endif
-
-#ifdef ABSL_INTERNAL_HAVE_FUTEX
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-// Some Android headers are missing these definitions even though they
-// support these futex operations.
-#ifdef __BIONIC__
-#ifndef SYS_futex
-#define SYS_futex __NR_futex
-#endif
-#ifndef FUTEX_WAIT_BITSET
-#define FUTEX_WAIT_BITSET 9
-#endif
-#ifndef FUTEX_PRIVATE_FLAG
-#define FUTEX_PRIVATE_FLAG 128
-#endif
-#ifndef FUTEX_CLOCK_REALTIME
-#define FUTEX_CLOCK_REALTIME 256
-#endif
-#ifndef FUTEX_BITSET_MATCH_ANY
-#define FUTEX_BITSET_MATCH_ANY 0xFFFFFFFF
-#endif
-#endif
-
-#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
-#define SYS_futex_time64 __NR_futex_time64
-#endif
-
-#if defined(SYS_futex_time64) && !defined(SYS_futex)
-#define SYS_futex SYS_futex_time64
-#endif
-
-class FutexImpl {
- public:
-  static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
-                       KernelTimeout t) {
-    int err = 0;
-    if (t.has_timeout()) {
-      // https://locklessinc.com/articles/futex_cheat_sheet/
-      // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
-      struct timespec abs_timeout = t.MakeAbsTimespec();
-      // Atomically check that the futex value is still equal to val, and
-      // if it is, sleep until abs_timeout or until woken by FUTEX_WAKE.
-      err = syscall(
-          SYS_futex, reinterpret_cast<int32_t *>(v),
-          FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val,
-          &abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
-    } else {
-      // Atomically check that the futex value is still equal to val, and
-      // if it is, sleep until woken by FUTEX_WAKE.
-      err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
-                    FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr);
-    }
-    if (ABSL_PREDICT_FALSE(err != 0)) {
-      err = -errno;
-    }
-    return err;
-  }
-
-  static int WaitBitsetAbsoluteTimeout(std::atomic<int32_t> *v, int32_t val,
-                                       int32_t bits,
-                                       const struct timespec *abstime) {
-    int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
-                      FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime,
-                      nullptr, bits);
-    if (ABSL_PREDICT_FALSE(err != 0)) {
-      err = -errno;
-    }
-    return err;
-  }
-
-  static int Wake(std::atomic<int32_t> *v, int32_t count) {
-    int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
-                      FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
-    if (ABSL_PREDICT_FALSE(err < 0)) {
-      err = -errno;
-    }
-    return err;
-  }
-
-  // FUTEX_WAKE_BITSET
-  static int WakeBitset(std::atomic<int32_t> *v, int32_t count, int32_t bits) {
-    int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
-                      FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr,
-                      nullptr, bits);
-    if (ABSL_PREDICT_FALSE(err < 0)) {
-      err = -errno;
-    }
-    return err;
-  }
-};
-
-class Futex : public FutexImpl {};
-
-}  // namespace synchronization_internal
-ABSL_NAMESPACE_END
-}  // namespace absl
-
-#endif  // ABSL_INTERNAL_HAVE_FUTEX
-
-#endif  // ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
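For context, a minimal Linux-only sketch of the wait/wake protocol FutexImpl wraps. The kernel re-checks that the futex word still holds the expected value before sleeping, which closes the race between the user-space test and the sleep. The names word, WaitWhileZero, and Post are illustrative, and error handling is elided:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

#include <atomic>
#include <cstdint>

static std::atomic<int32_t> word{0};

void WaitWhileZero() {
  while (word.load(std::memory_order_acquire) == 0) {
    // Sleeps only if the word still equals 0 at the time of the syscall.
    syscall(SYS_futex, reinterpret_cast<int32_t*>(&word),
            FUTEX_WAIT | FUTEX_PRIVATE_FLAG, /*expected=*/0,
            /*timeout=*/nullptr);
  }
}

void Post() {
  word.store(1, std::memory_order_release);
  syscall(SYS_futex, reinterpret_cast<int32_t*>(&word),
          FUTEX_WAKE | FUTEX_PRIVATE_FLAG, /*nwake=*/1);
}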
diff --git a/third_party/abseil_cpp/absl/synchronization/internal/graphcycles.cc b/third_party/abseil_cpp/absl/synchronization/internal/graphcycles.cc
deleted file mode 100644
index 27fec21681dc..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/internal/graphcycles.cc
+++ /dev/null
@@ -1,698 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// GraphCycles provides incremental cycle detection on a dynamic
-// graph using the following algorithm:
-//
-// A dynamic topological sort algorithm for directed acyclic graphs
-// David J. Pearce, Paul H. J. Kelly
-// Journal of Experimental Algorithmics (JEA) JEA Homepage archive
-// Volume 11, 2006, Article No. 1.7
-//
-// Brief summary of the algorithm:
-//
-// (1) Maintain a rank for each node that is consistent
-//     with the topological sort of the graph. I.e., path from x to y
-//     implies rank[x] < rank[y].
-// (2) When a new edge (x->y) is inserted, do nothing if rank[x] < rank[y].
-// (3) Otherwise: adjust ranks in the neighborhood of x and y.
-
-#include "absl/base/attributes.h"
-// This file is a no-op if the required LowLevelAlloc support is missing.
-#include "absl/base/internal/low_level_alloc.h"
-#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
-
-#include "absl/synchronization/internal/graphcycles.h"
-
-#include <algorithm>
-#include <array>
-#include <limits>
-#include "absl/base/internal/hide_ptr.h"
-#include "absl/base/internal/raw_logging.h"
-#include "absl/base/internal/spinlock.h"
-
-// Do not use STL.   This module does not use standard memory allocation.
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-namespace {
-
-// Avoid LowLevelAlloc's default arena since it calls malloc hooks in
-// which people are doing things like acquiring Mutexes.
-ABSL_CONST_INIT static absl::base_internal::SpinLock arena_mu(
-    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
-ABSL_CONST_INIT static base_internal::LowLevelAlloc::Arena* arena;
-
-static void InitArenaIfNecessary() {
-  arena_mu.Lock();
-  if (arena == nullptr) {
-    arena = base_internal::LowLevelAlloc::NewArena(0);
-  }
-  arena_mu.Unlock();
-}
-
-// Number of inlined elements in Vec.  Hash table implementation
-// relies on this being a power of two.
-static const uint32_t kInline = 8;
-
-// A simple LowLevelAlloc based resizable vector with inlined storage
-// for a few elements.  T must be a plain type since constructor
-// and destructor are not run on elements of type T managed by Vec.
-template <typename T>
-class Vec {
- public:
-  Vec() { Init(); }
-  ~Vec() { Discard(); }
-
-  void clear() {
-    Discard();
-    Init();
-  }
-
-  bool empty() const { return size_ == 0; }
-  uint32_t size() const { return size_; }
-  T* begin() { return ptr_; }
-  T* end() { return ptr_ + size_; }
-  const T& operator[](uint32_t i) const { return ptr_[i]; }
-  T& operator[](uint32_t i) { return ptr_[i]; }
-  const T& back() const { return ptr_[size_-1]; }
-  void pop_back() { size_--; }
-
-  void push_back(const T& v) {
-    if (size_ == capacity_) Grow(size_ + 1);
-    ptr_[size_] = v;
-    size_++;
-  }
-
-  void resize(uint32_t n) {
-    if (n > capacity_) Grow(n);
-    size_ = n;
-  }
-
-  void fill(const T& val) {
-    for (uint32_t i = 0; i < size(); i++) {
-      ptr_[i] = val;
-    }
-  }
-
-  // Guarantees src is empty at end.
-  // Provided for the hash table resizing code below.
-  void MoveFrom(Vec<T>* src) {
-    if (src->ptr_ == src->space_) {
-      // Need to actually copy
-      resize(src->size_);
-      std::copy(src->ptr_, src->ptr_ + src->size_, ptr_);
-      src->size_ = 0;
-    } else {
-      Discard();
-      ptr_ = src->ptr_;
-      size_ = src->size_;
-      capacity_ = src->capacity_;
-      src->Init();
-    }
-  }
-
- private:
-  T* ptr_;
-  T space_[kInline];
-  uint32_t size_;
-  uint32_t capacity_;
-
-  void Init() {
-    ptr_ = space_;
-    size_ = 0;
-    capacity_ = kInline;
-  }
-
-  void Discard() {
-    if (ptr_ != space_) base_internal::LowLevelAlloc::Free(ptr_);
-  }
-
-  void Grow(uint32_t n) {
-    while (capacity_ < n) {
-      capacity_ *= 2;
-    }
-    size_t request = static_cast<size_t>(capacity_) * sizeof(T);
-    T* copy = static_cast<T*>(
-        base_internal::LowLevelAlloc::AllocWithArena(request, arena));
-    std::copy(ptr_, ptr_ + size_, copy);
-    Discard();
-    ptr_ = copy;
-  }
-
-  Vec(const Vec&) = delete;
-  Vec& operator=(const Vec&) = delete;
-};
-
-// A hash set of non-negative int32_t that uses Vec for its underlying storage.
-class NodeSet {
- public:
-  NodeSet() { Init(); }
-
-  void clear() { Init(); }
-  bool contains(int32_t v) const { return table_[FindIndex(v)] == v; }
-
-  bool insert(int32_t v) {
-    uint32_t i = FindIndex(v);
-    if (table_[i] == v) {
-      return false;
-    }
-    if (table_[i] == kEmpty) {
-      // Only inserting over an empty cell increases the number of occupied
-      // slots.
-      occupied_++;
-    }
-    table_[i] = v;
-    // Double when 75% full.
-    if (occupied_ >= table_.size() - table_.size()/4) Grow();
-    return true;
-  }
-
-  void erase(uint32_t v) {
-    uint32_t i = FindIndex(v);
-    if (static_cast<uint32_t>(table_[i]) == v) {
-      table_[i] = kDel;
-    }
-  }
-
-  // Iteration is done via HASH_FOR_EACH.
-  // Example:
-  //    HASH_FOR_EACH(elem, node->out) { ... }
-#define HASH_FOR_EACH(elem, eset) \
-  for (int32_t elem, _cursor = 0; (eset).Next(&_cursor, &elem); )
-  bool Next(int32_t* cursor, int32_t* elem) {
-    while (static_cast<uint32_t>(*cursor) < table_.size()) {
-      int32_t v = table_[*cursor];
-      (*cursor)++;
-      if (v >= 0) {
-        *elem = v;
-        return true;
-      }
-    }
-    return false;
-  }
-
- private:
-  enum : int32_t { kEmpty = -1, kDel = -2 };
-  Vec<int32_t> table_;
-  uint32_t occupied_;     // Count of non-empty slots (includes deleted slots)
-
-  static uint32_t Hash(uint32_t a) { return a * 41; }
-
-  // Return index for storing v.  May return an empty index or deleted index
-  int FindIndex(int32_t v) const {
-    // Search starting at hash index.
-    const uint32_t mask = table_.size() - 1;
-    uint32_t i = Hash(v) & mask;
-    int deleted_index = -1;  // If >= 0, index of first deleted element we see
-    while (true) {
-      int32_t e = table_[i];
-      if (v == e) {
-        return i;
-      } else if (e == kEmpty) {
-        // Return any previously encountered deleted slot.
-        return (deleted_index >= 0) ? deleted_index : i;
-      } else if (e == kDel && deleted_index < 0) {
-        // Keep searching since v might be present later.
-        deleted_index = i;
-      }
-      i = (i + 1) & mask;  // Linear probing; quadratic is slightly slower.
-    }
-  }
-
-  void Init() {
-    table_.clear();
-    table_.resize(kInline);
-    table_.fill(kEmpty);
-    occupied_ = 0;
-  }
-
-  void Grow() {
-    Vec<int32_t> copy;
-    copy.MoveFrom(&table_);
-    occupied_ = 0;
-    table_.resize(copy.size() * 2);
-    table_.fill(kEmpty);
-
-    for (const auto& e : copy) {
-      if (e >= 0) insert(e);
-    }
-  }
-
-  NodeSet(const NodeSet&) = delete;
-  NodeSet& operator=(const NodeSet&) = delete;
-};
-
-// We encode a node index and a node version in GraphId.  The version
-// number is incremented when the GraphId is freed, which automatically
-// invalidates all copies of the GraphId.
-
-inline GraphId MakeId(int32_t index, uint32_t version) {
-  GraphId g;
-  g.handle =
-      (static_cast<uint64_t>(version) << 32) | static_cast<uint32_t>(index);
-  return g;
-}
-
-inline int32_t NodeIndex(GraphId id) {
-  return static_cast<uint32_t>(id.handle & 0xfffffffful);
-}
-
-inline uint32_t NodeVersion(GraphId id) {
-  return static_cast<uint32_t>(id.handle >> 32);
-}
-
-struct Node {
-  int32_t rank;               // rank number assigned by Pearce-Kelly algorithm
-  uint32_t version;           // Current version number
-  int32_t next_hash;          // Next entry in hash table
-  bool visited;               // Temporary marker used by depth-first-search
-  uintptr_t masked_ptr;       // User-supplied pointer
-  NodeSet in;                 // List of immediate predecessor nodes in graph
-  NodeSet out;                // List of immediate successor nodes in graph
-  int priority;               // Priority of recorded stack trace.
-  int nstack;                 // Depth of recorded stack trace.
-  void* stack[40];            // stack[0,nstack-1] holds stack trace for node.
-};
-
-// Hash table for pointer to node index lookups.
-class PointerMap {
- public:
-  explicit PointerMap(const Vec<Node*>* nodes) : nodes_(nodes) {
-    table_.fill(-1);
-  }
-
-  int32_t Find(void* ptr) {
-    auto masked = base_internal::HidePtr(ptr);
-    for (int32_t i = table_[Hash(ptr)]; i != -1;) {
-      Node* n = (*nodes_)[i];
-      if (n->masked_ptr == masked) return i;
-      i = n->next_hash;
-    }
-    return -1;
-  }
-
-  void Add(void* ptr, int32_t i) {
-    int32_t* head = &table_[Hash(ptr)];
-    (*nodes_)[i]->next_hash = *head;
-    *head = i;
-  }
-
-  int32_t Remove(void* ptr) {
-    // Advance through linked list while keeping track of the
-    // predecessor slot that points to the current entry.
-    auto masked = base_internal::HidePtr(ptr);
-    for (int32_t* slot = &table_[Hash(ptr)]; *slot != -1; ) {
-      int32_t index = *slot;
-      Node* n = (*nodes_)[index];
-      if (n->masked_ptr == masked) {
-        *slot = n->next_hash;  // Remove n from linked list
-        n->next_hash = -1;
-        return index;
-      }
-      slot = &n->next_hash;
-    }
-    return -1;
-  }
-
- private:
-  // Number of buckets in hash table for pointer lookups.
-  static constexpr uint32_t kHashTableSize = 8171;  // should be prime
-
-  const Vec<Node*>* nodes_;
-  std::array<int32_t, kHashTableSize> table_;
-
-  static uint32_t Hash(void* ptr) {
-    return reinterpret_cast<uintptr_t>(ptr) % kHashTableSize;
-  }
-};
-
-}  // namespace
-
-struct GraphCycles::Rep {
-  Vec<Node*> nodes_;
-  Vec<int32_t> free_nodes_;  // Indices for unused entries in nodes_
-  PointerMap ptrmap_;
-
-  // Temporary state.
-  Vec<int32_t> deltaf_;  // Results of forward DFS
-  Vec<int32_t> deltab_;  // Results of backward DFS
-  Vec<int32_t> list_;    // All nodes to reprocess
-  Vec<int32_t> merged_;  // Rank values to assign to list_ entries
-  Vec<int32_t> stack_;   // Emulates recursion stack for depth-first searches
-
-  Rep() : ptrmap_(&nodes_) {}
-};
-
-static Node* FindNode(GraphCycles::Rep* rep, GraphId id) {
-  Node* n = rep->nodes_[NodeIndex(id)];
-  return (n->version == NodeVersion(id)) ? n : nullptr;
-}
-
-GraphCycles::GraphCycles() {
-  InitArenaIfNecessary();
-  rep_ = new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Rep), arena))
-      Rep;
-}
-
-GraphCycles::~GraphCycles() {
-  for (auto* node : rep_->nodes_) {
-    node->Node::~Node();
-    base_internal::LowLevelAlloc::Free(node);
-  }
-  rep_->Rep::~Rep();
-  base_internal::LowLevelAlloc::Free(rep_);
-}
-
-bool GraphCycles::CheckInvariants() const {
-  Rep* r = rep_;
-  NodeSet ranks;  // Set of ranks seen so far.
-  for (uint32_t x = 0; x < r->nodes_.size(); x++) {
-    Node* nx = r->nodes_[x];
-    void* ptr = base_internal::UnhidePtr<void>(nx->masked_ptr);
-    if (ptr != nullptr && static_cast<uint32_t>(r->ptrmap_.Find(ptr)) != x) {
-      ABSL_RAW_LOG(FATAL, "Did not find live node in hash table %u %p", x, ptr);
-    }
-    if (nx->visited) {
-      ABSL_RAW_LOG(FATAL, "Did not clear visited marker on node %u", x);
-    }
-    if (!ranks.insert(nx->rank)) {
-      ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %d", nx->rank);
-    }
-    HASH_FOR_EACH(y, nx->out) {
-      Node* ny = r->nodes_[y];
-      if (nx->rank >= ny->rank) {
-        ABSL_RAW_LOG(FATAL, "Edge %u->%d has bad rank assignment %d->%d", x, y,
-                     nx->rank, ny->rank);
-      }
-    }
-  }
-  return true;
-}
-
-GraphId GraphCycles::GetId(void* ptr) {
-  int32_t i = rep_->ptrmap_.Find(ptr);
-  if (i != -1) {
-    return MakeId(i, rep_->nodes_[i]->version);
-  } else if (rep_->free_nodes_.empty()) {
-    Node* n =
-        new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Node), arena))
-            Node;
-    n->version = 1;  // Avoid 0 since it is used by InvalidGraphId()
-    n->visited = false;
-    n->rank = rep_->nodes_.size();
-    n->masked_ptr = base_internal::HidePtr(ptr);
-    n->nstack = 0;
-    n->priority = 0;
-    rep_->nodes_.push_back(n);
-    rep_->ptrmap_.Add(ptr, n->rank);
-    return MakeId(n->rank, n->version);
-  } else {
-    // Preserve preceding rank since the set of ranks in use must be
-    // a permutation of [0,rep_->nodes_.size()-1].
-    int32_t r = rep_->free_nodes_.back();
-    rep_->free_nodes_.pop_back();
-    Node* n = rep_->nodes_[r];
-    n->masked_ptr = base_internal::HidePtr(ptr);
-    n->nstack = 0;
-    n->priority = 0;
-    rep_->ptrmap_.Add(ptr, r);
-    return MakeId(r, n->version);
-  }
-}
-
-void GraphCycles::RemoveNode(void* ptr) {
-  int32_t i = rep_->ptrmap_.Remove(ptr);
-  if (i == -1) {
-    return;
-  }
-  Node* x = rep_->nodes_[i];
-  HASH_FOR_EACH(y, x->out) {
-    rep_->nodes_[y]->in.erase(i);
-  }
-  HASH_FOR_EACH(y, x->in) {
-    rep_->nodes_[y]->out.erase(i);
-  }
-  x->in.clear();
-  x->out.clear();
-  x->masked_ptr = base_internal::HidePtr<void>(nullptr);
-  if (x->version == std::numeric_limits<uint32_t>::max()) {
-    // Cannot use x any more
-  } else {
-    x->version++;  // Invalidates all copies of node.
-    rep_->free_nodes_.push_back(i);
-  }
-}
-
-void* GraphCycles::Ptr(GraphId id) {
-  Node* n = FindNode(rep_, id);
-  return n == nullptr ? nullptr
-                      : base_internal::UnhidePtr<void>(n->masked_ptr);
-}
-
-bool GraphCycles::HasNode(GraphId node) {
-  return FindNode(rep_, node) != nullptr;
-}
-
-bool GraphCycles::HasEdge(GraphId x, GraphId y) const {
-  Node* xn = FindNode(rep_, x);
-  return xn && FindNode(rep_, y) && xn->out.contains(NodeIndex(y));
-}
-
-void GraphCycles::RemoveEdge(GraphId x, GraphId y) {
-  Node* xn = FindNode(rep_, x);
-  Node* yn = FindNode(rep_, y);
-  if (xn && yn) {
-    xn->out.erase(NodeIndex(y));
-    yn->in.erase(NodeIndex(x));
-    // No need to update the rank assignment since a previous valid
-    // rank assignment remains valid after an edge deletion.
-  }
-}
-
-static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound);
-static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound);
-static void Reorder(GraphCycles::Rep* r);
-static void Sort(const Vec<Node*>&, Vec<int32_t>* delta);
-static void MoveToList(
-    GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst);
-
-bool GraphCycles::InsertEdge(GraphId idx, GraphId idy) {
-  Rep* r = rep_;
-  const int32_t x = NodeIndex(idx);
-  const int32_t y = NodeIndex(idy);
-  Node* nx = FindNode(r, idx);
-  Node* ny = FindNode(r, idy);
-  if (nx == nullptr || ny == nullptr) return true;  // Expired ids
-
-  if (nx == ny) return false;  // Self edge
-  if (!nx->out.insert(y)) {
-    // Edge already exists.
-    return true;
-  }
-
-  ny->in.insert(x);
-
-  if (nx->rank <= ny->rank) {
-    // New edge is consistent with existing rank assignment.
-    return true;
-  }
-
-  // Current rank assignments are incompatible with the new edge.  Recompute.
-  // We only need to consider nodes that fall in the range [ny->rank,nx->rank].
-  if (!ForwardDFS(r, y, nx->rank)) {
-    // Found a cycle.  Undo the insertion and tell caller.
-    nx->out.erase(y);
-    ny->in.erase(x);
-    // Since we do not call Reorder() on this path, clear any visited
-    // markers left by ForwardDFS.
-    for (const auto& d : r->deltaf_) {
-      r->nodes_[d]->visited = false;
-    }
-    return false;
-  }
-  BackwardDFS(r, x, ny->rank);
-  Reorder(r);
-  return true;
-}
-
-static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound) {
-  // Avoid recursion since stack space might be limited.
-  // We instead keep a stack of nodes to visit.
-  r->deltaf_.clear();
-  r->stack_.clear();
-  r->stack_.push_back(n);
-  while (!r->stack_.empty()) {
-    n = r->stack_.back();
-    r->stack_.pop_back();
-    Node* nn = r->nodes_[n];
-    if (nn->visited) continue;
-
-    nn->visited = true;
-    r->deltaf_.push_back(n);
-
-    HASH_FOR_EACH(w, nn->out) {
-      Node* nw = r->nodes_[w];
-      if (nw->rank == upper_bound) {
-        return false;  // Cycle
-      }
-      if (!nw->visited && nw->rank < upper_bound) {
-        r->stack_.push_back(w);
-      }
-    }
-  }
-  return true;
-}
-
-static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound) {
-  r->deltab_.clear();
-  r->stack_.clear();
-  r->stack_.push_back(n);
-  while (!r->stack_.empty()) {
-    n = r->stack_.back();
-    r->stack_.pop_back();
-    Node* nn = r->nodes_[n];
-    if (nn->visited) continue;
-
-    nn->visited = true;
-    r->deltab_.push_back(n);
-
-    HASH_FOR_EACH(w, nn->in) {
-      Node* nw = r->nodes_[w];
-      if (!nw->visited && lower_bound < nw->rank) {
-        r->stack_.push_back(w);
-      }
-    }
-  }
-}
-
-static void Reorder(GraphCycles::Rep* r) {
-  Sort(r->nodes_, &r->deltab_);
-  Sort(r->nodes_, &r->deltaf_);
-
-  // Adds contents of delta lists to list_ (backwards deltas first).
-  r->list_.clear();
-  MoveToList(r, &r->deltab_, &r->list_);
-  MoveToList(r, &r->deltaf_, &r->list_);
-
-  // Produce sorted list of all ranks that will be reassigned.
-  r->merged_.resize(r->deltab_.size() + r->deltaf_.size());
-  std::merge(r->deltab_.begin(), r->deltab_.end(),
-             r->deltaf_.begin(), r->deltaf_.end(),
-             r->merged_.begin());
-
-  // Assign the ranks in order to the collected list.
-  for (uint32_t i = 0; i < r->list_.size(); i++) {
-    r->nodes_[r->list_[i]]->rank = r->merged_[i];
-  }
-}
-
-static void Sort(const Vec<Node*>& nodes, Vec<int32_t>* delta) {
-  struct ByRank {
-    const Vec<Node*>* nodes;
-    bool operator()(int32_t a, int32_t b) const {
-      return (*nodes)[a]->rank < (*nodes)[b]->rank;
-    }
-  };
-  ByRank cmp;
-  cmp.nodes = &nodes;
-  std::sort(delta->begin(), delta->end(), cmp);
-}
-
-static void MoveToList(
-    GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst) {
-  for (auto& v : *src) {
-    int32_t w = v;
-    v = r->nodes_[w]->rank;         // Replace v entry with its rank
-    r->nodes_[w]->visited = false;  // Prepare for future DFS calls
-    dst->push_back(w);
-  }
-}
-
-int GraphCycles::FindPath(GraphId idx, GraphId idy, int max_path_len,
-                          GraphId path[]) const {
-  Rep* r = rep_;
-  if (FindNode(r, idx) == nullptr || FindNode(r, idy) == nullptr) return 0;
-  const int32_t x = NodeIndex(idx);
-  const int32_t y = NodeIndex(idy);
-
-  // Forward depth first search starting at x until we hit y.
-  // As we descend into a node, we push it onto the path.
-  // As we leave a node, we remove it from the path.
-  int path_len = 0;
-
-  NodeSet seen;
-  r->stack_.clear();
-  r->stack_.push_back(x);
-  while (!r->stack_.empty()) {
-    int32_t n = r->stack_.back();
-    r->stack_.pop_back();
-    if (n < 0) {
-      // Marker to indicate that we are leaving a node
-      path_len--;
-      continue;
-    }
-
-    if (path_len < max_path_len) {
-      path[path_len] = MakeId(n, rep_->nodes_[n]->version);
-    }
-    path_len++;
-    r->stack_.push_back(-1);  // Will remove tentative path entry
-
-    if (n == y) {
-      return path_len;
-    }
-
-    HASH_FOR_EACH(w, r->nodes_[n]->out) {
-      if (seen.insert(w)) {
-        r->stack_.push_back(w);
-      }
-    }
-  }
-
-  return 0;
-}
-
-bool GraphCycles::IsReachable(GraphId x, GraphId y) const {
-  return FindPath(x, y, 0, nullptr) > 0;
-}
-
-void GraphCycles::UpdateStackTrace(GraphId id, int priority,
-                                   int (*get_stack_trace)(void** stack, int)) {
-  Node* n = FindNode(rep_, id);
-  if (n == nullptr || n->priority >= priority) {
-    return;
-  }
-  n->nstack = (*get_stack_trace)(n->stack, ABSL_ARRAYSIZE(n->stack));
-  n->priority = priority;
-}
-
-int GraphCycles::GetStackTrace(GraphId id, void*** ptr) {
-  Node* n = FindNode(rep_, id);
-  if (n == nullptr) {
-    *ptr = nullptr;
-    return 0;
-  } else {
-    *ptr = n->stack;
-    return n->nstack;
-  }
-}
-
-}  // namespace synchronization_internal
-ABSL_NAMESPACE_END
-}  // namespace absl
-
-#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING
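
The file removed above keeps InsertEdge() cheap through one invariant: every
live node carries a distinct integer rank, and the ranks form a topological
order of the graph. An edge x->y with rank(x) < rank(y) is accepted
immediately; otherwise only nodes whose ranks fall in [rank(y), rank(x)] are
re-examined by the two bounded DFS passes. Below is a minimal, self-contained
sketch of that rank-maintenance scheme, with std:: containers standing in for
the arena-backed Vec/NodeSet above (TinyGraphCycles and everything in it is
illustrative, not Abseil code):

    #include <algorithm>
    #include <cstddef>
    #include <unordered_set>
    #include <vector>

    class TinyGraphCycles {
     public:
      int AddNode() {
        int id = static_cast<int>(out_.size());
        out_.emplace_back();
        in_.emplace_back();
        rank_.push_back(id);  // A new node takes the next-highest rank.
        return id;
      }

      // Returns false, leaving the graph unchanged, if x->y closes a cycle.
      bool InsertEdge(int x, int y) {
        if (x == y) return false;                    // Self edge.
        if (!out_[x].insert(y).second) return true;  // Edge already present.
        in_[y].insert(x);
        if (rank_[x] < rank_[y]) return true;  // Existing ranks still valid.
        std::vector<int> deltaf, deltab;
        if (!ForwardDFS(y, rank_[x], &deltaf)) {  // Reached x: a cycle.
          out_[x].erase(y);
          in_[y].erase(x);
          return false;
        }
        BackwardDFS(x, rank_[y], &deltab);
        Reorder(&deltaf, &deltab);
        return true;
      }

     private:
      // Collects nodes reachable from n whose rank is below upper; returns
      // false if a node with rank == upper (i.e. x itself) is reachable.
      bool ForwardDFS(int n, int upper, std::vector<int>* delta) {
        std::vector<int> stack = {n};
        std::unordered_set<int> seen;
        while (!stack.empty()) {
          n = stack.back();
          stack.pop_back();
          if (!seen.insert(n).second) continue;
          delta->push_back(n);
          for (int w : out_[n]) {
            if (rank_[w] == upper) return false;
            if (rank_[w] < upper) stack.push_back(w);
          }
        }
        return true;
      }

      // Collects nodes that can reach n and whose rank is above lower.
      void BackwardDFS(int n, int lower, std::vector<int>* delta) {
        std::vector<int> stack = {n};
        std::unordered_set<int> seen;
        while (!stack.empty()) {
          n = stack.back();
          stack.pop_back();
          if (!seen.insert(n).second) continue;
          delta->push_back(n);
          for (int w : in_[n]) {
            if (lower < rank_[w]) stack.push_back(w);
          }
        }
      }

      // Reassigns the affected ranks so every deltab node (x and its
      // relevant ancestors) ends up ranked before every deltaf node (y and
      // its relevant descendants), reusing exactly the same rank values.
      void Reorder(std::vector<int>* deltaf, std::vector<int>* deltab) {
        auto by_rank = [this](int a, int b) { return rank_[a] < rank_[b]; };
        std::sort(deltaf->begin(), deltaf->end(), by_rank);
        std::sort(deltab->begin(), deltab->end(), by_rank);
        std::vector<int> list(*deltab);
        list.insert(list.end(), deltaf->begin(), deltaf->end());
        std::vector<int> ranks;
        for (int n : list) ranks.push_back(rank_[n]);
        std::sort(ranks.begin(), ranks.end());
        for (std::size_t i = 0; i < list.size(); ++i) {
          rank_[list[i]] = ranks[i];
        }
      }

      std::vector<std::unordered_set<int>> out_, in_;
      std::vector<int> rank_;
    };

Because Reorder() reuses the old rank values, the ranks stay a permutation of
[0, n) and nodes outside the two delta sets never move, which is the same
invariant CheckInvariants() verifies above.
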
diff --git a/third_party/abseil_cpp/absl/synchronization/internal/graphcycles.h b/third_party/abseil_cpp/absl/synchronization/internal/graphcycles.h
deleted file mode 100644
index ceba33e4de89..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/internal/graphcycles.h
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef ABSL_SYNCHRONIZATION_INTERNAL_GRAPHCYCLES_H_
-#define ABSL_SYNCHRONIZATION_INTERNAL_GRAPHCYCLES_H_
-
-// GraphCycles detects the introduction of a cycle into a directed
-// graph that is being built up incrementally.
-//
-// Nodes are identified by small integers.  It is not possible to
-// record multiple edges with the same (source, destination) pair;
-// requests to add an edge where one already exists are silently
-// ignored.
-//
-// It is also not possible to introduce a cycle; an attempt to insert
-// an edge that would introduce a cycle fails and returns false.
-//
-// GraphCycles uses no internal locking; calls into it should be
-// serialized externally.
-
-// Performance considerations:
-//   Works well on sparse graphs, poorly on dense graphs.
-//   Extra information is maintained incrementally to detect cycles quickly.
-//   InsertEdge() is very fast when the edge already exists, and reasonably fast
-//   otherwise.
-//   FindPath() is linear in the size of the graph.
-// The current implementation uses O(|V|+|E|) space.
-
-#include <cstdint>
-
-#include "absl/base/config.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-// Opaque identifier for a graph node.
-struct GraphId {
-  uint64_t handle;
-
-  bool operator==(const GraphId& x) const { return handle == x.handle; }
-  bool operator!=(const GraphId& x) const { return handle != x.handle; }
-};
-
-// Return an invalid graph id that will never be assigned by GraphCycles.
-inline GraphId InvalidGraphId() {
-  return GraphId{0};
-}
-
-class GraphCycles {
- public:
-  GraphCycles();
-  ~GraphCycles();
-
-  // Return the id to use for ptr, assigning one if necessary.
-  // Subsequent calls with the same ptr value will return the same id
-  // until Remove().
-  GraphId GetId(void* ptr);
-
-  // Remove "ptr" from the graph.  Its corresponding node and all
-  // edges to and from it are removed.
-  void RemoveNode(void* ptr);
-
-  // Return the pointer associated with id, or nullptr if id is not
-  // currently in the graph.
-  void* Ptr(GraphId id);
-
-  // Attempt to insert an edge from source_node to dest_node.  If the
-  // edge would introduce a cycle, return false without making any
-  // changes. Otherwise add the edge and return true.
-  bool InsertEdge(GraphId source_node, GraphId dest_node);
-
-  // Remove any edge that exists from source_node to dest_node.
-  void RemoveEdge(GraphId source_node, GraphId dest_node);
-
-  // Return whether node exists in the graph.
-  bool HasNode(GraphId node);
-
-  // Return whether there is an edge directly from source_node to dest_node.
-  bool HasEdge(GraphId source_node, GraphId dest_node) const;
-
-  // Return whether dest_node is reachable from source_node
-  // by following edges.
-  bool IsReachable(GraphId source_node, GraphId dest_node) const;
-
-  // Find a path from "source" to "dest".  If such a path exists,
-  // place the nodes on the path in the array path[], and return
-  // the number of nodes on the path.  If the path is longer than
-  // max_path_len nodes, only the first max_path_len nodes are placed
-  // in path[].  The client should compare the return value with
-  // max_path_len" to see when this occurs.  If no path exists, return
-  // 0.  Any valid path stored in path[] will start with "source" and
-  // end with "dest".  There is no guarantee that the path is the
-  // shortest, but no node will appear twice in the path, except the
-  // source and destination node if they are identical; therefore, the
-  // return value is at most one greater than the number of nodes in
-  // the graph.
-  int FindPath(GraphId source, GraphId dest, int max_path_len,
-               GraphId path[]) const;
-
-  // Update the stack trace recorded for id with the current stack
-  // trace if the last time it was updated had a smaller priority
-  // than the priority passed on this call.
-  //
-  // *get_stack_trace is called to get the stack trace.
-  void UpdateStackTrace(GraphId id, int priority,
-                        int (*get_stack_trace)(void**, int));
-
-  // Set *ptr to the beginning of the array that holds the recorded
-  // stack trace for id and return the depth of the stack trace.
-  int GetStackTrace(GraphId id, void*** ptr);
-
-  // Check internal invariants. Crashes on failure, returns true on success.
-  // Expensive: should only be called from graphcycles_test.cc.
-  bool CheckInvariants() const;
-
-  // ----------------------------------------------------
-  struct Rep;
- private:
-  Rep *rep_;      // opaque representation
-  GraphCycles(const GraphCycles&) = delete;
-  GraphCycles& operator=(const GraphCycles&) = delete;
-};
-
-}  // namespace synchronization_internal
-ABSL_NAMESPACE_END
-}  // namespace absl
-
-#endif
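
As a usage sketch of the interface above (a hypothetical caller; GraphCycles
does no internal locking, so a real caller must serialize access externally):

    #include "absl/synchronization/internal/graphcycles.h"

    using absl::synchronization_internal::GraphCycles;
    using absl::synchronization_internal::GraphId;

    // Records that `held` is acquired before `acquiring`; returns true if
    // that ordering would close a lock cycle, i.e. a potential deadlock.
    bool WouldDeadlock(GraphCycles* g, void* held, void* acquiring) {
      GraphId a = g->GetId(held);
      GraphId b = g->GetId(acquiring);
      // InsertEdge refuses an edge that would introduce a cycle and
      // returns false in that case.
      return !g->InsertEdge(a, b);
    }
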
diff --git a/third_party/abseil_cpp/absl/synchronization/internal/graphcycles_benchmark.cc b/third_party/abseil_cpp/absl/synchronization/internal/graphcycles_benchmark.cc
deleted file mode 100644
index 54823e0ba51a..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/internal/graphcycles_benchmark.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/synchronization/internal/graphcycles.h"
-
-#include <algorithm>
-#include <cstdint>
-#include <vector>
-
-#include "benchmark/benchmark.h"
-#include "absl/base/internal/raw_logging.h"
-
-namespace {
-
-void BM_StressTest(benchmark::State& state) {
-  const int num_nodes = state.range(0);
-  while (state.KeepRunningBatch(num_nodes)) {
-    absl::synchronization_internal::GraphCycles g;
-    std::vector<absl::synchronization_internal::GraphId> nodes(num_nodes);
-    for (int i = 0; i < num_nodes; i++) {
-      nodes[i] = g.GetId(reinterpret_cast<void*>(static_cast<uintptr_t>(i)));
-    }
-    for (int i = 0; i < num_nodes; i++) {
-      int end = std::min(num_nodes, i + 5);
-      for (int j = i + 1; j < end; j++) {
-        ABSL_RAW_CHECK(g.InsertEdge(nodes[i], nodes[j]), "");
-      }
-    }
-  }
-}
-BENCHMARK(BM_StressTest)->Range(2048, 1048576);
-
-}  // namespace
diff --git a/third_party/abseil_cpp/absl/synchronization/internal/graphcycles_test.cc b/third_party/abseil_cpp/absl/synchronization/internal/graphcycles_test.cc
deleted file mode 100644
index 74eaffe7a806..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/internal/graphcycles_test.cc
+++ /dev/null
@@ -1,464 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/synchronization/internal/graphcycles.h"
-
-#include <map>
-#include <random>
-#include <unordered_set>
-#include <utility>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
-#include "absl/base/macros.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-// We emulate a GraphCycles object with a node vector and an edge vector.
-// We then compare the two implementations.
-
-using Nodes = std::vector<int>;
-struct Edge {
-  int from;
-  int to;
-};
-using Edges = std::vector<Edge>;
-using RandomEngine = std::mt19937_64;
-
-// Mapping from integer index to GraphId.
-typedef std::map<int, GraphId> IdMap;
-static GraphId Get(const IdMap& id, int num) {
-  auto iter = id.find(num);
-  return (iter == id.end()) ? InvalidGraphId() : iter->second;
-}
-
-// Return whether "to" is reachable from "from".
-static bool IsReachable(Edges *edges, int from, int to,
-                        std::unordered_set<int> *seen) {
-  seen->insert(from);     // we are investigating "from"; don't do it again
-  if (from == to) return true;
-  for (const auto &edge : *edges) {
-    if (edge.from == from) {
-      if (edge.to == to) {  // success via edge directly
-        return true;
-      } else if (seen->find(edge.to) == seen->end() &&  // success via edge
-                 IsReachable(edges, edge.to, to, seen)) {
-        return true;
-      }
-    }
-  }
-  return false;
-}
-
-static void PrintEdges(Edges *edges) {
-  ABSL_RAW_LOG(INFO, "EDGES (%zu)", edges->size());
-  for (const auto &edge : *edges) {
-    int a = edge.from;
-    int b = edge.to;
-    ABSL_RAW_LOG(INFO, "%d %d", a, b);
-  }
-  ABSL_RAW_LOG(INFO, "---");
-}
-
-static void PrintGCEdges(Nodes *nodes, const IdMap &id, GraphCycles *gc) {
-  ABSL_RAW_LOG(INFO, "GC EDGES");
-  for (int a : *nodes) {
-    for (int b : *nodes) {
-      if (gc->HasEdge(Get(id, a), Get(id, b))) {
-        ABSL_RAW_LOG(INFO, "%d %d", a, b);
-      }
-    }
-  }
-  ABSL_RAW_LOG(INFO, "---");
-}
-
-static void PrintTransitiveClosure(Nodes *nodes, Edges *edges) {
-  ABSL_RAW_LOG(INFO, "Transitive closure");
-  for (int a : *nodes) {
-    for (int b : *nodes) {
-      std::unordered_set<int> seen;
-      if (IsReachable(edges, a, b, &seen)) {
-        ABSL_RAW_LOG(INFO, "%d %d", a, b);
-      }
-    }
-  }
-  ABSL_RAW_LOG(INFO, "---");
-}
-
-static void PrintGCTransitiveClosure(Nodes *nodes, const IdMap &id,
-                                     GraphCycles *gc) {
-  ABSL_RAW_LOG(INFO, "GC Transitive closure");
-  for (int a : *nodes) {
-    for (int b : *nodes) {
-      if (gc->IsReachable(Get(id, a), Get(id, b))) {
-        ABSL_RAW_LOG(INFO, "%d %d", a, b);
-      }
-    }
-  }
-  ABSL_RAW_LOG(INFO, "---");
-}
-
-static void CheckTransitiveClosure(Nodes *nodes, Edges *edges, const IdMap &id,
-                                   GraphCycles *gc) {
-  std::unordered_set<int> seen;
-  for (const auto &a : *nodes) {
-    for (const auto &b : *nodes) {
-      seen.clear();
-      bool gc_reachable = gc->IsReachable(Get(id, a), Get(id, b));
-      bool reachable = IsReachable(edges, a, b, &seen);
-      if (gc_reachable != reachable) {
-        PrintEdges(edges);
-        PrintGCEdges(nodes, id, gc);
-        PrintTransitiveClosure(nodes, edges);
-        PrintGCTransitiveClosure(nodes, id, gc);
-        ABSL_RAW_LOG(FATAL, "gc_reachable %s reachable %s a %d b %d",
-                     gc_reachable ? "true" : "false",
-                     reachable ? "true" : "false", a, b);
-      }
-    }
-  }
-}
-
-static void CheckEdges(Nodes *nodes, Edges *edges, const IdMap &id,
-                       GraphCycles *gc) {
-  int count = 0;
-  for (const auto &edge : *edges) {
-    int a = edge.from;
-    int b = edge.to;
-    if (!gc->HasEdge(Get(id, a), Get(id, b))) {
-      PrintEdges(edges);
-      PrintGCEdges(nodes, id, gc);
-      ABSL_RAW_LOG(FATAL, "!gc->HasEdge(%d, %d)", a, b);
-    }
-  }
-  for (const auto &a : *nodes) {
-    for (const auto &b : *nodes) {
-      if (gc->HasEdge(Get(id, a), Get(id, b))) {
-        count++;
-      }
-    }
-  }
-  if (count != edges->size()) {
-    PrintEdges(edges);
-    PrintGCEdges(nodes, id, gc);
-    ABSL_RAW_LOG(FATAL, "edges->size() %zu  count %d", edges->size(), count);
-  }
-}
-
-static void CheckInvariants(const GraphCycles &gc) {
-  if (ABSL_PREDICT_FALSE(!gc.CheckInvariants()))
-    ABSL_RAW_LOG(FATAL, "CheckInvariants");
-}
-
-// Returns the index of a randomly chosen node in *nodes.
-// Requires *nodes be non-empty.
-static int RandomNode(RandomEngine* rng, Nodes *nodes) {
-  std::uniform_int_distribution<int> uniform(0, nodes->size()-1);
-  return uniform(*rng);
-}
-
-// Returns the index of a randomly chosen edge in *edges.
-// Requires *edges be non-empty.
-static int RandomEdge(RandomEngine* rng, Edges *edges) {
-  std::uniform_int_distribution<int> uniform(0, edges->size()-1);
-  return uniform(*rng);
-}
-
-// Returns the index of edge (from, to) in *edges or -1 if it is not in *edges.
-static int EdgeIndex(Edges *edges, int from, int to) {
-  int i = 0;
-  while (i != edges->size() &&
-         ((*edges)[i].from != from || (*edges)[i].to != to)) {
-    i++;
-  }
-  return i == edges->size() ? -1 : i;
-}
-
-TEST(GraphCycles, RandomizedTest) {
-  int next_node = 0;
-  Nodes nodes;
-  Edges edges;   // from, to
-  IdMap id;
-  GraphCycles graph_cycles;
-  static const int kMaxNodes = 7;  // use <= 7 nodes to keep test short
-  static const int kDataOffset = 17;  // an offset to the node-specific data
-  int n = 100000;
-  int op = 0;
-  RandomEngine rng(testing::UnitTest::GetInstance()->random_seed());
-  std::uniform_int_distribution<int> uniform(0, 5);
-
-  auto ptr = [](intptr_t i) {
-    return reinterpret_cast<void*>(i + kDataOffset);
-  };
-
-  for (int iter = 0; iter != n; iter++) {
-    for (const auto &node : nodes) {
-      ASSERT_EQ(graph_cycles.Ptr(Get(id, node)), ptr(node)) << " node " << node;
-    }
-    CheckEdges(&nodes, &edges, id, &graph_cycles);
-    CheckTransitiveClosure(&nodes, &edges, id, &graph_cycles);
-    op = uniform(rng);
-    switch (op) {
-    case 0:     // Add a node
-      if (nodes.size() < kMaxNodes) {
-        int new_node = next_node++;
-        GraphId new_gnode = graph_cycles.GetId(ptr(new_node));
-        ASSERT_NE(new_gnode, InvalidGraphId());
-        id[new_node] = new_gnode;
-        ASSERT_EQ(ptr(new_node), graph_cycles.Ptr(new_gnode));
-        nodes.push_back(new_node);
-      }
-      break;
-
-    case 1:    // Remove a node
-      if (nodes.size() > 0) {
-        int node_index = RandomNode(&rng, &nodes);
-        int node = nodes[node_index];
-        nodes[node_index] = nodes.back();
-        nodes.pop_back();
-        graph_cycles.RemoveNode(ptr(node));
-        ASSERT_EQ(graph_cycles.Ptr(Get(id, node)), nullptr);
-        id.erase(node);
-        int i = 0;
-        while (i != edges.size()) {
-          if (edges[i].from == node || edges[i].to == node) {
-            edges[i] = edges.back();
-            edges.pop_back();
-          } else {
-            i++;
-          }
-        }
-      }
-      break;
-
-    case 2:   // Add an edge
-      if (nodes.size() > 0) {
-        int from = RandomNode(&rng, &nodes);
-        int to = RandomNode(&rng, &nodes);
-        if (EdgeIndex(&edges, nodes[from], nodes[to]) == -1) {
-          if (graph_cycles.InsertEdge(id[nodes[from]], id[nodes[to]])) {
-            Edge new_edge;
-            new_edge.from = nodes[from];
-            new_edge.to = nodes[to];
-            edges.push_back(new_edge);
-          } else {
-            std::unordered_set<int> seen;
-            ASSERT_TRUE(IsReachable(&edges, nodes[to], nodes[from], &seen))
-                << "Edge " << nodes[to] << "->" << nodes[from];
-          }
-        }
-      }
-      break;
-
-    case 3:    // Remove an edge
-      if (edges.size() > 0) {
-        int i = RandomEdge(&rng, &edges);
-        int from = edges[i].from;
-        int to = edges[i].to;
-        ASSERT_EQ(i, EdgeIndex(&edges, from, to));
-        edges[i] = edges.back();
-        edges.pop_back();
-        ASSERT_EQ(-1, EdgeIndex(&edges, from, to));
-        graph_cycles.RemoveEdge(id[from], id[to]);
-      }
-      break;
-
-    case 4:   // Check a path
-      if (nodes.size() > 0) {
-        int from = RandomNode(&rng, &nodes);
-        int to = RandomNode(&rng, &nodes);
-        GraphId path[2*kMaxNodes];
-        int path_len = graph_cycles.FindPath(id[nodes[from]], id[nodes[to]],
-                                             ABSL_ARRAYSIZE(path), path);
-        std::unordered_set<int> seen;
-        bool reachable = IsReachable(&edges, nodes[from], nodes[to], &seen);
-        bool gc_reachable =
-            graph_cycles.IsReachable(Get(id, nodes[from]), Get(id, nodes[to]));
-        ASSERT_EQ(path_len != 0, reachable);
-        ASSERT_EQ(path_len != 0, gc_reachable);
-        // In the following line, we add one because a node can appear
-        // twice, if the path is from that node to itself, perhaps via
-        // every other node.
-        ASSERT_LE(path_len, kMaxNodes + 1);
-        if (path_len != 0) {
-          ASSERT_EQ(id[nodes[from]], path[0]);
-          ASSERT_EQ(id[nodes[to]], path[path_len-1]);
-          for (int i = 1; i < path_len; i++) {
-            ASSERT_TRUE(graph_cycles.HasEdge(path[i-1], path[i]));
-          }
-        }
-      }
-      break;
-
-    case 5:  // Check invariants
-      CheckInvariants(graph_cycles);
-      break;
-
-    default:
-      ABSL_RAW_LOG(FATAL, "op %d", op);
-    }
-
-    // Very rarely, test graph expansion by adding then removing many nodes.
-    std::bernoulli_distribution one_in_1024(1.0 / 1024);
-    if (one_in_1024(rng)) {
-      CheckEdges(&nodes, &edges, id, &graph_cycles);
-      CheckTransitiveClosure(&nodes, &edges, id, &graph_cycles);
-      for (int i = 0; i != 256; i++) {
-        int new_node = next_node++;
-        GraphId new_gnode = graph_cycles.GetId(ptr(new_node));
-        ASSERT_NE(InvalidGraphId(), new_gnode);
-        id[new_node] = new_gnode;
-        ASSERT_EQ(ptr(new_node), graph_cycles.Ptr(new_gnode));
-        for (const auto &node : nodes) {
-          ASSERT_NE(node, new_node);
-        }
-        nodes.push_back(new_node);
-      }
-      for (int i = 0; i != 256; i++) {
-        ASSERT_GT(nodes.size(), 0);
-        int node_index = RandomNode(&rng, &nodes);
-        int node = nodes[node_index];
-        nodes[node_index] = nodes.back();
-        nodes.pop_back();
-        graph_cycles.RemoveNode(ptr(node));
-        id.erase(node);
-        int j = 0;
-        while (j != edges.size()) {
-          if (edges[j].from == node || edges[j].to == node) {
-            edges[j] = edges.back();
-            edges.pop_back();
-          } else {
-            j++;
-          }
-        }
-      }
-      CheckInvariants(graph_cycles);
-    }
-  }
-}
-
-class GraphCyclesTest : public ::testing::Test {
- public:
-  IdMap id_;
-  GraphCycles g_;
-
-  static void* Ptr(int i) {
-    return reinterpret_cast<void*>(static_cast<uintptr_t>(i));
-  }
-
-  static int Num(void* ptr) {
-    return static_cast<int>(reinterpret_cast<uintptr_t>(ptr));
-  }
-
-  // Test relies on the ith GetId() call returning the node numbered i.
-  GraphCyclesTest() {
-    for (int i = 0; i < 100; i++) {
-      id_[i] = g_.GetId(Ptr(i));
-    }
-    CheckInvariants(g_);
-  }
-
-  bool AddEdge(int x, int y) {
-    return g_.InsertEdge(Get(id_, x), Get(id_, y));
-  }
-
-  void AddMultiples() {
-    // For every node x > 0: add edge to 2*x, 3*x
-    for (int x = 1; x < 25; x++) {
-      EXPECT_TRUE(AddEdge(x, 2*x)) << x;
-      EXPECT_TRUE(AddEdge(x, 3*x)) << x;
-    }
-    CheckInvariants(g_);
-  }
-
-  std::string Path(int x, int y) {
-    GraphId path[5];
-    int np = g_.FindPath(Get(id_, x), Get(id_, y), ABSL_ARRAYSIZE(path), path);
-    std::string result;
-    for (int i = 0; i < np; i++) {
-      if (i >= ABSL_ARRAYSIZE(path)) {
-        result += " ...";
-        break;
-      }
-      if (!result.empty()) result.push_back(' ');
-      char buf[20];
-      snprintf(buf, sizeof(buf), "%d", Num(g_.Ptr(path[i])));
-      result += buf;
-    }
-    return result;
-  }
-};
-
-TEST_F(GraphCyclesTest, NoCycle) {
-  AddMultiples();
-  CheckInvariants(g_);
-}
-
-TEST_F(GraphCyclesTest, SimpleCycle) {
-  AddMultiples();
-  EXPECT_FALSE(AddEdge(8, 4));
-  EXPECT_EQ("4 8", Path(4, 8));
-  CheckInvariants(g_);
-}
-
-TEST_F(GraphCyclesTest, IndirectCycle) {
-  AddMultiples();
-  EXPECT_TRUE(AddEdge(16, 9));
-  CheckInvariants(g_);
-  EXPECT_FALSE(AddEdge(9, 2));
-  EXPECT_EQ("2 4 8 16 9", Path(2, 9));
-  CheckInvariants(g_);
-}
-
-TEST_F(GraphCyclesTest, LongPath) {
-  ASSERT_TRUE(AddEdge(2, 4));
-  ASSERT_TRUE(AddEdge(4, 6));
-  ASSERT_TRUE(AddEdge(6, 8));
-  ASSERT_TRUE(AddEdge(8, 10));
-  ASSERT_TRUE(AddEdge(10, 12));
-  ASSERT_FALSE(AddEdge(12, 2));
-  EXPECT_EQ("2 4 6 8 10 ...", Path(2, 12));
-  CheckInvariants(g_);
-}
-
-TEST_F(GraphCyclesTest, RemoveNode) {
-  ASSERT_TRUE(AddEdge(1, 2));
-  ASSERT_TRUE(AddEdge(2, 3));
-  ASSERT_TRUE(AddEdge(3, 4));
-  ASSERT_TRUE(AddEdge(4, 5));
-  g_.RemoveNode(g_.Ptr(id_[3]));
-  id_.erase(3);
-  ASSERT_TRUE(AddEdge(5, 1));
-}
-
-TEST_F(GraphCyclesTest, ManyEdges) {
-  const int N = 50;
-  for (int i = 0; i < N; i++) {
-    for (int j = 1; j < N; j++) {
-      ASSERT_TRUE(AddEdge(i, i+j));
-    }
-  }
-  CheckInvariants(g_);
-  ASSERT_TRUE(AddEdge(2*N-1, 0));
-  CheckInvariants(g_);
-  ASSERT_FALSE(AddEdge(10, 9));
-  CheckInvariants(g_);
-}
-
-}  // namespace synchronization_internal
-ABSL_NAMESPACE_END
-}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/synchronization/internal/kernel_timeout.h b/third_party/abseil_cpp/absl/synchronization/internal/kernel_timeout.h
deleted file mode 100644
index bbd4d2d70f44..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/internal/kernel_timeout.h
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-// An optional absolute timeout, with nanosecond granularity,
-// compatible with absl::Time. Suitable for in-register
-// parameter-passing (e.g. syscalls).
-// Constructible from an absl::Time (for a timeout to be respected) or {}
-// (for "no timeout").
-// This is a private low-level API for use by a handful of low-level
-// components that are friends of this class. Higher-level components
-// should build APIs based on absl::Time and absl::Duration.
-
-#ifndef ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
-#define ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
-
-#include <time.h>
-
-#include <algorithm>
-#include <limits>
-
-#include "absl/base/internal/raw_logging.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-class Futex;
-class Waiter;
-
-class KernelTimeout {
- public:
-  // A timeout that should expire at <t>.  Any value, in the full
-  // InfinitePast() to InfiniteFuture() range, is valid here and will be
-  // respected.
-  explicit KernelTimeout(absl::Time t) : ns_(MakeNs(t)) {}
-  // No timeout.
-  KernelTimeout() : ns_(0) {}
-
-  // A more explicit factory for those who prefer it.  Equivalent to {}.
-  static KernelTimeout Never() { return {}; }
-
-  // We explicitly do not support other custom formats: timespec, int64_t nanos.
-  // Unify on this and absl::Time, please.
-
-  bool has_timeout() const { return ns_ != 0; }
-
-  // Convert to parameter for sem_timedwait/futex/similar.  Only for approved
-  // users.  Do not call if !has_timeout.
-  struct timespec MakeAbsTimespec();
-
- private:
-  // internal rep, not user visible: ns after unix epoch.
-  // zero = no timeout.
-  // Negative values are treated as an unlikely (and certainly expired!)
-  // but valid timeout.
-  int64_t ns_;
-
-  static int64_t MakeNs(absl::Time t) {
-    // Optimization: InfiniteFuture is a common "no timeout" value
-    // and is cheaper to compare than to convert.
-    if (t == absl::InfiniteFuture()) return 0;
-    int64_t x = ToUnixNanos(t);
-
-    // A timeout that lands exactly on the epoch (x=0) needs to be respected,
-    // so we alter it unnoticeably to 1.  Negative timeouts are in
-    // theory supported, but handled poorly by the kernel (long
-    // delays) so push them forward too; since all such times have
-    // already passed, it's indistinguishable.
-    if (x <= 0) x = 1;
-    // A time larger than what can be represented to the kernel is treated
-    // as no timeout.
-    if (x == (std::numeric_limits<int64_t>::max)()) x = 0;
-    return x;
-  }
-
-#ifdef _WIN32
-  // Converts to milliseconds from now, or INFINITE when
-  // !has_timeout(). For use by SleepConditionVariableSRW on
-  // Windows. Callers should recognize that the return value is a
-  // relative duration (it should be recomputed by calling this method
-  // in the case of a spurious wakeup).
-  // This header file may be included transitively by public header files,
-  // so we define our own DWORD and INFINITE instead of getting them from
-  // <intsafe.h> and <WinBase.h>.
-  typedef unsigned long DWord;  // NOLINT
-  DWord InMillisecondsFromNow() const {
-    constexpr DWord kInfinite = (std::numeric_limits<DWord>::max)();
-    if (!has_timeout()) {
-      return kInfinite;
-    }
-    // The use of absl::Now() to convert from absolute time to
-    // relative time means that absl::Now() cannot use anything that
-    // depends on KernelTimeout (for example, Mutex) on Windows.
-    int64_t now = ToUnixNanos(absl::Now());
-    if (ns_ >= now) {
-      // Round up so that Now() + ms_from_now >= ns_.
-      constexpr uint64_t max_nanos =
-          (std::numeric_limits<int64_t>::max)() - 999999u;
-      uint64_t ms_from_now =
-          (std::min<uint64_t>(max_nanos, ns_ - now) + 999999u) / 1000000u;
-      if (ms_from_now > kInfinite) {
-        return kInfinite;
-      }
-      return static_cast<DWord>(ms_from_now);
-    }
-    return 0;
-  }
-#endif
-
-  friend class Futex;
-  friend class Waiter;
-};
-
-inline struct timespec KernelTimeout::MakeAbsTimespec() {
-  int64_t n = ns_;
-  static const int64_t kNanosPerSecond = 1000 * 1000 * 1000;
-  if (n == 0) {
-    ABSL_RAW_LOG(
-        ERROR, "Tried to create a timespec from a non-timeout; never do this.");
-    // But we'll try to continue sanely.  no-timeout ~= saturated timeout.
-    n = (std::numeric_limits<int64_t>::max)();
-  }
-
-  // Kernel APIs validate timespecs as being at or after the epoch,
-  // despite the kernel time type being signed.  However, no one can
-  // tell the difference between a timeout at or before the epoch (since
-  // all such timeouts have expired!).
-  if (n < 0) n = 0;
-
-  struct timespec abstime;
-  int64_t seconds = (std::min)(n / kNanosPerSecond,
-                               int64_t{(std::numeric_limits<time_t>::max)()});
-  abstime.tv_sec = static_cast<time_t>(seconds);
-  abstime.tv_nsec = static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond);
-  return abstime;
-}
-
-}  // namespace synchronization_internal
-ABSL_NAMESPACE_END
-}  // namespace absl
-
-#endif  // ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
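
The class above is for "approved users" only, but its intended call pattern
is simple. A sketch of how a POSIX waiter might convert a deadline into the
absolute timespec that sem_timedwait() expects (error handling elided;
WaitWithDeadline is an illustrative name, and this assumes a platform with
semaphore.h):

    #include <semaphore.h>

    #include "absl/synchronization/internal/kernel_timeout.h"
    #include "absl/time/time.h"

    // Returns true if the semaphore was acquired, false on timeout.
    bool WaitWithDeadline(sem_t* sem, absl::Time deadline) {
      absl::synchronization_internal::KernelTimeout t(deadline);
      if (!t.has_timeout()) {
        // Never() or InfiniteFuture(): block indefinitely.
        return sem_wait(sem) == 0;
      }
      struct timespec abstime = t.MakeAbsTimespec();
      return sem_timedwait(sem, &abstime) == 0;
    }
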
diff --git a/third_party/abseil_cpp/absl/synchronization/internal/per_thread_sem.cc b/third_party/abseil_cpp/absl/synchronization/internal/per_thread_sem.cc
deleted file mode 100644
index 821ca9b4e9c3..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/internal/per_thread_sem.cc
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This file is a no-op if the required LowLevelAlloc support is missing.
-#include "absl/base/internal/low_level_alloc.h"
-#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
-
-#include "absl/synchronization/internal/per_thread_sem.h"
-
-#include <atomic>
-
-#include "absl/base/attributes.h"
-#include "absl/base/internal/thread_identity.h"
-#include "absl/synchronization/internal/waiter.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-void PerThreadSem::SetThreadBlockedCounter(std::atomic<int> *counter) {
-  base_internal::ThreadIdentity *identity;
-  identity = GetOrCreateCurrentThreadIdentity();
-  identity->blocked_count_ptr = counter;
-}
-
-std::atomic<int> *PerThreadSem::GetThreadBlockedCounter() {
-  base_internal::ThreadIdentity *identity;
-  identity = GetOrCreateCurrentThreadIdentity();
-  return identity->blocked_count_ptr;
-}
-
-void PerThreadSem::Init(base_internal::ThreadIdentity *identity) {
-  new (Waiter::GetWaiter(identity)) Waiter();
-  identity->ticker.store(0, std::memory_order_relaxed);
-  identity->wait_start.store(0, std::memory_order_relaxed);
-  identity->is_idle.store(false, std::memory_order_relaxed);
-}
-
-void PerThreadSem::Destroy(base_internal::ThreadIdentity *identity) {
-  Waiter::GetWaiter(identity)->~Waiter();
-}
-
-void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) {
-  const int ticker =
-      identity->ticker.fetch_add(1, std::memory_order_relaxed) + 1;
-  const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
-  const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
-  if (wait_start && (ticker - wait_start > Waiter::kIdlePeriods) && !is_idle) {
-    // Wake up the waiting thread since it is time for it to become idle.
-    Waiter::GetWaiter(identity)->Poke();
-  }
-}
-
-}  // namespace synchronization_internal
-ABSL_NAMESPACE_END
-}  // namespace absl
-
-extern "C" {
-
-ABSL_ATTRIBUTE_WEAK void AbslInternalPerThreadSemPost(
-    absl::base_internal::ThreadIdentity *identity) {
-  absl::synchronization_internal::Waiter::GetWaiter(identity)->Post();
-}
-
-ABSL_ATTRIBUTE_WEAK bool AbslInternalPerThreadSemWait(
-    absl::synchronization_internal::KernelTimeout t) {
-  bool timeout = false;
-  absl::base_internal::ThreadIdentity *identity;
-  identity = absl::synchronization_internal::GetOrCreateCurrentThreadIdentity();
-
-  // Ensure wait_start != 0.
-  int ticker = identity->ticker.load(std::memory_order_relaxed);
-  identity->wait_start.store(ticker ? ticker : 1, std::memory_order_relaxed);
-  identity->is_idle.store(false, std::memory_order_relaxed);
-
-  if (identity->blocked_count_ptr != nullptr) {
-    // Increment count of threads blocked in a given thread pool.
-    identity->blocked_count_ptr->fetch_add(1, std::memory_order_relaxed);
-  }
-
-  timeout =
-      !absl::synchronization_internal::Waiter::GetWaiter(identity)->Wait(t);
-
-  if (identity->blocked_count_ptr != nullptr) {
-    identity->blocked_count_ptr->fetch_sub(1, std::memory_order_relaxed);
-  }
-
-  identity->is_idle.store(false, std::memory_order_relaxed);
-  identity->wait_start.store(0, std::memory_order_relaxed);
-  return !timeout;
-}
-
-}  // extern "C"
-
-#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING
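
Because the two definitions above are ABSL_ATTRIBUTE_WEAK extern "C"
symbols, a strong definition elsewhere in the binary replaces them at link
time. A purely illustrative override that counts waits before delegating to
the same Waiter the default uses (g_sem_waits is a made-up name, and a real
override would also need to reproduce the wait_start/is_idle bookkeeping the
default definition performs above):

    #include <atomic>

    #include "absl/synchronization/internal/create_thread_identity.h"
    #include "absl/synchronization/internal/kernel_timeout.h"
    #include "absl/synchronization/internal/waiter.h"

    std::atomic<long> g_sem_waits{0};  // Illustrative instrumentation.

    extern "C" bool AbslInternalPerThreadSemWait(
        absl::synchronization_internal::KernelTimeout t) {
      g_sem_waits.fetch_add(1, std::memory_order_relaxed);
      auto* identity =
          absl::synchronization_internal::GetOrCreateCurrentThreadIdentity();
      // Same return convention as the default: true = woke, false = timeout.
      return absl::synchronization_internal::Waiter::GetWaiter(identity)
          ->Wait(t);
    }
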
diff --git a/third_party/abseil_cpp/absl/synchronization/internal/per_thread_sem.h b/third_party/abseil_cpp/absl/synchronization/internal/per_thread_sem.h
deleted file mode 100644
index 2228b6e8ea91..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/internal/per_thread_sem.h
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-// PerThreadSem is a low-level synchronization primitive controlling the
-// runnability of a single thread, used internally by Mutex and CondVar.
-//
-// This is NOT a general-purpose synchronization mechanism, and should not be
-// used directly by applications.  Applications should use Mutex and CondVar.
-//
-// The semantics of PerThreadSem are the same as that of a counting semaphore.
-// Each thread maintains an abstract "count" value associated with its identity.
-
-#ifndef ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
-#define ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
-
-#include <atomic>
-
-#include "absl/base/internal/thread_identity.h"
-#include "absl/synchronization/internal/create_thread_identity.h"
-#include "absl/synchronization/internal/kernel_timeout.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-class Mutex;
-
-namespace synchronization_internal {
-
-class PerThreadSem {
- public:
-  PerThreadSem() = delete;
-  PerThreadSem(const PerThreadSem&) = delete;
-  PerThreadSem& operator=(const PerThreadSem&) = delete;
-
-  // Routine invoked periodically (once a second) by a background thread.
-  // Has no effect on user-visible state.
-  static void Tick(base_internal::ThreadIdentity* identity);
-
-  // ---------------------------------------------------------------------------
-  // Routines used by autosizing threadpools to detect when threads are
-  // blocked.  Each thread has a counter pointer, initially zero.  If non-zero,
-  // the implementation atomically increments the counter when it blocks on a
-  // semaphore, and decrements it again when it wakes.  This allows a threadpool
-  // to keep track of how many of its threads are blocked.
-  // SetThreadBlockedCounter() should be used only by threadpool
-  // implementations.  GetThreadBlockedCounter() should be used by modules that
-  // block threads; if the pointer returned is non-zero, the location should be
-  // incremented before the thread blocks, and decremented after it wakes.
-  static void SetThreadBlockedCounter(std::atomic<int> *counter);
-  static std::atomic<int> *GetThreadBlockedCounter();
-
- private:
-  // Create the PerThreadSem associated with "identity".  Initializes count=0.
-  // REQUIRES: May only be called by ThreadIdentity.
-  static void Init(base_internal::ThreadIdentity* identity);
-
-  // Destroy the PerThreadSem associated with "identity".
-  // REQUIRES: May only be called by ThreadIdentity.
-  static void Destroy(base_internal::ThreadIdentity* identity);
-
-  // Increments "identity"'s count.
-  static inline void Post(base_internal::ThreadIdentity* identity);
-
-  // Waits until either our count > 0 or t has expired.
-  // If count > 0, decrements count and returns true.  Otherwise returns false.
-  // !t.has_timeout() => Wait(t) will return true.
-  static inline bool Wait(KernelTimeout t);
-
-  // Permitted callers.
-  friend class PerThreadSemTest;
-  friend class absl::Mutex;
-  friend absl::base_internal::ThreadIdentity* CreateThreadIdentity();
-  friend void ReclaimThreadIdentity(void* v);
-};
-
-}  // namespace synchronization_internal
-ABSL_NAMESPACE_END
-}  // namespace absl
-
-// In some build configurations we pass --detect-odr-violations to the
-// gold linker.  This causes it to flag weak symbol overrides as ODR
-// violations.  Because ODR only applies to C++ and not C,
-// --detect-odr-violations ignores symbols not mangled with C++ names.
-// By changing our extension points to be extern "C", we dodge this
-// check.
-extern "C" {
-void AbslInternalPerThreadSemPost(
-    absl::base_internal::ThreadIdentity* identity);
-bool AbslInternalPerThreadSemWait(
-    absl::synchronization_internal::KernelTimeout t);
-}  // extern "C"
-
-void absl::synchronization_internal::PerThreadSem::Post(
-    absl::base_internal::ThreadIdentity* identity) {
-  AbslInternalPerThreadSemPost(identity);
-}
-
-bool absl::synchronization_internal::PerThreadSem::Wait(
-    absl::synchronization_internal::KernelTimeout t) {
-  return AbslInternalPerThreadSemWait(t);
-}
-
-#endif  // ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
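
A sketch of the blocked-counter protocol described above, seen from the side
of a hypothetical autosizing pool (WorkerMain and pool_blocked are
illustrative names, not Abseil APIs):

    #include <atomic>

    #include "absl/synchronization/internal/per_thread_sem.h"

    // Shared by all workers of one pool; read by the pool's sizing logic.
    std::atomic<int> pool_blocked{0};

    void WorkerMain() {
      // Register once per worker thread; from then on, every block/wake in
      // PerThreadSem on this thread increments/decrements pool_blocked.
      absl::synchronization_internal::PerThreadSem::SetThreadBlockedCounter(
          &pool_blocked);
      // ... run tasks.  The pool can compare pool_blocked.load() against
      // its thread count to decide whether to spawn another worker.
    }
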
diff --git a/third_party/abseil_cpp/absl/synchronization/internal/per_thread_sem_test.cc b/third_party/abseil_cpp/absl/synchronization/internal/per_thread_sem_test.cc
deleted file mode 100644
index 8cf59e64e961..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/internal/per_thread_sem_test.cc
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/synchronization/internal/per_thread_sem.h"
-
-#include <atomic>
-#include <condition_variable>  // NOLINT(build/c++11)
-#include <functional>
-#include <limits>
-#include <mutex>               // NOLINT(build/c++11)
-#include <string>
-#include <thread>              // NOLINT(build/c++11)
-
-#include "gtest/gtest.h"
-#include "absl/base/config.h"
-#include "absl/base/internal/cycleclock.h"
-#include "absl/base/internal/thread_identity.h"
-#include "absl/strings/str_cat.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-
-// In this test we explicitly avoid the use of synchronization
-// primitives which might use PerThreadSem, most notably absl::Mutex.
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-class SimpleSemaphore {
- public:
-  SimpleSemaphore() : count_(0) {}
-
-  // Decrements (locks) the semaphore. If the semaphore's value is
-  // greater than zero, then the decrement proceeds and the function
-  // returns immediately. If the semaphore currently has the value
-  // zero, then the call blocks until it becomes possible to perform
-  // the decrement.
-  void Wait() {
-    std::unique_lock<std::mutex> lock(mu_);
-    cv_.wait(lock, [this]() { return count_ > 0; });
-    --count_;
-    cv_.notify_one();
-  }
-
-  // Increments (unlocks) the semaphore. If the semaphore's value
-  // consequently becomes greater than zero, then another thread
-  // blocked in a Wait() call will be woken up and proceed to lock the
-  // semaphore.
-  void Post() {
-    std::lock_guard<std::mutex> lock(mu_);
-    ++count_;
-    cv_.notify_one();
-  }
-
- private:
-  std::mutex mu_;
-  std::condition_variable cv_;
-  int count_;
-};
-
-struct ThreadData {
-  int num_iterations;                 // Number of replies to send.
-  SimpleSemaphore identity2_written;  // Posted by thread writing identity2.
-  base_internal::ThreadIdentity *identity1;  // First Post()-er.
-  base_internal::ThreadIdentity *identity2;  // First Wait()-er.
-  KernelTimeout timeout;
-};
-
-// Need friendship with PerThreadSem.
-class PerThreadSemTest : public testing::Test {
- public:
-  static void TimingThread(ThreadData* t) {
-    t->identity2 = GetOrCreateCurrentThreadIdentity();
-    t->identity2_written.Post();
-    while (t->num_iterations--) {
-      Wait(t->timeout);
-      Post(t->identity1);
-    }
-  }
-
-  void TestTiming(const char *msg, bool timeout) {
-    static const int kNumIterations = 100;
-    ThreadData t;
-    t.num_iterations = kNumIterations;
-    t.timeout = timeout ?
-        KernelTimeout(absl::Now() + absl::Seconds(10000))  // far in the future
-        : KernelTimeout::Never();
-    t.identity1 = GetOrCreateCurrentThreadIdentity();
-
-    // We can't use the Thread class here because it uses the Mutex
-    // class which will invoke PerThreadSem, so we use std::thread instead.
-    std::thread partner_thread(std::bind(TimingThread, &t));
-
-    // Wait for our partner thread to register their identity.
-    t.identity2_written.Wait();
-
-    int64_t min_cycles = std::numeric_limits<int64_t>::max();
-    int64_t total_cycles = 0;
-    for (int i = 0; i < kNumIterations; ++i) {
-      absl::SleepFor(absl::Milliseconds(20));
-      int64_t cycles = base_internal::CycleClock::Now();
-      Post(t.identity2);
-      Wait(t.timeout);
-      cycles = base_internal::CycleClock::Now() - cycles;
-      min_cycles = std::min(min_cycles, cycles);
-      total_cycles += cycles;
-    }
-    std::string out = StrCat(
-        msg, "min cycle count=", min_cycles, " avg cycle count=",
-        absl::SixDigits(static_cast<double>(total_cycles) / kNumIterations));
-    printf("%s\n", out.c_str());
-
-    partner_thread.join();
-  }
-
- protected:
-  static void Post(base_internal::ThreadIdentity *id) {
-    PerThreadSem::Post(id);
-  }
-  static bool Wait(KernelTimeout t) {
-    return PerThreadSem::Wait(t);
-  }
-
-  // convenience overload
-  static bool Wait(absl::Time t) {
-    return Wait(KernelTimeout(t));
-  }
-
-  static void Tick(base_internal::ThreadIdentity *identity) {
-    PerThreadSem::Tick(identity);
-  }
-};
-
-namespace {
-
-TEST_F(PerThreadSemTest, WithoutTimeout) {
-  PerThreadSemTest::TestTiming("Without timeout: ", false);
-}
-
-TEST_F(PerThreadSemTest, WithTimeout) {
-  PerThreadSemTest::TestTiming("With timeout:    ", true);
-}
-
-TEST_F(PerThreadSemTest, Timeouts) {
-  const absl::Duration delay = absl::Milliseconds(50);
-  const absl::Time start = absl::Now();
-  EXPECT_FALSE(Wait(start + delay));
-  const absl::Duration elapsed = absl::Now() - start;
-  // Allow for a slight early return, to account for quality of implementation
-  // issues on various platforms.
-  const absl::Duration slop = absl::Microseconds(200);
-  EXPECT_LE(delay - slop, elapsed)
-      << "Wait returned " << delay - elapsed
-      << " early (with " << slop << " slop), start time was " << start;
-
-  absl::Time negative_timeout = absl::UnixEpoch() - absl::Milliseconds(100);
-  EXPECT_FALSE(Wait(negative_timeout));
-  EXPECT_LE(negative_timeout, absl::Now() + slop);  // trivially true :)
-
-  Post(GetOrCreateCurrentThreadIdentity());
-  // The wait here has an expired timeout, but we have a wake to consume,
-  // so this should succeed
-  EXPECT_TRUE(Wait(negative_timeout));
-}
-
-}  // namespace
-
-}  // namespace synchronization_internal
-ABSL_NAMESPACE_END
-}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/synchronization/internal/thread_pool.h b/third_party/abseil_cpp/absl/synchronization/internal/thread_pool.h
deleted file mode 100644
index 0cb96dacde83..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/internal/thread_pool.h
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
-#define ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
-
-#include <cassert>
-#include <cstddef>
-#include <functional>
-#include <queue>
-#include <thread>  // NOLINT(build/c++11)
-#include <vector>
-
-#include "absl/base/thread_annotations.h"
-#include "absl/synchronization/mutex.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-// A simple ThreadPool implementation for tests.
-class ThreadPool {
- public:
-  explicit ThreadPool(int num_threads) {
-    for (int i = 0; i < num_threads; ++i) {
-      threads_.push_back(std::thread(&ThreadPool::WorkLoop, this));
-    }
-  }
-
-  ThreadPool(const ThreadPool &) = delete;
-  ThreadPool &operator=(const ThreadPool &) = delete;
-
-  ~ThreadPool() {
-    {
-      absl::MutexLock l(&mu_);
-      for (size_t i = 0; i < threads_.size(); i++) {
-        queue_.push(nullptr);  // Shutdown signal.
-      }
-    }
-    for (auto &t : threads_) {
-      t.join();
-    }
-  }
-
-  // Schedule a function to be run on a ThreadPool thread immediately.
-  void Schedule(std::function<void()> func) {
-    assert(func != nullptr);
-    absl::MutexLock l(&mu_);
-    queue_.push(std::move(func));
-  }
-
- private:
-  bool WorkAvailable() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
-    return !queue_.empty();
-  }
-
-  void WorkLoop() {
-    while (true) {
-      std::function<void()> func;
-      {
-        absl::MutexLock l(&mu_);
-        mu_.Await(absl::Condition(this, &ThreadPool::WorkAvailable));
-        func = std::move(queue_.front());
-        queue_.pop();
-      }
-      if (func == nullptr) {  // Shutdown signal.
-        break;
-      }
-      func();
-    }
-  }
-
-  absl::Mutex mu_;
-  std::queue<std::function<void()>> queue_ ABSL_GUARDED_BY(mu_);
-  std::vector<std::thread> threads_;
-};
-
-}  // namespace synchronization_internal
-ABSL_NAMESPACE_END
-}  // namespace absl
-
-#endif  // ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
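
Typical test-side usage of the pool above, paired with absl::BlockingCounter
to wait for completion (the fan-out of ten tasks and the FanOut name are
illustrative):

    #include "absl/synchronization/blocking_counter.h"
    #include "absl/synchronization/internal/thread_pool.h"

    void FanOut() {
      absl::synchronization_internal::ThreadPool pool(4);
      absl::BlockingCounter done(10);
      for (int i = 0; i < 10; ++i) {
        pool.Schedule([&done] { done.DecrementCount(); });
      }
      done.Wait();  // All ten tasks ran; ~ThreadPool() then joins workers.
    }
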
diff --git a/third_party/abseil_cpp/absl/synchronization/internal/waiter.cc b/third_party/abseil_cpp/absl/synchronization/internal/waiter.cc
deleted file mode 100644
index 2123be60f54f..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/internal/waiter.cc
+++ /dev/null
@@ -1,428 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/synchronization/internal/waiter.h"
-
-#include "absl/base/config.h"
-
-#ifdef _WIN32
-#include <windows.h>
-#else
-#include <pthread.h>
-#include <sys/time.h>
-#include <unistd.h>
-#endif
-
-#ifdef __linux__
-#include <linux/futex.h>
-#include <sys/syscall.h>
-#endif
-
-#ifdef ABSL_HAVE_SEMAPHORE_H
-#include <semaphore.h>
-#endif
-
-#include <errno.h>
-#include <stdio.h>
-#include <time.h>
-
-#include <atomic>
-#include <cassert>
-#include <cstdint>
-#include <new>
-#include <type_traits>
-
-#include "absl/base/internal/raw_logging.h"
-#include "absl/base/internal/thread_identity.h"
-#include "absl/base/optimization.h"
-#include "absl/synchronization/internal/kernel_timeout.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-static void MaybeBecomeIdle() {
-  base_internal::ThreadIdentity *identity =
-      base_internal::CurrentThreadIdentityIfPresent();
-  assert(identity != nullptr);
-  const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
-  const int ticker = identity->ticker.load(std::memory_order_relaxed);
-  const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
-  if (!is_idle && ticker - wait_start > Waiter::kIdlePeriods) {
-    identity->is_idle.store(true, std::memory_order_relaxed);
-  }
-}
-
-#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
-
-Waiter::Waiter() {
-  futex_.store(0, std::memory_order_relaxed);
-}
-
-Waiter::~Waiter() = default;
-
-bool Waiter::Wait(KernelTimeout t) {
-  // Loop until we can atomically decrement futex from a positive
-  // value, waiting on a futex while we believe it is zero.
-  // Note that, since the thread ticker is just reset, we don't need to check
-  // whether the thread is idle on the very first pass of the loop.
-  bool first_pass = true;
-  while (true) {
-    int32_t x = futex_.load(std::memory_order_relaxed);
-    while (x != 0) {
-      if (!futex_.compare_exchange_weak(x, x - 1,
-                                        std::memory_order_acquire,
-                                        std::memory_order_relaxed)) {
-        continue;  // Raced with someone, retry.
-      }
-      return true;  // Consumed a wakeup, we are done.
-    }
-
-    if (!first_pass) MaybeBecomeIdle();
-    const int err = Futex::WaitUntil(&futex_, 0, t);
-    if (err != 0) {
-      if (err == -EINTR || err == -EWOULDBLOCK) {
-        // Do nothing, the loop will retry.
-      } else if (err == -ETIMEDOUT) {
-        return false;
-      } else {
-        ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
-      }
-    }
-    first_pass = false;
-  }
-}
-
-void Waiter::Post() {
-  if (futex_.fetch_add(1, std::memory_order_release) == 0) {
-    // We incremented from 0, need to wake a potential waiter.
-    Poke();
-  }
-}
-
-void Waiter::Poke() {
-  // Wake one thread waiting on the futex.
-  const int err = Futex::Wake(&futex_, 1);
-  if (ABSL_PREDICT_FALSE(err < 0)) {
-    ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
-  }
-}
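-
-// Worked example of the protocol above (illustrative comment, not from the
-// original source): two Post() calls with no waiter leave futex_ == 2, so
-// the next two Wait() calls decrement it and return without entering the
-// kernel; a Wait() against futex_ == 0 blocks in Futex::WaitUntil() until
-// Post() increments from 0 and calls Poke().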
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
-
-class PthreadMutexHolder {
- public:
-  explicit PthreadMutexHolder(pthread_mutex_t *mu) : mu_(mu) {
-    const int err = pthread_mutex_lock(mu_);
-    if (err != 0) {
-      ABSL_RAW_LOG(FATAL, "pthread_mutex_lock failed: %d", err);
-    }
-  }
-
-  PthreadMutexHolder(const PthreadMutexHolder &rhs) = delete;
-  PthreadMutexHolder &operator=(const PthreadMutexHolder &rhs) = delete;
-
-  ~PthreadMutexHolder() {
-    const int err = pthread_mutex_unlock(mu_);
-    if (err != 0) {
-      ABSL_RAW_LOG(FATAL, "pthread_mutex_unlock failed: %d", err);
-    }
-  }
-
- private:
-  pthread_mutex_t *mu_;
-};
-
-Waiter::Waiter() {
-  const int err = pthread_mutex_init(&mu_, 0);
-  if (err != 0) {
-    ABSL_RAW_LOG(FATAL, "pthread_mutex_init failed: %d", err);
-  }
-
-  const int err2 = pthread_cond_init(&cv_, 0);
-  if (err2 != 0) {
-    ABSL_RAW_LOG(FATAL, "pthread_cond_init failed: %d", err2);
-  }
-
-  waiter_count_ = 0;
-  wakeup_count_ = 0;
-}
-
-Waiter::~Waiter() {
-  const int err = pthread_mutex_destroy(&mu_);
-  if (err != 0) {
-    ABSL_RAW_LOG(FATAL, "pthread_mutex_destroy failed: %d", err);
-  }
-
-  const int err2 = pthread_cond_destroy(&cv_);
-  if (err2 != 0) {
-    ABSL_RAW_LOG(FATAL, "pthread_cond_destroy failed: %d", err2);
-  }
-}
-
-bool Waiter::Wait(KernelTimeout t) {
-  struct timespec abs_timeout;
-  if (t.has_timeout()) {
-    abs_timeout = t.MakeAbsTimespec();
-  }
-
-  PthreadMutexHolder h(&mu_);
-  ++waiter_count_;
-  // Loop until we find a wakeup to consume or timeout.
-  // Note that, since the thread ticker is just reset, we don't need to check
-  // whether the thread is idle on the very first pass of the loop.
-  bool first_pass = true;
-  while (wakeup_count_ == 0) {
-    if (!first_pass) MaybeBecomeIdle();
-    // No wakeups available, time to wait.
-    if (!t.has_timeout()) {
-      const int err = pthread_cond_wait(&cv_, &mu_);
-      if (err != 0) {
-        ABSL_RAW_LOG(FATAL, "pthread_cond_wait failed: %d", err);
-      }
-    } else {
-      const int err = pthread_cond_timedwait(&cv_, &mu_, &abs_timeout);
-      if (err == ETIMEDOUT) {
-        --waiter_count_;
-        return false;
-      }
-      if (err != 0) {
-        ABSL_RAW_LOG(FATAL, "pthread_cond_timedwait failed: %d", err);
-      }
-    }
-    first_pass = false;
-  }
-  // Consume a wakeup and we're done.
-  --wakeup_count_;
-  --waiter_count_;
-  return true;
-}
-
-void Waiter::Post() {
-  PthreadMutexHolder h(&mu_);
-  ++wakeup_count_;
-  InternalCondVarPoke();
-}
-
-void Waiter::Poke() {
-  PthreadMutexHolder h(&mu_);
-  InternalCondVarPoke();
-}
-
-void Waiter::InternalCondVarPoke() {
-  if (waiter_count_ != 0) {
-    const int err = pthread_cond_signal(&cv_);
-    if (ABSL_PREDICT_FALSE(err != 0)) {
-      ABSL_RAW_LOG(FATAL, "pthread_cond_signal failed: %d", err);
-    }
-  }
-}
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
-
-Waiter::Waiter() {
-  if (sem_init(&sem_, 0, 0) != 0) {
-    ABSL_RAW_LOG(FATAL, "sem_init failed with errno %d\n", errno);
-  }
-  wakeups_.store(0, std::memory_order_relaxed);
-}
-
-Waiter::~Waiter() {
-  if (sem_destroy(&sem_) != 0) {
-    ABSL_RAW_LOG(FATAL, "sem_destroy failed with errno %d\n", errno);
-  }
-}
-
-bool Waiter::Wait(KernelTimeout t) {
-  struct timespec abs_timeout;
-  if (t.has_timeout()) {
-    abs_timeout = t.MakeAbsTimespec();
-  }
-
-  // Loop until we timeout or consume a wakeup.
-  // Note that, since the thread ticker is just reset, we don't need to check
-  // whether the thread is idle on the very first pass of the loop.
-  bool first_pass = true;
-  while (true) {
-    int x = wakeups_.load(std::memory_order_relaxed);
-    while (x != 0) {
-      if (!wakeups_.compare_exchange_weak(x, x - 1,
-                                          std::memory_order_acquire,
-                                          std::memory_order_relaxed)) {
-        continue;  // Raced with someone, retry.
-      }
-      // Successfully consumed a wakeup, we're done.
-      return true;
-    }
-
-    if (!first_pass) MaybeBecomeIdle();
-    // Nothing to consume, wait (looping on EINTR).
-    while (true) {
-      if (!t.has_timeout()) {
-        if (sem_wait(&sem_) == 0) break;
-        if (errno == EINTR) continue;
-        ABSL_RAW_LOG(FATAL, "sem_wait failed: %d", errno);
-      } else {
-        if (sem_timedwait(&sem_, &abs_timeout) == 0) break;
-        if (errno == EINTR) continue;
-        if (errno == ETIMEDOUT) return false;
-        ABSL_RAW_LOG(FATAL, "sem_timedwait failed: %d", errno);
-      }
-    }
-    first_pass = false;
-  }
-}
-
-void Waiter::Post() {
-  // Post a wakeup.
-  if (wakeups_.fetch_add(1, std::memory_order_release) == 0) {
-    // We incremented from 0, need to wake a potential waiter.
-    Poke();
-  }
-}
-
-void Waiter::Poke() {
-  if (sem_post(&sem_) != 0) {  // Wake any semaphore waiter.
-    ABSL_RAW_LOG(FATAL, "sem_post failed with errno %d\n", errno);
-  }
-}
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
-
-class Waiter::WinHelper {
- public:
-  static SRWLOCK *GetLock(Waiter *w) {
-    return reinterpret_cast<SRWLOCK *>(&w->mu_storage_);
-  }
-
-  static CONDITION_VARIABLE *GetCond(Waiter *w) {
-    return reinterpret_cast<CONDITION_VARIABLE *>(&w->cv_storage_);
-  }
-
-  static_assert(sizeof(SRWLOCK) == sizeof(void *),
-                "`mu_storage_` does not have the same size as SRWLOCK");
-  static_assert(alignof(SRWLOCK) == alignof(void *),
-                "`mu_storage_` does not have the same alignment as SRWLOCK");
-
-  static_assert(sizeof(CONDITION_VARIABLE) == sizeof(void *),
-                "`cv_storage_` does not have the same size as "
-                "`CONDITION_VARIABLE`");
-  static_assert(
-      alignof(CONDITION_VARIABLE) == alignof(void *),
-      "`cv_storage_` does not have the same alignment as `CONDITION_VARIABLE`");
-
-  // The SRWLOCK and CONDITION_VARIABLE types must be trivially constructible
-  // and destructible because we never call their constructors or destructors.
-  static_assert(std::is_trivially_constructible<SRWLOCK>::value,
-                "The `SRWLOCK` type must be trivially constructible");
-  static_assert(
-      std::is_trivially_constructible<CONDITION_VARIABLE>::value,
-      "The `CONDITION_VARIABLE` type must be trivially constructible");
-  static_assert(std::is_trivially_destructible<SRWLOCK>::value,
-                "The `SRWLOCK` type must be trivially destructible");
-  static_assert(std::is_trivially_destructible<CONDITION_VARIABLE>::value,
-                "The `CONDITION_VARIABLE` type must be trivially destructible");
-};
-
-class LockHolder {
- public:
-  explicit LockHolder(SRWLOCK* mu) : mu_(mu) {
-    AcquireSRWLockExclusive(mu_);
-  }
-
-  LockHolder(const LockHolder&) = delete;
-  LockHolder& operator=(const LockHolder&) = delete;
-
-  ~LockHolder() {
-    ReleaseSRWLockExclusive(mu_);
-  }
-
- private:
-  SRWLOCK* mu_;
-};
-
-Waiter::Waiter() {
-  auto *mu = ::new (static_cast<void *>(&mu_storage_)) SRWLOCK;
-  auto *cv = ::new (static_cast<void *>(&cv_storage_)) CONDITION_VARIABLE;
-  InitializeSRWLock(mu);
-  InitializeConditionVariable(cv);
-  waiter_count_ = 0;
-  wakeup_count_ = 0;
-}
-
-// SRW locks and condition variables do not need to be explicitly destroyed.
-// https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock
-// https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with
-Waiter::~Waiter() = default;
-
-bool Waiter::Wait(KernelTimeout t) {
-  SRWLOCK *mu = WinHelper::GetLock(this);
-  CONDITION_VARIABLE *cv = WinHelper::GetCond(this);
-
-  LockHolder h(mu);
-  ++waiter_count_;
-
-  // Loop until we find a wakeup to consume or timeout.
-  // Note that, since the thread ticker is just reset, we don't need to check
-  // whether the thread is idle on the very first pass of the loop.
-  bool first_pass = true;
-  while (wakeup_count_ == 0) {
-    if (!first_pass) MaybeBecomeIdle();
-    // No wakeups available, time to wait.
-    if (!SleepConditionVariableSRW(cv, mu, t.InMillisecondsFromNow(), 0)) {
-      // GetLastError() returns a Win32 DWORD, but we assign to
-      // unsigned long to simplify the ABSL_RAW_LOG case below.  The uniform
-      // initialization guarantees this is not a narrowing conversion.
-      const unsigned long err{GetLastError()};  // NOLINT(runtime/int)
-      if (err == ERROR_TIMEOUT) {
-        --waiter_count_;
-        return false;
-      } else {
-        ABSL_RAW_LOG(FATAL, "SleepConditionVariableSRW failed: %lu", err);
-      }
-    }
-    first_pass = false;
-  }
-  // Consume a wakeup and we're done.
-  --wakeup_count_;
-  --waiter_count_;
-  return true;
-}
-
-void Waiter::Post() {
-  LockHolder h(WinHelper::GetLock(this));
-  ++wakeup_count_;
-  InternalCondVarPoke();
-}
-
-void Waiter::Poke() {
-  LockHolder h(WinHelper::GetLock(this));
-  InternalCondVarPoke();
-}
-
-void Waiter::InternalCondVarPoke() {
-  if (waiter_count_ != 0) {
-    WakeConditionVariable(WinHelper::GetCond(this));
-  }
-}
-
-#else
-#error Unknown ABSL_WAITER_MODE
-#endif
-
-}  // namespace synchronization_internal
-ABSL_NAMESPACE_END
-}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/synchronization/internal/waiter.h b/third_party/abseil_cpp/absl/synchronization/internal/waiter.h
deleted file mode 100644
index be3df180d4e2..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/internal/waiter.h
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
-#define ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
-
-#include "absl/base/config.h"
-
-#ifdef _WIN32
-#include <sdkddkver.h>
-#else
-#include <pthread.h>
-#endif
-
-#ifdef __linux__
-#include <linux/futex.h>
-#endif
-
-#ifdef ABSL_HAVE_SEMAPHORE_H
-#include <semaphore.h>
-#endif
-
-#include <atomic>
-#include <cstdint>
-
-#include "absl/base/internal/thread_identity.h"
-#include "absl/synchronization/internal/futex.h"
-#include "absl/synchronization/internal/kernel_timeout.h"
-
-// May be chosen at compile time via -DABSL_FORCE_WAITER_MODE=<index>
-#define ABSL_WAITER_MODE_FUTEX 0
-#define ABSL_WAITER_MODE_SEM 1
-#define ABSL_WAITER_MODE_CONDVAR 2
-#define ABSL_WAITER_MODE_WIN32 3
-
-#if defined(ABSL_FORCE_WAITER_MODE)
-#define ABSL_WAITER_MODE ABSL_FORCE_WAITER_MODE
-#elif defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
-#define ABSL_WAITER_MODE ABSL_WAITER_MODE_WIN32
-#elif defined(ABSL_INTERNAL_HAVE_FUTEX)
-#define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX
-#elif defined(ABSL_HAVE_SEMAPHORE_H)
-#define ABSL_WAITER_MODE ABSL_WAITER_MODE_SEM
-#else
-#define ABSL_WAITER_MODE ABSL_WAITER_MODE_CONDVAR
-#endif
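-
-// For example (illustrative, not in the original header), a build can force
-// the condvar-based waiter with -DABSL_FORCE_WAITER_MODE=2, the index of
-// ABSL_WAITER_MODE_CONDVAR above.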
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-// Waiter is an OS-specific semaphore.
-class Waiter {
- public:
-  // Prepare any data to track waits.
-  Waiter();
-
-  // Not copyable or movable
-  Waiter(const Waiter&) = delete;
-  Waiter& operator=(const Waiter&) = delete;
-
-  // Destroy any data to track waits.
-  ~Waiter();
-
-  // Blocks the calling thread until a matching call to `Post()` occurs
-  // or `t` has passed. Returns `true` if woken (`Post()` called),
-  // `false` on timeout.
-  bool Wait(KernelTimeout t);
-
-  // Restart the caller of `Wait()` as with a normal semaphore.
-  void Post();
-
-  // If anyone is waiting, wake them up temporarily and cause them to
-  // call `MaybeBecomeIdle()`. They will then return to waiting for a
-  // `Post()` or timeout.
-  void Poke();
-
-  // Returns the Waiter associated with the identity.
-  static Waiter* GetWaiter(base_internal::ThreadIdentity* identity) {
-    static_assert(
-        sizeof(Waiter) <= sizeof(base_internal::ThreadIdentity::WaiterState),
-        "Insufficient space for Waiter");
-    return reinterpret_cast<Waiter*>(identity->waiter_state.data);
-  }
-
-  // How many periods to remain idle before releasing resources
-#ifndef ABSL_HAVE_THREAD_SANITIZER
-  static constexpr int kIdlePeriods = 60;
-#else
-  // Memory consumption under ThreadSanitizer is a serious concern,
-  // so we release resources sooner. The value of 1 leads to 1 to 2 second
-  // delay before marking a thread as idle.
-  static const int kIdlePeriods = 1;
-#endif
-
- private:
-#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
-  // Futexes are defined by specification to be 32-bits.
-  // Thus std::atomic<int32_t> must be just an int32_t with lockfree methods.
-  std::atomic<int32_t> futex_;
-  static_assert(sizeof(int32_t) == sizeof(futex_), "Wrong size for futex");
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
-  // REQUIRES: mu_ must be held.
-  void InternalCondVarPoke();
-
-  pthread_mutex_t mu_;
-  pthread_cond_t cv_;
-  int waiter_count_;
-  int wakeup_count_;  // Unclaimed wakeups.
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
-  sem_t sem_;
-  // This seems superfluous, but for Poke() we need to cause spurious
-  // wakeups on the semaphore. Hence we can't actually use the
-  // semaphore's count.
-  std::atomic<int> wakeups_;
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
-  // WinHelper - Used to define utilities for accessing the lock and
-  // condition variable storage once the types are complete.
-  class WinHelper;
-
-  // REQUIRES: WinHelper::GetLock(this) must be held.
-  void InternalCondVarPoke();
-
-  // We can't include Windows.h in our headers, so we use aligned character
-  // buffers to define the storage of SRWLOCK and CONDITION_VARIABLE.
-  alignas(void*) unsigned char mu_storage_[sizeof(void*)];
-  alignas(void*) unsigned char cv_storage_[sizeof(void*)];
-  int waiter_count_;
-  int wakeup_count_;
-
-#else
-  #error Unknown ABSL_WAITER_MODE
-#endif
-};
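-
-// Usage sketch (illustrative only; PerThreadSem is the real client of this
-// class):
-//
-//   base_internal::ThreadIdentity *identity = ...;  // assumed available
-//   Waiter *w = Waiter::GetWaiter(identity);
-//   w->Post();                                     // bank one wakeup
-//   bool woken = w->Wait(KernelTimeout::Never());  // consumes it; true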
-
-}  // namespace synchronization_internal
-ABSL_NAMESPACE_END
-}  // namespace absl
-
-#endif  // ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
diff --git a/third_party/abseil_cpp/absl/synchronization/lifetime_test.cc b/third_party/abseil_cpp/absl/synchronization/lifetime_test.cc
deleted file mode 100644
index cc973a329071..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/lifetime_test.cc
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <cstdlib>
-#include <thread>  // NOLINT(build/c++11), Abseil test
-#include <type_traits>
-
-#include "absl/base/attributes.h"
-#include "absl/base/const_init.h"
-#include "absl/base/internal/raw_logging.h"
-#include "absl/base/thread_annotations.h"
-#include "absl/synchronization/mutex.h"
-#include "absl/synchronization/notification.h"
-
-namespace {
-
-// A two-threaded test which checks that Mutex, CondVar, and Notification have
-// correct basic functionality.  The intent is to establish that they
-// function correctly in various phases of construction and destruction.
-//
-// Thread one acquires a lock on 'mutex', wakes thread two via 'notification',
-// then waits for 'state' to be set, as signalled by 'condvar'.
-//
-// Thread two waits on 'notification', then sets 'state' inside the 'mutex',
-// signalling the change via 'condvar'.
-//
-// These tests use ABSL_RAW_CHECK to validate invariants, rather than EXPECT or
-// ASSERT from gUnit, because we need to invoke them during global destructors,
-// when gUnit teardown would have already begun.
-void ThreadOne(absl::Mutex* mutex, absl::CondVar* condvar,
-               absl::Notification* notification, bool* state) {
-  // Test that the notification is in a valid initial state.
-  ABSL_RAW_CHECK(!notification->HasBeenNotified(), "invalid Notification");
-  ABSL_RAW_CHECK(*state == false, "*state not initialized");
-
-  {
-    absl::MutexLock lock(mutex);
-
-    notification->Notify();
-    ABSL_RAW_CHECK(notification->HasBeenNotified(), "invalid Notification");
-
-    while (*state == false) {
-      condvar->Wait(mutex);
-    }
-  }
-}
-
-void ThreadTwo(absl::Mutex* mutex, absl::CondVar* condvar,
-               absl::Notification* notification, bool* state) {
-  ABSL_RAW_CHECK(*state == false, "*state not initialized");
-
-  // Wait until thread one notifies us.
-  notification->WaitForNotification();
-  ABSL_RAW_CHECK(notification->HasBeenNotified(), "invalid Notification");
-  {
-    absl::MutexLock lock(mutex);
-    *state = true;
-    condvar->Signal();
-  }
-}
-
-// Launch thread 1 and thread 2, and block on their completion.
-// If 'mutex' or 'condvar' is nullptr, use a locally constructed instance
-// instead.  ('notification' is always a local instance.)
-void RunTests(absl::Mutex* mutex, absl::CondVar* condvar) {
-  absl::Mutex default_mutex;
-  absl::CondVar default_condvar;
-  absl::Notification notification;
-  if (!mutex) {
-    mutex = &default_mutex;
-  }
-  if (!condvar) {
-    condvar = &default_condvar;
-  }
-  bool state = false;
-  std::thread thread_one(ThreadOne, mutex, condvar, &notification, &state);
-  std::thread thread_two(ThreadTwo, mutex, condvar, &notification, &state);
-  thread_one.join();
-  thread_two.join();
-}
-
-void TestLocals() {
-  absl::Mutex mutex;
-  absl::CondVar condvar;
-  RunTests(&mutex, &condvar);
-}
-
-// Normal kConstInit usage
-ABSL_CONST_INIT absl::Mutex const_init_mutex(absl::kConstInit);
-void TestConstInitGlobal() { RunTests(&const_init_mutex, nullptr); }
-
-// Global variables during start and termination
-//
-// In a translation unit, static storage duration variables are initialized in
-// the order of their definitions, and destroyed in the reverse order of their
-// definitions.  We can use this to arrange for tests to be run on these objects
-// before they are created, and after they are destroyed.
-
-using Function = void (*)();
-
-class OnConstruction {
- public:
-  explicit OnConstruction(Function fn) { fn(); }
-};
-
-class OnDestruction {
- public:
-  explicit OnDestruction(Function fn) : fn_(fn) {}
-  ~OnDestruction() { fn_(); }
- private:
-  Function fn_;
-};
-
-// These tests require that the compiler correctly supports C++11 constant
-// initialization... but MSVC has a known regression since v19.10:
-// https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html
-// TODO(epastor): Limit the affected range once MSVC fixes this bug.
-#if defined(__clang__) || !(defined(_MSC_VER) && _MSC_VER > 1900)
-// kConstInit
-// Test early usage.  (Declaration comes first; definitions must appear after
-// the test runner.)
-extern absl::Mutex early_const_init_mutex;
-// (Normally I'd write this +[], to make the cast-to-function-pointer explicit,
-// but in some MSVC setups we support, lambdas provide conversion operators to
-// different flavors of function pointers, making this trick ambiguous.)
-OnConstruction test_early_const_init([] {
-  RunTests(&early_const_init_mutex, nullptr);
-});
-// This definition appears before test_early_const_init, but it should be
-// initialized first (due to constant initialization).  Test that the object
-// actually works when constructed this way.
-ABSL_CONST_INIT absl::Mutex early_const_init_mutex(absl::kConstInit);
-
-// Furthermore, test that the const-init c'tor doesn't stomp over the state of
-// a Mutex.  Really, this is a test that the platform under test correctly
-// supports C++11 constant initialization.  (The constant-initialization
-// constructors of globals "happen at link time"; memory is pre-initialized,
-// before the constructors of either grab_lock or check_still_locked are run.)
-extern absl::Mutex const_init_sanity_mutex;
-OnConstruction grab_lock([]() ABSL_NO_THREAD_SAFETY_ANALYSIS {
-  const_init_sanity_mutex.Lock();
-});
-ABSL_CONST_INIT absl::Mutex const_init_sanity_mutex(absl::kConstInit);
-OnConstruction check_still_locked([]() ABSL_NO_THREAD_SAFETY_ANALYSIS {
-  const_init_sanity_mutex.AssertHeld();
-  const_init_sanity_mutex.Unlock();
-});
-#endif  // defined(__clang__) || !(defined(_MSC_VER) && _MSC_VER > 1900)
-
-// Test shutdown usage.  (Declarations come first; definitions must appear after
-// the test runner.)
-extern absl::Mutex late_const_init_mutex;
-// OnDestruction is being used here as a global variable, even though it has a
-// non-trivial destructor.  This is against the style guide.  We're violating
-// that rule here to check that the exception we allow for kConstInit is safe.
-// NOLINTNEXTLINE
-OnDestruction test_late_const_init([] {
-  RunTests(&late_const_init_mutex, nullptr);
-});
-ABSL_CONST_INIT absl::Mutex late_const_init_mutex(absl::kConstInit);
-
-}  // namespace
-
-int main() {
-  TestLocals();
-  TestConstInitGlobal();
-  // Explicitly call exit(0) here, to make it clear that we intend for the
-  // above global object destructors to run.
-  std::exit(0);
-}
diff --git a/third_party/abseil_cpp/absl/synchronization/mutex.cc b/third_party/abseil_cpp/absl/synchronization/mutex.cc
deleted file mode 100644
index 9e01393ca4df..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/mutex.cc
+++ /dev/null
@@ -1,2740 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/synchronization/mutex.h"
-
-#ifdef _WIN32
-#include <windows.h>
-#ifdef ERROR
-#undef ERROR
-#endif
-#else
-#include <fcntl.h>
-#include <pthread.h>
-#include <sched.h>
-#include <sys/time.h>
-#endif
-
-#include <assert.h>
-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <time.h>
-
-#include <algorithm>
-#include <atomic>
-#include <cinttypes>
-#include <thread>  // NOLINT(build/c++11)
-
-#include "absl/base/attributes.h"
-#include "absl/base/call_once.h"
-#include "absl/base/config.h"
-#include "absl/base/dynamic_annotations.h"
-#include "absl/base/internal/atomic_hook.h"
-#include "absl/base/internal/cycleclock.h"
-#include "absl/base/internal/hide_ptr.h"
-#include "absl/base/internal/low_level_alloc.h"
-#include "absl/base/internal/raw_logging.h"
-#include "absl/base/internal/spinlock.h"
-#include "absl/base/internal/sysinfo.h"
-#include "absl/base/internal/thread_identity.h"
-#include "absl/base/internal/tsan_mutex_interface.h"
-#include "absl/base/port.h"
-#include "absl/debugging/stacktrace.h"
-#include "absl/debugging/symbolize.h"
-#include "absl/synchronization/internal/graphcycles.h"
-#include "absl/synchronization/internal/per_thread_sem.h"
-#include "absl/time/time.h"
-
-using absl::base_internal::CurrentThreadIdentityIfPresent;
-using absl::base_internal::PerThreadSynch;
-using absl::base_internal::SchedulingGuard;
-using absl::base_internal::ThreadIdentity;
-using absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
-using absl::synchronization_internal::GraphCycles;
-using absl::synchronization_internal::GraphId;
-using absl::synchronization_internal::InvalidGraphId;
-using absl::synchronization_internal::KernelTimeout;
-using absl::synchronization_internal::PerThreadSem;
-
-extern "C" {
-ABSL_ATTRIBUTE_WEAK void AbslInternalMutexYield() { std::this_thread::yield(); }
-}  // extern "C"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-namespace {
-
-#if defined(ABSL_HAVE_THREAD_SANITIZER)
-constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore;
-#else
-constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort;
-#endif
-
-ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
-    kDeadlockDetectionDefault);
-ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
-
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
-absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
-    submit_profile_data;
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(
-    const char *msg, const void *obj, int64_t wait_cycles)>
-    mutex_tracer;
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
-    absl::base_internal::AtomicHook<void (*)(const char *msg, const void *cv)>
-        cond_var_tracer;
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<
-    bool (*)(const void *pc, char *out, int out_size)>
-    symbolizer(absl::Symbolize);
-
-}  // namespace
-
-static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
-                                          bool locking, bool trylock,
-                                          bool read_lock);
-
-void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) {
-  submit_profile_data.Store(fn);
-}
-
-void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
-                                    int64_t wait_cycles)) {
-  mutex_tracer.Store(fn);
-}
-
-void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) {
-  cond_var_tracer.Store(fn);
-}
-
-void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
-  symbolizer.Store(fn);
-}
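-
-// For instance (an illustrative sketch, not part of the original file), a
-// client could register a tracer that logs every contention event:
-//
-//   void MyTracer(const char *msg, const void *obj, int64_t wait_cycles) {
-//     ABSL_RAW_LOG(INFO, "%s %p %lld", msg, obj,
-//                  static_cast<long long>(wait_cycles));
-//   }
-//   ...
-//   absl::RegisterMutexTracer(&MyTracer);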
-
-struct ABSL_CACHELINE_ALIGNED MutexGlobals {
-  absl::once_flag once;
-  int num_cpus = 0;
-  int spinloop_iterations = 0;
-};
-
-static const MutexGlobals& GetMutexGlobals() {
-  ABSL_CONST_INIT static MutexGlobals data;
-  absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
-    data.num_cpus = absl::base_internal::NumCPUs();
-    data.spinloop_iterations = data.num_cpus > 1 ? 1500 : 0;
-  });
-  return data;
-}
-
-// Spinlock delay on iteration c.  Returns new c.
-namespace {
-enum DelayMode { AGGRESSIVE, GENTLE };
-}  // namespace
-
-namespace synchronization_internal {
-int MutexDelay(int32_t c, int mode) {
-  // If this is a uniprocessor, only yield/sleep.  Otherwise, if the mode is
-  // aggressive then spin many times before yielding.  If the mode is
-  // gentle then spin only a few times before yielding.  Aggressive spinning
-  // is used to ensure that an Unlock() call, which must get the spin lock
-  // for any thread to make progress, gets it without undue delay.
-  const int32_t limit =
-      GetMutexGlobals().num_cpus > 1 ? (mode == AGGRESSIVE ? 5000 : 250) : 0;
-  if (c < limit) {
-    // Spin.
-    c++;
-  } else {
-    SchedulingGuard::ScopedEnable enable_rescheduling;
-    ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
-    if (c == limit) {
-      // Yield once.
-      AbslInternalMutexYield();
-      c++;
-    } else {
-      // Then wait.
-      absl::SleepFor(absl::Microseconds(10));
-      c = 0;
-    }
-    ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
-  }
-  return c;
-}
-}  // namespace synchronization_internal
-
-// --------------------------Generic atomic ops
-// Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
-// "*pv | bits" if necessary.  Wait until (*pv & wait_until_clear)==0
-// before making any change.
-// This is used to set flags in mutex and condition variable words.
-static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
-                          intptr_t wait_until_clear) {
-  intptr_t v;
-  do {
-    v = pv->load(std::memory_order_relaxed);
-  } while ((v & bits) != bits &&
-           ((v & wait_until_clear) != 0 ||
-            !pv->compare_exchange_weak(v, v | bits,
-                                       std::memory_order_release,
-                                       std::memory_order_relaxed)));
-}
-
-// Ensure that "(*pv & bits) == 0" by doing an atomic update of "*pv" to
-// "*pv & ~bits" if necessary.  Wait until (*pv & wait_until_clear)==0
-// before making any change.
-// This is used to unset flags in mutex and condition variable words.
-static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
-                            intptr_t wait_until_clear) {
-  intptr_t v;
-  do {
-    v = pv->load(std::memory_order_relaxed);
-  } while ((v & bits) != 0 &&
-           ((v & wait_until_clear) != 0 ||
-            !pv->compare_exchange_weak(v, v & ~bits,
-                                       std::memory_order_release,
-                                       std::memory_order_relaxed)));
-}
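-
-// For example, for a Mutex the calls below take the form
-// AtomicSetBits(addr, kMuEvent, kMuSpin): the kMuEvent flag is set only
-// while the spinlock bit kMuSpin is observed clear.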
-
-//------------------------------------------------------------------
-
-// Data for doing deadlock detection.
-ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu(
-    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
-
-// Graph used to detect deadlocks.
-ABSL_CONST_INIT static GraphCycles *deadlock_graph
-    ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
-
-//------------------------------------------------------------------
-// An event mechanism for debugging mutex use.
-// It also allows mutexes to be given names for those who can't handle
-// addresses, and instead like to give their data structures names like
-// "Henry", "Fido", or "Rupert IV, King of Yondavia".
-
-namespace {  // to prevent name pollution
-enum {       // Mutex and CondVar events passed as "ev" to PostSynchEvent
-             // Mutex events
-  SYNCH_EV_TRYLOCK_SUCCESS,
-  SYNCH_EV_TRYLOCK_FAILED,
-  SYNCH_EV_READERTRYLOCK_SUCCESS,
-  SYNCH_EV_READERTRYLOCK_FAILED,
-  SYNCH_EV_LOCK,
-  SYNCH_EV_LOCK_RETURNING,
-  SYNCH_EV_READERLOCK,
-  SYNCH_EV_READERLOCK_RETURNING,
-  SYNCH_EV_UNLOCK,
-  SYNCH_EV_READERUNLOCK,
-
-  // CondVar events
-  SYNCH_EV_WAIT,
-  SYNCH_EV_WAIT_RETURNING,
-  SYNCH_EV_SIGNAL,
-  SYNCH_EV_SIGNALALL,
-};
-
-enum {                    // Event flags
-  SYNCH_F_R = 0x01,       // reader event
-  SYNCH_F_LCK = 0x02,     // PostSynchEvent called with mutex held
-  SYNCH_F_TRY = 0x04,     // TryLock or ReaderTryLock
-  SYNCH_F_UNLOCK = 0x08,  // Unlock or ReaderUnlock
-
-  SYNCH_F_LCK_W = SYNCH_F_LCK,
-  SYNCH_F_LCK_R = SYNCH_F_LCK | SYNCH_F_R,
-};
-}  // anonymous namespace
-
-// Properties of the events.
-static const struct {
-  int flags;
-  const char *msg;
-} event_properties[] = {
-    {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
-    {0, "TryLock failed "},
-    {SYNCH_F_LCK_R | SYNCH_F_TRY, "ReaderTryLock succeeded "},
-    {0, "ReaderTryLock failed "},
-    {0, "Lock blocking "},
-    {SYNCH_F_LCK_W, "Lock returning "},
-    {0, "ReaderLock blocking "},
-    {SYNCH_F_LCK_R, "ReaderLock returning "},
-    {SYNCH_F_LCK_W | SYNCH_F_UNLOCK, "Unlock "},
-    {SYNCH_F_LCK_R | SYNCH_F_UNLOCK, "ReaderUnlock "},
-    {0, "Wait on "},
-    {0, "Wait unblocked "},
-    {0, "Signal on "},
-    {0, "SignalAll on "},
-};
-
-ABSL_CONST_INIT static absl::base_internal::SpinLock synch_event_mu(
-    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
-
-// Hash table size; should be prime > 2.
-// Can't be too small, as it's used for deadlock detection information.
-static constexpr uint32_t kNSynchEvent = 1031;
-
-static struct SynchEvent {     // this is a trivial hash table for the events
-  // struct is freed when refcount reaches 0
-  int refcount ABSL_GUARDED_BY(synch_event_mu);
-
-  // buckets have linear, 0-terminated chains
-  SynchEvent *next ABSL_GUARDED_BY(synch_event_mu);
-
-  // Constant after initialization
-  uintptr_t masked_addr;  // object at this address is called "name"
-
-  // No explicit synchronization used.  Instead we assume that the
-  // client who enables/disables invariants/logging on a Mutex does so
-  // while the Mutex is not being concurrently accessed by others.
-  void (*invariant)(void *arg);  // called on each event
-  void *arg;            // first arg to (*invariant)()
-  bool log;             // logging turned on
-
-  // Constant after initialization
-  char name[1];         // actually longer---NUL-terminated string
-} * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
-
-// Ensure that the object at "addr" has a SynchEvent struct associated with it,
-// set "bits" in the word there (waiting until lockbit is clear before doing
-// so), and return a refcounted reference that will remain valid until
-// UnrefSynchEvent() is called.  If a new SynchEvent is allocated,
-// the string name is copied into it.
-// When used with a mutex, the caller should also ensure that kMuEvent
-// is set in the mutex word, and similarly for condition variables and kCVEvent.
-static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
-                                    const char *name, intptr_t bits,
-                                    intptr_t lockbit) {
-  uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
-  SynchEvent *e;
-  // first look for an existing SynchEvent struct.
-  synch_event_mu.Lock();
-  for (e = synch_event[h];
-       e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
-       e = e->next) {
-  }
-  if (e == nullptr) {  // no SynchEvent struct found; make one.
-    if (name == nullptr) {
-      name = "";
-    }
-    size_t l = strlen(name);
-    e = reinterpret_cast<SynchEvent *>(
-        base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
-    e->refcount = 2;    // one for return value, one for linked list
-    e->masked_addr = base_internal::HidePtr(addr);
-    e->invariant = nullptr;
-    e->arg = nullptr;
-    e->log = false;
-    strcpy(e->name, name);  // NOLINT(runtime/printf)
-    e->next = synch_event[h];
-    AtomicSetBits(addr, bits, lockbit);
-    synch_event[h] = e;
-  } else {
-    e->refcount++;      // for return value
-  }
-  synch_event_mu.Unlock();
-  return e;
-}
-
-// Deallocate the SynchEvent *e, whose refcount has fallen to zero.
-static void DeleteSynchEvent(SynchEvent *e) {
-  base_internal::LowLevelAlloc::Free(e);
-}
-
-// Decrement the reference count of *e, or do nothing if e==null.
-static void UnrefSynchEvent(SynchEvent *e) {
-  if (e != nullptr) {
-    synch_event_mu.Lock();
-    bool del = (--(e->refcount) == 0);
-    synch_event_mu.Unlock();
-    if (del) {
-      DeleteSynchEvent(e);
-    }
-  }
-}
-
-// Forget the mapping from the object (Mutex or CondVar) at address addr
-// to SynchEvent object, and clear "bits" in its word (waiting until lockbit
-// is clear before doing so).
-static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
-                             intptr_t lockbit) {
-  uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
-  SynchEvent **pe;
-  SynchEvent *e;
-  synch_event_mu.Lock();
-  for (pe = &synch_event[h];
-       (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
-       pe = &e->next) {
-  }
-  bool del = false;
-  if (e != nullptr) {
-    *pe = e->next;
-    del = (--(e->refcount) == 0);
-  }
-  AtomicClearBits(addr, bits, lockbit);
-  synch_event_mu.Unlock();
-  if (del) {
-    DeleteSynchEvent(e);
-  }
-}
-
-// Return a refcounted reference to the SynchEvent of the object at address
-// "addr", if any.  The pointer returned is valid until the UnrefSynchEvent() is
-// called.
-static SynchEvent *GetSynchEvent(const void *addr) {
-  uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
-  SynchEvent *e;
-  synch_event_mu.Lock();
-  for (e = synch_event[h];
-       e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
-       e = e->next) {
-  }
-  if (e != nullptr) {
-    e->refcount++;
-  }
-  synch_event_mu.Unlock();
-  return e;
-}
-
-// Called when an event "ev" occurs on a Mutex of CondVar "obj"
-// if event recording is on
-static void PostSynchEvent(void *obj, int ev) {
-  SynchEvent *e = GetSynchEvent(obj);
-  // logging is on if event recording is on and either there's no event struct,
-  // or it explicitly says to log
-  if (e == nullptr || e->log) {
-    void *pcs[40];
-    int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
-    // A buffer with enough space for the ASCII for all the PCs, even on a
-    // 64-bit machine.
-    char buffer[ABSL_ARRAYSIZE(pcs) * 24];
-    int pos = snprintf(buffer, sizeof (buffer), " @");
-    for (int i = 0; i != n; i++) {
-      pos += snprintf(&buffer[pos], sizeof (buffer) - pos, " %p", pcs[i]);
-    }
-    ABSL_RAW_LOG(INFO, "%s%p %s %s", event_properties[ev].msg, obj,
-                 (e == nullptr ? "" : e->name), buffer);
-  }
-  const int flags = event_properties[ev].flags;
-  if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) {
-    // Calling the invariant as is causes problems under ThreadSanitizer.
-    // We are currently inside of Mutex Lock/Unlock and are ignoring all
-    // memory accesses and synchronization. If the invariant transitively
-    // synchronizes something else and we ignore the synchronization, we will
-    // get false positive race reports later.
-    // Reuse EvalConditionAnnotated to properly call into user code.
-    struct local {
-      static bool pred(SynchEvent *ev) {
-        (*ev->invariant)(ev->arg);
-        return false;
-      }
-    };
-    Condition cond(&local::pred, e);
-    Mutex *mu = static_cast<Mutex *>(obj);
-    const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
-    const bool trylock = (flags & SYNCH_F_TRY) != 0;
-    const bool read_lock = (flags & SYNCH_F_R) != 0;
-    EvalConditionAnnotated(&cond, mu, locking, trylock, read_lock);
-  }
-  UnrefSynchEvent(e);
-}
-
-//------------------------------------------------------------------
-
-// The SynchWaitParams struct encapsulates the way in which a thread is waiting:
-// whether it has a timeout, the condition, exclusive/shared, and whether a
-// condition variable wait has an associated Mutex (as opposed to another
-// type of lock).  It also points to the PerThreadSynch struct of its thread.
-// cv_word tells Enqueue() to enqueue on a CondVar using CondVarEnqueue().
-//
-// This structure is held on the stack rather than directly in
-// PerThreadSynch because a thread can be waiting on multiple Mutexes if,
-// while waiting on one Mutex, the implementation calls a client callback
-// (such as a Condition function) that acquires another Mutex. We don't
-// strictly need to allow this, but programmers become confused if we do not
-// allow them to use functions such as LOG() within Condition functions.  The
-// PerThreadSynch struct points at the most recent SynchWaitParams struct when
-// the thread is on a Mutex's waiter queue.
-struct SynchWaitParams {
-  SynchWaitParams(Mutex::MuHow how_arg, const Condition *cond_arg,
-                  KernelTimeout timeout_arg, Mutex *cvmu_arg,
-                  PerThreadSynch *thread_arg,
-                  std::atomic<intptr_t> *cv_word_arg)
-      : how(how_arg),
-        cond(cond_arg),
-        timeout(timeout_arg),
-        cvmu(cvmu_arg),
-        thread(thread_arg),
-        cv_word(cv_word_arg),
-        contention_start_cycles(base_internal::CycleClock::Now()) {}
-
-  const Mutex::MuHow how;  // How this thread needs to wait.
-  const Condition *cond;  // The condition that this thread is waiting for.
-                          // In Mutex, this field is set to zero if a timeout
-                          // expires.
-  KernelTimeout timeout;  // timeout expiry---absolute time
-                          // In Mutex, this field is set to zero if a timeout
-                          // expires.
-  Mutex *const cvmu;      // used for transfer from cond var to mutex
-  PerThreadSynch *const thread;  // thread that is waiting
-
-  // If not null, thread should be enqueued on the CondVar whose state
-  // word is cv_word instead of queueing normally on the Mutex.
-  std::atomic<intptr_t> *cv_word;
-
-  int64_t contention_start_cycles;  // Time (in cycles) when this thread started
-                                    // to contend for the mutex.
-};
-
-struct SynchLocksHeld {
-  int n;              // number of valid entries in locks[]
-  bool overflow;      // true iff we overflowed the array at some point
-  struct {
-    Mutex *mu;        // lock acquired
-    int32_t count;      // times acquired
-    GraphId id;       // deadlock_graph id of acquired lock
-  } locks[40];
-  // If a thread overfills the array during deadlock detection, we
-  // continue, discarding information as needed.  If no overflow has
-  // taken place, we can provide more error checking, such as
-  // detecting when a thread releases a lock it does not hold.
-};
-
-// A sentinel value in lists that is not 0.
-// A 0 value is used to mean "not on a list".
-static PerThreadSynch *const kPerThreadSynchNull =
-  reinterpret_cast<PerThreadSynch *>(1);
-
-static SynchLocksHeld *LocksHeldAlloc() {
-  SynchLocksHeld *ret = reinterpret_cast<SynchLocksHeld *>(
-      base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
-  ret->n = 0;
-  ret->overflow = false;
-  return ret;
-}
-
-// Return the PerThreadSynch-struct for this thread.
-static PerThreadSynch *Synch_GetPerThread() {
-  ThreadIdentity *identity = GetOrCreateCurrentThreadIdentity();
-  return &identity->per_thread_synch;
-}
-
-static PerThreadSynch *Synch_GetPerThreadAnnotated(Mutex *mu) {
-  if (mu) {
-    ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
-  }
-  PerThreadSynch *w = Synch_GetPerThread();
-  if (mu) {
-    ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
-  }
-  return w;
-}
-
-static SynchLocksHeld *Synch_GetAllLocks() {
-  PerThreadSynch *s = Synch_GetPerThread();
-  if (s->all_locks == nullptr) {
-    s->all_locks = LocksHeldAlloc();  // Freed by ReclaimThreadIdentity.
-  }
-  return s->all_locks;
-}
-
-// Post on "w"'s associated PerThreadSem.
-inline void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
-  if (mu) {
-    ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
-  }
-  PerThreadSem::Post(w->thread_identity());
-  if (mu) {
-    ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
-  }
-}
-
-// Wait on "w"'s associated PerThreadSem; returns false if timeout expired.
-bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
-  if (mu) {
-    ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
-  }
-  assert(w == Synch_GetPerThread());
-  static_cast<void>(w);
-  bool res = PerThreadSem::Wait(t);
-  if (mu) {
-    ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
-  }
-  return res;
-}
-
-// We're in a fatal signal handler that hopes to use Mutex and to get
-// lucky by not deadlocking.  We try to improve its chances of success
-// by effectively disabling some of the consistency checks.  This will
-// prevent certain ABSL_RAW_CHECK() statements from being triggered when
-// re-entry is detected.  The ABSL_RAW_CHECK() statements are those in the
-// Mutex code checking that the "waitp" field has not been reused.
-void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
-  // Fix the per-thread state only if it exists.
-  ThreadIdentity *identity = CurrentThreadIdentityIfPresent();
-  if (identity != nullptr) {
-    identity->per_thread_synch.suppress_fatal_errors = true;
-  }
-  // Don't do deadlock detection when we are already failing.
-  synch_deadlock_detection.store(OnDeadlockCycle::kIgnore,
-                                 std::memory_order_release);
-}
-
-// --------------------------time support
-
-// Return the current time plus the timeout.  Use the same clock as
-// PerThreadSem::Wait() for consistency.  Unfortunately, we don't have
-// such a choice when a deadline is given directly.
-static absl::Time DeadlineFromTimeout(absl::Duration timeout) {
-#ifndef _WIN32
-  struct timeval tv;
-  gettimeofday(&tv, nullptr);
-  return absl::TimeFromTimeval(tv) + timeout;
-#else
-  return absl::Now() + timeout;
-#endif
-}
-
-// --------------------------Mutexes
-
-// In the layout below, the msb of the bottom byte is currently unused.  Also,
-// the following constraints were considered in choosing the layout:
-//  o Both the debug allocator's "uninitialized" and "freed" patterns (0xab and
-//    0xcd) are illegal: reader and writer lock both held.
-//  o kMuWriter and kMuEvent should exceed kMuDesig and kMuWait, to enable the
-//    bit-twiddling trick in Mutex::Unlock().
-//  o kMuWriter / kMuReader == kMuWrWait / kMuWait,
-//    to enable the bit-twiddling trick in CheckForMutexCorruption().
-static const intptr_t kMuReader      = 0x0001L;  // a reader holds the lock
-static const intptr_t kMuDesig       = 0x0002L;  // there's a designated waker
-static const intptr_t kMuWait        = 0x0004L;  // threads are waiting
-static const intptr_t kMuWriter      = 0x0008L;  // a writer holds the lock
-static const intptr_t kMuEvent       = 0x0010L;  // record this mutex's events
-// INVARIANT1:  there's a thread that was blocked on the mutex, is
-// no longer, yet has not yet acquired the mutex.  If there's a
-// designated waker, all threads can avoid taking the slow path in
-// unlock because the designated waker will subsequently acquire
-// the lock and wake someone.  To maintain INVARIANT1 the bit is
-// set when a thread is unblocked (INV1a), and threads that were
-// unblocked reset the bit when they either acquire or re-block
-// (INV1b).
-static const intptr_t kMuWrWait      = 0x0020L;  // runnable writer is waiting
-                                                 // for a reader
-static const intptr_t kMuSpin        = 0x0040L;  // spinlock protects wait list
-static const intptr_t kMuLow         = 0x00ffL;  // mask all mutex bits
-static const intptr_t kMuHigh        = ~kMuLow;  // mask pointer/reader count
-
-// Hack to make constant values available to gdb pretty printer
-enum {
-  kGdbMuSpin = kMuSpin,
-  kGdbMuEvent = kMuEvent,
-  kGdbMuWait = kMuWait,
-  kGdbMuWriter = kMuWriter,
-  kGdbMuDesig = kMuDesig,
-  kGdbMuWrWait = kMuWrWait,
-  kGdbMuReader = kMuReader,
-  kGdbMuLow = kMuLow,
-};
-
-// kMuWrWait implies kMuWait.
-// kMuReader and kMuWriter are mutually exclusive.
-// If kMuReader is zero, there are no readers.
-// Otherwise, if kMuWait is zero, the high order bits contain a count of the
-// number of readers.  Otherwise, the reader count is held in
-// PerThreadSynch::readers of the most recently queued waiter, again in the
-// bits above kMuLow.
-static const intptr_t kMuOne = 0x0100;  // a count of one reader
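-
-// Worked example (illustrative): with no waiters, a mutex word of
-// kMuReader | 3 * kMuOne (0x0301) encodes three readers holding the lock;
-// a fourth reader acquires a share by adding kMuOne.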
-
-// flags passed to Enqueue and LockSlow{,WithTimeout,Loop}
-static const int kMuHasBlocked = 0x01;  // already blocked (MUST == 1)
-static const int kMuIsCond = 0x02;      // conditional waiter (CV or Condition)
-
-static_assert(PerThreadSynch::kAlignment > kMuLow,
-              "PerThreadSynch::kAlignment must be greater than kMuLow");
-
-// This struct contains various bitmasks to be used in
-// acquiring and releasing a mutex in a particular mode.
-struct MuHowS {
-  // if all the bits in fast_need_zero are zero, the lock can be acquired by
-  // adding fast_add and oring fast_or.  The bit kMuDesig should be reset iff
-  // this is the designated waker.
-  intptr_t fast_need_zero;
-  intptr_t fast_or;
-  intptr_t fast_add;
-
-  intptr_t slow_need_zero;  // fast_need_zero with events (e.g. logging)
-
-  intptr_t slow_inc_need_zero;  // if all the bits in slow_inc_need_zero are
-                                // zero a reader can acquire a read share by
-                                // setting the reader bit and incrementing
-                                // the reader count (in last waiter since
-                                // we're now slow-path).  kMuWrWait may
-                                // be ignored if we already waited once.
-};
-
-static const MuHowS kSharedS = {
-    // shared or read lock
-    kMuWriter | kMuWait | kMuEvent,   // fast_need_zero
-    kMuReader,                        // fast_or
-    kMuOne,                           // fast_add
-    kMuWriter | kMuWait,              // slow_need_zero
-    kMuSpin | kMuWriter | kMuWrWait,  // slow_inc_need_zero
-};
-static const MuHowS kExclusiveS = {
-    // exclusive or write lock
-    kMuWriter | kMuReader | kMuEvent,  // fast_need_zero
-    kMuWriter,                         // fast_or
-    0,                                 // fast_add
-    kMuWriter | kMuReader,             // slow_need_zero
-    ~static_cast<intptr_t>(0),         // slow_inc_need_zero
-};
-static const Mutex::MuHow kShared = &kSharedS;        // shared lock
-static const Mutex::MuHow kExclusive = &kExclusiveS;  // exclusive lock
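-
-// For example (illustrative), an uncontended ReaderLock takes the fast path:
-// (v & kSharedS.fast_need_zero) == 0, so one compare-and-swap replacing v
-// with (v | kMuReader) + kMuOne acquires a read share.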
-
-#ifdef NDEBUG
-static constexpr bool kDebugMode = false;
-#else
-static constexpr bool kDebugMode = true;
-#endif
-
-#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
-static unsigned TsanFlags(Mutex::MuHow how) {
-  return how == kShared ? __tsan_mutex_read_lock : 0;
-}
-#endif
-
-static bool DebugOnlyIsExiting() {
-  return false;
-}
-
-Mutex::~Mutex() {
-  intptr_t v = mu_.load(std::memory_order_relaxed);
-  if ((v & kMuEvent) != 0 && !DebugOnlyIsExiting()) {
-    ForgetSynchEvent(&this->mu_, kMuEvent, kMuSpin);
-  }
-  if (kDebugMode) {
-    this->ForgetDeadlockInfo();
-  }
-  ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
-}
-
-void Mutex::EnableDebugLog(const char *name) {
-  SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
-  e->log = true;
-  UnrefSynchEvent(e);
-}
-
-void EnableMutexInvariantDebugging(bool enabled) {
-  synch_check_invariants.store(enabled, std::memory_order_release);
-}
-
-void Mutex::EnableInvariantDebugging(void (*invariant)(void *),
-                                     void *arg) {
-  if (synch_check_invariants.load(std::memory_order_acquire) &&
-      invariant != nullptr) {
-    SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
-    e->invariant = invariant;
-    e->arg = arg;
-    UnrefSynchEvent(e);
-  }
-}
-
-void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
-  synch_deadlock_detection.store(mode, std::memory_order_release);
-}
-
-// Return true iff threads x and y are waiting on the same condition for the
-// same type of lock.  Requires that x and y be waiting on the same Mutex
-// queue.
-static bool MuSameCondition(PerThreadSynch *x, PerThreadSynch *y) {
-  return x->waitp->how == y->waitp->how &&
-         Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
-}
-
-// Given the contents of a mutex word containing a PerThreadSynch pointer,
-// return the pointer.
-static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
-  return reinterpret_cast<PerThreadSynch *>(v & kMuHigh);
-}
-
-// The next several routines maintain the per-thread next and skip fields
-// used in the Mutex waiter queue.
-// The queue is a circular singly-linked list, of which the "head" is the
-// last element, and head->next is the first element.
-// The skip field has the invariant:
-//   For thread x, x->skip is one of:
-//     - invalid (iff x is not in a Mutex wait queue),
-//     - null, or
-//     - a pointer to a distinct thread waiting later in the same Mutex queue
-//       such that all threads in [x, x->skip] have the same condition and
-//       lock type (MuSameCondition() is true for all pairs in [x, x->skip]).
-// In addition, if x->skip is valid, (x->may_skip || x->skip == null)
-//
-// By the spec of MuSameCondition(), it is not necessary when removing the
-// first runnable thread y from the front of a Mutex queue to adjust the skip
-// field of another thread x because if x->skip==y, x->skip must (have) become
-// invalid before y is removed.  The function TryRemove can remove a specified
-// thread from an arbitrary position in the queue whether runnable or not, so
-// it fixes up skip fields that would otherwise be left dangling.
-// The statement
-//     if (x->may_skip && MuSameCondition(x, x->next)) { x->skip = x->next; }
-// maintains the invariant, provided x is not the last waiter in a Mutex queue.
-// The statement
-//          if (x->skip != null) { x->skip = x->skip->skip; }
-// maintains the invariant.
-
-// Returns the last thread y in a mutex waiter queue such that all threads in
-// [x, y] inclusive share the same condition.  Sets skip fields of some threads
-// in that range to optimize future evaluation of Skip() on x values in
-// the range.  Requires thread x is in a mutex waiter queue.
-// The locking is unusual.  Skip() is called under these conditions:
-//   - spinlock is held in call from Enqueue(), with maybe_unlocking == false
-//   - Mutex is held in call from UnlockSlow() by last unlocker, with
-//     maybe_unlocking == true
-//   - both Mutex and spinlock are held in call from DequeueAllWakeable() (from
-//     UnlockSlow()) and TryRemove()
-// These cases are mutually exclusive, so Skip() never runs concurrently
-// with itself on the same Mutex.  The skip chain is used in these other places
-// that cannot occur concurrently:
-//   - FixSkip() (from TryRemove()) (with spinlock and Mutex held)
-//   - Dequeue() (with spinlock and Mutex held)
-//   - UnlockSlow() (with spinlock and Mutex held)
-// A more complex case is Enqueue()
-//   - Enqueue() (with spinlock held and maybe_unlocking == false)
-//               This is the first case in which Skip is called, above.
-//   - Enqueue() (without spinlock held; but queue is empty and being freshly
-//                formed)
-//   - Enqueue() (with spinlock held and maybe_unlocking == true)
-// The first case has mutual exclusion, and the second achieves isolation
-// by working on an otherwise unreachable data structure.
-// In the last case, Enqueue() is required to change no skip/next pointers
-// except those in the added node and the former "head" node.  This implies
-// that the new node is added after head, and so must be the new head or the
-// new front of the queue.
-static PerThreadSynch *Skip(PerThreadSynch *x) {
-  PerThreadSynch *x0 = nullptr;
-  PerThreadSynch *x1 = x;
-  PerThreadSynch *x2 = x->skip;
-  if (x2 != nullptr) {
-    // Each iteration attempts to advance sequence (x0,x1,x2) to next sequence
-    // such that   x1 == x0->skip && x2 == x1->skip
-    while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
-      x0->skip = x2;      // short-circuit skip from x0 to x2
-    }
-    x->skip = x1;         // short-circuit skip from x to result
-  }
-  return x1;
-}
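-// For example (a sketch): given skip links x->a, a->b, b->c, and
-// c->skip == null, the loop above rewrites a->skip to c, and the final store
-// sets x->skip to c, so Skip(x) returns c and later calls reach c in one hop.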
-
-// "ancestor" appears before "to_be_removed" in the same Mutex waiter queue.
-// The latter is going to be removed out of order, because of a timeout.
-// Check whether "ancestor" has a skip field pointing to "to_be_removed",
-// and fix it if it does.
-static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
-  if (ancestor->skip == to_be_removed) {  // ancestor->skip left dangling
-    if (to_be_removed->skip != nullptr) {
-      ancestor->skip = to_be_removed->skip;  // can skip past to_be_removed
-    } else if (ancestor->next != to_be_removed) {  // they are not adjacent
-      ancestor->skip = ancestor->next;             // can skip one past ancestor
-    } else {
-      ancestor->skip = nullptr;  // can't skip at all
-    }
-  }
-}
-
-static void CondVarEnqueue(SynchWaitParams *waitp);
-
-// Enqueue thread "waitp->thread" on a waiter queue.
-// Called with mutex spinlock held if head != nullptr.
-// If head==nullptr and waitp->cv_word==nullptr, then Enqueue() is
-// idempotent; it alters no state associated with the existing (empty)
-// queue.
-//
-// If waitp->cv_word == nullptr, queue the thread at either the front or
-// the end (according to its priority) of the circular mutex waiter queue whose
-// head is "head", and return the new head.  mu is the previous mutex state,
-// which contains the reader count (perhaps adjusted for the operation in
-// progress) if the list was empty and a read lock held, and the holder hint if
-// the list was empty and a write lock held.  (flags & kMuIsCond) indicates
-// whether this thread was transferred from a CondVar or is waiting for a
-// non-trivial condition.  In this case (waitp->cv_word == nullptr), Enqueue()
-// never returns nullptr.
-//
-// If waitp->cv_word != nullptr, CondVarEnqueue() is called, and "head" is
-// returned. This mechanism is used by CondVar to queue a thread on the
-// condition variable queue instead of the mutex queue in implementing Wait().
-// In this case, Enqueue() can return nullptr (if head==nullptr).
-static PerThreadSynch *Enqueue(PerThreadSynch *head,
-                               SynchWaitParams *waitp, intptr_t mu, int flags) {
-  // If we have been given a cv_word, call CondVarEnqueue() and return
-  // the previous head of the Mutex waiter queue.
-  if (waitp->cv_word != nullptr) {
-    CondVarEnqueue(waitp);
-    return head;
-  }
-
-  PerThreadSynch *s = waitp->thread;
-  ABSL_RAW_CHECK(
-      s->waitp == nullptr ||    // normal case
-          s->waitp == waitp ||  // Fer()---transfer from condition variable
-          s->suppress_fatal_errors,
-      "detected illegal recursion into Mutex code");
-  s->waitp = waitp;
-  s->skip = nullptr;             // maintain skip invariant (see above)
-  s->may_skip = true;            // always true on entering queue
-  s->wake = false;               // not being woken
-  s->cond_waiter = ((flags & kMuIsCond) != 0);
-  if (head == nullptr) {         // s is the only waiter
-    s->next = s;                 // it's the only entry in the cycle
-    s->readers = mu;             // reader count is from mu word
-    s->maybe_unlocking = false;  // no one is searching an empty list
-    head = s;                    // s is new head
-  } else {
-    PerThreadSynch *enqueue_after = nullptr;  // we'll put s after this element
-#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
-    int64_t now_cycles = base_internal::CycleClock::Now();
-    if (s->next_priority_read_cycles < now_cycles) {
-      // Every so often, update our idea of the thread's priority.
-      // pthread_getschedparam() is 5% of the block/wakeup time;
-      // base_internal::CycleClock::Now() is 0.5%.
-      int policy;
-      struct sched_param param;
-      const int err = pthread_getschedparam(pthread_self(), &policy, &param);
-      if (err != 0) {
-        ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
-      } else {
-        s->priority = param.sched_priority;
-        s->next_priority_read_cycles =
-            now_cycles +
-            static_cast<int64_t>(base_internal::CycleClock::Frequency());
-      }
-    }
-    if (s->priority > head->priority) {  // s's priority is above head's
-      // try to put s in priority-fifo order, or failing that at the front.
-      if (!head->maybe_unlocking) {
-        // No unlocker can be scanning the queue, so we can insert between
-        // skip-chains, and within a skip-chain if it has the same condition as
-        // s.  We insert in priority-fifo order, examining the end of every
-        // skip-chain, plus every element with the same condition as s.
-        PerThreadSynch *advance_to = head;    // next value of enqueue_after
-        PerThreadSynch *cur;                  // successor of enqueue_after
-        do {
-          enqueue_after = advance_to;
-          cur = enqueue_after->next;  // this advance ensures progress
-          advance_to = Skip(cur);   // normally, advance to end of skip chain
-                                    // (side-effect: optimizes skip chain)
-          if (advance_to != cur && s->priority > advance_to->priority &&
-              MuSameCondition(s, cur)) {
-            // but this skip chain is not a singleton, s has higher priority
-            // than its tail and has the same condition as the chain,
-            // so we can insert within the skip-chain
-            advance_to = cur;         // advance by just one
-          }
-        } while (s->priority <= advance_to->priority);
-              // termination guaranteed because s->priority > head->priority
-              // and head is the end of a skip chain
-      } else if (waitp->how == kExclusive &&
-                 Condition::GuaranteedEqual(waitp->cond, nullptr)) {
-        // An unlocker could be scanning the queue, but we know it will recheck
-        // the queue front for writers that have no condition, which is what s
-        // is, so an insert at front is safe.
-        enqueue_after = head;       // add after head, at front
-      }
-    }
-#endif
-    if (enqueue_after != nullptr) {
-      s->next = enqueue_after->next;
-      enqueue_after->next = s;
-
-      // enqueue_after can be: head, Skip(...), or cur.
-      // The first two imply enqueue_after->skip == nullptr, and
-      // the last is used only if MuSameCondition(s, cur).
-      // We require this because clearing enqueue_after->skip
-      // is impossible; enqueue_after's predecessors might also
-      // incorrectly skip over s if we were to allow other
-      // insertion points.
-      ABSL_RAW_CHECK(
-          enqueue_after->skip == nullptr || MuSameCondition(enqueue_after, s),
-          "Mutex Enqueue failure");
-
-      if (enqueue_after != head && enqueue_after->may_skip &&
-          MuSameCondition(enqueue_after, enqueue_after->next)) {
-        // enqueue_after can skip to its new successor, s
-        enqueue_after->skip = enqueue_after->next;
-      }
-      if (MuSameCondition(s, s->next)) {  // s->may_skip is known to be true
-        s->skip = s->next;                // s may skip to its successor
-      }
-    } else {   // enqueue not done any other way, so
-               // we're inserting s at the back
-      // s will become new head; copy data from head into it
-      s->next = head->next;        // add s after head
-      head->next = s;
-      s->readers = head->readers;  // reader count is from previous head
-      s->maybe_unlocking = head->maybe_unlocking;  // same for unlock hint
-      if (head->may_skip && MuSameCondition(head, s)) {
-        // head now has successor; may skip
-        head->skip = s;
-      }
-      head = s;  // s is new head
-    }
-  }
-  s->state.store(PerThreadSynch::kQueued, std::memory_order_relaxed);
-  return head;
-}
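-// A sketch of the common (no-priority) path above: with old head h and front
-// f (h->next == f), Enqueue() leaves h->next == s, s->next == f, and returns
-// s as the new head, so s waits behind every existing waiter.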
-
-// Dequeue the successor pw->next of thread pw from the Mutex waiter queue
-// whose last element is head.  The new head element is returned, or null
-// if the list is made empty.
-// Dequeue is called with both spinlock and Mutex held.
-static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
-  PerThreadSynch *w = pw->next;
-  pw->next = w->next;         // snip w out of list
-  if (head == w) {            // we removed the head
-    head = (pw == w) ? nullptr : pw;  // either emptied list, or pw is new head
-  } else if (pw != head && MuSameCondition(pw, pw->next)) {
-    // pw can skip to its new successor
-    if (pw->next->skip != nullptr) {  // either to its successor's skip target
-      pw->skip = pw->next->skip;
-    } else {                          // or to pw's successor itself
-      pw->skip = pw->next;
-    }
-  }
-  return head;
-}
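-// A sketch of Dequeue() above: removing w from  pw -> w -> x  leaves
-// pw -> x, and if pw may skip to x (same condition, pw != head), pw->skip
-// becomes x->skip if set, else x, preserving the skip invariant.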
-
-// Traverse the elements [pw->next, head] of the circular list whose last
-// element is head.  Remove all elements with wake==true and place them in the
-// singly-linked list wake_list in the order found.  Assumes that
-// there is only one such element if the element has how == kExclusive.
-// Return the new head.
-static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
-                                          PerThreadSynch *pw,
-                                          PerThreadSynch **wake_tail) {
-  PerThreadSynch *orig_h = head;
-  PerThreadSynch *w = pw->next;
-  bool skipped = false;
-  do {
-    if (w->wake) {                    // remove this element
-      ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable");
-      // We're removing pw's successor, so pw->skip must be null here: if
-      // pw->skip were non-null, pw would share w's condition and we would
-      // already have removed pw.
-      head = Dequeue(head, pw);
-      w->next = *wake_tail;           // keep list terminated
-      *wake_tail = w;                 // add w to wake_list;
-      wake_tail = &w->next;           // next addition to end
-      if (w->waitp->how == kExclusive) {  // wake at most 1 writer
-        break;
-      }
-    } else {                // not waking this one; skip
-      pw = Skip(w);       // skip as much as possible
-      skipped = true;
-    }
-    w = pw->next;
-    // We want to stop processing after we've considered the original head,
-    // orig_h.  We can't test for w==orig_h in the loop because w may skip over
-    // it; we are guaranteed only that w's predecessor will not skip over
-    // orig_h.  When we've considered orig_h, either we've processed it and
-    // removed it (so orig_h != head), or we considered it and skipped it (so
-    // skipped==true && pw == head because skipping from head always skips by
-    // just one, leaving pw pointing at head).  So we want to
-    // continue the loop with the negation of that expression.
-  } while (orig_h == head && (pw != head || !skipped));
-  return head;
-}
-
-// Try to remove thread s from the list of waiters on this mutex.
-// Does nothing if s is not on the waiter list.
-void Mutex::TryRemove(PerThreadSynch *s) {
-  SchedulingGuard::ScopedDisable disable_rescheduling;
-  intptr_t v = mu_.load(std::memory_order_relaxed);
-  // acquire spinlock & lock
-  if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
-      mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
-                                  std::memory_order_acquire,
-                                  std::memory_order_relaxed)) {
-    PerThreadSynch *h = GetPerThreadSynch(v);
-    if (h != nullptr) {
-      PerThreadSynch *pw = h;   // pw is w's predecessor
-      PerThreadSynch *w;
-      if ((w = pw->next) != s) {  // search for thread,
-        do {                      // processing at least one element
-          if (!MuSameCondition(s, w)) {  // seeking different condition
-            pw = Skip(w);                // so skip all that won't match
-            // we don't have to worry about dangling skip fields
-            // in the threads we skipped; none can point to s
-            // because their condition differs from s
-          } else {          // seeking same condition
-            FixSkip(w, s);  // fix up any skip pointer from w to s
-            pw = w;
-          }
-          // don't search further if we found the thread, or we're about to
-          // process the first thread again.
-        } while ((w = pw->next) != s && pw != h);
-      }
-      if (w == s) {                 // found thread; remove it
-        // pw->skip may be non-zero here; the loop above ensured that
-        // no ancestor of s can skip to s, so removal is safe anyway.
-        h = Dequeue(h, pw);
-        s->next = nullptr;
-        s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
-      }
-    }
-    intptr_t nv;
-    do {                        // release spinlock and lock
-      v = mu_.load(std::memory_order_relaxed);
-      nv = v & (kMuDesig | kMuEvent);
-      if (h != nullptr) {
-        nv |= kMuWait | reinterpret_cast<intptr_t>(h);
-        h->readers = 0;            // we hold writer lock
-        h->maybe_unlocking = false;  // finished unlocking
-      }
-    } while (!mu_.compare_exchange_weak(v, nv,
-                                        std::memory_order_release,
-                                        std::memory_order_relaxed));
-  }
-}
-
-// Wait until thread "s", which must be the current thread, is removed from the
-// this mutex's waiter queue.  If "s->waitp->timeout" has a timeout, wake up
-// if the wait extends past the absolute time specified, even if "s" is still
-// on the mutex queue.  In this case, remove "s" from the queue and return
-// true, otherwise return false.
-ABSL_XRAY_LOG_ARGS(1) void Mutex::Block(PerThreadSynch *s) {
-  while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
-    if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
-      // After a timeout, we go into a spin loop until we remove ourselves
-      // from the queue, or someone else removes us.  We can't be sure to be
-      // able to remove ourselves in a single lock acquisition because this
-      // mutex may be held, and the holder has the right to read the centre
-      // of the waiter queue without holding the spinlock.
-      this->TryRemove(s);
-      int c = 0;
-      while (s->next != nullptr) {
-        c = synchronization_internal::MutexDelay(c, GENTLE);
-        this->TryRemove(s);
-      }
-      if (kDebugMode) {
-        // This ensures that we test the case that TryRemove() is called when s
-        // is not on the queue.
-        this->TryRemove(s);
-      }
-      s->waitp->timeout = KernelTimeout::Never();      // timeout is satisfied
-      s->waitp->cond = nullptr;  // condition no longer relevant for wakeups
-    }
-  }
-  ABSL_RAW_CHECK(s->waitp != nullptr || s->suppress_fatal_errors,
-                 "detected illegal recursion in Mutex code");
-  s->waitp = nullptr;
-}
-
-// Wake thread w, and return the next thread in the list.
-PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
-  PerThreadSynch *next = w->next;
-  w->next = nullptr;
-  w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
-  IncrementSynchSem(this, w);
-
-  return next;
-}
-
-static GraphId GetGraphIdLocked(Mutex *mu)
-    ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
-  if (!deadlock_graph) {  // (re)create the deadlock graph.
-    deadlock_graph =
-        new (base_internal::LowLevelAlloc::Alloc(sizeof(*deadlock_graph)))
-            GraphCycles;
-  }
-  return deadlock_graph->GetId(mu);
-}
-
-static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
-  deadlock_graph_mu.Lock();
-  GraphId id = GetGraphIdLocked(mu);
-  deadlock_graph_mu.Unlock();
-  return id;
-}
-
-// Record a lock acquisition.  This is used in debug mode for deadlock
-// detection.  The held_locks pointer points to the relevant data
-// structure for each case.
-static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
-  int n = held_locks->n;
-  int i = 0;
-  while (i != n && held_locks->locks[i].id != id) {
-    i++;
-  }
-  if (i == n) {
-    if (n == ABSL_ARRAYSIZE(held_locks->locks)) {
-      held_locks->overflow = true;  // lost some data
-    } else {                        // we have room for lock
-      held_locks->locks[i].mu = mu;
-      held_locks->locks[i].count = 1;
-      held_locks->locks[i].id = id;
-      held_locks->n = n + 1;
-    }
-  } else {
-    held_locks->locks[i].count++;
-  }
-}
-
-// Record a lock release.  Each call to LockEnter(mu, id, x) should be
-// eventually followed by a call to LockLeave(mu, id, x) by the same thread.
-// It does not process the event if it is not needed, i.e., when deadlock
-// detection is disabled.
-static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
-  int n = held_locks->n;
-  int i = 0;
-  while (i != n && held_locks->locks[i].id != id) {
-    i++;
-  }
-  if (i == n) {
-    if (!held_locks->overflow) {
-      // The deadlock id may have been reassigned after ForgetDeadlockInfo,
-      // but in that case mu should still be present.
-      i = 0;
-      while (i != n && held_locks->locks[i].mu != mu) {
-        i++;
-      }
-      if (i == n) {  // mu missing means releasing unheld lock
-        SynchEvent *mu_events = GetSynchEvent(mu);
-        ABSL_RAW_LOG(FATAL,
-                     "thread releasing lock it does not hold: %p %s; "
-                     ,
-                     static_cast<void *>(mu),
-                     mu_events == nullptr ? "" : mu_events->name);
-      }
-    }
-  } else if (held_locks->locks[i].count == 1) {
-    held_locks->n = n - 1;
-    held_locks->locks[i] = held_locks->locks[n - 1];
-    held_locks->locks[n - 1].id = InvalidGraphId();
-    held_locks->locks[n - 1].mu =
-        nullptr;  // clear mu to please the leak detector.
-  } else {
-    assert(held_locks->locks[i].count > 0);
-    held_locks->locks[i].count--;
-  }
-}
-
-// Call LockEnter() if in debug mode and deadlock detection is enabled.
-static inline void DebugOnlyLockEnter(Mutex *mu) {
-  if (kDebugMode) {
-    if (synch_deadlock_detection.load(std::memory_order_acquire) !=
-        OnDeadlockCycle::kIgnore) {
-      LockEnter(mu, GetGraphId(mu), Synch_GetAllLocks());
-    }
-  }
-}
-
-// Call LockEnter() if in debug mode and deadlock detection is enabled.
-static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
-  if (kDebugMode) {
-    if (synch_deadlock_detection.load(std::memory_order_acquire) !=
-        OnDeadlockCycle::kIgnore) {
-      LockEnter(mu, id, Synch_GetAllLocks());
-    }
-  }
-}
-
-// Call LockLeave() if in debug mode and deadlock detection is enabled.
-static inline void DebugOnlyLockLeave(Mutex *mu) {
-  if (kDebugMode) {
-    if (synch_deadlock_detection.load(std::memory_order_acquire) !=
-        OnDeadlockCycle::kIgnore) {
-      LockLeave(mu, GetGraphId(mu), Synch_GetAllLocks());
-    }
-  }
-}
-
-static char *StackString(void **pcs, int n, char *buf, int maxlen,
-                         bool symbolize) {
-  static const int kSymLen = 200;
-  char sym[kSymLen];
-  int len = 0;
-  for (int i = 0; i != n; i++) {
-    if (symbolize) {
-      if (!symbolizer(pcs[i], sym, kSymLen)) {
-        sym[0] = '\0';
-      }
-      snprintf(buf + len, maxlen - len, "%s\t@ %p %s\n",
-               (i == 0 ? "\n" : ""),
-               pcs[i], sym);
-    } else {
-      snprintf(buf + len, maxlen - len, " %p", pcs[i]);
-    }
-    len += static_cast<int>(strlen(&buf[len]));
-  }
-  return buf;
-}
-
-static char *CurrentStackString(char *buf, int maxlen, bool symbolize) {
-  void *pcs[40];
-  return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf,
-                     maxlen, symbolize);
-}
-
-namespace {
-enum { kMaxDeadlockPathLen = 10 };  // maximum length of a deadlock cycle;
-                                    // a path this long would be remarkable
-// Buffers required to report a deadlock.
-// We do not allocate them on the stack, to avoid a large stack frame.
-struct DeadlockReportBuffers {
-  char buf[6100];
-  GraphId path[kMaxDeadlockPathLen];
-};
-
-struct ScopedDeadlockReportBuffers {
-  ScopedDeadlockReportBuffers() {
-    b = reinterpret_cast<DeadlockReportBuffers *>(
-        base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
-  }
-  ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
-  DeadlockReportBuffers *b;
-};
-
-// Helper to pass to GraphCycles::UpdateStackTrace.
-int GetStack(void** stack, int max_depth) {
-  return absl::GetStackTrace(stack, max_depth, 3);
-}
-}  // anonymous namespace
-
-// Called in debug mode when a thread is about to acquire a lock in a way that
-// may block.
-static GraphId DeadlockCheck(Mutex *mu) {
-  if (synch_deadlock_detection.load(std::memory_order_acquire) ==
-      OnDeadlockCycle::kIgnore) {
-    return InvalidGraphId();
-  }
-
-  SynchLocksHeld *all_locks = Synch_GetAllLocks();
-
-  absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);
-  const GraphId mu_id = GetGraphIdLocked(mu);
-
-  if (all_locks->n == 0) {
-    // There are no other locks held. Return now so that we don't need to
-    // call GetSynchEvent(). This way we do not record the stack trace
-    // for this Mutex.  That's ok: if this Mutex is involved in a deadlock,
-    // it cannot be the first lock acquired by every thread in the cycle, so
-    // the cycle is still detected on a later acquisition.
-    return mu_id;
-  }
-
-  // We prefer to keep stack traces that show a thread holding and acquiring
-  // as many locks as possible.  This increases the chances that a given edge
-  // in the acquires-before graph will be represented in the stack traces
-  // recorded for the locks.
-  deadlock_graph->UpdateStackTrace(mu_id, all_locks->n + 1, GetStack);
-
-  // For each other mutex already held by this thread:
-  for (int i = 0; i != all_locks->n; i++) {
-    const GraphId other_node_id = all_locks->locks[i].id;
-    const Mutex *other =
-        static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id));
-    if (other == nullptr) {
-      // Ignore stale lock
-      continue;
-    }
-
-    // Add the acquired-before edge to the graph.
-    if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
-      ScopedDeadlockReportBuffers scoped_buffers;
-      DeadlockReportBuffers *b = scoped_buffers.b;
-      static int number_of_reported_deadlocks = 0;
-      number_of_reported_deadlocks++;
-      // Symbolize only the first 2 deadlock reports to avoid huge slowdowns.
-      bool symbolize = number_of_reported_deadlocks <= 2;
-      ABSL_RAW_LOG(ERROR, "Potential Mutex deadlock: %s",
-                   CurrentStackString(b->buf, sizeof (b->buf), symbolize));
-      int len = 0;
-      for (int j = 0; j != all_locks->n; j++) {
-        void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
-        if (pr != nullptr) {
-          snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr);
-          len += static_cast<int>(strlen(&b->buf[len]));
-        }
-      }
-      ABSL_RAW_LOG(ERROR, "Acquiring %p    Mutexes held: %s",
-                   static_cast<void *>(mu), b->buf);
-      ABSL_RAW_LOG(ERROR, "Cycle: ");
-      int path_len = deadlock_graph->FindPath(
-          mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path);
-      for (int j = 0; j != path_len; j++) {
-        GraphId id = b->path[j];
-        Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id));
-        if (path_mu == nullptr) continue;
-        void** stack;
-        int depth = deadlock_graph->GetStackTrace(id, &stack);
-        snprintf(b->buf, sizeof(b->buf),
-                 "mutex@%p stack: ", static_cast<void *>(path_mu));
-        StackString(stack, depth, b->buf + strlen(b->buf),
-                    static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
-                    symbolize);
-        ABSL_RAW_LOG(ERROR, "%s", b->buf);
-      }
-      if (synch_deadlock_detection.load(std::memory_order_acquire) ==
-          OnDeadlockCycle::kAbort) {
-        deadlock_graph_mu.Unlock();  // avoid deadlock in fatal sighandler
-        ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");
-        return mu_id;
-      }
-      break;   // report at most one potential deadlock per acquisition
-    }
-  }
-
-  return mu_id;
-}
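-// A minimal sketch (not part of this file) of how the check above fires,
-// using the public API declared in mutex.h:
-//
-//   absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
-//   absl::Mutex a, b;
-//   // Thread 1: a.Lock(); b.Lock();  // records acquired-before edge a -> b
-//   // Thread 2: b.Lock(); a.Lock();  // edge b -> a closes a cycle, so
-//   //                                // "Potential Mutex deadlock" is logged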
-
-// Invoke DeadlockCheck() iff we're in debug mode and
-// deadlock checking has been enabled.
-static inline GraphId DebugOnlyDeadlockCheck(Mutex *mu) {
-  if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
-                        OnDeadlockCycle::kIgnore) {
-    return DeadlockCheck(mu);
-  } else {
-    return InvalidGraphId();
-  }
-}
-
-void Mutex::ForgetDeadlockInfo() {
-  if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
-                        OnDeadlockCycle::kIgnore) {
-    deadlock_graph_mu.Lock();
-    if (deadlock_graph != nullptr) {
-      deadlock_graph->RemoveNode(this);
-    }
-    deadlock_graph_mu.Unlock();
-  }
-}
-
-void Mutex::AssertNotHeld() const {
-  // We have the data to allow this check only if in debug mode and deadlock
-  // detection is enabled.
-  if (kDebugMode &&
-      (mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
-      synch_deadlock_detection.load(std::memory_order_acquire) !=
-          OnDeadlockCycle::kIgnore) {
-    GraphId id = GetGraphId(const_cast<Mutex *>(this));
-    SynchLocksHeld *locks = Synch_GetAllLocks();
-    for (int i = 0; i != locks->n; i++) {
-      if (locks->locks[i].id == id) {
-        SynchEvent *mu_events = GetSynchEvent(this);
-        ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
-                     static_cast<const void *>(this),
-                     (mu_events == nullptr ? "" : mu_events->name));
-      }
-    }
-  }
-}
-
-// Attempt to acquire *mu, and return whether successful.  The implementation
-// may spin for a short while if the lock cannot be acquired immediately.
-static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
-  int c = GetMutexGlobals().spinloop_iterations;
-  do {  // do/while somewhat faster on AMD
-    intptr_t v = mu->load(std::memory_order_relaxed);
-    if ((v & (kMuReader|kMuEvent)) != 0) {
-      return false;  // a reader or tracing -> give up
-    } else if (((v & kMuWriter) == 0) &&  // no holder -> try to acquire
-               mu->compare_exchange_strong(v, kMuWriter | v,
-                                           std::memory_order_acquire,
-                                           std::memory_order_relaxed)) {
-      return true;
-    }
-  } while (--c > 0);
-  return false;
-}
-
-ABSL_XRAY_LOG_ARGS(1) void Mutex::Lock() {
-  ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
-  GraphId id = DebugOnlyDeadlockCheck(this);
-  intptr_t v = mu_.load(std::memory_order_relaxed);
-  // try fast acquire, then spin loop
-  if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||
-      !mu_.compare_exchange_strong(v, kMuWriter | v,
-                                   std::memory_order_acquire,
-                                   std::memory_order_relaxed)) {
-    // try spin acquire, then slow loop
-    if (!TryAcquireWithSpinning(&this->mu_)) {
-      this->LockSlow(kExclusive, nullptr, 0);
-    }
-  }
-  DebugOnlyLockEnter(this, id);
-  ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
-}
-
-ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderLock() {
-  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
-  GraphId id = DebugOnlyDeadlockCheck(this);
-  intptr_t v = mu_.load(std::memory_order_relaxed);
-  // try fast acquire, then slow loop
-  if ((v & (kMuWriter | kMuWait | kMuEvent)) != 0 ||
-      !mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
-                                   std::memory_order_acquire,
-                                   std::memory_order_relaxed)) {
-    this->LockSlow(kShared, nullptr, 0);
-  }
-  DebugOnlyLockEnter(this, id);
-  ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
-}
-
-void Mutex::LockWhen(const Condition &cond) {
-  ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
-  GraphId id = DebugOnlyDeadlockCheck(this);
-  this->LockSlow(kExclusive, &cond, 0);
-  DebugOnlyLockEnter(this, id);
-  ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
-}
-
-bool Mutex::LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) {
-  return LockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
-}
-
-bool Mutex::LockWhenWithDeadline(const Condition &cond, absl::Time deadline) {
-  ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
-  GraphId id = DebugOnlyDeadlockCheck(this);
-  bool res = LockSlowWithDeadline(kExclusive, &cond,
-                                  KernelTimeout(deadline), 0);
-  DebugOnlyLockEnter(this, id);
-  ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
-  return res;
-}
-
-void Mutex::ReaderLockWhen(const Condition &cond) {
-  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
-  GraphId id = DebugOnlyDeadlockCheck(this);
-  this->LockSlow(kShared, &cond, 0);
-  DebugOnlyLockEnter(this, id);
-  ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
-}
-
-bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,
-                                      absl::Duration timeout) {
-  return ReaderLockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
-}
-
-bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
-                                       absl::Time deadline) {
-  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
-  GraphId id = DebugOnlyDeadlockCheck(this);
-  bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(deadline), 0);
-  DebugOnlyLockEnter(this, id);
-  ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
-  return res;
-}
-
-void Mutex::Await(const Condition &cond) {
-  if (cond.Eval()) {    // condition already true; nothing to do
-    if (kDebugMode) {
-      this->AssertReaderHeld();
-    }
-  } else {              // normal case
-    ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()),
-                   "condition untrue on return from Await");
-  }
-}
-
-bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) {
-  return AwaitWithDeadline(cond, DeadlineFromTimeout(timeout));
-}
-
-bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
-  if (cond.Eval()) {      // condition already true; nothing to do
-    if (kDebugMode) {
-      this->AssertReaderHeld();
-    }
-    return true;
-  }
-
-  KernelTimeout t{deadline};
-  bool res = this->AwaitCommon(cond, t);
-  ABSL_RAW_CHECK(res || t.has_timeout(),
-                 "condition untrue on return from Await");
-  return res;
-}
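-// A minimal usage sketch for Await() (illustrative only, not part of this
-// file); Condition(&ready) is the bool-pointer form declared in mutex.h:
-//
-//   absl::Mutex mu;
-//   bool ready = false;                  // protected by mu
-//   ...
-//   mu.Lock();
-//   mu.Await(absl::Condition(&ready));   // releases mu while waiting and
-//                                        // reacquires it before returning
-//   // mu is held here, and ready is true.
-//   mu.Unlock();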
-
-bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
-  this->AssertReaderHeld();
-  MuHow how =
-      (mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
-  ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));
-  SynchWaitParams waitp(
-      how, &cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
-      nullptr /*no cv_word*/);
-  int flags = kMuHasBlocked;
-  if (!Condition::GuaranteedEqual(&cond, nullptr)) {
-    flags |= kMuIsCond;
-  }
-  this->UnlockSlow(&waitp);
-  this->Block(waitp.thread);
-  ABSL_TSAN_MUTEX_POST_UNLOCK(this, TsanFlags(how));
-  ABSL_TSAN_MUTEX_PRE_LOCK(this, TsanFlags(how));
-  this->LockSlowLoop(&waitp, flags);
-  bool res = waitp.cond != nullptr ||  // => cond known true from LockSlowLoop
-             EvalConditionAnnotated(&cond, this, true, false, how == kShared);
-  ABSL_TSAN_MUTEX_POST_LOCK(this, TsanFlags(how), 0);
-  return res;
-}
-
-ABSL_XRAY_LOG_ARGS(1) bool Mutex::TryLock() {
-  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
-  intptr_t v = mu_.load(std::memory_order_relaxed);
-  if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 &&  // try fast acquire
-      mu_.compare_exchange_strong(v, kMuWriter | v,
-                                  std::memory_order_acquire,
-                                  std::memory_order_relaxed)) {
-    DebugOnlyLockEnter(this);
-    ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
-    return true;
-  }
-  if ((v & kMuEvent) != 0) {              // we're recording events
-    if ((v & kExclusive->slow_need_zero) == 0 &&  // try fast acquire
-        mu_.compare_exchange_strong(
-            v, (kExclusive->fast_or | v) + kExclusive->fast_add,
-            std::memory_order_acquire, std::memory_order_relaxed)) {
-      DebugOnlyLockEnter(this);
-      PostSynchEvent(this, SYNCH_EV_TRYLOCK_SUCCESS);
-      ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
-      return true;
-    } else {
-      PostSynchEvent(this, SYNCH_EV_TRYLOCK_FAILED);
-    }
-  }
-  ABSL_TSAN_MUTEX_POST_LOCK(
-      this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);
-  return false;
-}
-
-ABSL_XRAY_LOG_ARGS(1) bool Mutex::ReaderTryLock() {
-  ABSL_TSAN_MUTEX_PRE_LOCK(this,
-                           __tsan_mutex_read_lock | __tsan_mutex_try_lock);
-  intptr_t v = mu_.load(std::memory_order_relaxed);
-  // The while-loops (here and below) iterate only if the mutex word keeps
-  // changing (typically because the reader count changes) under the CAS.  We
-  // limit the number of attempts to avoid having to think about livelock.
-  int loop_limit = 5;
-  while ((v & (kMuWriter|kMuWait|kMuEvent)) == 0 && loop_limit != 0) {
-    if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
-                                    std::memory_order_acquire,
-                                    std::memory_order_relaxed)) {
-      DebugOnlyLockEnter(this);
-      ABSL_TSAN_MUTEX_POST_LOCK(
-          this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
-      return true;
-    }
-    loop_limit--;
-    v = mu_.load(std::memory_order_relaxed);
-  }
-  if ((v & kMuEvent) != 0) {   // we're recording events
-    loop_limit = 5;
-    while ((v & kShared->slow_need_zero) == 0 && loop_limit != 0) {
-      if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
-                                      std::memory_order_acquire,
-                                      std::memory_order_relaxed)) {
-        DebugOnlyLockEnter(this);
-        PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_SUCCESS);
-        ABSL_TSAN_MUTEX_POST_LOCK(
-            this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
-        return true;
-      }
-      loop_limit--;
-      v = mu_.load(std::memory_order_relaxed);
-    }
-    if ((v & kMuEvent) != 0) {
-      PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_FAILED);
-    }
-  }
-  ABSL_TSAN_MUTEX_POST_LOCK(this,
-                            __tsan_mutex_read_lock | __tsan_mutex_try_lock |
-                                __tsan_mutex_try_lock_failed,
-                            0);
-  return false;
-}
-
-ABSL_XRAY_LOG_ARGS(1) void Mutex::Unlock() {
-  ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
-  DebugOnlyLockLeave(this);
-  intptr_t v = mu_.load(std::memory_order_relaxed);
-
-  if (kDebugMode && ((v & (kMuWriter | kMuReader)) != kMuWriter)) {
-    ABSL_RAW_LOG(FATAL, "Mutex unlocked when destroyed or not locked: v=0x%x",
-                 static_cast<unsigned>(v));
-  }
-
-  // should_try_cas is whether we'll try a compare-and-swap immediately.
-  // NOTE: optimized out when kDebugMode is false.
-  bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
-                          (v & (kMuWait | kMuDesig)) != kMuWait);
-  // But we can use an alternate computation of it that compilers
-  // currently don't find on their own.  When that changes, this function
-  // can be simplified.
-  intptr_t x = (v ^ (kMuWriter | kMuWait)) & (kMuWriter | kMuEvent);
-  intptr_t y = (v ^ (kMuWriter | kMuWait)) & (kMuWait | kMuDesig);
-  // Claim: "x == 0 && y > 0" is equal to should_try_cas.
-  // Also, because kMuWriter and kMuEvent exceed kMuDesig and kMuWait,
-  // all possible non-zero values for x exceed all possible values for y.
-  // Therefore, (x == 0 && y > 0) == (x < y).
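-  // Worked instance (a sketch, using the bit values defined earlier in this
-  // file): for v == kMuWriter | kMuDesig == 0x0a,
-  //   x == (0x0a ^ 0x0c) & 0x18 == 0, and
-  //   y == (0x0a ^ 0x0c) & 0x06 == 0x06 > 0,
-  // so x < y and the fast-release CAS below is attempted, matching
-  // should_try_cas (writer held, no event, designated waker set).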
-  if (kDebugMode && should_try_cas != (x < y)) {
-    // We would usually use PRIdPTR here, but it is not correctly implemented
-    // within the Android toolchain.
-    ABSL_RAW_LOG(FATAL, "internal logic error %llx %llx %llx\n",
-                 static_cast<long long>(v), static_cast<long long>(x),
-                 static_cast<long long>(y));
-  }
-  if (x < y &&
-      mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
-                                  std::memory_order_release,
-                                  std::memory_order_relaxed)) {
-    // fast writer release (writer with no waiters or with designated waker)
-  } else {
-    this->UnlockSlow(nullptr /*no waitp*/);  // take slow path
-  }
-  ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
-}
-
-// Requires v to represent a reader-locked state.
-static bool ExactlyOneReader(intptr_t v) {
-  assert((v & (kMuWriter|kMuReader)) == kMuReader);
-  assert((v & kMuHigh) != 0);
-  // The more straightforward "(v & kMuHigh) == kMuOne" also works, but
-  // on some architectures the following generates slightly smaller code.
-  // It may be faster too.
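-  // Worked instance (a sketch, with kMuLow == 0x00ff and kMuOne == 0x0100 as
-  // defined earlier in this file): one reader gives v & kMuHigh == 0x0100,
-  // and 0x0100 & (kMuHigh ^ kMuOne) == 0; two readers give 0x0200, and
-  // 0x0200 & (kMuHigh ^ kMuOne) != 0.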
-  constexpr intptr_t kMuMultipleWaitersMask = kMuHigh ^ kMuOne;
-  return (v & kMuMultipleWaitersMask) == 0;
-}
-
-ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderUnlock() {
-  ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
-  DebugOnlyLockLeave(this);
-  intptr_t v = mu_.load(std::memory_order_relaxed);
-  assert((v & (kMuWriter|kMuReader)) == kMuReader);
-  if ((v & (kMuReader|kMuWait|kMuEvent)) == kMuReader) {
-    // fast reader release (reader with no waiters)
-    intptr_t clear = ExactlyOneReader(v) ? kMuReader|kMuOne : kMuOne;
-    if (mu_.compare_exchange_strong(v, v - clear,
-                                    std::memory_order_release,
-                                    std::memory_order_relaxed)) {
-      ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
-      return;
-    }
-  }
-  this->UnlockSlow(nullptr /*no waitp*/);  // take slow path
-  ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
-}
-
-// The zap_desig_waker bitmask is used to clear the designated waker flag in
-// the mutex if this thread has blocked, and therefore may be the designated
-// waker.
-static const intptr_t zap_desig_waker[] = {
-    ~static_cast<intptr_t>(0),  // not blocked
-    ~static_cast<intptr_t>(
-        kMuDesig)  // blocked; turn off the designated waker bit
-};
-
-// The ignore_waiting_writers bitmask is used to ignore the existence
-// of waiting writers if a reader that has already blocked once
-// wakes up.
-static const intptr_t ignore_waiting_writers[] = {
-    ~static_cast<intptr_t>(0),  // not blocked
-    ~static_cast<intptr_t>(
-        kMuWrWait)  // blocked; pretend there are no waiting writers
-};
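-
-// Because kMuHasBlocked == 0x01, (flags & kMuHasBlocked) is 0 or 1 and can
-// index these two-element tables directly.  For example (a sketch):
-//   v & zap_desig_waker[flags & kMuHasBlocked]
-// leaves v unchanged on a first attempt (index 0, an all-ones mask) and
-// clears kMuDesig once the thread has blocked (index 1).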
-
-// Internal version of LockWhen().  See LockSlowWithDeadline().
-ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
-                                             int flags) {
-  ABSL_RAW_CHECK(
-      this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
-      "condition untrue on return from LockSlow");
-}
-
-// Compute cond->Eval() and tell race detectors that we do it under mutex mu.
-static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
-                                          bool locking, bool trylock,
-                                          bool read_lock) {
-  // Delicate annotation dance.
-  // We are currently inside of a read/write lock/unlock operation.
-  // All memory accesses are ignored inside of mutex operations; in addition,
-  // for an unlock operation tsan considers that we've already released the
-  // mutex.
-  bool res = false;
-#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
-  const int flags = read_lock ? __tsan_mutex_read_lock : 0;
-  const int tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
-#endif
-  if (locking) {
-    // For lock we pretend that we have finished the operation,
-    // evaluate the predicate, then unlock the mutex and start locking it again
-    // to match the annotation at the end of the outer lock operation.
-    // Note: we can't simply do POST_LOCK, Eval, PRE_LOCK, because then tsan
-    // will think the lock acquisition is recursive which will trigger
-    // deadlock detector.
-    ABSL_TSAN_MUTEX_POST_LOCK(mu, tryflags, 0);
-    res = cond->Eval();
-    // There is no "try" version of Unlock, so use flags instead of tryflags.
-    ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
-    ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
-    ABSL_TSAN_MUTEX_PRE_LOCK(mu, tryflags);
-  } else {
-    // Similarly, for unlock we pretend that we have unlocked the mutex,
-    // lock the mutex, evaluate the predicate, and start unlocking it again
-    // to match the annotation at the end of the outer unlock operation.
-    ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
-    ABSL_TSAN_MUTEX_PRE_LOCK(mu, flags);
-    ABSL_TSAN_MUTEX_POST_LOCK(mu, flags, 0);
-    res = cond->Eval();
-    ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
-  }
-  // Prevent unused param warnings in non-TSAN builds.
-  static_cast<void>(mu);
-  static_cast<void>(trylock);
-  static_cast<void>(read_lock);
-  return res;
-}
-
-// Compute cond->Eval() hiding it from race detectors.
-// We are hiding it because inside of UnlockSlow we can evaluate a predicate
-// that was just added by a concurrent Lock operation; Lock adds the predicate
-// to the internal Mutex list without actually acquiring the Mutex
-// (it only acquires the internal spinlock, which is rightfully invisible for
-// tsan).  As a result, there is no tsan-visible synchronization between the
-// addition and this thread.  So if we enabled race detection here,
-// it would race with the predicate initialization.
-static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
-  // Memory accesses are already ignored inside of lock/unlock operations,
-  // but synchronization operations are also ignored. When we evaluate the
-  // predicate we must ignore only memory accesses but not synchronization,
-  // because missed synchronization can lead to false reports later.
-  // So we "divert" (which un-ignores both memory accesses and synchronization)
-  // and then separately turn on ignores of memory accesses.
-  ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
-  ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
-  bool res = cond->Eval();
-  ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
-  ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
-  static_cast<void>(mu);  // Prevent unused param warning in non-TSAN builds.
-  return res;
-}
-
-// Internal equivalent of *LockWhenWithDeadline(), where
-//   "t" represents the absolute timeout; !t.has_timeout() means "forever".
-//   "how" is "kShared" (for ReaderLockWhen) or "kExclusive" (for LockWhen)
-// In flags, bits are OR-ed together:
-// - kMuHasBlocked indicates that the client has already blocked on the call so
-//   the designated waker bit must be cleared and waiting writers should not
-//   obstruct this call
-// - kMuIsCond indicates that this is a conditional acquire (condition variable,
-//   Await, LockWhen) so contention profiling should be suppressed.
-bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
-                                 KernelTimeout t, int flags) {
-  intptr_t v = mu_.load(std::memory_order_relaxed);
-  bool unlock = false;
-  if ((v & how->fast_need_zero) == 0 &&  // try fast acquire
-      mu_.compare_exchange_strong(
-          v, (how->fast_or | (v & zap_desig_waker[flags & kMuHasBlocked])) +
-                 how->fast_add,
-          std::memory_order_acquire, std::memory_order_relaxed)) {
-    if (cond == nullptr ||
-        EvalConditionAnnotated(cond, this, true, false, how == kShared)) {
-      return true;
-    }
-    unlock = true;
-  }
-  SynchWaitParams waitp(
-      how, cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
-      nullptr /*no cv_word*/);
-  if (!Condition::GuaranteedEqual(cond, nullptr)) {
-    flags |= kMuIsCond;
-  }
-  if (unlock) {
-    this->UnlockSlow(&waitp);
-    this->Block(waitp.thread);
-    flags |= kMuHasBlocked;
-  }
-  this->LockSlowLoop(&waitp, flags);
-  return waitp.cond != nullptr ||  // => cond known true from LockSlowLoop
-         cond == nullptr ||
-         EvalConditionAnnotated(cond, this, true, false, how == kShared);
-}
-
-// RAW_CHECK_FMT() takes a condition, a printf-style format string, and
-// the printf-style argument list.   The format string must be a literal.
-// Arguments after the first are not evaluated unless the condition is true.
-#define RAW_CHECK_FMT(cond, ...)                                   \
-  do {                                                             \
-    if (ABSL_PREDICT_FALSE(!(cond))) {                             \
-      ABSL_RAW_LOG(FATAL, "Check " #cond " failed: " __VA_ARGS__); \
-    }                                                              \
-  } while (0)
-
-static void CheckForMutexCorruption(intptr_t v, const char* label) {
-  // Test for either of two situations that should not occur in v:
-  //   kMuWriter and kMuReader
-  //   kMuWrWait and !kMuWait
-  const uintptr_t w = v ^ kMuWait;
-  // By flipping that bit, we can now test for:
-  //   kMuWriter and kMuReader in w
-  //   kMuWrWait and kMuWait in w
-  // We've chosen these two pairs of values so that they overlap,
-  // respectively, when the word is left-shifted by three.  This allows us to
-  // save a branch in the common (correct) case of them not being coincident.
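-  // Worked instance (a sketch, using the bit values defined earlier in this
-  // file): for v == kMuWrWait (waiting writer but no waiters), w == 0x24,
-  // w << 3 == 0x120, and (w & (w << 3)) & (kMuWriter | kMuWrWait) == 0x20,
-  // so the detailed checks below run.  For a clean v == kMuWriter, w == 0x0c,
-  // w << 3 == 0x60, and the conjunction is 0: the single-branch fast path.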
-  static_assert(kMuReader << 3 == kMuWriter, "must match");
-  static_assert(kMuWait << 3 == kMuWrWait, "must match");
-  if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
-  RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
-                "%s: Mutex corrupt: both reader and writer lock held: %p",
-                label, reinterpret_cast<void *>(v));
-  RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
-                "%s: Mutex corrupt: waiting writer with no waiters: %p",
-                label, reinterpret_cast<void *>(v));
-  assert(false);
-}
-
-void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
-  SchedulingGuard::ScopedDisable disable_rescheduling;
-  int c = 0;
-  intptr_t v = mu_.load(std::memory_order_relaxed);
-  if ((v & kMuEvent) != 0) {
-    PostSynchEvent(this, waitp->how == kExclusive ? SYNCH_EV_LOCK
-                                                  : SYNCH_EV_READERLOCK);
-  }
-  ABSL_RAW_CHECK(
-      waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
-      "detected illegal recursion into Mutex code");
-  for (;;) {
-    v = mu_.load(std::memory_order_relaxed);
-    CheckForMutexCorruption(v, "Lock");
-    if ((v & waitp->how->slow_need_zero) == 0) {
-      if (mu_.compare_exchange_strong(
-              v, (waitp->how->fast_or |
-                  (v & zap_desig_waker[flags & kMuHasBlocked])) +
-                     waitp->how->fast_add,
-              std::memory_order_acquire, std::memory_order_relaxed)) {
-        if (waitp->cond == nullptr ||
-            EvalConditionAnnotated(waitp->cond, this, true, false,
-                                   waitp->how == kShared)) {
-          break;  // we timed out, or condition true, so return
-        }
-        this->UnlockSlow(waitp);  // got lock but condition false
-        this->Block(waitp->thread);
-        flags |= kMuHasBlocked;
-        c = 0;
-      }
-    } else {                      // need to access waiter list
-      bool dowait = false;
-      if ((v & (kMuSpin|kMuWait)) == 0) {   // no waiters
-        // This thread tries to become the one and only waiter.
-        PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
-        intptr_t nv = (v & zap_desig_waker[flags & kMuHasBlocked] & kMuLow) |
-                      kMuWait;
-        ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");
-        if (waitp->how == kExclusive && (v & kMuReader) != 0) {
-          nv |= kMuWrWait;
-        }
-        if (mu_.compare_exchange_strong(
-                v, reinterpret_cast<intptr_t>(new_h) | nv,
-                std::memory_order_release, std::memory_order_relaxed)) {
-          dowait = true;
-        } else {            // attempted Enqueue() failed
-          // zero out the waitp field set by Enqueue()
-          waitp->thread->waitp = nullptr;
-        }
-      } else if ((v & waitp->how->slow_inc_need_zero &
-                  ignore_waiting_writers[flags & kMuHasBlocked]) == 0) {
-        // This is a reader that needs to increment the reader count,
-        // but the count is currently held in the last waiter.
-        if (mu_.compare_exchange_strong(
-                v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
-                       kMuReader,
-                std::memory_order_acquire, std::memory_order_relaxed)) {
-          PerThreadSynch *h = GetPerThreadSynch(v);
-          h->readers += kMuOne;       // inc reader count in waiter
-          do {                        // release spinlock
-            v = mu_.load(std::memory_order_relaxed);
-          } while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
-                                              std::memory_order_release,
-                                              std::memory_order_relaxed));
-          if (waitp->cond == nullptr ||
-              EvalConditionAnnotated(waitp->cond, this, true, false,
-                                     waitp->how == kShared)) {
-            break;  // we timed out, or condition true, so return
-          }
-          this->UnlockSlow(waitp);           // got lock but condition false
-          this->Block(waitp->thread);
-          flags |= kMuHasBlocked;
-          c = 0;
-        }
-      } else if ((v & kMuSpin) == 0 &&  // attempt to queue ourselves
-                 mu_.compare_exchange_strong(
-                     v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
-                            kMuWait,
-                     std::memory_order_acquire, std::memory_order_relaxed)) {
-        PerThreadSynch *h = GetPerThreadSynch(v);
-        PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
-        intptr_t wr_wait = 0;
-        ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
-        if (waitp->how == kExclusive && (v & kMuReader) != 0) {
-          wr_wait = kMuWrWait;      // give priority to a waiting writer
-        }
-        do {                        // release spinlock
-          v = mu_.load(std::memory_order_relaxed);
-        } while (!mu_.compare_exchange_weak(
-            v, (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
-            reinterpret_cast<intptr_t>(new_h),
-            std::memory_order_release, std::memory_order_relaxed));
-        dowait = true;
-      }
-      if (dowait) {
-        this->Block(waitp->thread);  // wait until removed from list or timeout
-        flags |= kMuHasBlocked;
-        c = 0;
-      }
-    }
-    ABSL_RAW_CHECK(
-        waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
-        "detected illegal recursion into Mutex code");
-    // delay, then try again
-    c = synchronization_internal::MutexDelay(c, GENTLE);
-  }
-  ABSL_RAW_CHECK(
-      waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
-      "detected illegal recursion into Mutex code");
-  if ((v & kMuEvent) != 0) {
-    PostSynchEvent(this, waitp->how == kExclusive
-                             ? SYNCH_EV_LOCK_RETURNING
-                             : SYNCH_EV_READERLOCK_RETURNING);
-  }
-}
-
-// Unlock this mutex, which is held by the current thread.
-// If waitp is non-zero, it must be the wait parameters for the current thread
-// which holds the lock but is not runnable because its condition is false
-// or it is in the process of blocking on a condition variable; it must requeue
-// itself on the mutex/condvar to wait for its condition to become true.
-ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
-  SchedulingGuard::ScopedDisable disable_rescheduling;
-  intptr_t v = mu_.load(std::memory_order_relaxed);
-  this->AssertReaderHeld();
-  CheckForMutexCorruption(v, "Unlock");
-  if ((v & kMuEvent) != 0) {
-    PostSynchEvent(this, (v & kMuWriter) != 0 ? SYNCH_EV_UNLOCK
-                                              : SYNCH_EV_READERUNLOCK);
-  }
-  int c = 0;
-  // the waiter under consideration to wake, or zero
-  PerThreadSynch *w = nullptr;
-  // the predecessor to w or zero
-  PerThreadSynch *pw = nullptr;
-  // head of the list searched previously, or zero
-  PerThreadSynch *old_h = nullptr;
-  // a condition that's known to be false.
-  const Condition *known_false = nullptr;
-  PerThreadSynch *wake_list = kPerThreadSynchNull;   // list of threads to wake
-  intptr_t wr_wait = 0;        // set to kMuWrWait if we wake a reader and a
-                               // later writer could have acquired the lock
-                               // (starvation avoidance)
-  ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
-                     waitp->thread->suppress_fatal_errors,
-                 "detected illegal recursion into Mutex code");
-  // This loop finds threads wake_list to wakeup if any, and removes them from
-  // the list of waiters.  In addition, it places waitp.thread on the queue of
-  // waiters if waitp is non-zero.
-  for (;;) {
-    v = mu_.load(std::memory_order_relaxed);
-    if ((v & kMuWriter) != 0 && (v & (kMuWait | kMuDesig)) != kMuWait &&
-        waitp == nullptr) {
-      // fast writer release (writer with no waiters or with designated waker)
-      if (mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
-                                      std::memory_order_release,
-                                      std::memory_order_relaxed)) {
-        return;
-      }
-    } else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
-      // fast reader release (reader with no waiters)
-      intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
-      if (mu_.compare_exchange_strong(v, v - clear,
-                                      std::memory_order_release,
-                                      std::memory_order_relaxed)) {
-        return;
-      }
-    } else if ((v & kMuSpin) == 0 &&  // attempt to get spinlock
-               mu_.compare_exchange_strong(v, v | kMuSpin,
-                                           std::memory_order_acquire,
-                                           std::memory_order_relaxed)) {
-      if ((v & kMuWait) == 0) {       // no one to wake
-        intptr_t nv;
-        bool do_enqueue = true;  // always Enqueue() the first time
-        ABSL_RAW_CHECK(waitp != nullptr,
-                       "UnlockSlow is confused");  // about to sleep
-        do {    // must loop to release spinlock as reader count may change
-          v = mu_.load(std::memory_order_relaxed);
-          // decrement reader count if there are readers
-          intptr_t new_readers = (v >= kMuOne) ? v - kMuOne : v;
-          PerThreadSynch *new_h = nullptr;
-          if (do_enqueue) {
-            // If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then
-            // we must not retry here.  The initial attempt will always have
-            // succeeded; further attempts would enqueue us against *this due
-            // to Fer() handling.
-            do_enqueue = (waitp->cv_word == nullptr);
-            new_h = Enqueue(nullptr, waitp, new_readers, kMuIsCond);
-          }
-          intptr_t clear = kMuWrWait | kMuWriter;  // by default clear write bit
-          if ((v & kMuWriter) == 0 && ExactlyOneReader(v)) {  // last reader
-            clear = kMuWrWait | kMuReader;                    // clear read bit
-          }
-          nv = (v & kMuLow & ~clear & ~kMuSpin);
-          if (new_h != nullptr) {
-            nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
-          } else {  // new_h could be nullptr if we queued ourselves on a
-                    // CondVar
-            // In that case, we must place the reader count back in the mutex
-            // word, as Enqueue() did not store it in the new waiter.
-            nv |= new_readers & kMuHigh;
-          }
-          // release spinlock & our lock; retry if reader-count changed
-          // (writer count cannot change since we hold lock)
-        } while (!mu_.compare_exchange_weak(v, nv,
-                                            std::memory_order_release,
-                                            std::memory_order_relaxed));
-        break;
-      }
-
-      // There are waiters.
-      // Set h to the head of the circular waiter list.
-      PerThreadSynch *h = GetPerThreadSynch(v);
-      if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
-        // a reader but not the last
-        h->readers -= kMuOne;  // release our lock
-        intptr_t nv = v;       // normally just release spinlock
-        if (waitp != nullptr) {  // but waitp!=nullptr => must queue ourselves
-          PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
-          ABSL_RAW_CHECK(new_h != nullptr,
-                         "waiters disappeared during Enqueue()!");
-          nv &= kMuLow;
-          nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
-        }
-        mu_.store(nv, std::memory_order_release);  // release spinlock
-        // can release with a store because there were waiters
-        break;
-      }
-
-      // Either we didn't search before, or we marked the queue
-      // as "maybe_unlocking" and no one else should have changed it.
-      ABSL_RAW_CHECK(old_h == nullptr || h->maybe_unlocking,
-                     "Mutex queue changed beneath us");
-
-      // The lock is becoming free, and there's a waiter
-      if (old_h != nullptr &&
-          !old_h->may_skip) {                  // we used old_h as a terminator
-        old_h->may_skip = true;                // allow old_h to skip once more
-        ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
-        if (h != old_h && MuSameCondition(old_h, old_h->next)) {
-          old_h->skip = old_h->next;  // old_h not head & can skip to successor
-        }
-      }
-      if (h->next->waitp->how == kExclusive &&
-          Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) {
-        // easy case: writer with no condition; no need to search
-        pw = h;                       // wake w, the successor of h (=pw)
-        w = h->next;
-        w->wake = true;
-        // We are waking up a writer.  This writer may be racing against
-        // an already awake reader for the lock.  We want the
-        // writer to usually win this race, because if it doesn't, we can
-        // potentially keep admitting readers perpetually and the writer will
-        // starve.  Worse than that, this can also starve other readers if
-        // kMuWrWait gets set later.
-        wr_wait = kMuWrWait;
-      } else if (w != nullptr && (w->waitp->how == kExclusive || h == old_h)) {
-        // we found a waiter w to wake on a previous iteration and either it's
-        // a writer, or we've searched the entire list so we have all the
-        // readers.
-        if (pw == nullptr) {  // if w's predecessor is unknown, it must be h
-          pw = h;
-        }
-      } else {
-        // At this point we don't know all the waiters to wake, and the first
-        // waiter has a condition or is a reader.  We avoid searching over
-        // waiters we've searched on previous iterations by starting at
-        // old_h if it's set.  If old_h==h, there's no one to wakeup at all.
-        if (old_h == h) {      // we've searched before, and nothing's new
-                               // so there's no one to wake.
-          intptr_t nv = (v & ~(kMuReader|kMuWriter|kMuWrWait));
-          h->readers = 0;
-          h->maybe_unlocking = false;   // finished unlocking
-          if (waitp != nullptr) {       // we must queue ourselves and sleep
-            PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
-            nv &= kMuLow;
-            if (new_h != nullptr) {
-              nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
-            }  // else new_h could be nullptr if we queued ourselves on a
-               // CondVar
-          }
-          // release spinlock & lock
-          // can release with a store because there were waiters
-          mu_.store(nv, std::memory_order_release);
-          break;
-        }
-
-        // set up to walk the list
-        PerThreadSynch *w_walk;   // current waiter during list walk
-        PerThreadSynch *pw_walk;  // previous waiter during list walk
-        if (old_h != nullptr) {  // we've searched up to old_h before
-          pw_walk = old_h;
-          w_walk = old_h->next;
-        } else {            // no prior search, start at beginning
-          pw_walk =
-              nullptr;  // h->next's predecessor may change; don't record it
-          w_walk = h->next;
-        }
-
-        h->may_skip = false;  // ensure we never skip past h in future searches
-                              // even if other waiters are queued after it.
-        ABSL_RAW_CHECK(h->skip == nullptr, "illegal skip from head");
-
-        h->maybe_unlocking = true;  // we're about to scan the waiter list
-                                    // without the spinlock held.
-                                    // Enqueue must be conservative about
-                                    // priority queuing.
-
-        // We must release the spinlock to evaluate the conditions.
-        mu_.store(v, std::memory_order_release);  // release just spinlock
-        // can release with a store because there were waiters
-
-        // h is the last waiter queued, and w_walk the first unsearched waiter.
-        // Without the spinlock, the locations mu_ and h->next may now change
-        // underneath us, but since we hold the lock itself, the only legal
-        // change is to add waiters between h and w_walk.  Therefore, it's safe
-        // to walk the path from w_walk to h inclusive. (TryRemove() can remove
-        // a waiter anywhere, but it acquires both the spinlock and the Mutex)
-
-        old_h = h;        // remember we searched to here
-
-        // Walk the path up to and including h looking for waiters we can wake.
-        while (pw_walk != h) {
-          w_walk->wake = false;
-          if (w_walk->waitp->cond ==
-                  nullptr ||  // no condition => vacuously true OR
-              (w_walk->waitp->cond != known_false &&
-               // this thread's condition is not known false, AND
-               //  is in fact true
-               EvalConditionIgnored(this, w_walk->waitp->cond))) {
-            if (w == nullptr) {
-              w_walk->wake = true;    // can wake this waiter
-              w = w_walk;
-              pw = pw_walk;
-              if (w_walk->waitp->how == kExclusive) {
-                wr_wait = kMuWrWait;
-                break;                // bail if waking this writer
-              }
-            } else if (w_walk->waitp->how == kShared) {  // wake if a reader
-              w_walk->wake = true;
-            } else {   // writer with true condition
-              wr_wait = kMuWrWait;
-            }
-          } else {                  // can't wake; condition false
-            known_false = w_walk->waitp->cond;  // remember last false condition
-          }
-          if (w_walk->wake) {   // we're waking reader w_walk
-            pw_walk = w_walk;   // don't skip similar waiters
-          } else {              // not waking; skip as much as possible
-            pw_walk = Skip(w_walk);
-          }
-          // If pw_walk == h, then load of pw_walk->next can race with
-          // concurrent write in Enqueue(). However, at the same time
-          // we do not need to do the load, because we will bail out
-          // from the loop anyway.
-          if (pw_walk != h) {
-            w_walk = pw_walk->next;
-          }
-        }
-
-        continue;  // restart for(;;)-loop to wake up w or to find more waiters
-      }
-      ABSL_RAW_CHECK(pw->next == w, "pw not w's predecessor");
-      // The first (and perhaps only) waiter we've chosen to wake is w, whose
-      // predecessor is pw.  If w is a reader, we must wake all the other
-      // waiters with wake==true as well.  We may also need to queue
-      // ourselves if waitp != null.  The spinlock and the lock are still
-      // held.
-
-      // This traverses the list in [ pw->next, h ], where h is the head,
-      // removing all elements with wake==true and placing them in the
-      // singly-linked list wake_list.  Returns the new head.
-      h = DequeueAllWakeable(h, pw, &wake_list);
-
-      intptr_t nv = (v & kMuEvent) | kMuDesig;
-                                             // assume no waiters left,
-                                             // set kMuDesig for INV1a
-
-      if (waitp != nullptr) {  // we must queue ourselves and sleep
-        h = Enqueue(h, waitp, v, kMuIsCond);
-        // h is new last waiter; could be null if we queued ourselves on a
-        // CondVar
-      }
-
-      ABSL_RAW_CHECK(wake_list != kPerThreadSynchNull,
-                     "unexpected empty wake list");
-
-      if (h != nullptr) {  // there are waiters left
-        h->readers = 0;
-        h->maybe_unlocking = false;     // finished unlocking
-        nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);
-      }
-
-      // release both spinlock & lock
-      // can release with a store because there were waiters
-      mu_.store(nv, std::memory_order_release);
-      break;  // out of for(;;)-loop
-    }
-    // aggressive here; no one can proceed till we do
-    c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
-  }                            // end of for(;;)-loop
-
-  if (wake_list != kPerThreadSynchNull) {
-    int64_t enqueue_timestamp = wake_list->waitp->contention_start_cycles;
-    bool cond_waiter = wake_list->cond_waiter;
-    do {
-      wake_list = Wakeup(wake_list);              // wake waiters
-    } while (wake_list != kPerThreadSynchNull);
-    if (!cond_waiter) {
-      // Sample lock contention events only if the (first) waiter was trying to
-      // acquire the lock, not waiting on a condition variable or Condition.
-      int64_t wait_cycles =
-          base_internal::CycleClock::Now() - enqueue_timestamp;
-      mutex_tracer("slow release", this, wait_cycles);
-      ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
-      submit_profile_data(enqueue_timestamp);
-      ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
-    }
-  }
-}
-
-// Used by CondVar implementation to reacquire mutex after waking from
-// condition variable.  This routine is used instead of Lock() because the
-// waiting thread may have been moved from the condition variable queue to the
-// mutex queue without a wakeup, by Trans().  In that case, when the thread is
-// finally woken, the woken thread will believe it has been woken from the
-// condition variable (i.e., its PC will be in the CondVar code), when in
-// fact it has just been woken from the mutex.  Thus, it must enter the slow
-// path of the mutex in the same state as if it had just woken from the mutex.
-// That is, it must ensure that kMuDesig is cleared (INV1b).
-void Mutex::Trans(MuHow how) {
-  this->LockSlow(how, nullptr, kMuHasBlocked | kMuIsCond);
-}
-
-// Used by CondVar implementation to effectively wake thread w from the
-// condition variable.  If this mutex is free, we simply wake the thread.
-// It will later acquire the mutex with high probability.  Otherwise, we
-// enqueue thread w on this mutex.
-void Mutex::Fer(PerThreadSynch *w) {
-  SchedulingGuard::ScopedDisable disable_rescheduling;
-  int c = 0;
-  ABSL_RAW_CHECK(w->waitp->cond == nullptr,
-                 "Mutex::Fer while waiting on Condition");
-  ABSL_RAW_CHECK(!w->waitp->timeout.has_timeout(),
-                 "Mutex::Fer while in timed wait");
-  ABSL_RAW_CHECK(w->waitp->cv_word == nullptr,
-                 "Mutex::Fer with pending CondVar queueing");
-  for (;;) {
-    intptr_t v = mu_.load(std::memory_order_relaxed);
-    // Note: must not queue if the mutex is unlocked (nobody will wake it).
-    // For example, we can have only kMuWait (conditional) or maybe
-    // kMuWait|kMuWrWait.
-    // (v & conflicting) != 0 implies that the thread being woken cannot
-    // currently take the mutex, which in turn implies that someone else
-    // holds it and can wake w if we queue it.
-    const intptr_t conflicting =
-        kMuWriter | (w->waitp->how == kShared ? 0 : kMuReader);
-    if ((v & conflicting) == 0) {
-      w->next = nullptr;
-      w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
-      IncrementSynchSem(this, w);
-      return;
-    } else {
-      if ((v & (kMuSpin|kMuWait)) == 0) {       // no waiters
-        // This thread tries to become the one and only waiter.
-        PerThreadSynch *new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
-        ABSL_RAW_CHECK(new_h != nullptr,
-                       "Enqueue failed");  // we must queue ourselves
-        if (mu_.compare_exchange_strong(
-                v, reinterpret_cast<intptr_t>(new_h) | (v & kMuLow) | kMuWait,
-                std::memory_order_release, std::memory_order_relaxed)) {
-          return;
-        }
-      } else if ((v & kMuSpin) == 0 &&
-                 mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
-        PerThreadSynch *h = GetPerThreadSynch(v);
-        PerThreadSynch *new_h = Enqueue(h, w->waitp, v, kMuIsCond);
-        ABSL_RAW_CHECK(new_h != nullptr,
-                       "Enqueue failed");  // we must queue ourselves
-        do {
-          v = mu_.load(std::memory_order_relaxed);
-        } while (!mu_.compare_exchange_weak(
-            v,
-            (v & kMuLow & ~kMuSpin) | kMuWait |
-                reinterpret_cast<intptr_t>(new_h),
-            std::memory_order_release, std::memory_order_relaxed));
-        return;
-      }
-    }
-    c = synchronization_internal::MutexDelay(c, GENTLE);
-  }
-}
-
-void Mutex::AssertHeld() const {
-  if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
-    SynchEvent *e = GetSynchEvent(this);
-    ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
-                 static_cast<const void *>(this),
-                 (e == nullptr ? "" : e->name));
-  }
-}
-
-void Mutex::AssertReaderHeld() const {
-  if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
-    SynchEvent *e = GetSynchEvent(this);
-    ABSL_RAW_LOG(
-        FATAL, "thread should hold at least a read lock on Mutex %p %s",
-        static_cast<const void *>(this), (e == nullptr ? "" : e->name));
-  }
-}
-
-// -------------------------------- condition variables
-static const intptr_t kCvSpin = 0x0001L;   // spinlock protects waiter list
-static const intptr_t kCvEvent = 0x0002L;  // record events
-
-static const intptr_t kCvLow = 0x0003L;  // low order bits of CV
-
-// Hack to make constant values available to gdb pretty printer
-enum { kGdbCvSpin = kCvSpin, kGdbCvEvent = kCvEvent, kGdbCvLow = kCvLow, };
-
-static_assert(PerThreadSynch::kAlignment > kCvLow,
-              "PerThreadSynch::kAlignment must be greater than kCvLow");
-
-void CondVar::EnableDebugLog(const char *name) {
-  SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
-  e->log = true;
-  UnrefSynchEvent(e);
-}
-
-CondVar::~CondVar() {
-  if ((cv_.load(std::memory_order_relaxed) & kCvEvent) != 0) {
-    ForgetSynchEvent(&this->cv_, kCvEvent, kCvSpin);
-  }
-}
-
-// Remove thread s from the list of waiters on this condition variable.
-void CondVar::Remove(PerThreadSynch *s) {
-  SchedulingGuard::ScopedDisable disable_rescheduling;
-  intptr_t v;
-  int c = 0;
-  for (v = cv_.load(std::memory_order_relaxed);;
-       v = cv_.load(std::memory_order_relaxed)) {
-    if ((v & kCvSpin) == 0 &&  // attempt to acquire spinlock
-        cv_.compare_exchange_strong(v, v | kCvSpin,
-                                    std::memory_order_acquire,
-                                    std::memory_order_relaxed)) {
-      PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
-      if (h != nullptr) {
-        PerThreadSynch *w = h;
-        while (w->next != s && w->next != h) {  // search for thread
-          w = w->next;
-        }
-        if (w->next == s) {           // found thread; remove it
-          w->next = s->next;
-          if (h == s) {
-            h = (w == s) ? nullptr : w;
-          }
-          s->next = nullptr;
-          s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
-        }
-      }
-                                      // release spinlock
-      cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
-                std::memory_order_release);
-      return;
-    } else {
-      // try again after a delay
-      c = synchronization_internal::MutexDelay(c, GENTLE);
-    }
-  }
-}
-
-// Queue thread waitp->thread on condition variable word cv_word using
-// wait parameters waitp.
-// We split this into a separate routine, rather than simply doing it as part
-// of WaitCommon().  If we were to queue ourselves on the condition variable
-// before calling Mutex::UnlockSlow(), the Mutex code might be re-entered (via
-// the logging code, or via a Condition function) and might potentially attempt
-// to block this thread.  That would be a problem if the thread were already on
-// the condition variable waiter queue.  Thus, we use the waitp->cv_word
-// to tell the unlock code to call CondVarEnqueue() to queue the thread on the
-// condition variable queue just before the mutex is to be unlocked, and (most
-// importantly) after any call to an external routine that might re-enter the
-// mutex code.
-static void CondVarEnqueue(SynchWaitParams *waitp) {
-  // This thread might be transferred to the Mutex queue by Fer() when
-  // we are woken.  To make sure that is what happens, Enqueue() doesn't
-  // call CondVarEnqueue() again but instead uses its normal code.  We
-  // must do this before we queue ourselves so that cv_word will be null
-  // when seen by the dequeuer, who may wish immediately to requeue
-  // this thread on another queue.
-  std::atomic<intptr_t> *cv_word = waitp->cv_word;
-  waitp->cv_word = nullptr;
-
-  intptr_t v = cv_word->load(std::memory_order_relaxed);
-  int c = 0;
-  while ((v & kCvSpin) != 0 ||  // acquire spinlock
-         !cv_word->compare_exchange_weak(v, v | kCvSpin,
-                                         std::memory_order_acquire,
-                                         std::memory_order_relaxed)) {
-    c = synchronization_internal::MutexDelay(c, GENTLE);
-    v = cv_word->load(std::memory_order_relaxed);
-  }
-  ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
-  waitp->thread->waitp = waitp;      // prepare ourselves for waiting
-  PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
-  if (h == nullptr) {  // add this thread to waiter list
-    waitp->thread->next = waitp->thread;
-  } else {
-    waitp->thread->next = h->next;
-    h->next = waitp->thread;
-  }
-  waitp->thread->state.store(PerThreadSynch::kQueued,
-                             std::memory_order_relaxed);
-  cv_word->store((v & kCvEvent) | reinterpret_cast<intptr_t>(waitp->thread),
-                 std::memory_order_release);
-}
-
-bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
-  bool rc = false;          // return value; true iff we timed out
-
-  intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
-  Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;
-  ABSL_TSAN_MUTEX_PRE_UNLOCK(mutex, TsanFlags(mutex_how));
-
-  // maybe trace this call
-  intptr_t v = cv_.load(std::memory_order_relaxed);
-  cond_var_tracer("Wait", this);
-  if ((v & kCvEvent) != 0) {
-    PostSynchEvent(this, SYNCH_EV_WAIT);
-  }
-
-  // Release mu and wait on condition variable.
-  SynchWaitParams waitp(mutex_how, nullptr, t, mutex,
-                        Synch_GetPerThreadAnnotated(mutex), &cv_);
-  // UnlockSlow() will call CondVarEnqueue() just before releasing the
-  // Mutex, thus queuing this thread on the condition variable.  See
-  // CondVarEnqueue() for the reasons.
-  mutex->UnlockSlow(&waitp);
-
-  // wait for signal
-  while (waitp.thread->state.load(std::memory_order_acquire) ==
-         PerThreadSynch::kQueued) {
-    if (!Mutex::DecrementSynchSem(mutex, waitp.thread, t)) {
-      this->Remove(waitp.thread);
-      rc = true;
-    }
-  }
-
-  ABSL_RAW_CHECK(waitp.thread->waitp != nullptr, "not waiting when should be");
-  waitp.thread->waitp = nullptr;  // cleanup
-
-  // maybe trace this call
-  cond_var_tracer("Unwait", this);
-  if ((v & kCvEvent) != 0) {
-    PostSynchEvent(this, SYNCH_EV_WAIT_RETURNING);
-  }
-
-  // From a synchronization point of view, Wait is an unlock of the mutex
-  // followed by a lock of the mutex.  We annotated the start of the unlock
-  // at the beginning of this function.  Now, finish the unlock and annotate
-  // the lock of the mutex.  (Trans is effectively a lock.)
-  ABSL_TSAN_MUTEX_POST_UNLOCK(mutex, TsanFlags(mutex_how));
-  ABSL_TSAN_MUTEX_PRE_LOCK(mutex, TsanFlags(mutex_how));
-  mutex->Trans(mutex_how);  // Reacquire mutex
-  ABSL_TSAN_MUTEX_POST_LOCK(mutex, TsanFlags(mutex_how), 0);
-  return rc;
-}
-
-bool CondVar::WaitWithTimeout(Mutex *mu, absl::Duration timeout) {
-  return WaitWithDeadline(mu, DeadlineFromTimeout(timeout));
-}
-
-bool CondVar::WaitWithDeadline(Mutex *mu, absl::Time deadline) {
-  return WaitCommon(mu, KernelTimeout(deadline));
-}
-
-void CondVar::Wait(Mutex *mu) {
-  WaitCommon(mu, KernelTimeout::Never());
-}
-
-// Wake thread w.
-// If w was in a timed wait, or was not waiting via a Mutex (w->waitp->cvmu
-// == nullptr), w is released directly via its per-thread semaphore.
-// Otherwise, w is transferred to the Mutex via Mutex::Fer().
-void CondVar::Wakeup(PerThreadSynch *w) {
-  if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) {
-    // The waiting thread only needs to observe "w->state == kAvailable" to be
-    // released, so we must cache "cvmu" before clearing "next".
-    Mutex *mu = w->waitp->cvmu;
-    w->next = nullptr;
-    w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
-    Mutex::IncrementSynchSem(mu, w);
-  } else {
-    w->waitp->cvmu->Fer(w);
-  }
-}
-
-void CondVar::Signal() {
-  SchedulingGuard::ScopedDisable disable_rescheduling;
-  ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
-  intptr_t v;
-  int c = 0;
-  for (v = cv_.load(std::memory_order_relaxed); v != 0;
-       v = cv_.load(std::memory_order_relaxed)) {
-    if ((v & kCvSpin) == 0 &&  // attempt to acquire spinlock
-        cv_.compare_exchange_strong(v, v | kCvSpin,
-                                    std::memory_order_acquire,
-                                    std::memory_order_relaxed)) {
-      PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
-      PerThreadSynch *w = nullptr;
-      if (h != nullptr) {  // remove first waiter
-        w = h->next;
-        if (w == h) {
-          h = nullptr;
-        } else {
-          h->next = w->next;
-        }
-      }
-                                      // release spinlock
-      cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
-                std::memory_order_release);
-      if (w != nullptr) {
-        CondVar::Wakeup(w);                // wake waiter, if there was one
-        cond_var_tracer("Signal wakeup", this);
-      }
-      if ((v & kCvEvent) != 0) {
-        PostSynchEvent(this, SYNCH_EV_SIGNAL);
-      }
-      ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
-      return;
-    } else {
-      c = synchronization_internal::MutexDelay(c, GENTLE);
-    }
-  }
-  ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
-}
-
-void CondVar::SignalAll() {
-  ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
-  intptr_t v;
-  int c = 0;
-  for (v = cv_.load(std::memory_order_relaxed); v != 0;
-       v = cv_.load(std::memory_order_relaxed)) {
-    // empty the list if spinlock free
-    // We do this by simply setting the list to empty using
-    // compare and swap.   We then have the entire list in our hands,
-    // which cannot be changing since we grabbed it while no one
-    // held the lock.
-    if ((v & kCvSpin) == 0 &&
-        cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
-                                    std::memory_order_relaxed)) {
-      PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
-      if (h != nullptr) {
-        PerThreadSynch *w;
-        PerThreadSynch *n = h->next;
-        do {                          // for every thread, wake it up
-          w = n;
-          n = n->next;
-          CondVar::Wakeup(w);
-        } while (w != h);
-        cond_var_tracer("SignalAll wakeup", this);
-      }
-      if ((v & kCvEvent) != 0) {
-        PostSynchEvent(this, SYNCH_EV_SIGNALALL);
-      }
-      ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
-      return;
-    } else {
-      // try again after a delay
-      c = synchronization_internal::MutexDelay(c, GENTLE);
-    }
-  }
-  ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
-}
-
-void ReleasableMutexLock::Release() {
-  ABSL_RAW_CHECK(this->mu_ != nullptr,
-                 "ReleasableMutexLock::Release may only be called once");
-  this->mu_->Unlock();
-  this->mu_ = nullptr;
-}
-
-#ifdef ABSL_HAVE_THREAD_SANITIZER
-extern "C" void __tsan_read1(void *addr);
-#else
-#define __tsan_read1(addr)  // do nothing if TSan not enabled
-#endif
-
-// A function that just returns its argument, dereferenced
-static bool Dereference(void *arg) {
-  // ThreadSanitizer does not instrument this file for memory accesses.
-  // This function dereferences a user variable that can participate
-  // in a data race, so we need to manually tell TSan about this memory access.
-  __tsan_read1(arg);
-  return *(static_cast<bool *>(arg));
-}
-
-Condition::Condition() {}   // null constructor, used for kTrue only
-const Condition Condition::kTrue;
-
-Condition::Condition(bool (*func)(void *), void *arg)
-    : eval_(&CallVoidPtrFunction),
-      function_(func),
-      method_(nullptr),
-      arg_(arg) {}
-
-bool Condition::CallVoidPtrFunction(const Condition *c) {
-  return (*c->function_)(c->arg_);
-}
-
-Condition::Condition(const bool *cond)
-    : eval_(CallVoidPtrFunction),
-      function_(Dereference),
-      method_(nullptr),
-      // const_cast is safe since Dereference does not modify arg
-      arg_(const_cast<bool *>(cond)) {}
-
-bool Condition::Eval() const {
-  // eval_ == null for kTrue
-  return (this->eval_ == nullptr) || (*this->eval_)(this);
-}
-
-bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) {
-  if (a == nullptr) {
-    return b == nullptr || b->eval_ == nullptr;
-  }
-  if (b == nullptr || b->eval_ == nullptr) {
-    return a->eval_ == nullptr;
-  }
-  return a->eval_ == b->eval_ && a->function_ == b->function_ &&
-         a->arg_ == b->arg_ && a->method_ == b->method_;
-}
-
-ABSL_NAMESPACE_END
-}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/synchronization/mutex.h b/third_party/abseil_cpp/absl/synchronization/mutex.h
deleted file mode 100644
index 598d1e0617cf..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/mutex.h
+++ /dev/null
@@ -1,1084 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// -----------------------------------------------------------------------------
-// mutex.h
-// -----------------------------------------------------------------------------
-//
-// This header file defines a `Mutex` -- a mutually exclusive lock -- and the
-// most common type of synchronization primitive for facilitating locks on
-// shared resources. A mutex is used to prevent multiple threads from accessing
-// and/or writing to a shared resource concurrently.
-//
-// Unlike a `std::mutex`, the Abseil `Mutex` provides the following additional
-// features:
-//   * Conditional predicates intrinsic to the `Mutex` object
-//   * Shared/reader locks, in addition to standard exclusive/writer locks
-//   * Deadlock detection and debug support.
-//
-// The following helper classes are also defined within this file:
-//
-//  MutexLock - An RAII wrapper to acquire and release a `Mutex` for exclusive/
-//              write access within the current scope.
-//
-//  ReaderMutexLock
-//            - An RAII wrapper to acquire and release a `Mutex` for shared/read
-//              access within the current scope.
-//
-//  WriterMutexLock
-//            - Effectively an alias for `MutexLock` above, designed for use in
-//              distinguishing reader and writer locks within code.
-//
-// In addition to simple mutex locks, this file also defines ways to perform
-// locking under certain conditions.
-//
-//  Condition - (Preferred) Used to wait for a particular predicate that
-//              depends on state protected by the `Mutex` to become true.
-//  CondVar   - A lower-level variant of `Condition` that relies on
-//              application code to explicitly signal the `CondVar` when
-//              a condition has been met.
-//
-// See below for more information on using `Condition` or `CondVar`.
-//
-// Mutexes and mutex behavior can be quite complicated. The information within
-// this header file is limited, as a result. Please consult the Mutex guide for
-// more complete information and examples.
-
-#ifndef ABSL_SYNCHRONIZATION_MUTEX_H_
-#define ABSL_SYNCHRONIZATION_MUTEX_H_
-
-#include <atomic>
-#include <cstdint>
-#include <string>
-
-#include "absl/base/const_init.h"
-#include "absl/base/internal/identity.h"
-#include "absl/base/internal/low_level_alloc.h"
-#include "absl/base/internal/thread_identity.h"
-#include "absl/base/internal/tsan_mutex_interface.h"
-#include "absl/base/port.h"
-#include "absl/base/thread_annotations.h"
-#include "absl/synchronization/internal/kernel_timeout.h"
-#include "absl/synchronization/internal/per_thread_sem.h"
-#include "absl/time/time.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-class Condition;
-struct SynchWaitParams;
-
-// -----------------------------------------------------------------------------
-// Mutex
-// -----------------------------------------------------------------------------
-//
-// A `Mutex` is a non-reentrant (aka non-recursive) Mutually Exclusive lock
-// on some resource, typically a variable or data structure with associated
-// invariants. Proper usage of mutexes prevents concurrent access by different
-// threads to the same resource.
-//
-// A `Mutex` has two basic operations: `Mutex::Lock()` and `Mutex::Unlock()`.
-// The `Lock()` operation *acquires* a `Mutex` (in a state known as an
-// *exclusive* -- or write -- lock), while the `Unlock()` operation *releases* a
-// Mutex. During the span of time between the Lock() and Unlock() operations,
-// a mutex is said to be *held*. By design all mutexes support exclusive/write
-// locks, as this is the most common way to use a mutex.
-//
-// The `Mutex` state machine for basic lock/unlock operations is quite simple:
-//
-// |                | Lock()     | Unlock() |
-// |----------------+------------+----------|
-// | Free           | Exclusive  | invalid  |
-// | Exclusive      | blocks     | Free     |
-//
-// Attempts to `Unlock()` must originate from the thread that performed the
-// corresponding `Lock()` operation.
-//
-// An "invalid" operation is disallowed by the API. The `Mutex` implementation
-// is allowed to do anything on an invalid call, including but not limited to
-// crashing with a useful error message, silently succeeding, or corrupting
-// data structures. In debug mode, the implementation attempts to crash with a
-// useful error message.
-//
-// `Mutex` is not guaranteed to be "fair" in prioritizing waiting threads; it
-// is, however, approximately fair over long periods, and starvation-free for
-// threads at the same priority.
-//
-// The lock/unlock primitives are now annotated with lock annotations
-// defined in (base/thread_annotations.h). When writing multi-threaded code,
-// you should use lock annotations whenever possible to document your lock
-// synchronization policy. Besides acting as documentation, these annotations
-// also help compilers or static analysis tools to identify and warn about
-// issues that could potentially result in race conditions and deadlocks.
-//
-// For more information about the lock annotations, please see
-// [Thread Safety Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html)
-// in the Clang documentation.
-//
-// See also `MutexLock`, below, for scoped `Mutex` acquisition.
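-//
-// A minimal usage sketch (all names here are illustrative, not part of the
-// API):
-//
-//   absl::Mutex mu;
-//   int counter = 0;  // guarded by mu
-//
-//   void Increment() {
-//     mu.Lock();
-//     ++counter;      // exclusive access while mu is held
-//     mu.Unlock();
-//   }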
-
-class ABSL_LOCKABLE Mutex {
- public:
-  // Creates a `Mutex` that is not held by anyone. This constructor is
-  // typically used for Mutexes allocated on the heap or the stack.
-  //
-  // To create `Mutex` instances with static storage duration
-  // (e.g. a namespace-scoped or global variable), see
-  // `Mutex::Mutex(absl::kConstInit)` below instead.
-  Mutex();
-
-  // Creates a mutex with static storage duration.  A global variable
-  // constructed this way avoids the lifetime issues that can occur on program
-  // startup and shutdown.  (See absl/base/const_init.h.)
-  //
-  // For Mutexes allocated on the heap and stack, instead use the default
-  // constructor, which can interact more fully with the thread sanitizer.
-  //
-  // Example usage:
-  //   namespace foo {
-  //   ABSL_CONST_INIT Mutex mu(absl::kConstInit);
-  //   }
-  explicit constexpr Mutex(absl::ConstInitType);
-
-  ~Mutex();
-
-  // Mutex::Lock()
-  //
-  // Blocks the calling thread, if necessary, until this `Mutex` is free, and
-  // then acquires it exclusively. (This lock is also known as a "write lock.")
-  void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION();
-
-  // Mutex::Unlock()
-  //
-  // Releases this `Mutex` and returns it from the exclusive/write state to the
-  // free state. Caller must hold the `Mutex` exclusively.
-  void Unlock() ABSL_UNLOCK_FUNCTION();
-
-  // Mutex::TryLock()
-  //
-  // If the mutex can be acquired without blocking, does so exclusively and
-  // returns `true`. Otherwise, returns `false`. Returns `true` with high
-  // probability if the `Mutex` was free.
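-  //
-  // A hedged sketch of a non-blocking fast path (`mu_` and `stats_` are
-  // illustrative members, not part of this API):
-  //
-  //   if (mu_.TryLock()) {
-  //     ++stats_;      // acquired without blocking
-  //     mu_.Unlock();
-  //   } else {
-  //     // contended; fall back or retry later
-  //   }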
-  bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);
-
-  // Mutex::AssertHeld()
-  //
-  // Returns immediately if this thread holds the `Mutex` exclusively (in write
-  // mode). Otherwise, may report an error (typically by crashing with a
-  // diagnostic), or may return immediately.
-  void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK();
-
-  // ---------------------------------------------------------------------------
-  // Reader-Writer Locking
-  // ---------------------------------------------------------------------------
-
-  // A Mutex can also be used as a starvation-free reader-writer lock.
-  // Neither read-locks nor write-locks are reentrant/recursive to avoid
-  // potential client programming errors.
-  //
-  // The Mutex API provides `Writer*()` aliases for the existing `Lock()`,
-  // `Unlock()` and `TryLock()` methods for use within applications mixing
-  // reader/writer locks. Using `Reader*()` and `Writer*()` operations in this
-  // manner can make locking behavior clearer when mixing read and write modes.
-  //
-  // Introducing reader locks necessarily complicates the `Mutex` state
-  // machine somewhat. The table below illustrates the allowed state transitions
-  // of a mutex in such cases. Note that ReaderLock() may block even if the lock
-  // is held in shared mode; this occurs when another thread is blocked on a
-  // call to WriterLock().
-  //
-  // ---------------------------------------------------------------------------
-  //     Operation: WriterLock() Unlock()  ReaderLock()           ReaderUnlock()
-  // ---------------------------------------------------------------------------
-  // State
-  // ---------------------------------------------------------------------------
-  // Free           Exclusive    invalid   Shared(1)              invalid
-  // Shared(1)      blocks       invalid   Shared(2) or blocks    Free
-  // Shared(n) n>1  blocks       invalid   Shared(n+1) or blocks  Shared(n-1)
-  // Exclusive      blocks       Free      blocks                 invalid
-  // ---------------------------------------------------------------------------
-  //
-  // In comments below, "shared" refers to a state of Shared(n) for any n > 0.
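-  //
-  // A minimal reader/writer sketch (`mu_` and `table_` are illustrative
-  // members):
-  //
-  //   int Lookup(int k) {     // readers may run concurrently
-  //     mu_.ReaderLock();
-  //     int n = table_.count(k);
-  //     mu_.ReaderUnlock();
-  //     return n;
-  //   }
-  //
-  //   void Insert(int k) {    // writers are exclusive
-  //     mu_.WriterLock();
-  //     table_.insert(k);
-  //     mu_.WriterUnlock();
-  //   }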
-
-  // Mutex::ReaderLock()
-  //
-  // Blocks the calling thread, if necessary, until this `Mutex` is either free,
-  // or in shared mode, and then acquires a share of it. Note that
-  // `ReaderLock()` will block if some other thread has an exclusive/writer lock
-  // on the mutex.
-
-  void ReaderLock() ABSL_SHARED_LOCK_FUNCTION();
-
-  // Mutex::ReaderUnlock()
-  //
-  // Releases a read share of this `Mutex`. `ReaderUnlock` may return a mutex to
-  // the free state if this thread holds the last reader lock on the mutex. Note
-  // that you cannot call `ReaderUnlock()` on a mutex held in write mode.
-  void ReaderUnlock() ABSL_UNLOCK_FUNCTION();
-
-  // Mutex::ReaderTryLock()
-  //
-  // If the mutex can be acquired without blocking, acquires this mutex for
-  // shared access and returns `true`. Otherwise, returns `false`. Returns
-  // `true` with high probability if the `Mutex` was free or shared.
-  bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true);
-
-  // Mutex::AssertReaderHeld()
-  //
-  // Returns immediately if this thread holds the `Mutex` in at least shared
-  // mode (read mode). Otherwise, may report an error (typically by
-  // crashing with a diagnostic), or may return immediately.
-  void AssertReaderHeld() const ABSL_ASSERT_SHARED_LOCK();
-
-  // Mutex::WriterLock()
-  // Mutex::WriterUnlock()
-  // Mutex::WriterTryLock()
-  //
-  // Aliases for `Mutex::Lock()`, `Mutex::Unlock()`, and `Mutex::TryLock()`.
-  //
-  // These methods may be used (along with the complementary `Reader*()`
-  // methods) to distinguish simple exclusive `Mutex` usage (`Lock()`,
-  // etc.) from reader/writer lock usage.
-  void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { this->Lock(); }
-
-  void WriterUnlock() ABSL_UNLOCK_FUNCTION() { this->Unlock(); }
-
-  bool WriterTryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
-    return this->TryLock();
-  }
-
-  // ---------------------------------------------------------------------------
-  // Conditional Critical Regions
-  // ---------------------------------------------------------------------------
-
-  // Conditional usage of a `Mutex` can occur using two distinct paradigms:
-  //
-  //   * Use of `Mutex` member functions with `Condition` objects.
-  //   * Use of the separate `CondVar` abstraction.
-  //
-  // In general, prefer use of `Condition` and the `Mutex` member functions
-  // listed below over `CondVar`. When there are multiple threads waiting on
-  // distinctly different conditions, however, a battery of `CondVar`s may be
-  // more efficient. This section discusses use of `Condition` objects.
-  //
-  // `Mutex` contains member functions for performing lock operations only under
-  // certain conditions, expressed as objects of class `Condition`. For
-  // correctness, the `Condition` must return a boolean that is a pure
-  // function, only of state protected by
-  // the `Mutex`. The condition must be invariant w.r.t. environmental state
-  // such as thread, cpu id, or time, and must be `noexcept`. The condition will
-  // always be invoked with the mutex held in at least read mode, so you should
-  // not block it for long periods or sleep it on a timer.
-  //
-  // Since a condition must not depend directly on the current time, use
-  // `*WithTimeout()` member function variants to make your condition
-  // effectively true after a given duration, or `*WithDeadline()` variants to
-  // make your condition effectively true after a given time.
-  //
-  // The condition function should have no side-effects aside from debug
-  // logging; as a special exception, the function may acquire other mutexes
-  // provided it releases all those that it acquires.  (This exception was
-  // required to allow logging.)
-
-  // Mutex::Await()
-  //
-  // Unlocks this `Mutex` and blocks until simultaneously both `cond` is `true`
-  // and this `Mutex` can be reacquired, then reacquires this `Mutex` in the
-  // same mode in which it was previously held. If the condition is initially
-  // `true`, `Await()` *may* skip the release/re-acquire step.
-  //
-  // `Await()` requires that this thread holds this `Mutex` in some mode.
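-  //
-  // A minimal sketch (assumes an int `count_` protected by this `Mutex`):
-  //
-  //   mu_.Lock();
-  //   // ... other threads decrement count_ under mu_ ...
-  //   mu_.Await(Condition(+[](int *c) { return *c == 0; }, &count_));
-  //   // count_ == 0 here, and mu_ is still held
-  //   mu_.Unlock();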
-  void Await(const Condition &cond);
-
-  // Mutex::LockWhen()
-  // Mutex::ReaderLockWhen()
-  // Mutex::WriterLockWhen()
-  //
-  // Blocks until simultaneously both `cond` is `true` and this `Mutex` can
-  // be acquired, then atomically acquires this `Mutex`. `LockWhen()` is
-  // logically equivalent to `*Lock(); Await();` though they may have different
-  // performance characteristics.
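-  //
-  // A hedged sketch (`mu_` and `queue_` are illustrative members):
-  //
-  //   mu_.LockWhen(Condition(
-  //       +[](std::deque<int> *q) { return !q->empty(); }, &queue_));
-  //   int item = queue_.front();  // the condition holds and mu_ is held
-  //   queue_.pop_front();
-  //   mu_.Unlock();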
-  void LockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION();
-
-  void ReaderLockWhen(const Condition &cond) ABSL_SHARED_LOCK_FUNCTION();
-
-  void WriterLockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
-    this->LockWhen(cond);
-  }
-
-  // ---------------------------------------------------------------------------
-  // Mutex Variants with Timeouts/Deadlines
-  // ---------------------------------------------------------------------------
-
-  // Mutex::AwaitWithTimeout()
-  // Mutex::AwaitWithDeadline()
-  //
-  // Unlocks this `Mutex` and blocks until simultaneously:
-  //   - either `cond` is true or the {timeout has expired, deadline has passed}
-  //     and
-  //   - this `Mutex` can be reacquired,
-  // then reacquire this `Mutex` in the same mode in which it was previously
-  // held, returning `true` iff `cond` is `true` on return.
-  //
-  // If the condition is initially `true`, the implementation *may* skip the
-  // release/re-acquire step and return immediately.
-  //
-  // Deadlines in the past are equivalent to an immediate deadline.
-  // Negative timeouts are equivalent to a zero timeout.
-  //
-  // This method requires that this thread holds this `Mutex` in some mode.
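-  //
-  // A minimal sketch (assumes a bool `done_` protected by this `Mutex`):
-  //
-  //   mu_.Lock();
-  //   if (!mu_.AwaitWithTimeout(Condition(&done_), absl::Seconds(5))) {
-  //     // timed out; done_ is still false, and mu_ is held again
-  //   }
-  //   mu_.Unlock();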
-  bool AwaitWithTimeout(const Condition &cond, absl::Duration timeout);
-
-  bool AwaitWithDeadline(const Condition &cond, absl::Time deadline);
-
-  // Mutex::LockWhenWithTimeout()
-  // Mutex::ReaderLockWhenWithTimeout()
-  // Mutex::WriterLockWhenWithTimeout()
-  //
-  // Blocks until simultaneously both:
-  //   - either `cond` is `true` or the timeout has expired, and
-  //   - this `Mutex` can be acquired,
-  // then atomically acquires this `Mutex`, returning `true` iff `cond` is
-  // `true` on return.
-  //
-  // Negative timeouts are equivalent to a zero timeout.
-  bool LockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
-      ABSL_EXCLUSIVE_LOCK_FUNCTION();
-  bool ReaderLockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
-      ABSL_SHARED_LOCK_FUNCTION();
-  bool WriterLockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
-      ABSL_EXCLUSIVE_LOCK_FUNCTION() {
-    return this->LockWhenWithTimeout(cond, timeout);
-  }
-
-  // Mutex::LockWhenWithDeadline()
-  // Mutex::ReaderLockWhenWithDeadline()
-  // Mutex::WriterLockWhenWithDeadline()
-  //
-  // Blocks until simultaneously both:
-  //   - either `cond` is `true` or the deadline has been passed, and
-  //   - this `Mutex` can be acquired,
-  // then atomically acquires this Mutex, returning `true` iff `cond` is `true`
-  // on return.
-  //
-  // Deadlines in the past are equivalent to an immediate deadline.
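-  //
-  // A hedged sketch (`ready_` is an illustrative bool guarded by this Mutex):
-  //
-  //   absl::Time deadline = absl::Now() + absl::Seconds(5);
-  //   if (mu_.LockWhenWithDeadline(Condition(&ready_), deadline)) {
-  //     // ready_ became true before the deadline
-  //   } else {
-  //     // deadline passed; the Mutex is held but ready_ is false
-  //   }
-  //   mu_.Unlock();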
-  bool LockWhenWithDeadline(const Condition &cond, absl::Time deadline)
-      ABSL_EXCLUSIVE_LOCK_FUNCTION();
-  bool ReaderLockWhenWithDeadline(const Condition &cond, absl::Time deadline)
-      ABSL_SHARED_LOCK_FUNCTION();
-  bool WriterLockWhenWithDeadline(const Condition &cond, absl::Time deadline)
-      ABSL_EXCLUSIVE_LOCK_FUNCTION() {
-    return this->LockWhenWithDeadline(cond, deadline);
-  }
-
-  // ---------------------------------------------------------------------------
-  // Debug Support: Invariant Checking, Deadlock Detection, Logging.
-  // ---------------------------------------------------------------------------
-
-  // Mutex::EnableInvariantDebugging()
-  //
-  // If `invariant` != null and if invariant debugging has been enabled
-  // globally,
-  // cause `(*invariant)(arg)` to be called at moments when the invariant for
-  // this `Mutex` should hold (for example: just after acquire, just before
-  // release).
-  //
-  // The routine `invariant` should have no side-effects since it is not
-  // guaranteed how many times it will be called; it should check the invariant
-  // and crash if it does not hold. Enabling global invariant debugging may
-  // substantially reduce `Mutex` performance; it should be set only for
-  // non-production runs.  Optimization options may also disable invariant
-  // checks.
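-  //
-  // A minimal sketch (`CheckInvariant` and `MyClass` are illustrative):
-  //
-  //   static void CheckInvariant(void *arg) {
-  //     auto *self = static_cast<MyClass *>(arg);
-  //     assert(self->size_ >= 0);  // crash (in debug builds) on violation
-  //   }
-  //   ...
-  //   mu_.EnableInvariantDebugging(&CheckInvariant, this);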
-  void EnableInvariantDebugging(void (*invariant)(void *), void *arg);
-
-  // Mutex::EnableDebugLog()
-  //
-  // Cause all subsequent uses of this `Mutex` to be logged via
-  // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if no previous
-  // call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made.
-  //
-  // Note: This method substantially reduces `Mutex` performance.
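-  //
-  // A minimal sketch:
-  //
-  //   mu_.EnableDebugLog("my_mutex");  // later operations tagged "my_mutex"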
-  void EnableDebugLog(const char *name);
-
-  // Deadlock detection
-
-  // Mutex::ForgetDeadlockInfo()
-  //
-  // Forget any deadlock-detection information previously gathered
-  // about this `Mutex`. Call this method in debug mode when the lock ordering
-  // of a `Mutex` changes.
-  void ForgetDeadlockInfo();
-
-  // Mutex::AssertNotHeld()
-  //
-  // Returns immediately if this thread does not hold this `Mutex` in any
-  // mode; otherwise, may report an error (typically by crashing with a
-  // diagnostic), or may return immediately.
-  //
-  // Currently this check is performed only if all of the following hold:
-  //    - running in debug mode
-  //    - SetMutexDeadlockDetectionMode() has been set to kReport or kAbort
-  //    - the number of locks concurrently held by this thread is not large.
-  void AssertNotHeld() const;
-
-  // Special cases.
-
-  // A `MuHow` is a constant that indicates how a lock should be acquired.
-  // Internal implementation detail.  Clients should ignore.
-  typedef const struct MuHowS *MuHow;
-
-  // Mutex::InternalAttemptToUseMutexInFatalSignalHandler()
-  //
-  // Causes the `Mutex` implementation to prepare itself for re-entry caused by
-  // future use of `Mutex` within a fatal signal handler. This method is
-  // intended for use only for last-ditch attempts to log crash information.
-  // It does not guarantee that attempts to use Mutexes within the handler will
-  // not deadlock; it merely makes other faults less likely.
-  //
-  // WARNING:  This routine must be invoked from a signal handler, and the
-  // signal handler must either loop forever or terminate the process.
-  // Attempts to return from (or `longjmp` out of) the signal handler once this
-  // call has been made may cause arbitrary program behaviour including
-  // crashes and deadlocks.
-  static void InternalAttemptToUseMutexInFatalSignalHandler();
-
- private:
-  std::atomic<intptr_t> mu_;  // The Mutex state.
-
-  // Post()/Wait() versus associated PerThreadSem; in class for required
-  // friendship with PerThreadSem.
-  static inline void IncrementSynchSem(Mutex *mu,
-                                       base_internal::PerThreadSynch *w);
-  static inline bool DecrementSynchSem(
-      Mutex *mu, base_internal::PerThreadSynch *w,
-      synchronization_internal::KernelTimeout t);
-
-  // slow path acquire
-  void LockSlowLoop(SynchWaitParams *waitp, int flags);
-  // wrappers around LockSlowLoop()
-  bool LockSlowWithDeadline(MuHow how, const Condition *cond,
-                            synchronization_internal::KernelTimeout t,
-                            int flags);
-  void LockSlow(MuHow how, const Condition *cond,
-                int flags) ABSL_ATTRIBUTE_COLD;
-  // slow path release
-  void UnlockSlow(SynchWaitParams *waitp) ABSL_ATTRIBUTE_COLD;
-  // Common code between Await() and AwaitWithTimeout/Deadline()
-  bool AwaitCommon(const Condition &cond,
-                   synchronization_internal::KernelTimeout t);
-  // Attempt to remove thread s from queue.
-  void TryRemove(base_internal::PerThreadSynch *s);
-  // Block a thread on mutex.
-  void Block(base_internal::PerThreadSynch *s);
-  // Wake a thread; return successor.
-  base_internal::PerThreadSynch *Wakeup(base_internal::PerThreadSynch *w);
-
-  friend class CondVar;   // for access to Trans()/Fer().
-  void Trans(MuHow how);  // used for CondVar->Mutex transfer
-  void Fer(
-      base_internal::PerThreadSynch *w);  // used for CondVar->Mutex transfer
-
-  // Catch the error of writing Mutex when intending MutexLock.
-  Mutex(const volatile Mutex * /*ignored*/) {}  // NOLINT(runtime/explicit)
-
-  Mutex(const Mutex&) = delete;
-  Mutex& operator=(const Mutex&) = delete;
-};
-
-// -----------------------------------------------------------------------------
-// Mutex RAII Wrappers
-// -----------------------------------------------------------------------------
-
-// MutexLock
-//
-// `MutexLock` is a helper class, which acquires and releases a `Mutex` via
-// RAII.
-//
-// Example:
-//
-// class Foo {
-//  public:
-//   Foo::Bar* Baz() {
-//     MutexLock lock(&mu_);
-//     ...
-//     return bar;
-//   }
-//
-//  private:
-//   Mutex mu_;
-// };
-class ABSL_SCOPED_LOCKABLE MutexLock {
- public:
-  // Constructors
-
-  // Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is
-  // guaranteed to be locked when this object is constructed. Requires that
-  // `mu` be dereferenceable.
-  explicit MutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
-    this->mu_->Lock();
-  }
-
-  // Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to
-  // the above, the condition given by `cond` is also guaranteed to hold when
-  // this object is constructed.
-  explicit MutexLock(Mutex *mu, const Condition &cond)
-      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
-      : mu_(mu) {
-    this->mu_->LockWhen(cond);
-  }
-
-  MutexLock(const MutexLock &) = delete;  // NOLINT(runtime/mutex)
-  MutexLock(MutexLock&&) = delete;  // NOLINT(runtime/mutex)
-  MutexLock& operator=(const MutexLock&) = delete;
-  MutexLock& operator=(MutexLock&&) = delete;
-
-  ~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); }
-
- private:
-  Mutex *const mu_;
-};
-
-// ReaderMutexLock
-//
-// The `ReaderMutexLock` is a helper class, like `MutexLock`, which acquires and
-// releases a shared lock on a `Mutex` via RAII.
-class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
- public:
-  explicit ReaderMutexLock(Mutex *mu) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
-    mu->ReaderLock();
-  }
-
-  explicit ReaderMutexLock(Mutex *mu, const Condition &cond)
-      ABSL_SHARED_LOCK_FUNCTION(mu)
-      : mu_(mu) {
-    mu->ReaderLockWhen(cond);
-  }
-
-  ReaderMutexLock(const ReaderMutexLock&) = delete;
-  ReaderMutexLock(ReaderMutexLock&&) = delete;
-  ReaderMutexLock& operator=(const ReaderMutexLock&) = delete;
-  ReaderMutexLock& operator=(ReaderMutexLock&&) = delete;
-
-  ~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->ReaderUnlock(); }
-
- private:
-  Mutex *const mu_;
-};
-
-// WriterMutexLock
-//
-// The `WriterMutexLock` is a helper class, like `MutexLock`, which acquires and
-// releases a write (exclusive) lock on a `Mutex` via RAII.
-class ABSL_SCOPED_LOCKABLE WriterMutexLock {
- public:
-  explicit WriterMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
-      : mu_(mu) {
-    mu->WriterLock();
-  }
-
-  explicit WriterMutexLock(Mutex *mu, const Condition &cond)
-      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
-      : mu_(mu) {
-    mu->WriterLockWhen(cond);
-  }
-
-  WriterMutexLock(const WriterMutexLock&) = delete;
-  WriterMutexLock(WriterMutexLock&&) = delete;
-  WriterMutexLock& operator=(const WriterMutexLock&) = delete;
-  WriterMutexLock& operator=(WriterMutexLock&&) = delete;
-
-  ~WriterMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->WriterUnlock(); }
-
- private:
-  Mutex *const mu_;
-};
-
-// -----------------------------------------------------------------------------
-// Condition
-// -----------------------------------------------------------------------------
-//
-// As noted above, `Mutex` contains a number of member functions which take a
-// `Condition` as an argument; clients can wait for conditions to become `true`
-// before attempting to acquire the mutex. These sections are known as
-// "condition critical" sections. To use a `Condition`, you simply need to
-// construct it, and use within an appropriate `Mutex` member function;
-// everything else in the `Condition` class is an implementation detail.
-//
-// A `Condition` is specified as a function pointer which returns a boolean.
-// `Condition` functions should be pure functions -- their results should depend
-// only on passed arguments, should not consult any external state (such as
-// clocks), and should have no side-effects, aside from debug logging. Any
-// objects that the function may access should be limited to those which are
-// constant while the mutex is blocked on the condition (e.g. a stack variable),
-// or objects of state protected explicitly by the mutex.
-//
-// No matter which construction is used for `Condition`, the underlying
-// function pointer / functor / callable must not throw any
-// exceptions. Correctness of `Mutex` / `Condition` is not guaranteed in
-// the face of a throwing `Condition`. (When Abseil is allowed to depend
-// on C++17, these function pointers will be explicitly marked
-// `noexcept`; until then this requirement cannot be enforced in the
-// type system.)
-//
-// Note: to use a `Condition`, you need only construct it and pass it to a
-// suitable `Mutex` member function, such as `Mutex::Await()`, or to the
-// constructor of one of the scope guard classes.
-//
-// Example using LockWhen/Unlock:
-//
-//   // assume count_ is not internal reference count
-//   int count_ ABSL_GUARDED_BY(mu_);
-//   Condition count_is_zero(+[](int *count) { return *count == 0; }, &count_);
-//
-//   mu_.LockWhen(count_is_zero);
-//   // ...
-//   mu_.Unlock();
-//
-// Example using a scope guard:
-//
-//   {
-//     MutexLock lock(&mu_, count_is_zero);
-//     // ...
-//   }
-//
-// When multiple threads are waiting on exactly the same condition, make sure
-// that they are constructed with the same parameters (same pointer to function
-// + arg, or same pointer to object + method), so that the mutex implementation
-// can avoid redundantly evaluating the same condition for each thread.
-class Condition {
- public:
-  // A Condition that returns the result of "(*func)(arg)"
-  Condition(bool (*func)(void *), void *arg);
-
-  // Templated version for people who are averse to casts.
-  //
-  // To use a lambda, prepend it with unary plus, which converts the lambda
-  // into a function pointer:
-  //     Condition(+[](T* t) { return ...; }, arg).
-  //
-  // Note: lambdas in this case must contain no bound variables.
-  //
-  // See class comment for performance advice.
-  template<typename T>
-  Condition(bool (*func)(T *), T *arg);
-
-  // Templated version for invoking a method that returns a `bool`.
-  //
-  // `Condition(object, &Class::Method)` constructs a `Condition` that evaluates
-  // `object->Method()`.
-  //
-  // Implementation Note: `absl::internal::identity` is used to allow methods to
-  // come from base classes. A simpler signature like
-  // `Condition(T*, bool (T::*)())` does not suffice.
-  template<typename T>
-  Condition(T *object, bool (absl::internal::identity<T>::type::* method)());
-
-  // Same as above, for const members
-  template<typename T>
-  Condition(const T *object,
-            bool (absl::internal::identity<T>::type::* method)() const);
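-
-  // A hedged sketch of the method form (`Queue` and `queue` are illustrative):
-  //
-  //   class Queue {
-  //    public:
-  //     bool Empty() const { return q_.empty(); }
-  //     ...
-  //   };
-  //   Queue queue;
-  //   mu_.Await(Condition(&queue, &Queue::Empty));  // wait until queue.Empty()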
-
-  // A Condition that returns the value of `*cond`
-  explicit Condition(const bool *cond);
-
-  // Templated version for invoking a functor that returns a `bool`.
-  // This approach accepts pointers to non-mutable lambdas, `std::function`,
-// the result of `std::bind` and user-defined functors that define
-  // `bool F::operator()() const`.
-  //
-  // Example:
-  //
-  //   auto reached = [this, current]() {
-  //     mu_.AssertReaderHeld();                // For annotalysis.
-  //     return processed_ >= current;
-  //   };
-  //   mu_.Await(Condition(&reached));
-  //
-  // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReaderHeld()" in
-  // the lambda, as it may be called when the mutex is being unlocked from a
-  // scope holding only a reader lock, in which case the assertion would fail
-  // and crash the binary.
-
-  // See class comment for performance advice. In particular, if there
-  // might be more than one waiter for the same condition, make sure
-  // that all waiters construct the condition with the same pointers.
-
-  // Implementation note: The second template parameter ensures that this
-  // constructor doesn't participate in overload resolution if T doesn't have
-  // `bool operator() const`.
-  template <typename T, typename E = decltype(
-      static_cast<bool (T::*)() const>(&T::operator()))>
-  explicit Condition(const T *obj)
-      : Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {}
-
-  // A Condition that always returns `true`.
-  static const Condition kTrue;
-
-  // Evaluates the condition.
-  bool Eval() const;
-
-  // Returns `true` if the two conditions are guaranteed to return the same
-  // value if evaluated at the same time, `false` if the evaluation *may* return
-  // different results.
-  //
-  // Two `Condition` values are guaranteed equal if both their `func` and `arg`
-  // components are the same. A null pointer is equivalent to a `true`
-  // condition.
-  static bool GuaranteedEqual(const Condition *a, const Condition *b);
-
- private:
-  typedef bool (*InternalFunctionType)(void * arg);
-  typedef bool (Condition::*InternalMethodType)();
-  typedef bool (*InternalMethodCallerType)(void * arg,
-                                           InternalMethodType internal_method);
-
-  bool (*eval_)(const Condition*);  // Actual evaluator
-  InternalFunctionType function_;   // function taking pointer returning bool
-  InternalMethodType method_;       // method returning bool
-  void *arg_;                       // arg of function_ or object of method_
-
-  Condition();        // null constructor used only to create kTrue
-
-  // Various functions eval_ can point to:
-  static bool CallVoidPtrFunction(const Condition*);
-  template <typename T> static bool CastAndCallFunction(const Condition* c);
-  template <typename T> static bool CastAndCallMethod(const Condition* c);
-};
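As a hedged illustration of the shared-condition advice above (this sketch is not part of the original header; `QueueNonEmpty`, `Consumer`, and the queue type are invented), every consumer builds its `Condition` from the same function pointer and argument, so `GuaranteedEqual()` lets the implementation evaluate the predicate once for the whole group:

    #include <deque>
    #include "absl/synchronization/mutex.h"

    // Each waiter passes the identical func + arg pair, so Mutex can treat
    // all of these Conditions as equivalent.
    static bool QueueNonEmpty(std::deque<int>* q) { return !q->empty(); }

    void Consumer(absl::Mutex* mu, std::deque<int>* queue) {
      mu->LockWhen(absl::Condition(QueueNonEmpty, queue));
      int item = queue->front();
      queue->pop_front();
      mu->Unlock();
      // ... process `item` outside the critical section ...
    }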
-
-// -----------------------------------------------------------------------------
-// CondVar
-// -----------------------------------------------------------------------------
-//
-// A condition variable, reflecting state evaluated separately outside of the
-// `Mutex` object, which can be signaled to wake callers.
-// This class is not normally needed; use `Mutex` member functions such as
-// `Mutex::Await()` and intrinsic `Condition` abstractions. In rare cases
-// with many threads and many conditions, `CondVar` may be faster.
-//
-// The implementation may deliver signals to any condition variable at
-// any time, even when no call to `Signal()` or `SignalAll()` is made; as a
-// result, upon being awoken, you must check the logical condition you have
-// been waiting upon.
-//
-// Examples:
-//
-// Usage for a thread waiting for some condition C protected by mutex mu:
-//       mu.Lock();
-//       while (!C) { cv->Wait(&mu); }        // releases and reacquires mu
-//       //  C holds; process data
-//       mu.Unlock();
-//
-// Usage for a thread that wakes waiters of C:
-//       mu.Lock();
-//       // process data, possibly establishing C
-//       if (C) { cv->Signal(); }
-//       mu.Unlock();
-//
-// If C may be useful to more than one waiter, use `SignalAll()` instead of
-// `Signal()`.
-//
-// With this implementation it is efficient to use `Signal()/SignalAll()` inside
-// the locked region; this usage can make reasoning about your program easier.
-//
-class CondVar {
- public:
-  // A `CondVar` allocated on the heap or on the stack can use this
-  // constructor.
-  CondVar();
-  ~CondVar();
-
-  // CondVar::Wait()
-  //
-  // Atomically releases a `Mutex` and blocks on this condition variable.
-  // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
-  // spurious wakeup), then reacquires the `Mutex` and returns.
-  //
-  // Requires and ensures that the current thread holds the `Mutex`.
-  void Wait(Mutex *mu);
-
-  // CondVar::WaitWithTimeout()
-  //
-  // Atomically releases a `Mutex` and blocks on this condition variable.
-  // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
-  // spurious wakeup), or until the timeout has expired, then reacquires
-  // the `Mutex` and returns.
-  //
-  // Returns true if the timeout has expired without this `CondVar`
-  // being signalled in any manner. If both the timeout has expired
-  // and this `CondVar` has been signalled, the implementation is free
-  // to return `true` or `false`.
-  //
-  // Requires and ensures that the current thread holds the `Mutex`.
-  bool WaitWithTimeout(Mutex *mu, absl::Duration timeout);
-
-  // CondVar::WaitWithDeadline()
-  //
-  // Atomically releases a `Mutex` and blocks on this condition variable.
-  // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
-  // spurious wakeup), or until the deadline has passed, then reacquires
-  // the `Mutex` and returns.
-  //
-  // Deadlines in the past are equivalent to an immediate deadline.
-  //
-  // Returns true if the deadline has passed without this `CondVar`
-  // being signalled in any manner. If both the deadline has passed
-  // and this `CondVar` has been signalled, the implementation is free
-  // to return `true` or `false`.
-  //
-  // Requires and ensures that the current thread holds the `Mutex`.
-  bool WaitWithDeadline(Mutex *mu, absl::Time deadline);
-
-  // CondVar::Signal()
-  //
-  // Signal this `CondVar`; wake at least one waiter if one exists.
-  void Signal();
-
-  // CondVar::SignalAll()
-  //
-  // Signal this `CondVar`; wake all waiters.
-  void SignalAll();
-
-  // CondVar::EnableDebugLog()
-  //
-  // Causes all subsequent uses of this `CondVar` to be logged via
-  // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if `name != nullptr`.
-  // Note: this method substantially reduces `CondVar` performance.
-  void EnableDebugLog(const char *name);
-
- private:
-  bool WaitCommon(Mutex *mutex, synchronization_internal::KernelTimeout t);
-  void Remove(base_internal::PerThreadSynch *s);
-  void Wakeup(base_internal::PerThreadSynch *w);
-  std::atomic<intptr_t> cv_;  // Condition variable state.
-  CondVar(const CondVar&) = delete;
-  CondVar& operator=(const CondVar&) = delete;
-};
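A minimal sketch of the deadline idiom (the `WaitForFlag` helper is hypothetical): computing one `absl::Time` up front bounds the total wait across spurious wakeups, whereas passing a fresh `Duration` to `WaitWithTimeout()` on every loop iteration would restart the clock each time:

    #include "absl/synchronization/mutex.h"
    #include "absl/time/clock.h"

    // Returns true if *flag became true before `max_wait` elapsed.
    bool WaitForFlag(absl::Mutex* mu, absl::CondVar* cv, bool* flag,
                     absl::Duration max_wait) {
      const absl::Time deadline = absl::Now() + max_wait;
      absl::MutexLock lock(mu);
      while (!*flag) {
        if (cv->WaitWithDeadline(mu, deadline)) {
          return *flag;  // Deadline passed; re-check the logical condition.
        }
      }
      return true;
    }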
-
-
-// Variants of MutexLock.
-//
-// If you find yourself using one of these, consider instead using
-// Mutex::Unlock() and/or if-statements for clarity.
-
-// MutexLockMaybe
-//
-// MutexLockMaybe is like MutexLock, but is a no-op when mu is null.
-class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
- public:
-  explicit MutexLockMaybe(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
-      : mu_(mu) {
-    if (this->mu_ != nullptr) {
-      this->mu_->Lock();
-    }
-  }
-
-  explicit MutexLockMaybe(Mutex *mu, const Condition &cond)
-      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
-      : mu_(mu) {
-    if (this->mu_ != nullptr) {
-      this->mu_->LockWhen(cond);
-    }
-  }
-
-  ~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() {
-    if (this->mu_ != nullptr) { this->mu_->Unlock(); }
-  }
-
- private:
-  Mutex *const mu_;
-  MutexLockMaybe(const MutexLockMaybe&) = delete;
-  MutexLockMaybe(MutexLockMaybe&&) = delete;
-  MutexLockMaybe& operator=(const MutexLockMaybe&) = delete;
-  MutexLockMaybe& operator=(MutexLockMaybe&&) = delete;
-};
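For illustration only (the sampling function and its parameters are invented), `MutexLockMaybe` suits APIs where the mutex itself is optional:

    // `maybe_mu` may be nullptr, e.g. when a caller opts out of
    // synchronization; the guard then degenerates to a no-op.
    void RecordSample(absl::Mutex* maybe_mu, int* counter) {
      absl::MutexLockMaybe lock(maybe_mu);
      ++*counter;
    }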
-
-// ReleasableMutexLock
-//
-// ReleasableMutexLock is like MutexLock, but permits `Release()` of its
-// mutex before destruction. `Release()` may be called at most once.
-class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
- public:
-  explicit ReleasableMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
-      : mu_(mu) {
-    this->mu_->Lock();
-  }
-
-  explicit ReleasableMutexLock(Mutex *mu, const Condition &cond)
-      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
-      : mu_(mu) {
-    this->mu_->LockWhen(cond);
-  }
-
-  ~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() {
-    if (this->mu_ != nullptr) { this->mu_->Unlock(); }
-  }
-
-  void Release() ABSL_UNLOCK_FUNCTION();
-
- private:
-  Mutex *mu_;
-  ReleasableMutexLock(const ReleasableMutexLock&) = delete;
-  ReleasableMutexLock(ReleasableMutexLock&&) = delete;
-  ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
-  ReleasableMutexLock& operator=(ReleasableMutexLock&&) = delete;
-};
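A sketch of the early-release pattern, assuming `<vector>` and this header are included (`LogEmptyQueue` and `Drain` are hypothetical names); every other exit from the scope remains covered by the destructor:

    void LogEmptyQueue();  // Hypothetical; must not be called with the lock held.

    void Drain(absl::Mutex* mu, std::vector<int>* items) {
      absl::ReleasableMutexLock lock(mu);
      if (items->empty()) {
        lock.Release();   // Let other threads in before the unlocked work.
        LogEmptyQueue();
        return;
      }
      items->pop_back();  // Still holding the lock here.
    }                     // Destructor unlocks unless Release() already ran.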
-
-inline Mutex::Mutex() : mu_(0) {
-  ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
-}
-
-inline constexpr Mutex::Mutex(absl::ConstInitType) : mu_(0) {}
-
-inline CondVar::CondVar() : cv_(0) {}
-
-// static
-template <typename T>
-bool Condition::CastAndCallMethod(const Condition *c) {
-  typedef bool (T::*MemberType)();
-  MemberType rm = reinterpret_cast<MemberType>(c->method_);
-  T *x = static_cast<T *>(c->arg_);
-  return (x->*rm)();
-}
-
-// static
-template <typename T>
-bool Condition::CastAndCallFunction(const Condition *c) {
-  typedef bool (*FuncType)(T *);
-  FuncType fn = reinterpret_cast<FuncType>(c->function_);
-  T *x = static_cast<T *>(c->arg_);
-  return (*fn)(x);
-}
-
-template <typename T>
-inline Condition::Condition(bool (*func)(T *), T *arg)
-    : eval_(&CastAndCallFunction<T>),
-      function_(reinterpret_cast<InternalFunctionType>(func)),
-      method_(nullptr),
-      arg_(const_cast<void *>(static_cast<const void *>(arg))) {}
-
-template <typename T>
-inline Condition::Condition(T *object,
-                            bool (absl::internal::identity<T>::type::*method)())
-    : eval_(&CastAndCallMethod<T>),
-      function_(nullptr),
-      method_(reinterpret_cast<InternalMethodType>(method)),
-      arg_(object) {}
-
-template <typename T>
-inline Condition::Condition(const T *object,
-                            bool (absl::internal::identity<T>::type::*method)()
-                                const)
-    : eval_(&CastAndCallMethod<T>),
-      function_(nullptr),
-      method_(reinterpret_cast<InternalMethodType>(method)),
-      arg_(reinterpret_cast<void *>(const_cast<T *>(object))) {}
-
-// Register a hook for profiling support.
-//
-// The function pointer registered here will be called whenever a mutex is
-// contended.  The callback is given the absl/base/cycleclock.h timestamp when
-// waiting began.
-//
-// Calls to this function do not race or block, but there is no ordering
-// guaranteed between calls to this function and calls to the provided hook.
-// In particular, the previously registered hook may still be called for some
-// time after this function returns.
-void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp));
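A minimal hook, assuming nothing beyond the declaration above (the counter and function names are invented); the callback stays trivial because it runs on every contended acquisition:

    #include <atomic>
    #include <cstdint>

    std::atomic<int64_t> contention_events{0};

    void CountContention(int64_t /*wait_began_timestamp*/) {
      contention_events.fetch_add(1, std::memory_order_relaxed);
    }

    // At startup:
    //   absl::RegisterMutexProfiler(&CountContention);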
-
-// Register a hook for Mutex tracing.
-//
-// The function pointer registered here will be called whenever a mutex is
-// contended.  The callback is given an opaque handle to the contended mutex,
-// an event name, and the number of wait cycles (as measured by
-// //absl/base/internal/cycleclock.h, which may not be real "cycle"
-// counts).
-//
-// The only event name currently sent is "slow release".
-//
-// This has the same memory ordering concerns as RegisterMutexProfiler() above.
-void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
-                                    int64_t wait_cycles));
-
-// TODO(gfalcon): Combine RegisterMutexProfiler() and RegisterMutexTracer()
-// into a single interface, since they are only ever called in pairs.
-
-// Register a hook for CondVar tracing.
-//
-// The function pointer registered here will be called on various CondVar
-// events.  The callback is given an opaque handle to the CondVar object and
-// a string identifying the event.  This is thread-safe, but only a single
-// tracer can be registered.
-//
-// Events that can be sent are "Wait", "Unwait", "Signal wakeup", and
-// "SignalAll wakeup".
-//
-// This has the same memory ordering concerns as RegisterMutexProfiler() above.
-void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv));
-
-// Register a hook for symbolizing stack traces in deadlock detector reports.
-//
-// 'pc' is the program counter being symbolized, 'out' is the buffer to write
-// into, and 'out_size' is the size of the buffer.  This function can return
-// false if symbolizing failed, or true if a NUL-terminated symbol was written
-// to 'out'.
-//
-// This has the same memory ordering concerns as RegisterMutexProfiler() above.
-//
-// DEPRECATED: The default symbolizer function is absl::Symbolize() and the
-// ability to register a different hook for symbolizing stack traces will be
-// removed on or after 2023-05-01.
-ABSL_DEPRECATED("absl::RegisterSymbolizer() is deprecated and will be removed "
-                "on or after 2023-05-01")
-void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size));
-
-// EnableMutexInvariantDebugging()
-//
-// Enable or disable global support for Mutex invariant debugging.  If enabled,
-// then invariant predicates can be registered per-Mutex for debug checking.
-// See Mutex::EnableInvariantDebugging().
-void EnableMutexInvariantDebugging(bool enabled);
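A sketch tying the global switch to the per-Mutex registration it mentions (the `Account` type is invented; `ABSL_RAW_CHECK` assumes absl/base/internal/raw_logging.h is included):

    struct Account {
      absl::Mutex mu;
      int64_t credits = 0;  // Guarded by mu.
      int64_t debits = 0;   // Guarded by mu.
    };

    void CheckBalanced(void* arg) {
      auto* a = static_cast<Account*>(arg);
      ABSL_RAW_CHECK(a->debits <= a->credits, "account overdrawn");
    }

    // In a debug build, at startup:
    //   absl::EnableMutexInvariantDebugging(true);
    //   account.mu.EnableInvariantDebugging(&CheckBalanced, &account);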
-
-// When in debug mode, and when the feature has been enabled globally, the
-// implementation will keep track of lock ordering and complain (or optionally
-// crash) if a cycle is detected in the acquired-before graph.
-
-// Possible modes of operation for the deadlock detector in debug mode.
-enum class OnDeadlockCycle {
-  kIgnore,  // Neither report on nor attempt to track cycles in lock ordering
-  kReport,  // Report lock cycles to stderr when detected
-  kAbort,   // Report lock cycles to stderr when detected, then abort
-};
-
-// SetMutexDeadlockDetectionMode()
-//
-// Enable or disable global support for detection of potential deadlocks
-// due to Mutex lock ordering inversions.  When set to 'kIgnore', tracking of
-// lock ordering is disabled.  Otherwise, in debug builds, a lock ordering graph
-// will be maintained internally, and detected cycles will be reported in
-// the manner chosen here.
-void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode);
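A one-line opt-in sketch, typically placed in main() before any threads establish lock orderings (not prescribed by this header):

    int main(int argc, char** argv) {
      absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
      // ... start threads and run the program ...
      return 0;
    }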
-
-ABSL_NAMESPACE_END
-}  // namespace absl
-
-// In some build configurations we pass --detect-odr-violations to the
-// gold linker.  This causes it to flag weak symbol overrides as ODR
-// violations.  Because ODR only applies to C++ and not C,
-// --detect-odr-violations ignores symbols not mangled with C++ names.
-// By changing our extension points to be extern "C", we dodge this
-// check.
-extern "C" {
-void AbslInternalMutexYield();
-}  // extern "C"
-
-#endif  // ABSL_SYNCHRONIZATION_MUTEX_H_
diff --git a/third_party/abseil_cpp/absl/synchronization/mutex_benchmark.cc b/third_party/abseil_cpp/absl/synchronization/mutex_benchmark.cc
deleted file mode 100644
index 933ea14f8f1d..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/mutex_benchmark.cc
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <cstdint>
-#include <mutex>  // NOLINT(build/c++11)
-#include <vector>
-
-#include "absl/base/config.h"
-#include "absl/base/internal/cycleclock.h"
-#include "absl/base/internal/spinlock.h"
-#include "absl/synchronization/blocking_counter.h"
-#include "absl/synchronization/internal/thread_pool.h"
-#include "absl/synchronization/mutex.h"
-#include "benchmark/benchmark.h"
-
-namespace {
-
-void BM_Mutex(benchmark::State& state) {
-  static absl::Mutex* mu = new absl::Mutex;
-  for (auto _ : state) {
-    absl::MutexLock lock(mu);
-  }
-}
-BENCHMARK(BM_Mutex)->UseRealTime()->Threads(1)->ThreadPerCpu();
-
-static void DelayNs(int64_t ns, int* data) {
-  int64_t end = absl::base_internal::CycleClock::Now() +
-                ns * absl::base_internal::CycleClock::Frequency() / 1e9;
-  while (absl::base_internal::CycleClock::Now() < end) {
-    ++(*data);
-    benchmark::DoNotOptimize(*data);
-  }
-}
-
-template <typename MutexType>
-class RaiiLocker {
- public:
-  explicit RaiiLocker(MutexType* mu) : mu_(mu) { mu_->Lock(); }
-  ~RaiiLocker() { mu_->Unlock(); }
- private:
-  MutexType* mu_;
-};
-
-template <>
-class RaiiLocker<std::mutex> {
- public:
-  explicit RaiiLocker(std::mutex* mu) : mu_(mu) { mu_->lock(); }
-  ~RaiiLocker() { mu_->unlock(); }
- private:
-  std::mutex* mu_;
-};
-
-template <typename MutexType>
-void BM_Contended(benchmark::State& state) {
-  struct Shared {
-    MutexType mu;
-    int data = 0;
-  };
-  static auto* shared = new Shared;
-  int local = 0;
-  for (auto _ : state) {
-    // Here we model both local work outside of the critical section as well
-    // as some work inside of the critical section.  The idea is to capture
-    // more or less realistic contention levels.
-    // If contention is too low, the benchmark won't measure anything useful.
-    // If contention is unrealistically high, the benchmark will favor
-    // bad mutex implementations that block and otherwise distract threads
-    // from the mutex and shared state as much as possible.
-    // To achieve this, the amount of local work is multiplied by the number
-    // of threads to keep the ratio between local work and the critical
-    // section approximately equal regardless of the thread count.
-    DelayNs(100 * state.threads, &local);
-    RaiiLocker<MutexType> locker(&shared->mu);
-    DelayNs(state.range(0), &shared->data);
-  }
-}
-
-BENCHMARK_TEMPLATE(BM_Contended, absl::Mutex)
-    ->UseRealTime()
-    // ThreadPerCpu poorly handles non-power-of-two CPU counts.
-    ->Threads(1)
-    ->Threads(2)
-    ->Threads(4)
-    ->Threads(6)
-    ->Threads(8)
-    ->Threads(12)
-    ->Threads(16)
-    ->Threads(24)
-    ->Threads(32)
-    ->Threads(48)
-    ->Threads(64)
-    ->Threads(96)
-    ->Threads(128)
-    ->Threads(192)
-    ->Threads(256)
-    // Some empirically chosen amounts of work in the critical section.
-    // 1 is low contention, 200 is high contention, and a few values lie in between.
-    ->Arg(1)
-    ->Arg(20)
-    ->Arg(50)
-    ->Arg(200);
-
-BENCHMARK_TEMPLATE(BM_Contended, absl::base_internal::SpinLock)
-    ->UseRealTime()
-    // ThreadPerCpu poorly handles non-power-of-two CPU counts.
-    ->Threads(1)
-    ->Threads(2)
-    ->Threads(4)
-    ->Threads(6)
-    ->Threads(8)
-    ->Threads(12)
-    ->Threads(16)
-    ->Threads(24)
-    ->Threads(32)
-    ->Threads(48)
-    ->Threads(64)
-    ->Threads(96)
-    ->Threads(128)
-    ->Threads(192)
-    ->Threads(256)
-    // Some empirically chosen amounts of work in the critical section.
-    // 1 is low contention, 200 is high contention, and a few values lie in between.
-    ->Arg(1)
-    ->Arg(20)
-    ->Arg(50)
-    ->Arg(200);
-
-BENCHMARK_TEMPLATE(BM_Contended, std::mutex)
-    ->UseRealTime()
-    // ThreadPerCpu poorly handles non-power-of-two CPU counts.
-    ->Threads(1)
-    ->Threads(2)
-    ->Threads(4)
-    ->Threads(6)
-    ->Threads(8)
-    ->Threads(12)
-    ->Threads(16)
-    ->Threads(24)
-    ->Threads(32)
-    ->Threads(48)
-    ->Threads(64)
-    ->Threads(96)
-    ->Threads(128)
-    ->Threads(192)
-    ->Threads(256)
-    // Some empirically chosen amounts of work in the critical section.
-    // 1 is low contention, 200 is high contention, and a few values lie in between.
-    ->Arg(1)
-    ->Arg(20)
-    ->Arg(50)
-    ->Arg(200);
-
-// Measure the overhead of conditions on mutex release (when they must be
-// evaluated).  Mutex has (some) support for equivalence classes, allowing
-// Conditions with the same function/argument to potentially be evaluated
-// only once rather than once per waiter.
-//
-// num_classes == 0 is used for the special case of every waiter being distinct.
-void BM_ConditionWaiters(benchmark::State& state) {
-  int num_classes = state.range(0);
-  int num_waiters = state.range(1);
-
-  struct Helper {
-    static void Waiter(absl::BlockingCounter* init, absl::Mutex* m, int* p) {
-      init->DecrementCount();
-      m->LockWhen(absl::Condition(
-          static_cast<bool (*)(int*)>([](int* v) { return *v == 0; }), p));
-      m->Unlock();
-    }
-  };
-
-  if (num_classes == 0) {
-    // No equivalence classes.
-    num_classes = num_waiters;
-  }
-
-  absl::BlockingCounter init(num_waiters);
-  absl::Mutex mu;
-  std::vector<int> equivalence_classes(num_classes, 1);
-
-  // Must be declared last to be destroyed first.
-  absl::synchronization_internal::ThreadPool pool(num_waiters);
-
-  for (int i = 0; i < num_waiters; i++) {
-    // Mutex considers Conditions with the same function and argument
-    // to be equivalent.
-    pool.Schedule([&, i] {
-      Helper::Waiter(&init, &mu, &equivalence_classes[i % num_classes]);
-    });
-  }
-  init.Wait();
-
-  for (auto _ : state) {
-    mu.Lock();
-    mu.Unlock();  // Each unlock requires Condition evaluation for our waiters.
-  }
-
-  mu.Lock();
-  for (int i = 0; i < num_classes; i++) {
-    equivalence_classes[i] = 0;
-  }
-  mu.Unlock();
-}
-
-// Some configurations have higher thread limits than others.
-#if defined(__linux__) && !defined(ABSL_HAVE_THREAD_SANITIZER)
-constexpr int kMaxConditionWaiters = 8192;
-#else
-constexpr int kMaxConditionWaiters = 1024;
-#endif
-BENCHMARK(BM_ConditionWaiters)->RangePair(0, 2, 1, kMaxConditionWaiters);
-
-}  // namespace
diff --git a/third_party/abseil_cpp/absl/synchronization/mutex_test.cc b/third_party/abseil_cpp/absl/synchronization/mutex_test.cc
deleted file mode 100644
index 058f757b482f..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/mutex_test.cc
+++ /dev/null
@@ -1,1706 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/synchronization/mutex.h"
-
-#ifdef _WIN32
-#include <windows.h>
-#endif
-
-#include <algorithm>
-#include <atomic>
-#include <cstdlib>
-#include <functional>
-#include <memory>
-#include <random>
-#include <string>
-#include <thread>  // NOLINT(build/c++11)
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/base/attributes.h"
-#include "absl/base/config.h"
-#include "absl/base/internal/raw_logging.h"
-#include "absl/base/internal/sysinfo.h"
-#include "absl/memory/memory.h"
-#include "absl/synchronization/internal/thread_pool.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-
-namespace {
-
-// TODO(dmauro): Replace with a commandline flag.
-static constexpr bool kExtendedTest = false;
-
-std::unique_ptr<absl::synchronization_internal::ThreadPool> CreatePool(
-    int threads) {
-  return absl::make_unique<absl::synchronization_internal::ThreadPool>(threads);
-}
-
-std::unique_ptr<absl::synchronization_internal::ThreadPool>
-CreateDefaultPool() {
-  return CreatePool(kExtendedTest ? 32 : 10);
-}
-
-// Hack to schedule a function to run on a thread pool thread after a
-// duration has elapsed.
-static void ScheduleAfter(absl::synchronization_internal::ThreadPool *tp,
-                          absl::Duration after,
-                          const std::function<void()> &func) {
-  tp->Schedule([func, after] {
-    absl::SleepFor(after);
-    func();
-  });
-}
-
-struct TestContext {
-  int iterations;
-  int threads;
-  int g0;  // global 0
-  int g1;  // global 1
-  absl::Mutex mu;
-  absl::CondVar cv;
-};
-
-// To test whether the invariant check call occurs
-static std::atomic<bool> invariant_checked;
-
-static bool GetInvariantChecked() {
-  return invariant_checked.load(std::memory_order_relaxed);
-}
-
-static void SetInvariantChecked(bool new_value) {
-  invariant_checked.store(new_value, std::memory_order_relaxed);
-}
-
-static void CheckSumG0G1(void *v) {
-  TestContext *cxt = static_cast<TestContext *>(v);
-  ABSL_RAW_CHECK(cxt->g0 == -cxt->g1, "Error in CheckSumG0G1");
-  SetInvariantChecked(true);
-}
-
-static void TestMu(TestContext *cxt, int c) {
-  for (int i = 0; i != cxt->iterations; i++) {
-    absl::MutexLock l(&cxt->mu);
-    int a = cxt->g0 + 1;
-    cxt->g0 = a;
-    cxt->g1--;
-  }
-}
-
-static void TestTry(TestContext *cxt, int c) {
-  for (int i = 0; i != cxt->iterations; i++) {
-    do {
-      std::this_thread::yield();
-    } while (!cxt->mu.TryLock());
-    int a = cxt->g0 + 1;
-    cxt->g0 = a;
-    cxt->g1--;
-    cxt->mu.Unlock();
-  }
-}
-
-static void TestR20ms(TestContext *cxt, int c) {
-  for (int i = 0; i != cxt->iterations; i++) {
-    absl::ReaderMutexLock l(&cxt->mu);
-    absl::SleepFor(absl::Milliseconds(20));
-    cxt->mu.AssertReaderHeld();
-  }
-}
-
-static void TestRW(TestContext *cxt, int c) {
-  if ((c & 1) == 0) {
-    for (int i = 0; i != cxt->iterations; i++) {
-      absl::WriterMutexLock l(&cxt->mu);
-      cxt->g0++;
-      cxt->g1--;
-      cxt->mu.AssertHeld();
-      cxt->mu.AssertReaderHeld();
-    }
-  } else {
-    for (int i = 0; i != cxt->iterations; i++) {
-      absl::ReaderMutexLock l(&cxt->mu);
-      ABSL_RAW_CHECK(cxt->g0 == -cxt->g1, "Error in TestRW");
-      cxt->mu.AssertReaderHeld();
-    }
-  }
-}
-
-struct MyContext {
-  int target;
-  TestContext *cxt;
-  bool MyTurn();
-};
-
-bool MyContext::MyTurn() {
-  TestContext *cxt = this->cxt;
-  return cxt->g0 == this->target || cxt->g0 == cxt->iterations;
-}
-
-static void TestAwait(TestContext *cxt, int c) {
-  MyContext mc;
-  mc.target = c;
-  mc.cxt = cxt;
-  absl::MutexLock l(&cxt->mu);
-  cxt->mu.AssertHeld();
-  while (cxt->g0 < cxt->iterations) {
-    cxt->mu.Await(absl::Condition(&mc, &MyContext::MyTurn));
-    ABSL_RAW_CHECK(mc.MyTurn(), "Error in TestAwait");
-    cxt->mu.AssertHeld();
-    if (cxt->g0 < cxt->iterations) {
-      int a = cxt->g0 + 1;
-      cxt->g0 = a;
-      mc.target += cxt->threads;
-    }
-  }
-}
-
-static void TestSignalAll(TestContext *cxt, int c) {
-  int target = c;
-  absl::MutexLock l(&cxt->mu);
-  cxt->mu.AssertHeld();
-  while (cxt->g0 < cxt->iterations) {
-    while (cxt->g0 != target && cxt->g0 != cxt->iterations) {
-      cxt->cv.Wait(&cxt->mu);
-    }
-    if (cxt->g0 < cxt->iterations) {
-      int a = cxt->g0 + 1;
-      cxt->g0 = a;
-      cxt->cv.SignalAll();
-      target += cxt->threads;
-    }
-  }
-}
-
-static void TestSignal(TestContext *cxt, int c) {
-  ABSL_RAW_CHECK(cxt->threads == 2, "TestSignal should use 2 threads");
-  int target = c;
-  absl::MutexLock l(&cxt->mu);
-  cxt->mu.AssertHeld();
-  while (cxt->g0 < cxt->iterations) {
-    while (cxt->g0 != target && cxt->g0 != cxt->iterations) {
-      cxt->cv.Wait(&cxt->mu);
-    }
-    if (cxt->g0 < cxt->iterations) {
-      int a = cxt->g0 + 1;
-      cxt->g0 = a;
-      cxt->cv.Signal();
-      target += cxt->threads;
-    }
-  }
-}
-
-static void TestCVTimeout(TestContext *cxt, int c) {
-  int target = c;
-  absl::MutexLock l(&cxt->mu);
-  cxt->mu.AssertHeld();
-  while (cxt->g0 < cxt->iterations) {
-    while (cxt->g0 != target && cxt->g0 != cxt->iterations) {
-      cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(100));
-    }
-    if (cxt->g0 < cxt->iterations) {
-      int a = cxt->g0 + 1;
-      cxt->g0 = a;
-      cxt->cv.SignalAll();
-      target += cxt->threads;
-    }
-  }
-}
-
-static bool G0GE2(TestContext *cxt) { return cxt->g0 >= 2; }
-
-static void TestTime(TestContext *cxt, int c, bool use_cv) {
-  ABSL_RAW_CHECK(cxt->iterations == 1, "TestTime should only use 1 iteration");
-  ABSL_RAW_CHECK(cxt->threads > 2, "TestTime should use more than 2 threads");
-  const bool kFalse = false;
-  absl::Condition false_cond(&kFalse);
-  absl::Condition g0ge2(G0GE2, cxt);
-  if (c == 0) {
-    absl::MutexLock l(&cxt->mu);
-
-    absl::Time start = absl::Now();
-    if (use_cv) {
-      cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
-    } else {
-      ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)),
-                     "TestTime failed");
-    }
-    absl::Duration elapsed = absl::Now() - start;
-    ABSL_RAW_CHECK(
-        absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0),
-        "TestTime failed");
-    ABSL_RAW_CHECK(cxt->g0 == 1, "TestTime failed");
-
-    start = absl::Now();
-    if (use_cv) {
-      cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
-    } else {
-      ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)),
-                     "TestTime failed");
-    }
-    elapsed = absl::Now() - start;
-    ABSL_RAW_CHECK(
-        absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0),
-        "TestTime failed");
-    cxt->g0++;
-    if (use_cv) {
-      cxt->cv.Signal();
-    }
-
-    start = absl::Now();
-    if (use_cv) {
-      cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(4));
-    } else {
-      ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(4)),
-                     "TestTime failed");
-    }
-    elapsed = absl::Now() - start;
-    ABSL_RAW_CHECK(
-        absl::Seconds(3.9) <= elapsed && elapsed <= absl::Seconds(6.0),
-        "TestTime failed");
-    ABSL_RAW_CHECK(cxt->g0 >= 3, "TestTime failed");
-
-    start = absl::Now();
-    if (use_cv) {
-      cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
-    } else {
-      ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)),
-                     "TestTime failed");
-    }
-    elapsed = absl::Now() - start;
-    ABSL_RAW_CHECK(
-        absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0),
-        "TestTime failed");
-    if (use_cv) {
-      cxt->cv.SignalAll();
-    }
-
-    start = absl::Now();
-    if (use_cv) {
-      cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
-    } else {
-      ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)),
-                     "TestTime failed");
-    }
-    elapsed = absl::Now() - start;
-    ABSL_RAW_CHECK(absl::Seconds(0.9) <= elapsed &&
-                   elapsed <= absl::Seconds(2.0), "TestTime failed");
-    ABSL_RAW_CHECK(cxt->g0 == cxt->threads, "TestTime failed");
-
-  } else if (c == 1) {
-    absl::MutexLock l(&cxt->mu);
-    const absl::Time start = absl::Now();
-    if (use_cv) {
-      cxt->cv.WaitWithTimeout(&cxt->mu, absl::Milliseconds(500));
-    } else {
-      ABSL_RAW_CHECK(
-          !cxt->mu.AwaitWithTimeout(false_cond, absl::Milliseconds(500)),
-          "TestTime failed");
-    }
-    const absl::Duration elapsed = absl::Now() - start;
-    ABSL_RAW_CHECK(
-        absl::Seconds(0.4) <= elapsed && elapsed <= absl::Seconds(0.9),
-        "TestTime failed");
-    cxt->g0++;
-  } else if (c == 2) {
-    absl::MutexLock l(&cxt->mu);
-    if (use_cv) {
-      while (cxt->g0 < 2) {
-        cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(100));
-      }
-    } else {
-      ABSL_RAW_CHECK(cxt->mu.AwaitWithTimeout(g0ge2, absl::Seconds(100)),
-                     "TestTime failed");
-    }
-    cxt->g0++;
-  } else {
-    absl::MutexLock l(&cxt->mu);
-    if (use_cv) {
-      while (cxt->g0 < 2) {
-        cxt->cv.Wait(&cxt->mu);
-      }
-    } else {
-      cxt->mu.Await(g0ge2);
-    }
-    cxt->g0++;
-  }
-}
-
-static void TestMuTime(TestContext *cxt, int c) { TestTime(cxt, c, false); }
-
-static void TestCVTime(TestContext *cxt, int c) { TestTime(cxt, c, true); }
-
-static void EndTest(int *c0, int *c1, absl::Mutex *mu, absl::CondVar *cv,
-                    const std::function<void(int)>& cb) {
-  mu->Lock();
-  int c = (*c0)++;
-  mu->Unlock();
-  cb(c);
-  absl::MutexLock l(mu);
-  (*c1)++;
-  cv->Signal();
-}
-
-// Code common to RunTest() and RunTestWithInvariantDebugging().
-static int RunTestCommon(TestContext *cxt, void (*test)(TestContext *cxt, int),
-                         int threads, int iterations, int operations) {
-  absl::Mutex mu2;
-  absl::CondVar cv2;
-  int c0 = 0;
-  int c1 = 0;
-  cxt->g0 = 0;
-  cxt->g1 = 0;
-  cxt->iterations = iterations;
-  cxt->threads = threads;
-  absl::synchronization_internal::ThreadPool tp(threads);
-  for (int i = 0; i != threads; i++) {
-    tp.Schedule(std::bind(&EndTest, &c0, &c1, &mu2, &cv2,
-                          std::function<void(int)>(
-                              std::bind(test, cxt, std::placeholders::_1))));
-  }
-  mu2.Lock();
-  while (c1 != threads) {
-    cv2.Wait(&mu2);
-  }
-  mu2.Unlock();
-  return cxt->g0;
-}
-
-// Basis for the parameterized tests configured below.
-static int RunTest(void (*test)(TestContext *cxt, int), int threads,
-                   int iterations, int operations) {
-  TestContext cxt;
-  return RunTestCommon(&cxt, test, threads, iterations, operations);
-}
-
-// Like RunTest(), but sets an invariant on the tested Mutex and
-// verifies that the invariant check happened. The invariant function
-// will be passed the TestContext* as its arg and must call
-// SetInvariantChecked(true);
-#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
-static int RunTestWithInvariantDebugging(void (*test)(TestContext *cxt, int),
-                                         int threads, int iterations,
-                                         int operations,
-                                         void (*invariant)(void *)) {
-  absl::EnableMutexInvariantDebugging(true);
-  SetInvariantChecked(false);
-  TestContext cxt;
-  cxt.mu.EnableInvariantDebugging(invariant, &cxt);
-  int ret = RunTestCommon(&cxt, test, threads, iterations, operations);
-  ABSL_RAW_CHECK(GetInvariantChecked(), "Invariant not checked");
-  absl::EnableMutexInvariantDebugging(false);  // Restore.
-  return ret;
-}
-#endif
-
-// --------------------------------------------------------
-// Test for fix of bug in TryRemove()
-struct TimeoutBugStruct {
-  absl::Mutex mu;
-  bool a;
-  int a_waiter_count;
-};
-
-static void WaitForA(TimeoutBugStruct *x) {
-  x->mu.LockWhen(absl::Condition(&x->a));
-  x->a_waiter_count--;
-  x->mu.Unlock();
-}
-
-static bool NoAWaiters(TimeoutBugStruct *x) { return x->a_waiter_count == 0; }
-
-// Test that a CondVar.Wait(&mutex) can un-block a call to mutex.Await() in
-// another thread.
-TEST(Mutex, CondVarWaitSignalsAwait) {
-  // Use a struct so the lock annotations apply.
-  struct {
-    absl::Mutex barrier_mu;
-    bool barrier ABSL_GUARDED_BY(barrier_mu) = false;
-
-    absl::Mutex release_mu;
-    bool release ABSL_GUARDED_BY(release_mu) = false;
-    absl::CondVar released_cv;
-  } state;
-
-  auto pool = CreateDefaultPool();
-
-  // Thread A.  Sets barrier, waits for release using Mutex::Await, then
-  // signals released_cv.
-  pool->Schedule([&state] {
-    state.release_mu.Lock();
-
-    state.barrier_mu.Lock();
-    state.barrier = true;
-    state.barrier_mu.Unlock();
-
-    state.release_mu.Await(absl::Condition(&state.release));
-    state.released_cv.Signal();
-    state.release_mu.Unlock();
-  });
-
-  state.barrier_mu.LockWhen(absl::Condition(&state.barrier));
-  state.barrier_mu.Unlock();
-  state.release_mu.Lock();
-  // Thread A is now blocked on release by way of Mutex::Await().
-
-  // Set release.  Calling released_cv.Wait() should un-block thread A,
-  // which will signal released_cv.  If not, the test will hang.
-  state.release = true;
-  state.released_cv.Wait(&state.release_mu);
-  state.release_mu.Unlock();
-}
-
-// Test that a CondVar.WaitWithTimeout(&mutex) can un-block a call to
-// mutex.Await() in another thread.
-TEST(Mutex, CondVarWaitWithTimeoutSignalsAwait) {
-  // Use a struct so the lock annotations apply.
-  struct {
-    absl::Mutex barrier_mu;
-    bool barrier ABSL_GUARDED_BY(barrier_mu) = false;
-
-    absl::Mutex release_mu;
-    bool release ABSL_GUARDED_BY(release_mu) = false;
-    absl::CondVar released_cv;
-  } state;
-
-  auto pool = CreateDefaultPool();
-
-  // Thread A.  Sets barrier, waits for release using Mutex::Await, then
-  // signals released_cv.
-  pool->Schedule([&state] {
-    state.release_mu.Lock();
-
-    state.barrier_mu.Lock();
-    state.barrier = true;
-    state.barrier_mu.Unlock();
-
-    state.release_mu.Await(absl::Condition(&state.release));
-    state.released_cv.Signal();
-    state.release_mu.Unlock();
-  });
-
-  state.barrier_mu.LockWhen(absl::Condition(&state.barrier));
-  state.barrier_mu.Unlock();
-  state.release_mu.Lock();
-  // Thread A is now blocked on release by way of Mutex::Await().
-
-  // Set release.  Calling released_cv.Wait() should un-block thread A,
-  // which will signal released_cv.  If not, the test will hang.
-  state.release = true;
-  EXPECT_TRUE(
-      !state.released_cv.WaitWithTimeout(&state.release_mu, absl::Seconds(10)))
-      << "; Unrecoverable test failure: CondVar::WaitWithTimeout did not "
-         "unblock the absl::Mutex::Await call in another thread.";
-
-  state.release_mu.Unlock();
-}
-
-// Test for regression of a bug in loop of TryRemove()
-TEST(Mutex, MutexTimeoutBug) {
-  auto tp = CreateDefaultPool();
-
-  TimeoutBugStruct x;
-  x.a = false;
-  x.a_waiter_count = 2;
-  tp->Schedule(std::bind(&WaitForA, &x));
-  tp->Schedule(std::bind(&WaitForA, &x));
-  absl::SleepFor(absl::Seconds(1));  // Allow first two threads to hang.
-  // The skip field of the second will point to the first because there are
-  // only two.
-
-  // Now cause a thread waiting on an always-false condition to time out.
-  // This would deadlock when the bug was present.
-  bool always_false = false;
-  x.mu.LockWhenWithTimeout(absl::Condition(&always_false),
-                           absl::Milliseconds(500));
-
-  // If we get here, the bug is not present.  Clean up the state.
-
-  x.a = true;                                    // wake up the two waiters on A
-  x.mu.Await(absl::Condition(&NoAWaiters, &x));  // wait for them to exit
-  x.mu.Unlock();
-}
-
-struct CondVarWaitDeadlock : testing::TestWithParam<int> {
-  absl::Mutex mu;
-  absl::CondVar cv;
-  bool cond1 = false;
-  bool cond2 = false;
-  bool read_lock1;
-  bool read_lock2;
-  bool signal_unlocked;
-
-  CondVarWaitDeadlock() {
-    read_lock1 = GetParam() & (1 << 0);
-    read_lock2 = GetParam() & (1 << 1);
-    signal_unlocked = GetParam() & (1 << 2);
-  }
-
-  void Waiter1() {
-    if (read_lock1) {
-      mu.ReaderLock();
-      while (!cond1) {
-        cv.Wait(&mu);
-      }
-      mu.ReaderUnlock();
-    } else {
-      mu.Lock();
-      while (!cond1) {
-        cv.Wait(&mu);
-      }
-      mu.Unlock();
-    }
-  }
-
-  void Waiter2() {
-    if (read_lock2) {
-      mu.ReaderLockWhen(absl::Condition(&cond2));
-      mu.ReaderUnlock();
-    } else {
-      mu.LockWhen(absl::Condition(&cond2));
-      mu.Unlock();
-    }
-  }
-};
-
-// Test for a deadlock bug in Mutex::Fer().
-// The sequence of events that lead to the deadlock is:
-// 1. waiter1 blocks on cv in read mode (mu bits = 0).
-// 2. waiter2 blocks on mu in either mode (mu bits = kMuWait).
-// 3. main thread locks mu, sets cond1, unlocks mu (mu bits = kMuWait).
-// 4. main thread signals on cv and this eventually calls Mutex::Fer().
-// Currently Fer wakes waiter1 since mu bits = kMuWait (mutex is unlocked).
-// Before the bug fix Fer neither woke waiter1 nor queued it on mutex,
-// which resulted in deadlock.
-TEST_P(CondVarWaitDeadlock, Test) {
-  auto waiter1 = CreatePool(1);
-  auto waiter2 = CreatePool(1);
-  waiter1->Schedule([this] { this->Waiter1(); });
-  waiter2->Schedule([this] { this->Waiter2(); });
-
-  // Wait while threads block (best-effort is fine).
-  absl::SleepFor(absl::Milliseconds(100));
-
-  // Wake condwaiter.
-  mu.Lock();
-  cond1 = true;
-  if (signal_unlocked) {
-    mu.Unlock();
-    cv.Signal();
-  } else {
-    cv.Signal();
-    mu.Unlock();
-  }
-  waiter1.reset();  // "join" waiter1
-
-  // Wake waiter.
-  mu.Lock();
-  cond2 = true;
-  mu.Unlock();
-  waiter2.reset();  // "join" waiter2
-}
-
-INSTANTIATE_TEST_SUITE_P(CondVarWaitDeadlockTest, CondVarWaitDeadlock,
-                         ::testing::Range(0, 8),
-                         ::testing::PrintToStringParamName());
-
-// --------------------------------------------------------
-// Test for fix of bug in DequeueAllWakeable()
-// Bug was that if there was more than one waiting reader
-// and all should be woken, the most recently blocked one
-// would not be.
-
-struct DequeueAllWakeableBugStruct {
-  absl::Mutex mu;
-  absl::Mutex mu2;       // protects all fields below
-  int unfinished_count;  // count of unfinished readers; under mu2
-  bool done1;            // unfinished_count == 0; under mu2
-  int finished_count;    // count of finished readers, under mu2
-  bool done2;            // finished_count == 0; under mu2
-};
-
-// Test for regression of a bug in loop of DequeueAllWakeable()
-static void AcquireAsReader(DequeueAllWakeableBugStruct *x) {
-  x->mu.ReaderLock();
-  x->mu2.Lock();
-  x->unfinished_count--;
-  x->done1 = (x->unfinished_count == 0);
-  x->mu2.Unlock();
-  // make sure that both readers acquired mu before we release it.
-  absl::SleepFor(absl::Seconds(2));
-  x->mu.ReaderUnlock();
-
-  x->mu2.Lock();
-  x->finished_count--;
-  x->done2 = (x->finished_count == 0);
-  x->mu2.Unlock();
-}
-
-// Test for regression of a bug in loop of DequeueAllWakeable()
-TEST(Mutex, MutexReaderWakeupBug) {
-  auto tp = CreateDefaultPool();
-
-  DequeueAllWakeableBugStruct x;
-  x.unfinished_count = 2;
-  x.done1 = false;
-  x.finished_count = 2;
-  x.done2 = false;
-  x.mu.Lock();  // acquire mu exclusively
-  // queue two threads that will block on reader locks on x.mu
-  tp->Schedule(std::bind(&AcquireAsReader, &x));
-  tp->Schedule(std::bind(&AcquireAsReader, &x));
-  absl::SleepFor(absl::Seconds(1));  // give time for reader threads to block
-  x.mu.Unlock();                     // wake them up
-
-  // both readers should finish promptly
-  EXPECT_TRUE(
-      x.mu2.LockWhenWithTimeout(absl::Condition(&x.done1), absl::Seconds(10)));
-  x.mu2.Unlock();
-
-  EXPECT_TRUE(
-      x.mu2.LockWhenWithTimeout(absl::Condition(&x.done2), absl::Seconds(10)));
-  x.mu2.Unlock();
-}
-
-struct LockWhenTestStruct {
-  absl::Mutex mu1;
-  bool cond = false;
-
-  absl::Mutex mu2;
-  bool waiting = false;
-};
-
-static bool LockWhenTestIsCond(LockWhenTestStruct* s) {
-  s->mu2.Lock();
-  s->waiting = true;
-  s->mu2.Unlock();
-  return s->cond;
-}
-
-static void LockWhenTestWaitForIsCond(LockWhenTestStruct* s) {
-  s->mu1.LockWhen(absl::Condition(&LockWhenTestIsCond, s));
-  s->mu1.Unlock();
-}
-
-TEST(Mutex, LockWhen) {
-  LockWhenTestStruct s;
-
-  std::thread t(LockWhenTestWaitForIsCond, &s);
-  s.mu2.LockWhen(absl::Condition(&s.waiting));
-  s.mu2.Unlock();
-
-  s.mu1.Lock();
-  s.cond = true;
-  s.mu1.Unlock();
-
-  t.join();
-}
-
-TEST(Mutex, LockWhenGuard) {
-  absl::Mutex mu;
-  int n = 30;
-  bool done = false;
-
-  // We don't inline the lambda because the conversion is ambiguous in MSVC.
-  bool (*cond_eq_10)(int *) = [](int *p) { return *p == 10; };
-  bool (*cond_lt_10)(int *) = [](int *p) { return *p < 10; };
-
-  std::thread t1([&mu, &n, &done, cond_eq_10]() {
-    absl::ReaderMutexLock lock(&mu, absl::Condition(cond_eq_10, &n));
-    done = true;
-  });
-
-  std::thread t2[10];
-  for (std::thread &t : t2) {
-    t = std::thread([&mu, &n, cond_lt_10]() {
-      absl::WriterMutexLock lock(&mu, absl::Condition(cond_lt_10, &n));
-      ++n;
-    });
-  }
-
-  {
-    absl::MutexLock lock(&mu);
-    n = 0;
-  }
-
-  for (std::thread &t : t2) t.join();
-  t1.join();
-
-  EXPECT_TRUE(done);
-  EXPECT_EQ(n, 10);
-}
-
-// --------------------------------------------------------
-// The following test requires Mutex::ReaderLock to be a real shared
-// lock, which is not the case in all builds.
-#if !defined(ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE)
-
-// Test for fix of bug in UnlockSlow() that incorrectly decremented the reader
-// count when putting a thread to sleep waiting for a false condition when the
-// lock was not held.
-
-// For this bug to strike, we make a thread wait on a free mutex with no
-// waiters by causing its wakeup condition to be false.  Then the
-// next two acquirers must be readers.  The bug causes the lock
-// to be released when the first reader unlocks, rather than when both have.
-
-struct ReaderDecrementBugStruct {
-  bool cond;  // to delay first thread (under mu)
-  int done;   // reference count (under mu)
-  absl::Mutex mu;
-
-  bool waiting_on_cond;   // under mu2
-  bool have_reader_lock;  // under mu2
-  bool complete;          // under mu2
-  absl::Mutex mu2;        // > mu
-};
-
-// L >= mu, L < mu_waiting_on_cond
-static bool IsCond(void *v) {
-  ReaderDecrementBugStruct *x = reinterpret_cast<ReaderDecrementBugStruct *>(v);
-  x->mu2.Lock();
-  x->waiting_on_cond = true;
-  x->mu2.Unlock();
-  return x->cond;
-}
-
-// L >= mu
-static bool AllDone(void *v) {
-  ReaderDecrementBugStruct *x = reinterpret_cast<ReaderDecrementBugStruct *>(v);
-  return x->done == 0;
-}
-
-// L={}
-static void WaitForCond(ReaderDecrementBugStruct *x) {
-  absl::Mutex dummy;
-  absl::MutexLock l(&dummy);
-  x->mu.LockWhen(absl::Condition(&IsCond, x));
-  x->done--;
-  x->mu.Unlock();
-}
-
-// L={}
-static void GetReadLock(ReaderDecrementBugStruct *x) {
-  x->mu.ReaderLock();
-  x->mu2.Lock();
-  x->have_reader_lock = true;
-  x->mu2.Await(absl::Condition(&x->complete));
-  x->mu2.Unlock();
-  x->mu.ReaderUnlock();
-  x->mu.Lock();
-  x->done--;
-  x->mu.Unlock();
-}
-
-// Test for reader counter being decremented incorrectly by waiter
-// with false condition.
-TEST(Mutex, MutexReaderDecrementBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
-  ReaderDecrementBugStruct x;
-  x.cond = false;
-  x.waiting_on_cond = false;
-  x.have_reader_lock = false;
-  x.complete = false;
-  x.done = 2;  // initial ref count
-
-  // Run WaitForCond() and wait for it to sleep
-  std::thread thread1(WaitForCond, &x);
-  x.mu2.LockWhen(absl::Condition(&x.waiting_on_cond));
-  x.mu2.Unlock();
-
-  // Run GetReadLock(), and wait for it to get the read lock
-  std::thread thread2(GetReadLock, &x);
-  x.mu2.LockWhen(absl::Condition(&x.have_reader_lock));
-  x.mu2.Unlock();
-
-  // Get the reader lock ourselves, and release it.
-  x.mu.ReaderLock();
-  x.mu.ReaderUnlock();
-
-  // The lock should be held in read mode by GetReadLock().
-  // If we have the bug, the lock will be free.
-  x.mu.AssertReaderHeld();
-
-  // Wake up all the threads.
-  x.mu2.Lock();
-  x.complete = true;
-  x.mu2.Unlock();
-
-  // TODO(delesley): turn on analysis once lock upgrading is supported.
-  // (This call upgrades the lock from shared to exclusive.)
-  x.mu.Lock();
-  x.cond = true;
-  x.mu.Await(absl::Condition(&AllDone, &x));
-  x.mu.Unlock();
-
-  thread1.join();
-  thread2.join();
-}
-#endif  // !ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE
-
-// Test that we correctly handle the situation when a lock is
-// held and then destroyed (w/o unlocking).
-#ifdef ABSL_HAVE_THREAD_SANITIZER
-// TSAN reports errors when locked Mutexes are destroyed.
-TEST(Mutex, DISABLED_LockedMutexDestructionBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
-#else
-TEST(Mutex, LockedMutexDestructionBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
-#endif
-  for (int i = 0; i != 10; i++) {
-    // Create, lock and destroy 10 locks.
-    const int kNumLocks = 10;
-    auto mu = absl::make_unique<absl::Mutex[]>(kNumLocks);
-    for (int j = 0; j != kNumLocks; j++) {
-      if ((j % 2) == 0) {
-        mu[j].WriterLock();
-      } else {
-        mu[j].ReaderLock();
-      }
-    }
-  }
-}
-
-// --------------------------------------------------------
-// Test for bug with pattern of readers using a condvar.  The bug was that if a
-// reader went to sleep on a condition variable while one or more other readers
-// held the lock, but there were no waiters, the reader count (held in the
-// mutex word) would be lost.  (This is because Enqueue() had at one time
-// always placed the thread on the Mutex queue.  Later (CL 4075610), to
-// tolerate re-entry into Mutex from a Condition predicate, Enqueue() was
-// changed so that it could also place a thread on a condition-variable.  This
-// introduced the case where Enqueue() returned with an empty queue, and this
-// case was handled incorrectly in one place.)
-
-static void ReaderForReaderOnCondVar(absl::Mutex *mu, absl::CondVar *cv,
-                                     int *running) {
-  std::random_device dev;
-  std::mt19937 gen(dev());
-  std::uniform_int_distribution<int> random_millis(0, 15);
-  mu->ReaderLock();
-  while (*running == 3) {
-    absl::SleepFor(absl::Milliseconds(random_millis(gen)));
-    cv->WaitWithTimeout(mu, absl::Milliseconds(random_millis(gen)));
-  }
-  mu->ReaderUnlock();
-  mu->Lock();
-  (*running)--;
-  mu->Unlock();
-}
-
-struct True {
-  template <class... Args>
-  bool operator()(Args...) const {
-    return true;
-  }
-};
-
-struct DerivedTrue : True {};
-
-TEST(Mutex, FunctorCondition) {
-  {  // Variadic
-    True f;
-    EXPECT_TRUE(absl::Condition(&f).Eval());
-  }
-
-  {  // Inherited
-    DerivedTrue g;
-    EXPECT_TRUE(absl::Condition(&g).Eval());
-  }
-
-  {  // lambda
-    int value = 3;
-    auto is_zero = [&value] { return value == 0; };
-    absl::Condition c(&is_zero);
-    EXPECT_FALSE(c.Eval());
-    value = 0;
-    EXPECT_TRUE(c.Eval());
-  }
-
-  {  // bind
-    int value = 0;
-    auto is_positive = std::bind(std::less<int>(), 0, std::cref(value));
-    absl::Condition c(&is_positive);
-    EXPECT_FALSE(c.Eval());
-    value = 1;
-    EXPECT_TRUE(c.Eval());
-  }
-
-  {  // std::function
-    int value = 3;
-    std::function<bool()> is_zero = [&value] { return value == 0; };
-    absl::Condition c(&is_zero);
-    EXPECT_FALSE(c.Eval());
-    value = 0;
-    EXPECT_TRUE(c.Eval());
-  }
-}
-
-static bool IntIsZero(int *x) { return *x == 0; }
-
-// Test for reader waiting condition variable when there are other readers
-// but no waiters.
-TEST(Mutex, TestReaderOnCondVar) {
-  auto tp = CreateDefaultPool();
-  absl::Mutex mu;
-  absl::CondVar cv;
-  int running = 3;
-  tp->Schedule(std::bind(&ReaderForReaderOnCondVar, &mu, &cv, &running));
-  tp->Schedule(std::bind(&ReaderForReaderOnCondVar, &mu, &cv, &running));
-  absl::SleepFor(absl::Seconds(2));
-  mu.Lock();
-  running--;
-  mu.Await(absl::Condition(&IntIsZero, &running));
-  mu.Unlock();
-}
-
-// --------------------------------------------------------
-struct AcquireFromConditionStruct {
-  absl::Mutex mu0;   // protects value, done
-  int value;         // times condition function is called; under mu0
-  bool done;         // done with test?  under mu0
-  absl::Mutex mu1;   // used to attempt to mess up state of mu0
-  absl::CondVar cv;  // so the condition function can be invoked from
-                     // CondVar::Wait().
-};
-
-static bool ConditionWithAcquire(AcquireFromConditionStruct *x) {
-  x->value++;  // count times this function is called
-
-  if (x->value == 2 || x->value == 3) {
-    // On the second and third invocation of this function, sleep for 100ms,
-    // but with the side-effect of altering the state of a Mutex other than
-    // the one for which this is a condition.  The spec now explicitly allows
-    // this side effect; previously it was illegal.
-    bool always_false = false;
-    x->mu1.LockWhenWithTimeout(absl::Condition(&always_false),
-                               absl::Milliseconds(100));
-    x->mu1.Unlock();
-  }
-  ABSL_RAW_CHECK(x->value < 4, "should not be invoked a fourth time");
-
-  // We arrange for the condition to return true on only the 2nd and 3rd calls.
-  return x->value == 2 || x->value == 3;
-}
-
-static void WaitForCond2(AcquireFromConditionStruct *x) {
-  // wait for cond0 to become true
-  x->mu0.LockWhen(absl::Condition(&ConditionWithAcquire, x));
-  x->done = true;
-  x->mu0.Unlock();
-}
-
-// Test for Condition whose function acquires other Mutexes
-TEST(Mutex, AcquireFromCondition) {
-  auto tp = CreateDefaultPool();
-
-  AcquireFromConditionStruct x;
-  x.value = 0;
-  x.done = false;
-  tp->Schedule(
-      std::bind(&WaitForCond2, &x));  // run WaitForCond2() in a thread T
-  // T will hang because the first invocation of ConditionWithAcquire() will
-  // return false.
-  absl::SleepFor(absl::Milliseconds(500));  // allow T time to hang
-
-  x.mu0.Lock();
-  x.cv.WaitWithTimeout(&x.mu0, absl::Milliseconds(500));  // wake T
-  // T will be woken because the Wait() will call ConditionWithAcquire()
-  // for the second time, and it will return true.
-
-  x.mu0.Unlock();
-
-  // T will then acquire the lock and recheck its own condition.
-  // It will find the condition true, as this is the third invocation,
-  // but the use of another Mutex by the calling function will
-  // cause the old mutex implementation to think that the outer
-  // LockWhen() has timed out because the inner LockWhenWithTimeout() did.
-  // T will then check the condition a fourth time because it finds a
-  // timeout occurred.  This should not happen in the new
-  // implementation that allows the Condition function to use Mutexes.
-
-  // It should also succeed, even though the Condition function
-  // is being invoked from CondVar::Wait, and thus this thread
-  // is conceptually waiting both on the condition variable, and on mu2.
-
-  x.mu0.LockWhen(absl::Condition(&x.done));
-  x.mu0.Unlock();
-}
-
-TEST(Mutex, DeadlockDetector) {
-  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
-
-  // check that we can call ForgetDeadlockInfo() on a lock with the lock held
-  absl::Mutex m1;
-  absl::Mutex m2;
-  absl::Mutex m3;
-  absl::Mutex m4;
-
-  m1.Lock();  // m1 gets ID1
-  m2.Lock();  // m2 gets ID2
-  m3.Lock();  // m3 gets ID3
-  m3.Unlock();
-  m2.Unlock();
-  // m1 still held
-  m1.ForgetDeadlockInfo();  // m1 loses ID
-  m2.Lock();                // m2 gets ID2
-  m3.Lock();                // m3 gets ID3
-  m4.Lock();                // m4 gets ID4
-  m3.Unlock();
-  m2.Unlock();
-  m4.Unlock();
-  m1.Unlock();
-}
-
-// Bazel has a test "warning" file that programs can write to if the
-// test should pass with a warning.  This class disables the warning
-// file until it goes out of scope.
-class ScopedDisableBazelTestWarnings {
- public:
-  ScopedDisableBazelTestWarnings() {
-#ifdef _WIN32
-    char file[MAX_PATH];
-    if (GetEnvironmentVariableA(kVarName, file, sizeof(file)) < sizeof(file)) {
-      warnings_output_file_ = file;
-      SetEnvironmentVariableA(kVarName, nullptr);
-    }
-#else
-    const char *file = getenv(kVarName);
-    if (file != nullptr) {
-      warnings_output_file_ = file;
-      unsetenv(kVarName);
-    }
-#endif
-  }
-
-  ~ScopedDisableBazelTestWarnings() {
-    if (!warnings_output_file_.empty()) {
-#ifdef _WIN32
-      SetEnvironmentVariableA(kVarName, warnings_output_file_.c_str());
-#else
-      setenv(kVarName, warnings_output_file_.c_str(), 0);
-#endif
-    }
-  }
-
- private:
-  static const char kVarName[];
-  std::string warnings_output_file_;
-};
-const char ScopedDisableBazelTestWarnings::kVarName[] =
-    "TEST_WARNINGS_OUTPUT_FILE";
-
-#ifdef ABSL_HAVE_THREAD_SANITIZER
-// This test intentionally creates deadlocks to test the deadlock detector.
-TEST(Mutex, DISABLED_DeadlockDetectorBazelWarning) {
-#else
-TEST(Mutex, DeadlockDetectorBazelWarning) {
-#endif
-  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
-
-  // Cause deadlock detection to detect something, if it's
-  // compiled in and enabled.  But turn off the bazel warning.
-  ScopedDisableBazelTestWarnings disable_bazel_test_warnings;
-
-  absl::Mutex mu0;
-  absl::Mutex mu1;
-  bool got_mu0 = mu0.TryLock();
-  mu1.Lock();  // acquire mu1 while holding mu0
-  if (got_mu0) {
-    mu0.Unlock();
-  }
-  if (mu0.TryLock()) {  // try lock shouldn't cause deadlock detector to fire
-    mu0.Unlock();
-  }
-  mu0.Lock();  // acquire mu0 while holding mu1; should get one deadlock
-               // report here
-  mu0.Unlock();
-  mu1.Unlock();
-
-  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
-}
-
-// This test is tagged with NO_THREAD_SAFETY_ANALYSIS because the
-// annotation-based static thread-safety analysis is not currently
-// predicate-aware and cannot tell if the two for-loops that acquire and
-// release the locks have the same predicates.
-TEST(Mutex, DeadlockDetectorStressTest) ABSL_NO_THREAD_SAFETY_ANALYSIS {
-  // Stress test: Here we create a large number of locks and use all of them.
-  // If a deadlock detector keeps a full graph of lock acquisition order,
-  // it will likely be too slow for this test to pass.
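-  // The acquisition pattern below only orders locks whose indices are within
-  // 4 of each other, so the acquired-before graph stays sparse even though
-  // 2^17 mutexes are exercised.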
-  const int n_locks = 1 << 17;
-  auto array_of_locks = absl::make_unique<absl::Mutex[]>(n_locks);
-  for (int i = 0; i < n_locks; i++) {
-    int end = std::min(n_locks, i + 5);
-    // acquire and then release locks i, i+1, ..., i+4
-    for (int j = i; j < end; j++) {
-      array_of_locks[j].Lock();
-    }
-    for (int j = i; j < end; j++) {
-      array_of_locks[j].Unlock();
-    }
-  }
-}
-
-#ifdef ABSL_HAVE_THREAD_SANITIZER
-// TSAN reports errors when locked Mutexes are destroyed.
-TEST(Mutex, DISABLED_DeadlockIdBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
-#else
-TEST(Mutex, DeadlockIdBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
-#endif
-  // Test a scenario where a cached deadlock graph node id in the
-  // list of held locks is not invalidated when the corresponding
-  // mutex is deleted.
-  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
-  // Mutex that will be destroyed while being held
-  absl::Mutex *a = new absl::Mutex;
-  // Other mutexes needed by test
-  absl::Mutex b, c;
-
-  // Hold mutex.
-  a->Lock();
-
-  // Force deadlock id assignment by acquiring another lock.
-  b.Lock();
-  b.Unlock();
-
-  // Delete the mutex. The Mutex destructor tries to remove held locks,
-  // but the attempt isn't foolproof.  It can fail if:
-  //   (a) Deadlock detection is currently disabled.
-  //   (b) The destruction is from another thread.
-  // We exploit (a) by temporarily disabling deadlock detection.
-  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kIgnore);
-  delete a;
-  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
-
-  // Now acquire another lock which will force a deadlock id assignment.
-  // We should end up getting assigned the same deadlock id that was
-  // freed up when "a" was deleted, which will cause a spurious deadlock
-  // report if the held lock entry for "a" was not invalidated.
-  c.Lock();
-  c.Unlock();
-}
-
-// --------------------------------------------------------
-// Test for timeouts/deadlines on condition waits that are specified using
-// absl::Duration and absl::Time.  For each waiting function we test with
-// a timeout/deadline that has already expired/passed, one that is infinite
-// and so never expires/passes, and one that will expire/pass in the near
-// future.
-
-static absl::Duration TimeoutTestAllowedSchedulingDelay() {
-  // Note: we use a function here because Microsoft Visual Studio fails to
-  // properly initialize constexpr static absl::Duration variables.
-  return absl::Milliseconds(150);
-}
-
-// Returns true if `actual_delay` is close enough to `expected_delay` to pass
-// the timeouts/deadlines test.  Otherwise, logs warnings and returns false.
-ABSL_MUST_USE_RESULT
-static bool DelayIsWithinBounds(absl::Duration expected_delay,
-                                absl::Duration actual_delay) {
-  bool pass = true;
-  // Do not allow the observed delay to be less than expected.  This may occur
-  // in practice due to clock skew or when the synchronization primitives use a
-  // different clock than absl::Now(), but these cases should be handled by
-  // the retry mechanism in each TimeoutTest.
-  if (actual_delay < expected_delay) {
-    ABSL_RAW_LOG(WARNING,
-                 "Actual delay %s was too short, expected %s (difference %s)",
-                 absl::FormatDuration(actual_delay).c_str(),
-                 absl::FormatDuration(expected_delay).c_str(),
-                 absl::FormatDuration(actual_delay - expected_delay).c_str());
-    pass = false;
-  }
-  // If the expected delay is <= zero then allow a small error tolerance, since
-  // we do not expect context switches to occur during test execution.
-  // Otherwise, thread scheduling delays may be substantial in rare cases, so
-  // tolerate up to TimeoutTestAllowedSchedulingDelay() of error.
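-  // (Concretely: with expected_delay == 0 the observed delay must fall in
-  // [0ms, 10ms]; with a positive expected_delay, anything in
-  // [expected_delay, expected_delay + 150ms] passes.)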
-  absl::Duration tolerance = expected_delay <= absl::ZeroDuration()
-                                 ? absl::Milliseconds(10)
-                                 : TimeoutTestAllowedSchedulingDelay();
-  if (actual_delay > expected_delay + tolerance) {
-    ABSL_RAW_LOG(WARNING,
-                 "Actual delay %s was too long, expected %s (difference %s)",
-                 absl::FormatDuration(actual_delay).c_str(),
-                 absl::FormatDuration(expected_delay).c_str(),
-                 absl::FormatDuration(actual_delay - expected_delay).c_str());
-    pass = false;
-  }
-  return pass;
-}
-
-// Parameters for TimeoutTest, below.
-struct TimeoutTestParam {
-  // The file and line number (used for logging purposes only).
-  const char *from_file;
-  int from_line;
-
-  // Should the absolute deadline API based on absl::Time be tested?  If false,
-  // the relative deadline API based on absl::Duration is tested.
-  bool use_absolute_deadline;
-
-  // The deadline/timeout used when calling the API being tested
-  // (e.g. Mutex::LockWhenWithDeadline).
-  absl::Duration wait_timeout;
-
-  // The delay before the condition will be set true by the test code.  If zero
-  // or negative, the condition is set true immediately (before calling the API
-  // being tested).  Otherwise, if infinite, the condition is never set true.
-  // Otherwise a closure is scheduled for the future that sets the condition
-  // true.
-  absl::Duration satisfy_condition_delay;
-
-  // The expected result of the condition after the call to the API being
-  // tested. Generally `true` means the condition was true when the API
-  // returned, while `false` indicates an expected timeout.
-  bool expected_result;
-
-  // The expected delay before the API under test returns.  This is inherently
-  // flaky, so some slop is allowed (see `DelayIsWithinBounds` above), and the
-  // test keeps trying indefinitely until this constraint passes.
-  absl::Duration expected_delay;
-};
-
-// Print a `TimeoutTestParam` to a debug log.
-std::ostream &operator<<(std::ostream &os, const TimeoutTestParam &param) {
-  return os << "from: " << param.from_file << ":" << param.from_line
-            << " use_absolute_deadline: "
-            << (param.use_absolute_deadline ? "true" : "false")
-            << " wait_timeout: " << param.wait_timeout
-            << " satisfy_condition_delay: " << param.satisfy_condition_delay
-            << " expected_result: "
-            << (param.expected_result ? "true" : "false")
-            << " expected_delay: " << param.expected_delay;
-}
-
-std::string FormatString(const TimeoutTestParam &param) {
-  std::ostringstream os;
-  os << param;
-  return os.str();
-}
-
-// Like `thread::Executor::ScheduleAt` except:
-// a) Delays zero or negative are executed immediately in the current thread.
-// b) Infinite delays are never scheduled.
-// c) Calls this test's `ScheduleAfter` helper instead of using `pool` directly.
-static void RunAfterDelay(absl::Duration delay,
-                          absl::synchronization_internal::ThreadPool *pool,
-                          const std::function<void()> &callback) {
-  if (delay <= absl::ZeroDuration()) {
-    callback();  // immediate
-  } else if (delay != absl::InfiniteDuration()) {
-    ScheduleAfter(pool, delay, callback);
-  }
-}
-
-class TimeoutTest : public ::testing::Test,
-                    public ::testing::WithParamInterface<TimeoutTestParam> {};
-
-std::vector<TimeoutTestParam> MakeTimeoutTestParamValues() {
-  // The `finite` delay is a finite, relatively short, delay.  We make it larger
-  // than our allowed scheduling delay (slop factor) to avoid confusion when
-  // diagnosing test failures.  The other constants here have clear meanings.
-  const absl::Duration finite = 3 * TimeoutTestAllowedSchedulingDelay();
-  const absl::Duration never = absl::InfiniteDuration();
-  const absl::Duration negative = -absl::InfiniteDuration();
-  const absl::Duration immediate = absl::ZeroDuration();
-
-  // Every test case is run twice; once using the absolute deadline API and once
-  // using the relative timeout API.
-  std::vector<TimeoutTestParam> values;
-  for (bool use_absolute_deadline : {false, true}) {
-    // Tests with a negative timeout (deadline in the past), which should
-    // immediately return current state of the condition.
-
-    // The condition is already true:
-    values.push_back(TimeoutTestParam{
-        __FILE__, __LINE__, use_absolute_deadline,
-        negative,   // wait_timeout
-        immediate,  // satisfy_condition_delay
-        true,       // expected_result
-        immediate,  // expected_delay
-    });
-
-    // The condition becomes true, but the timeout has already expired:
-    values.push_back(TimeoutTestParam{
-        __FILE__, __LINE__, use_absolute_deadline,
-        negative,  // wait_timeout
-        finite,    // satisfy_condition_delay
-        false,     // expected_result
-        immediate  // expected_delay
-    });
-
-    // The condition never becomes true:
-    values.push_back(TimeoutTestParam{
-        __FILE__, __LINE__, use_absolute_deadline,
-        negative,  // wait_timeout
-        never,     // satisfy_condition_delay
-        false,     // expected_result
-        immediate  // expected_delay
-    });
-
-    // Tests with an infinite timeout (deadline in the infinite future), which
-    // should only return when the condition becomes true.
-
-    // The condition is already true:
-    values.push_back(TimeoutTestParam{
-        __FILE__, __LINE__, use_absolute_deadline,
-        never,      // wait_timeout
-        immediate,  // satisfy_condition_delay
-        true,       // expected_result
-        immediate   // expected_delay
-    });
-
-    // The condition becomes true before the (infinite) expiry:
-    values.push_back(TimeoutTestParam{
-        __FILE__, __LINE__, use_absolute_deadline,
-        never,   // wait_timeout
-        finite,  // satisfy_condition_delay
-        true,    // expected_result
-        finite,  // expected_delay
-    });
-
-    // Tests with a (small) finite timeout (deadline soon), with the condition
-    // becoming true both before and after its expiry.
-
-    // The condition is already true:
-    values.push_back(TimeoutTestParam{
-        __FILE__, __LINE__, use_absolute_deadline,
-        finite,     // wait_timeout
-        immediate,  // satisfy_condition_delay
-        true,       // expected_result
-        immediate   // expected_delay
-    });
-
-    // The condition becomes true before the expiry:
-    values.push_back(TimeoutTestParam{
-        __FILE__, __LINE__, use_absolute_deadline,
-        finite * 2,  // wait_timeout
-        finite,      // satisfy_condition_delay
-        true,        // expected_result
-        finite       // expected_delay
-    });
-
-    // The condition becomes true, but the timeout has already expired:
-    values.push_back(TimeoutTestParam{
-        __FILE__, __LINE__, use_absolute_deadline,
-        finite,      // wait_timeout
-        finite * 2,  // satisfy_condition_delay
-        false,       // expected_result
-        finite       // expected_delay
-    });
-
-    // The condition never becomes true:
-    values.push_back(TimeoutTestParam{
-        __FILE__, __LINE__, use_absolute_deadline,
-        finite,  // wait_timeout
-        never,   // satisfy_condition_delay
-        false,   // expected_result
-        finite   // expected_delay
-    });
-  }
-  return values;
-}
-
-// Instantiate `TimeoutTest` with `MakeTimeoutTestParamValues()`.
-INSTANTIATE_TEST_SUITE_P(All, TimeoutTest,
-                         testing::ValuesIn(MakeTimeoutTestParamValues()));
-
-TEST_P(TimeoutTest, Await) {
-  const TimeoutTestParam params = GetParam();
-  ABSL_RAW_LOG(INFO, "Params: %s", FormatString(params).c_str());
-
-  // Because this test asserts bounds on scheduling delays, it is flaky.  To
-  // compensate, it loops forever until it passes.  Failures manifest as test
-  // timeouts, in which case the test log can be used to diagnose the issue.
-  for (int attempt = 1;; ++attempt) {
-    ABSL_RAW_LOG(INFO, "Attempt %d", attempt);
-
-    absl::Mutex mu;
-    bool value = false;  // condition value (under mu)
-
-    std::unique_ptr<absl::synchronization_internal::ThreadPool> pool =
-        CreateDefaultPool();
-    RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] {
-      absl::MutexLock l(&mu);
-      value = true;
-    });
-
-    absl::MutexLock lock(&mu);
-    absl::Time start_time = absl::Now();
-    absl::Condition cond(&value);
-    bool result =
-        params.use_absolute_deadline
-            ? mu.AwaitWithDeadline(cond, start_time + params.wait_timeout)
-            : mu.AwaitWithTimeout(cond, params.wait_timeout);
-    if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) {
-      EXPECT_EQ(params.expected_result, result);
-      break;
-    }
-  }
-}
-
-TEST_P(TimeoutTest, LockWhen) {
-  const TimeoutTestParam params = GetParam();
-  ABSL_RAW_LOG(INFO, "Params: %s", FormatString(params).c_str());
-
-  // Because this test asserts bounds on scheduling delays, it is flaky.  To
-  // compensate, it loops forever until it passes.  Failures manifest as test
-  // timeouts, in which case the test log can be used to diagnose the issue.
-  for (int attempt = 1;; ++attempt) {
-    ABSL_RAW_LOG(INFO, "Attempt %d", attempt);
-
-    absl::Mutex mu;
-    bool value = false;  // condition value (under mu)
-
-    std::unique_ptr<absl::synchronization_internal::ThreadPool> pool =
-        CreateDefaultPool();
-    RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] {
-      absl::MutexLock l(&mu);
-      value = true;
-    });
-
-    absl::Time start_time = absl::Now();
-    absl::Condition cond(&value);
-    bool result =
-        params.use_absolute_deadline
-            ? mu.LockWhenWithDeadline(cond, start_time + params.wait_timeout)
-            : mu.LockWhenWithTimeout(cond, params.wait_timeout);
-    mu.Unlock();
-
-    if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) {
-      EXPECT_EQ(params.expected_result, result);
-      break;
-    }
-  }
-}
-
-TEST_P(TimeoutTest, ReaderLockWhen) {
-  const TimeoutTestParam params = GetParam();
-  ABSL_RAW_LOG(INFO, "Params: %s", FormatString(params).c_str());
-
-  // Because this test asserts bounds on scheduling delays, it is flaky.  To
-  // compensate, it loops forever until it passes.  Failures manifest as test
-  // timeouts, in which case the test log can be used to diagnose the issue.
-  for (int attempt = 1;; ++attempt) {
-    ABSL_RAW_LOG(INFO, "Attempt %d", attempt);
-
-    absl::Mutex mu;
-    bool value = false;  // condition value (under mu)
-
-    std::unique_ptr<absl::synchronization_internal::ThreadPool> pool =
-        CreateDefaultPool();
-    RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] {
-      absl::MutexLock l(&mu);
-      value = true;
-    });
-
-    absl::Time start_time = absl::Now();
-    bool result =
-        params.use_absolute_deadline
-            ? mu.ReaderLockWhenWithDeadline(absl::Condition(&value),
-                                            start_time + params.wait_timeout)
-            : mu.ReaderLockWhenWithTimeout(absl::Condition(&value),
-                                           params.wait_timeout);
-    mu.ReaderUnlock();
-
-    if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) {
-      EXPECT_EQ(params.expected_result, result);
-      break;
-    }
-  }
-}
-
-TEST_P(TimeoutTest, Wait) {
-  const TimeoutTestParam params = GetParam();
-  ABSL_RAW_LOG(INFO, "Params: %s", FormatString(params).c_str());
-
-  // Because this test asserts bounds on scheduling delays, it is flaky.  To
-  // compensate, it loops forever until it passes.  Failures manifest as test
-  // timeouts, in which case the test log can be used to diagnose the issue.
-  for (int attempt = 1;; ++attempt) {
-    ABSL_RAW_LOG(INFO, "Attempt %d", attempt);
-
-    absl::Mutex mu;
-    bool value = false;  // condition value (under mu)
-    absl::CondVar cv;    // signals a change of `value`
-
-    std::unique_ptr<absl::synchronization_internal::ThreadPool> pool =
-        CreateDefaultPool();
-    RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] {
-      absl::MutexLock l(&mu);
-      value = true;
-      cv.Signal();
-    });
-
-    absl::MutexLock lock(&mu);
-    absl::Time start_time = absl::Now();
-    absl::Duration timeout = params.wait_timeout;
-    absl::Time deadline = start_time + timeout;
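-    // CondVar waits can wake spuriously, so loop: re-check `value` after
-    // every wakeup and, on the relative-timeout path, shrink `timeout` so the
-    // overall deadline stays fixed across iterations.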
-    while (!value) {
-      if (params.use_absolute_deadline ? cv.WaitWithDeadline(&mu, deadline)
-                                       : cv.WaitWithTimeout(&mu, timeout)) {
-        break;  // deadline/timeout exceeded
-      }
-      timeout = deadline - absl::Now();  // recompute
-    }
-    bool result = value;  // note: `mu` is still held
-
-    if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) {
-      EXPECT_EQ(params.expected_result, result);
-      break;
-    }
-  }
-}
-
-TEST(Mutex, Logging) {
-  // Allow user to look at logging output
-  absl::Mutex logged_mutex;
-  logged_mutex.EnableDebugLog("fido_mutex");
-  absl::CondVar logged_cv;
-  logged_cv.EnableDebugLog("rover_cv");
-  logged_mutex.Lock();
-  logged_cv.WaitWithTimeout(&logged_mutex, absl::Milliseconds(20));
-  logged_mutex.Unlock();
-  logged_mutex.ReaderLock();
-  logged_mutex.ReaderUnlock();
-  logged_mutex.Lock();
-  logged_mutex.Unlock();
-  logged_cv.Signal();
-  logged_cv.SignalAll();
-}
-
-// --------------------------------------------------------
-
-// Generate the vector of thread counts for tests parameterized on thread count.
-static std::vector<int> AllThreadCountValues() {
-  if (kExtendedTest) {
-    return {2, 4, 8, 10, 16, 20, 24, 30, 32};
-  }
-  return {2, 4, 10};
-}
-
-// A test fixture parameterized by thread count.
-class MutexVariableThreadCountTest : public ::testing::TestWithParam<int> {};
-
-// Instantiate the above with AllThreadCountValues().
-INSTANTIATE_TEST_SUITE_P(ThreadCounts, MutexVariableThreadCountTest,
-                         ::testing::ValuesIn(AllThreadCountValues()),
-                         ::testing::PrintToStringParamName());
-
-// Reduces iterations by some factor for slow platforms
-// (determined empirically).
-static int ScaleIterations(int x) {
-  // ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE is set in the implementation
-  // of Mutex that uses either std::mutex or pthread_mutex_t. Use
-  // these as keys to determine the slow implementation.
-#if defined(ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE)
-  return x / 10;
-#else
-  return x;
-#endif
-}
-
-TEST_P(MutexVariableThreadCountTest, Mutex) {
-  int threads = GetParam();
-  int iterations = ScaleIterations(10000000) / threads;
-  int operations = threads * iterations;
-  EXPECT_EQ(RunTest(&TestMu, threads, iterations, operations), operations);
-#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
-  iterations = std::min(iterations, 10);
-  operations = threads * iterations;
-  EXPECT_EQ(RunTestWithInvariantDebugging(&TestMu, threads, iterations,
-                                          operations, CheckSumG0G1),
-            operations);
-#endif
-}
-
-TEST_P(MutexVariableThreadCountTest, Try) {
-  int threads = GetParam();
-  int iterations = 1000000 / threads;
-  int operations = iterations * threads;
-  EXPECT_EQ(RunTest(&TestTry, threads, iterations, operations), operations);
-#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
-  iterations = std::min(iterations, 10);
-  operations = threads * iterations;
-  EXPECT_EQ(RunTestWithInvariantDebugging(&TestTry, threads, iterations,
-                                          operations, CheckSumG0G1),
-            operations);
-#endif
-}
-
-TEST_P(MutexVariableThreadCountTest, R20ms) {
-  int threads = GetParam();
-  int iterations = 100;
-  int operations = iterations * threads;
-  EXPECT_EQ(RunTest(&TestR20ms, threads, iterations, operations), 0);
-}
-
-TEST_P(MutexVariableThreadCountTest, RW) {
-  int threads = GetParam();
-  int iterations = ScaleIterations(20000000) / threads;
-  int operations = iterations * threads;
-  EXPECT_EQ(RunTest(&TestRW, threads, iterations, operations), operations / 2);
-#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
-  iterations = std::min(iterations, 10);
-  operations = threads * iterations;
-  EXPECT_EQ(RunTestWithInvariantDebugging(&TestRW, threads, iterations,
-                                          operations, CheckSumG0G1),
-            operations / 2);
-#endif
-}
-
-TEST_P(MutexVariableThreadCountTest, Await) {
-  int threads = GetParam();
-  int iterations = ScaleIterations(500000);
-  int operations = iterations;
-  EXPECT_EQ(RunTest(&TestAwait, threads, iterations, operations), operations);
-}
-
-TEST_P(MutexVariableThreadCountTest, SignalAll) {
-  int threads = GetParam();
-  int iterations = 200000 / threads;
-  int operations = iterations;
-  EXPECT_EQ(RunTest(&TestSignalAll, threads, iterations, operations),
-            operations);
-}
-
-TEST(Mutex, Signal) {
-  int threads = 2;  // TestSignal must use two threads
-  int iterations = 200000;
-  int operations = iterations;
-  EXPECT_EQ(RunTest(&TestSignal, threads, iterations, operations), operations);
-}
-
-TEST(Mutex, Timed) {
-  int threads = 10;  // Use a fixed thread count of 10
-  int iterations = 1000;
-  int operations = iterations;
-  EXPECT_EQ(RunTest(&TestCVTimeout, threads, iterations, operations),
-            operations);
-}
-
-TEST(Mutex, CVTime) {
-  int threads = 10;  // Use a fixed thread count of 10
-  int iterations = 1;
-  EXPECT_EQ(RunTest(&TestCVTime, threads, iterations, 1),
-            threads * iterations);
-}
-
-TEST(Mutex, MuTime) {
-  int threads = 10;  // Use a fixed thread count of 10
-  int iterations = 1;
-  EXPECT_EQ(RunTest(&TestMuTime, threads, iterations, 1), threads * iterations);
-}
-
-}  // namespace
diff --git a/third_party/abseil_cpp/absl/synchronization/notification.cc b/third_party/abseil_cpp/absl/synchronization/notification.cc
deleted file mode 100644
index e91b90382271..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/notification.cc
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/synchronization/notification.h"
-
-#include <atomic>
-
-#include "absl/base/attributes.h"
-#include "absl/base/internal/raw_logging.h"
-#include "absl/synchronization/mutex.h"
-#include "absl/time/time.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-void Notification::Notify() {
-  MutexLock l(&this->mutex_);
-
-#ifndef NDEBUG
-  if (ABSL_PREDICT_FALSE(notified_yet_.load(std::memory_order_relaxed))) {
-    ABSL_RAW_LOG(
-        FATAL,
-        "Notify() method called more than once for Notification object %p",
-        static_cast<void *>(this));
-  }
-#endif
-
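-  // This release store pairs with the acquire load in
-  // HasBeenNotifiedInternal(); everything written before Notify() is visible
-  // to any thread that subsequently observes `notified_yet_` as true.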
-  notified_yet_.store(true, std::memory_order_release);
-}
-
-Notification::~Notification() {
-  // Make sure that the thread running Notify() exits before the object is
-  // destructed.
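-  // Acquiring mutex_ here blocks until a concurrent Notify() has released it,
-  // so Notify() cannot still be using the Mutex once destruction proceeds.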
-  MutexLock l(&this->mutex_);
-}
-
-void Notification::WaitForNotification() const {
-  if (!HasBeenNotifiedInternal(&this->notified_yet_)) {
-    this->mutex_.LockWhen(Condition(&HasBeenNotifiedInternal,
-                                    &this->notified_yet_));
-    this->mutex_.Unlock();
-  }
-}
-
-bool Notification::WaitForNotificationWithTimeout(
-    absl::Duration timeout) const {
-  bool notified = HasBeenNotifiedInternal(&this->notified_yet_);
-  if (!notified) {
-    notified = this->mutex_.LockWhenWithTimeout(
-        Condition(&HasBeenNotifiedInternal, &this->notified_yet_), timeout);
-    this->mutex_.Unlock();
-  }
-  return notified;
-}
-
-bool Notification::WaitForNotificationWithDeadline(absl::Time deadline) const {
-  bool notified = HasBeenNotifiedInternal(&this->notified_yet_);
-  if (!notified) {
-    notified = this->mutex_.LockWhenWithDeadline(
-        Condition(&HasBeenNotifiedInternal, &this->notified_yet_), deadline);
-    this->mutex_.Unlock();
-  }
-  return notified;
-}
-
-ABSL_NAMESPACE_END
-}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/synchronization/notification.h b/third_party/abseil_cpp/absl/synchronization/notification.h
deleted file mode 100644
index 9a354ca2c023..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/notification.h
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// -----------------------------------------------------------------------------
-// notification.h
-// -----------------------------------------------------------------------------
-//
-// This header file defines a `Notification` abstraction, which allows threads
-// to receive notification of a single occurrence of a single event.
-//
-// The `Notification` object maintains a private boolean "notified" state that
-// transitions to `true` at most once. The `Notification` class provides the
-// following primary member functions:
-//   * `HasBeenNotified()` to query its state.
-//   * `WaitForNotification*()` to have threads wait until the "notified" state
-//      is `true`.
-//   * `Notify()` to set the notification's "notified" state to `true` and
-//     notify all waiting threads that the event has occurred.
-//     This method may only be called once.
-//
-// Note that while `Notify()` may only be called once, it is perfectly valid to
-// call any of the `WaitForNotification*()` methods multiple times, from
-// multiple threads -- even after the notification's "notified" state has been
-// set -- in which case those methods will immediately return.
-//
-// Note that the lifetime of a `Notification` requires careful consideration;
-// it might not be safe to destroy a notification after calling `Notify()` since
-// it is still legal for other threads to call `WaitForNotification*()` methods
-// on the notification. However, observers responding to a "notified" state of
-// `true` can safely delete the notification without interfering with the call
-// to `Notify()` in the other thread.
-//
-// Memory ordering: For any threads X and Y, if X calls `Notify()`, then any
-// action taken by X before it calls `Notify()` is visible to thread Y after:
-//  * Y returns from `WaitForNotification()`, or
-//  * Y receives a `true` return value from `HasBeenNotified()`,
-//    `WaitForNotificationWithTimeout()`, or
-//    `WaitForNotificationWithDeadline()`.
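-//
-// Example (an illustrative sketch; `InitData` and `UseData` stand in for
-// application code):
-//
-//   absl::Notification data_ready;
-//   std::thread producer([&] {
-//     InitData();            // writes made here...
-//     data_ready.Notify();
-//   });
-//   data_ready.WaitForNotification();  // blocks until Notify() is called
-//   UseData();               // ...are guaranteed visible here
-//   producer.join();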
-
-#ifndef ABSL_SYNCHRONIZATION_NOTIFICATION_H_
-#define ABSL_SYNCHRONIZATION_NOTIFICATION_H_
-
-#include <atomic>
-
-#include "absl/base/macros.h"
-#include "absl/synchronization/mutex.h"
-#include "absl/time/time.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-// -----------------------------------------------------------------------------
-// Notification
-// -----------------------------------------------------------------------------
-class Notification {
- public:
-  // Initializes the "notified" state to unnotified.
-  Notification() : notified_yet_(false) {}
-  explicit Notification(bool prenotify) : notified_yet_(prenotify) {}
-  Notification(const Notification&) = delete;
-  Notification& operator=(const Notification&) = delete;
-  ~Notification();
-
-  // Notification::HasBeenNotified()
-  //
-  // Returns the value of the notification's internal "notified" state.
-  bool HasBeenNotified() const {
-    return HasBeenNotifiedInternal(&this->notified_yet_);
-  }
-
-  // Notification::WaitForNotification()
-  //
-  // Blocks the calling thread until the notification's "notified" state is
-  // `true`. Note that if `Notify()` has been previously called on this
-  // notification, this function will immediately return.
-  void WaitForNotification() const;
-
-  // Notification::WaitForNotificationWithTimeout()
-  //
-  // Blocks until either the notification's "notified" state is `true` (which
-  // may occur immediately) or the timeout has elapsed, returning the value of
-  // its "notified" state in either case.
-  bool WaitForNotificationWithTimeout(absl::Duration timeout) const;
-
-  // Notification::WaitForNotificationWithDeadline()
-  //
-  // Blocks until either the notification's "notified" state is `true` (which
-  // may occur immediately) or the deadline has expired, returning the value of
-  // its "notified" state in either case.
-  bool WaitForNotificationWithDeadline(absl::Time deadline) const;
-
-  // Notification::Notify()
-  //
-  // Sets the "notified" state of this notification to `true` and wakes waiting
-  // threads. Note: do not call `Notify()` multiple times on the same
-  // `Notification`; calling `Notify()` more than once on the same notification
-  // results in undefined behavior.
-  void Notify();
-
- private:
-  static inline bool HasBeenNotifiedInternal(
-      const std::atomic<bool>* notified_yet) {
-    return notified_yet->load(std::memory_order_acquire);
-  }
-
-  mutable Mutex mutex_;
-  std::atomic<bool> notified_yet_;  // written under mutex_
-};
-
-ABSL_NAMESPACE_END
-}  // namespace absl
-
-#endif  // ABSL_SYNCHRONIZATION_NOTIFICATION_H_
diff --git a/third_party/abseil_cpp/absl/synchronization/notification_test.cc b/third_party/abseil_cpp/absl/synchronization/notification_test.cc
deleted file mode 100644
index 100ea76f33ac..000000000000
--- a/third_party/abseil_cpp/absl/synchronization/notification_test.cc
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/synchronization/notification.h"
-
-#include <thread>  // NOLINT(build/c++11)
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/synchronization/mutex.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-// A thread-safe class that holds a counter.
-class ThreadSafeCounter {
- public:
-  ThreadSafeCounter() : count_(0) {}
-
-  void Increment() {
-    MutexLock lock(&mutex_);
-    ++count_;
-  }
-
-  int Get() const {
-    MutexLock lock(&mutex_);
-    return count_;
-  }
-
-  void WaitUntilGreaterOrEqual(int n) {
-    MutexLock lock(&mutex_);
-    auto cond = [this, n]() { return count_ >= n; };
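-    // Condition(&cond) wraps a pointer to the lambda; Await() releases
-    // mutex_ while blocked and re-acquires it before returning, so the
-    // condition is always evaluated with mutex_ held.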
-    mutex_.Await(Condition(&cond));
-  }
-
- private:
-  mutable Mutex mutex_;
-  int count_;
-};
-
-// Runs the |i|'th worker thread for the tests in BasicTests().  Increments the
-// |ready_counter|, waits on the |notification|, and then increments the
-// |done_counter|.
-static void RunWorker(int i, ThreadSafeCounter* ready_counter,
-                      Notification* notification,
-                      ThreadSafeCounter* done_counter) {
-  ready_counter->Increment();
-  notification->WaitForNotification();
-  done_counter->Increment();
-}
-
-// Tests that the |notification| properly blocks and awakens threads.  Assumes
-// that the |notification| is not yet triggered.  If |notify_before_waiting| is
-// true, the |notification| is triggered before any threads are created, so the
-// threads never block in WaitForNotification().  Otherwise, the |notification|
-// is triggered at a later point when most threads are likely to be blocking in
-// WaitForNotification().
-static void BasicTests(bool notify_before_waiting,
-                       Notification* notification) {
-  EXPECT_FALSE(notification->HasBeenNotified());
-  EXPECT_FALSE(
-      notification->WaitForNotificationWithTimeout(absl::Milliseconds(0)));
-  EXPECT_FALSE(notification->WaitForNotificationWithDeadline(absl::Now()));
-
-  const absl::Duration delay = absl::Milliseconds(50);
-  const absl::Time start = absl::Now();
-  EXPECT_FALSE(notification->WaitForNotificationWithTimeout(delay));
-  const absl::Duration elapsed = absl::Now() - start;
-
-  // Allow for a slight early return, to account for quality of implementation
-  // issues on various platforms.
-  const absl::Duration slop = absl::Microseconds(200);
-  EXPECT_LE(delay - slop, elapsed)
-      << "WaitForNotificationWithTimeout returned " << delay - elapsed
-      << " early (with " << slop << " slop), start time was " << start;
-
-  ThreadSafeCounter ready_counter;
-  ThreadSafeCounter done_counter;
-
-  if (notify_before_waiting) {
-    notification->Notify();
-  }
-
-  // Create a bunch of threads that increment the |done_counter| after being
-  // notified.
-  const int kNumThreads = 10;
-  std::vector<std::thread> workers;
-  for (int i = 0; i < kNumThreads; ++i) {
-    workers.push_back(std::thread(&RunWorker, i, &ready_counter, notification,
-                                  &done_counter));
-  }
-
-  if (!notify_before_waiting) {
-    ready_counter.WaitUntilGreaterOrEqual(kNumThreads);
-
-    // Workers have not been notified yet, so the |done_counter| should be
-    // unmodified.
-    EXPECT_EQ(0, done_counter.Get());
-
-    notification->Notify();
-  }
-
-  // After notifying and then joining the workers, both counters should be
-  // fully incremented.
-  notification->WaitForNotification();  // should exit immediately
-  EXPECT_TRUE(notification->HasBeenNotified());
-  EXPECT_TRUE(notification->WaitForNotificationWithTimeout(absl::Seconds(0)));
-  EXPECT_TRUE(notification->WaitForNotificationWithDeadline(absl::Now()));
-  for (std::thread& worker : workers) {
-    worker.join();
-  }
-  EXPECT_EQ(kNumThreads, ready_counter.Get());
-  EXPECT_EQ(kNumThreads, done_counter.Get());
-}
-
-TEST(NotificationTest, SanityTest) {
-  Notification local_notification1, local_notification2;
-  BasicTests(false, &local_notification1);
-  BasicTests(true, &local_notification2);
-}
-
-ABSL_NAMESPACE_END
-}  // namespace absl