Diffstat (limited to 'third_party/abseil_cpp/absl/container')
 third_party/abseil_cpp/absl/container/BUILD.bazel                             |   7
 third_party/abseil_cpp/absl/container/CMakeLists.txt                          |   5
 third_party/abseil_cpp/absl/container/btree_map.h                             |  12
 third_party/abseil_cpp/absl/container/btree_set.h                             |   2
 third_party/abseil_cpp/absl/container/btree_test.cc                           | 461
 third_party/abseil_cpp/absl/container/fixed_array.h                           |  33
 third_party/abseil_cpp/absl/container/fixed_array_exception_safety_test.cc    |   3
 third_party/abseil_cpp/absl/container/fixed_array_test.cc                     |   5
 third_party/abseil_cpp/absl/container/flat_hash_map.h                         |   8
 third_party/abseil_cpp/absl/container/flat_hash_map_test.cc                   |  15
 third_party/abseil_cpp/absl/container/flat_hash_set.h                         |   5
 third_party/abseil_cpp/absl/container/inlined_vector.h                        |   2
 third_party/abseil_cpp/absl/container/inlined_vector_test.cc                  |  36
 third_party/abseil_cpp/absl/container/internal/btree.h                        | 651
 third_party/abseil_cpp/absl/container/internal/btree_container.h              | 293
 third_party/abseil_cpp/absl/container/internal/common.h                       |   7
 third_party/abseil_cpp/absl/container/internal/compressed_tuple.h             |   2
 third_party/abseil_cpp/absl/container/internal/container_memory.h             |  61
 third_party/abseil_cpp/absl/container/internal/hash_function_defaults_test.cc |   4
 third_party/abseil_cpp/absl/container/internal/hash_generator_testing.cc      |   6
 third_party/abseil_cpp/absl/container/internal/hash_policy_traits.h           |  31
 third_party/abseil_cpp/absl/container/internal/hashtablez_sampler.cc          |   1
 third_party/abseil_cpp/absl/container/internal/hashtablez_sampler.h           |  39
 third_party/abseil_cpp/absl/container/internal/hashtablez_sampler_test.cc     |  16
 third_party/abseil_cpp/absl/container/internal/inlined_vector.h               | 125
 third_party/abseil_cpp/absl/container/internal/layout.h                       |  12
 third_party/abseil_cpp/absl/container/internal/layout_test.cc                 | 564
 third_party/abseil_cpp/absl/container/internal/raw_hash_set.cc                |  15
 third_party/abseil_cpp/absl/container/internal/raw_hash_set.h                 | 226
 third_party/abseil_cpp/absl/container/internal/raw_hash_set_allocator_test.cc |  75
 third_party/abseil_cpp/absl/container/internal/raw_hash_set_test.cc           |  38
 third_party/abseil_cpp/absl/container/node_hash_map.h                         |   8
 third_party/abseil_cpp/absl/container/node_hash_map_test.cc                   |  15
 third_party/abseil_cpp/absl/container/node_hash_set.h                         |   3
 34 files changed, 1777 insertions(+), 1009 deletions(-)
diff --git a/third_party/abseil_cpp/absl/container/BUILD.bazel b/third_party/abseil_cpp/absl/container/BUILD.bazel
index 069212354c8d..8e72ad03547c 100644
--- a/third_party/abseil_cpp/absl/container/BUILD.bazel
+++ b/third_party/abseil_cpp/absl/container/BUILD.bazel
@@ -24,7 +24,7 @@ load(
 
 package(default_visibility = ["//visibility:public"])
 
-licenses(["notice"])  # Apache 2.0
+licenses(["notice"])
 
 cc_library(
     name = "compressed_tuple",
@@ -60,6 +60,7 @@ cc_library(
     deps = [
         ":compressed_tuple",
         "//absl/algorithm",
+        "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/base:dynamic_annotations",
         "//absl/base:throw_delegate",
@@ -368,6 +369,7 @@ cc_library(
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
+        "//absl/base:config",
         "//absl/memory",
         "//absl/meta:type_traits",
         "//absl/utility",
@@ -620,6 +622,7 @@ cc_test(
         ":hashtable_debug",
         ":raw_hash_set",
         "//absl/base",
+        "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/base:raw_logging_internal",
         "//absl/strings",
@@ -647,6 +650,7 @@ cc_library(
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
+        "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/meta:type_traits",
         "//absl/strings",
@@ -665,6 +669,7 @@ cc_test(
     visibility = ["//visibility:private"],
     deps = [
         ":layout",
+        "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/base:raw_logging_internal",
         "//absl/types:span",
diff --git a/third_party/abseil_cpp/absl/container/CMakeLists.txt b/third_party/abseil_cpp/absl/container/CMakeLists.txt
index 13d969576370..eb202c459b27 100644
--- a/third_party/abseil_cpp/absl/container/CMakeLists.txt
+++ b/third_party/abseil_cpp/absl/container/CMakeLists.txt
@@ -131,6 +131,7 @@ absl_cc_library(
   DEPS
     absl::compressed_tuple
     absl::algorithm
+    absl::config
     absl::core_headers
     absl::dynamic_annotations
     absl::throw_delegate
@@ -423,6 +424,7 @@ absl_cc_library(
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::config
     absl::memory
     absl::type_traits
     absl::utility
@@ -696,6 +698,7 @@ absl_cc_test(
     absl::hashtable_debug
     absl::raw_hash_set
     absl::base
+    absl::config
     absl::core_headers
     absl::raw_logging_internal
     absl::strings
@@ -724,6 +727,7 @@ absl_cc_library(
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::config
     absl::core_headers
     absl::meta
     absl::strings
@@ -741,6 +745,7 @@ absl_cc_test(
     ${ABSL_TEST_COPTS}
   DEPS
     absl::layout
+    absl::config
     absl::core_headers
     absl::raw_logging_internal
     absl::span
diff --git a/third_party/abseil_cpp/absl/container/btree_map.h b/third_party/abseil_cpp/absl/container/btree_map.h
index bb450eadde7c..abc09b0ac068 100644
--- a/third_party/abseil_cpp/absl/container/btree_map.h
+++ b/third_party/abseil_cpp/absl/container/btree_map.h
@@ -185,7 +185,7 @@ class btree_map
   // template <typename K> size_type erase(const K& key):
   //
   //   Erases the element with the matching key, if it exists, returning the
-  //   number of elements erased.
+  //   number of elements erased (0 or 1).
   using Base::erase;
 
   // btree_map::insert()
@@ -325,6 +325,11 @@ class btree_map
   //   does not contain an element with a matching key, this function returns an
   //   empty node handle.
   //
+  // NOTE: when compiled in an earlier version of C++ than C++17,
+  // `node_type::key()` returns a const reference to the key instead of a
+  // mutable reference. We cannot safely return a mutable reference without
+  // std::launder (which is not available before C++17).
+  //
   // NOTE: In this context, `node_type` refers to the C++17 concept of a
   // move-only type that owns and provides access to the elements in associative
   // containers (https://en.cppreference.com/w/cpp/container/node_handle).
@@ -652,6 +657,11 @@ class btree_multimap
   //   does not contain an element with a matching key, this function returns an
   //   empty node handle.
   //
+  // NOTE: when compiled in an earlier version of C++ than C++17,
+  // `node_type::key()` returns a const reference to the key instead of a
+  // mutable reference. We cannot safely return a mutable reference without
+  // std::launder (which is not available before C++17).
+  //
   // NOTE: In this context, `node_type` refers to the C++17 concept of a
   // move-only type that owns and provides access to the elements in associative
   // containers (https://en.cppreference.com/w/cpp/container/node_handle).
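
A minimal sketch of the node-handle behavior the NOTE above describes, assuming C++17 (before C++17, node_type::key() returns a const reference); it mirrors the NodeHandleMutableKeyAccess test added to btree_test.cc later in this patch:

#include <string>
#include <utility>
#include "absl/container/btree_map.h"

void MutateKeyViaNodeHandle() {
  absl::btree_map<std::string, std::string> map;
  map["key1"] = "mapped";

  auto nh = map.extract(map.begin());  // The map no longer owns the element.
  nh.key().resize(3);                  // "key1" -> "key"; safe: the handle owns it.
  map.insert(std::move(nh));           // Reinsert under the modified key.
  // map now holds {"key", "mapped"}.
}
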
diff --git a/third_party/abseil_cpp/absl/container/btree_set.h b/third_party/abseil_cpp/absl/container/btree_set.h
index d3e78866a7ed..21ef0a032a85 100644
--- a/third_party/abseil_cpp/absl/container/btree_set.h
+++ b/third_party/abseil_cpp/absl/container/btree_set.h
@@ -183,7 +183,7 @@ class btree_set
   // template <typename K> size_type erase(const K& key):
   //
   //   Erases the element with the matching key, if it exists, returning the
-  //   number of elements erased.
+  //   number of elements erased (0 or 1).
   using Base::erase;
 
   // btree_set::insert()
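
The "(0 or 1)" clarification above applies to unique-key containers; multi-containers may erase more (see the MultiKeyErase test added below). A short sketch:

#include "absl/container/btree_set.h"

void EraseReturnsCount() {
  absl::btree_set<int> set = {1, 2, 3};
  auto erased = set.erase(2);  // 1: a matching element existed.
  erased = set.erase(42);      // 0: no matching key.
  (void)erased;
}
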
diff --git a/third_party/abseil_cpp/absl/container/btree_test.cc b/third_party/abseil_cpp/absl/container/btree_test.cc
index bbdb5f42a621..9b1b6436c7ca 100644
--- a/third_party/abseil_cpp/absl/container/btree_test.cc
+++ b/third_party/abseil_cpp/absl/container/btree_test.cc
@@ -15,6 +15,7 @@
 #include "absl/container/btree_test.h"
 
 #include <cstdint>
+#include <limits>
 #include <map>
 #include <memory>
 #include <stdexcept>
@@ -52,7 +53,9 @@ using ::absl::test_internal::MovableOnlyInstance;
 using ::testing::ElementsAre;
 using ::testing::ElementsAreArray;
 using ::testing::IsEmpty;
+using ::testing::IsNull;
 using ::testing::Pair;
+using ::testing::SizeIs;
 
 template <typename T, typename U>
 void CheckPairEquals(const T &x, const U &y) {
@@ -1180,6 +1183,103 @@ TEST(Btree, RangeCtorSanity) {
   EXPECT_EQ(1, tmap.size());
 }
 
+}  // namespace
+
+class BtreeNodePeer {
+ public:
+  // Yields the size of a leaf node with a specific number of values.
+  template <typename ValueType>
+  constexpr static size_t GetTargetNodeSize(size_t target_values_per_node) {
+    return btree_node<
+        set_params<ValueType, std::less<ValueType>, std::allocator<ValueType>,
+                   /*TargetNodeSize=*/256,  // This parameter isn't used here.
+                   /*Multi=*/false>>::SizeWithNValues(target_values_per_node);
+  }
+
+  // Yields the number of values in a (non-root) leaf node for this btree.
+  template <typename Btree>
+  constexpr static size_t GetNumValuesPerNode() {
+    return btree_node<typename Btree::params_type>::kNodeValues;
+  }
+
+  template <typename Btree>
+  constexpr static size_t GetMaxFieldType() {
+    return std::numeric_limits<
+        typename btree_node<typename Btree::params_type>::field_type>::max();
+  }
+
+  template <typename Btree>
+  constexpr static bool UsesLinearNodeSearch() {
+    return btree_node<typename Btree::params_type>::use_linear_search::value;
+  }
+};
+
+namespace {
+
+class BtreeMapTest : public ::testing::Test {
+ public:
+  struct Key {};
+  struct Cmp {
+    template <typename T>
+    bool operator()(T, T) const {
+      return false;
+    }
+  };
+
+  struct KeyLin {
+    using absl_btree_prefer_linear_node_search = std::true_type;
+  };
+  struct CmpLin : Cmp {
+    using absl_btree_prefer_linear_node_search = std::true_type;
+  };
+
+  struct KeyBin {
+    using absl_btree_prefer_linear_node_search = std::false_type;
+  };
+  struct CmpBin : Cmp {
+    using absl_btree_prefer_linear_node_search = std::false_type;
+  };
+
+  template <typename K, typename C>
+  static bool IsLinear() {
+    return BtreeNodePeer::UsesLinearNodeSearch<absl::btree_map<K, int, C>>();
+  }
+};
+
+TEST_F(BtreeMapTest, TestLinearSearchPreferredForKeyLinearViaAlias) {
+  // Test requesting linear search by directly exporting an alias.
+  EXPECT_FALSE((IsLinear<Key, Cmp>()));
+  EXPECT_TRUE((IsLinear<KeyLin, Cmp>()));
+  EXPECT_TRUE((IsLinear<Key, CmpLin>()));
+  EXPECT_TRUE((IsLinear<KeyLin, CmpLin>()));
+}
+
+TEST_F(BtreeMapTest, LinearChoiceTree) {
+  // Cmp has precedence, and is forcing binary
+  EXPECT_FALSE((IsLinear<Key, CmpBin>()));
+  EXPECT_FALSE((IsLinear<KeyLin, CmpBin>()));
+  EXPECT_FALSE((IsLinear<KeyBin, CmpBin>()));
+  EXPECT_FALSE((IsLinear<int, CmpBin>()));
+  EXPECT_FALSE((IsLinear<std::string, CmpBin>()));
+  // Cmp has precedence, and is forcing linear
+  EXPECT_TRUE((IsLinear<Key, CmpLin>()));
+  EXPECT_TRUE((IsLinear<KeyLin, CmpLin>()));
+  EXPECT_TRUE((IsLinear<KeyBin, CmpLin>()));
+  EXPECT_TRUE((IsLinear<int, CmpLin>()));
+  EXPECT_TRUE((IsLinear<std::string, CmpLin>()));
+  // Cmp has no preference, Key determines linear vs binary.
+  EXPECT_FALSE((IsLinear<Key, Cmp>()));
+  EXPECT_TRUE((IsLinear<KeyLin, Cmp>()));
+  EXPECT_FALSE((IsLinear<KeyBin, Cmp>()));
+  // arithmetic key w/ std::less or std::greater: linear
+  EXPECT_TRUE((IsLinear<int, std::less<int>>()));
+  EXPECT_TRUE((IsLinear<double, std::greater<double>>()));
+  // arithmetic key w/ custom compare: binary
+  EXPECT_FALSE((IsLinear<int, Cmp>()));
+  // non-arithmetic key: binary
+  EXPECT_FALSE((IsLinear<std::string, std::less<std::string>>()));
+}
+
 TEST(Btree, BtreeMapCanHoldMoveOnlyTypes) {
   absl::btree_map<std::string, std::unique_ptr<std::string>> m;
 
@@ -1325,28 +1425,6 @@ TEST(Btree, RValueInsert) {
   EXPECT_EQ(tracker.swaps(), 0);
 }
 
-}  // namespace
-
-class BtreeNodePeer {
- public:
-  // Yields the size of a leaf node with a specific number of values.
-  template <typename ValueType>
-  constexpr static size_t GetTargetNodeSize(size_t target_values_per_node) {
-    return btree_node<
-        set_params<ValueType, std::less<ValueType>, std::allocator<ValueType>,
-                   /*TargetNodeSize=*/256,  // This parameter isn't used here.
-                   /*Multi=*/false>>::SizeWithNValues(target_values_per_node);
-  }
-
-  // Yields the number of values in a (non-root) leaf node for this set.
-  template <typename Set>
-  constexpr static size_t GetNumValuesPerNode() {
-    return btree_node<typename Set::params_type>::kNodeValues;
-  }
-};
-
-namespace {
-
 // A btree set with a specific number of values per node.
 template <typename Key, int TargetValuesPerNode, typename Cmp = std::less<Key>>
 class SizedBtreeSet
@@ -2101,6 +2179,31 @@ TEST(Btree, MergeIntoMultiMapsWithDifferentComparators) {
                                Pair(4, 1), Pair(4, 4), Pair(5, 5)));
 }
 
+TEST(Btree, MergeIntoSetMovableOnly) {
+  absl::btree_set<MovableOnlyInstance> src;
+  src.insert(MovableOnlyInstance(1));
+  absl::btree_multiset<MovableOnlyInstance> dst1;
+  dst1.insert(MovableOnlyInstance(2));
+  absl::btree_set<MovableOnlyInstance> dst2;
+
+  // Test merge into multiset.
+  dst1.merge(src);
+
+  EXPECT_TRUE(src.empty());
+  // ElementsAre/ElementsAreArray don't work with move-only types.
+  ASSERT_THAT(dst1, SizeIs(2));
+  EXPECT_EQ(*dst1.begin(), MovableOnlyInstance(1));
+  EXPECT_EQ(*std::next(dst1.begin()), MovableOnlyInstance(2));
+
+  // Test merge into set.
+  dst2.merge(dst1);
+
+  EXPECT_TRUE(dst1.empty());
+  ASSERT_THAT(dst2, SizeIs(2));
+  EXPECT_EQ(*dst2.begin(), MovableOnlyInstance(1));
+  EXPECT_EQ(*std::next(dst2.begin()), MovableOnlyInstance(2));
+}
+
 struct KeyCompareToWeakOrdering {
   template <typename T>
   absl::weak_ordering operator()(const T &a, const T &b) const {
@@ -2404,6 +2507,320 @@ TEST(Btree, BitfieldArgument) {
   m[n];
 }
 
+TEST(Btree, SetRangeConstructorAndInsertSupportExplicitConversionComparable) {
+  const absl::string_view names[] = {"n1", "n2"};
+
+  absl::btree_set<std::string> name_set1{std::begin(names), std::end(names)};
+  EXPECT_THAT(name_set1, ElementsAreArray(names));
+
+  absl::btree_set<std::string> name_set2;
+  name_set2.insert(std::begin(names), std::end(names));
+  EXPECT_THAT(name_set2, ElementsAreArray(names));
+}
+
+// A type that is explicitly convertible from int and counts constructor calls.
+struct ConstructorCounted {
+  explicit ConstructorCounted(int i) : i(i) { ++constructor_calls; }
+  bool operator==(int other) const { return i == other; }
+
+  int i;
+  static int constructor_calls;
+};
+int ConstructorCounted::constructor_calls = 0;
+
+struct ConstructorCountedCompare {
+  bool operator()(int a, const ConstructorCounted &b) const { return a < b.i; }
+  bool operator()(const ConstructorCounted &a, int b) const { return a.i < b; }
+  bool operator()(const ConstructorCounted &a,
+                  const ConstructorCounted &b) const {
+    return a.i < b.i;
+  }
+  using is_transparent = void;
+};
+
+TEST(Btree,
+     SetRangeConstructorAndInsertExplicitConvComparableLimitConstruction) {
+  const int i[] = {0, 1, 1};
+  ConstructorCounted::constructor_calls = 0;
+
+  absl::btree_set<ConstructorCounted, ConstructorCountedCompare> set{
+      std::begin(i), std::end(i)};
+  EXPECT_THAT(set, ElementsAre(0, 1));
+  EXPECT_EQ(ConstructorCounted::constructor_calls, 2);
+
+  set.insert(std::begin(i), std::end(i));
+  EXPECT_THAT(set, ElementsAre(0, 1));
+  EXPECT_EQ(ConstructorCounted::constructor_calls, 2);
+}
+
+TEST(Btree,
+     SetRangeConstructorAndInsertSupportExplicitConversionNonComparable) {
+  const int i[] = {0, 1};
+
+  absl::btree_set<std::vector<void *>> s1{std::begin(i), std::end(i)};
+  EXPECT_THAT(s1, ElementsAre(IsEmpty(), ElementsAre(IsNull())));
+
+  absl::btree_set<std::vector<void *>> s2;
+  s2.insert(std::begin(i), std::end(i));
+  EXPECT_THAT(s2, ElementsAre(IsEmpty(), ElementsAre(IsNull())));
+}
+
+// libstdc++ included with GCC 4.9 has a bug in the std::pair constructors that
+// prevents explicit conversions between pair types.
+// We only run this test for the libstdc++ from GCC 7 or newer because we can't
+// reliably check the libstdc++ version prior to that release.
+#if !defined(__GLIBCXX__) || \
+    (defined(_GLIBCXX_RELEASE) && _GLIBCXX_RELEASE >= 7)
+TEST(Btree, MapRangeConstructorAndInsertSupportExplicitConversionComparable) {
+  const std::pair<absl::string_view, int> names[] = {{"n1", 1}, {"n2", 2}};
+
+  absl::btree_map<std::string, int> name_map1{std::begin(names),
+                                              std::end(names)};
+  EXPECT_THAT(name_map1, ElementsAre(Pair("n1", 1), Pair("n2", 2)));
+
+  absl::btree_map<std::string, int> name_map2;
+  name_map2.insert(std::begin(names), std::end(names));
+  EXPECT_THAT(name_map2, ElementsAre(Pair("n1", 1), Pair("n2", 2)));
+}
+
+TEST(Btree,
+     MapRangeConstructorAndInsertExplicitConvComparableLimitConstruction) {
+  const std::pair<int, int> i[] = {{0, 1}, {1, 2}, {1, 3}};
+  ConstructorCounted::constructor_calls = 0;
+
+  absl::btree_map<ConstructorCounted, int, ConstructorCountedCompare> map{
+      std::begin(i), std::end(i)};
+  EXPECT_THAT(map, ElementsAre(Pair(0, 1), Pair(1, 2)));
+  EXPECT_EQ(ConstructorCounted::constructor_calls, 2);
+
+  map.insert(std::begin(i), std::end(i));
+  EXPECT_THAT(map, ElementsAre(Pair(0, 1), Pair(1, 2)));
+  EXPECT_EQ(ConstructorCounted::constructor_calls, 2);
+}
+
+TEST(Btree,
+     MapRangeConstructorAndInsertSupportExplicitConversionNonComparable) {
+  const std::pair<int, int> i[] = {{0, 1}, {1, 2}};
+
+  absl::btree_map<std::vector<void *>, int> m1{std::begin(i), std::end(i)};
+  EXPECT_THAT(m1,
+              ElementsAre(Pair(IsEmpty(), 1), Pair(ElementsAre(IsNull()), 2)));
+
+  absl::btree_map<std::vector<void *>, int> m2;
+  m2.insert(std::begin(i), std::end(i));
+  EXPECT_THAT(m2,
+              ElementsAre(Pair(IsEmpty(), 1), Pair(ElementsAre(IsNull()), 2)));
+}
+
+TEST(Btree, HeterogeneousTryEmplace) {
+  absl::btree_map<std::string, int> m;
+  std::string s = "key";
+  absl::string_view sv = s;
+  m.try_emplace(sv, 1);
+  EXPECT_EQ(m[s], 1);
+
+  m.try_emplace(m.end(), sv, 2);
+  EXPECT_EQ(m[s], 1);
+}
+
+TEST(Btree, HeterogeneousOperatorMapped) {
+  absl::btree_map<std::string, int> m;
+  std::string s = "key";
+  absl::string_view sv = s;
+  m[sv] = 1;
+  EXPECT_EQ(m[s], 1);
+
+  m[sv] = 2;
+  EXPECT_EQ(m[s], 2);
+}
+
+TEST(Btree, HeterogeneousInsertOrAssign) {
+  absl::btree_map<std::string, int> m;
+  std::string s = "key";
+  absl::string_view sv = s;
+  m.insert_or_assign(sv, 1);
+  EXPECT_EQ(m[s], 1);
+
+  m.insert_or_assign(m.end(), sv, 2);
+  EXPECT_EQ(m[s], 2);
+}
+#endif
+
+// This test requires std::launder for mutable key access in node handles.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+TEST(Btree, NodeHandleMutableKeyAccess) {
+  {
+    absl::btree_map<std::string, std::string> map;
+
+    map["key1"] = "mapped";
+
+    auto nh = map.extract(map.begin());
+    nh.key().resize(3);
+    map.insert(std::move(nh));
+
+    EXPECT_THAT(map, ElementsAre(Pair("key", "mapped")));
+  }
+  // Also for multimap.
+  {
+    absl::btree_multimap<std::string, std::string> map;
+
+    map.emplace("key1", "mapped");
+
+    auto nh = map.extract(map.begin());
+    nh.key().resize(3);
+    map.insert(std::move(nh));
+
+    EXPECT_THAT(map, ElementsAre(Pair("key", "mapped")));
+  }
+}
+#endif
+
+struct MultiKey {
+  int i1;
+  int i2;
+};
+
+bool operator==(const MultiKey a, const MultiKey b) {
+  return a.i1 == b.i1 && a.i2 == b.i2;
+}
+
+// A heterogeneous comparator that has different equivalence classes for
+// different lookup types.
+struct MultiKeyComp {
+  using is_transparent = void;
+  bool operator()(const MultiKey a, const MultiKey b) const {
+    if (a.i1 != b.i1) return a.i1 < b.i1;
+    return a.i2 < b.i2;
+  }
+  bool operator()(const int a, const MultiKey b) const { return a < b.i1; }
+  bool operator()(const MultiKey a, const int b) const { return a.i1 < b; }
+};
+
+TEST(Btree, MultiKeyEqualRange) {
+  absl::btree_set<MultiKey, MultiKeyComp> set;
+
+  for (int i = 0; i < 100; ++i) {
+    for (int j = 0; j < 100; ++j) {
+      set.insert({i, j});
+    }
+  }
+
+  for (int i = 0; i < 100; ++i) {
+    auto equal_range = set.equal_range(i);
+    EXPECT_EQ(equal_range.first->i1, i);
+    EXPECT_EQ(equal_range.first->i2, 0);
+    EXPECT_EQ(std::distance(equal_range.first, equal_range.second), 100) << i;
+  }
+}
+
+TEST(Btree, MultiKeyErase) {
+  absl::btree_set<MultiKey, MultiKeyComp> set = {
+      {1, 1}, {2, 1}, {2, 2}, {3, 1}};
+  EXPECT_EQ(set.erase(2), 2);
+  EXPECT_THAT(set, ElementsAre(MultiKey{1, 1}, MultiKey{3, 1}));
+}
+
+TEST(Btree, MultiKeyCount) {
+  const absl::btree_set<MultiKey, MultiKeyComp> set = {
+      {1, 1}, {2, 1}, {2, 2}, {3, 1}};
+  EXPECT_EQ(set.count(2), 2);
+}
+
+TEST(Btree, AllocConstructor) {
+  using Alloc = CountingAllocator<int>;
+  using Set = absl::btree_set<int, std::less<int>, Alloc>;
+  int64_t bytes_used = 0;
+  Alloc alloc(&bytes_used);
+  Set set(alloc);
+
+  set.insert({1, 2, 3});
+
+  EXPECT_THAT(set, ElementsAre(1, 2, 3));
+  EXPECT_GT(bytes_used, set.size() * sizeof(int));
+}
+
+TEST(Btree, AllocInitializerListConstructor) {
+  using Alloc = CountingAllocator<int>;
+  using Set = absl::btree_set<int, std::less<int>, Alloc>;
+  int64_t bytes_used = 0;
+  Alloc alloc(&bytes_used);
+  Set set({1, 2, 3}, alloc);
+
+  EXPECT_THAT(set, ElementsAre(1, 2, 3));
+  EXPECT_GT(bytes_used, set.size() * sizeof(int));
+}
+
+TEST(Btree, AllocRangeConstructor) {
+  using Alloc = CountingAllocator<int>;
+  using Set = absl::btree_set<int, std::less<int>, Alloc>;
+  int64_t bytes_used = 0;
+  Alloc alloc(&bytes_used);
+  std::vector<int> v = {1, 2, 3};
+  Set set(v.begin(), v.end(), alloc);
+
+  EXPECT_THAT(set, ElementsAre(1, 2, 3));
+  EXPECT_GT(bytes_used, set.size() * sizeof(int));
+}
+
+TEST(Btree, AllocCopyConstructor) {
+  using Alloc = CountingAllocator<int>;
+  using Set = absl::btree_set<int, std::less<int>, Alloc>;
+  int64_t bytes_used1 = 0;
+  Alloc alloc1(&bytes_used1);
+  Set set1(alloc1);
+
+  set1.insert({1, 2, 3});
+
+  int64_t bytes_used2 = 0;
+  Alloc alloc2(&bytes_used2);
+  Set set2(set1, alloc2);
+
+  EXPECT_THAT(set1, ElementsAre(1, 2, 3));
+  EXPECT_THAT(set2, ElementsAre(1, 2, 3));
+  EXPECT_GT(bytes_used1, set1.size() * sizeof(int));
+  EXPECT_EQ(bytes_used1, bytes_used2);
+}
+
+TEST(Btree, AllocMoveConstructor_SameAlloc) {
+  using Alloc = CountingAllocator<int>;
+  using Set = absl::btree_set<int, std::less<int>, Alloc>;
+  int64_t bytes_used = 0;
+  Alloc alloc(&bytes_used);
+  Set set1(alloc);
+
+  set1.insert({1, 2, 3});
+
+  const int64_t original_bytes_used = bytes_used;
+  EXPECT_GT(original_bytes_used, set1.size() * sizeof(int));
+
+  Set set2(std::move(set1), alloc);
+
+  EXPECT_THAT(set2, ElementsAre(1, 2, 3));
+  EXPECT_EQ(bytes_used, original_bytes_used);
+}
+
+TEST(Btree, AllocMoveConstructor_DifferentAlloc) {
+  using Alloc = CountingAllocator<int>;
+  using Set = absl::btree_set<int, std::less<int>, Alloc>;
+  int64_t bytes_used1 = 0;
+  Alloc alloc1(&bytes_used1);
+  Set set1(alloc1);
+
+  set1.insert({1, 2, 3});
+
+  const int64_t original_bytes_used = bytes_used1;
+  EXPECT_GT(original_bytes_used, set1.size() * sizeof(int));
+
+  int64_t bytes_used2 = 0;
+  Alloc alloc2(&bytes_used2);
+  Set set2(std::move(set1), alloc2);
+
+  EXPECT_THAT(set2, ElementsAre(1, 2, 3));
+  // We didn't free these bytes allocated by `set1` yet.
+  EXPECT_EQ(bytes_used1, original_bytes_used);
+  EXPECT_EQ(bytes_used2, original_bytes_used);
+}
+
 }  // namespace
 }  // namespace container_internal
 ABSL_NAMESPACE_END
diff --git a/third_party/abseil_cpp/absl/container/fixed_array.h b/third_party/abseil_cpp/absl/container/fixed_array.h
index adf0dc8088b6..fcb3e545b243 100644
--- a/third_party/abseil_cpp/absl/container/fixed_array.h
+++ b/third_party/abseil_cpp/absl/container/fixed_array.h
@@ -41,6 +41,7 @@
 #include <type_traits>
 
 #include "absl/algorithm/algorithm.h"
+#include "absl/base/config.h"
 #include "absl/base/dynamic_annotations.h"
 #include "absl/base/internal/throw_delegate.h"
 #include "absl/base/macros.h"
@@ -231,8 +232,8 @@ class FixedArray {
 
   // FixedArray::at
   //
-  // Bounds-checked access.  Returns a reference to the ith element of the
-  // fiexed array, or throws std::out_of_range
+  // Bounds-checked access.  Returns a reference to the ith element of the fixed
+  // array, or throws std::out_of_range
   reference at(size_type i) {
     if (ABSL_PREDICT_FALSE(i >= size())) {
       base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
@@ -422,15 +423,15 @@ class FixedArray {
     void AnnotateConstruct(size_type n);
     void AnnotateDestruct(size_type n);
 
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
     void* RedzoneBegin() { return &redzone_begin_; }
     void* RedzoneEnd() { return &redzone_end_ + 1; }
-#endif  // ADDRESS_SANITIZER
+#endif  // ABSL_HAVE_ADDRESS_SANITIZER
 
    private:
-    ADDRESS_SANITIZER_REDZONE(redzone_begin_);
+    ABSL_ADDRESS_SANITIZER_REDZONE(redzone_begin_);
     alignas(StorageElement) char buff_[sizeof(StorageElement[inline_elements])];
-    ADDRESS_SANITIZER_REDZONE(redzone_end_);
+    ABSL_ADDRESS_SANITIZER_REDZONE(redzone_end_);
   };
 
   class EmptyInlinedStorage {
@@ -503,22 +504,26 @@ constexpr typename FixedArray<T, N, A>::size_type
 template <typename T, size_t N, typename A>
 void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateConstruct(
     typename FixedArray<T, N, A>::size_type n) {
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
   if (!n) return;
-  ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(), data() + n);
-  ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(), RedzoneBegin());
-#endif                   // ADDRESS_SANITIZER
+  ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(),
+                                     data() + n);
+  ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(),
+                                     RedzoneBegin());
+#endif  // ABSL_HAVE_ADDRESS_SANITIZER
   static_cast<void>(n);  // Mark used when not in asan mode
 }
 
 template <typename T, size_t N, typename A>
 void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateDestruct(
     typename FixedArray<T, N, A>::size_type n) {
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
   if (!n) return;
-  ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n, RedzoneEnd());
-  ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(), data());
-#endif                   // ADDRESS_SANITIZER
+  ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n,
+                                     RedzoneEnd());
+  ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(),
+                                     data());
+#endif  // ABSL_HAVE_ADDRESS_SANITIZER
   static_cast<void>(n);  // Mark used when not in asan mode
 }
 ABSL_NAMESPACE_END
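
Besides renaming the sanitizer macros to their ABSL_-prefixed forms, the hunks above touch the documented bounds-checked accessor; a minimal usage sketch of FixedArray::at:

#include "absl/container/fixed_array.h"

void BoundsCheckedAccess() {
  absl::FixedArray<int> a(10);  // Size is fixed at construction.
  a.at(5) = 1;                  // In range: OK.
  // a.at(10) would throw std::out_of_range
  // ("FixedArray::at failed bounds check").
}
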
diff --git a/third_party/abseil_cpp/absl/container/fixed_array_exception_safety_test.cc b/third_party/abseil_cpp/absl/container/fixed_array_exception_safety_test.cc
index a5bb009d9881..e5f59299b358 100644
--- a/third_party/abseil_cpp/absl/container/fixed_array_exception_safety_test.cc
+++ b/third_party/abseil_cpp/absl/container/fixed_array_exception_safety_test.cc
@@ -150,8 +150,7 @@ TEST(FixedArrayExceptionSafety, InitListConstructorWithAlloc) {
 
 template <typename FixedArrT>
 testing::AssertionResult ReadMemory(FixedArrT* fixed_arr) {
-  // Marked volatile to prevent optimization. Used for running asan tests.
-  volatile int sum = 0;
+  int sum = 0;
   for (const auto& thrower : *fixed_arr) {
     sum += thrower.Get();
   }
diff --git a/third_party/abseil_cpp/absl/container/fixed_array_test.cc b/third_party/abseil_cpp/absl/container/fixed_array_test.cc
index 064a88a239bd..49598e7a053c 100644
--- a/third_party/abseil_cpp/absl/container/fixed_array_test.cc
+++ b/third_party/abseil_cpp/absl/container/fixed_array_test.cc
@@ -27,6 +27,7 @@
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
+#include "absl/base/config.h"
 #include "absl/base/internal/exception_testing.h"
 #include "absl/base/options.h"
 #include "absl/container/internal/counting_allocator.h"
@@ -767,7 +768,7 @@ TEST(AllocatorSupportTest, SizeValAllocConstructor) {
   }
 }
 
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
 TEST(FixedArrayTest, AddressSanitizerAnnotations1) {
   absl::FixedArray<int, 32> a(10);
   int* raw = a.data();
@@ -814,7 +815,7 @@ TEST(FixedArrayTest, AddressSanitizerAnnotations4) {
   // so reading raw[21] should still trigger the correct warning.
   EXPECT_DEATH_IF_SUPPORTED(raw[21] = ThreeInts(), "container-overflow");
 }
-#endif  // ADDRESS_SANITIZER
+#endif  // ABSL_HAVE_ADDRESS_SANITIZER
 
 TEST(FixedArrayTest, AbslHashValueWorks) {
   using V = absl::FixedArray<int>;
diff --git a/third_party/abseil_cpp/absl/container/flat_hash_map.h b/third_party/abseil_cpp/absl/container/flat_hash_map.h
index fcb70d861f39..74def0df0e33 100644
--- a/third_party/abseil_cpp/absl/container/flat_hash_map.h
+++ b/third_party/abseil_cpp/absl/container/flat_hash_map.h
@@ -234,7 +234,8 @@ class flat_hash_map : public absl::container_internal::raw_hash_map<
   //
   // size_type erase(const key_type& key):
   //
-  //   Erases the element with the matching key, if it exists.
+  //   Erases the element with the matching key, if it exists, returning the
+  //   number of elements erased (0 or 1).
   using Base::erase;
 
   // flat_hash_map::insert()
@@ -383,6 +384,11 @@ class flat_hash_map : public absl::container_internal::raw_hash_map<
   //   key value and returns a node handle owning that extracted data. If the
   //   `flat_hash_map` does not contain an element with a matching key, this
   //   function returns an empty node handle.
+  //
+  // NOTE: when compiled in an earlier version of C++ than C++17,
+  // `node_type::key()` returns a const reference to the key instead of a
+  // mutable reference. We cannot safely return a mutable reference without
+  // std::launder (which is not available before C++17).
   using Base::extract;
 
   // flat_hash_map::merge()
diff --git a/third_party/abseil_cpp/absl/container/flat_hash_map_test.cc b/third_party/abseil_cpp/absl/container/flat_hash_map_test.cc
index 2823c32bbe9d..89ec60c916ed 100644
--- a/third_party/abseil_cpp/absl/container/flat_hash_map_test.cc
+++ b/third_party/abseil_cpp/absl/container/flat_hash_map_test.cc
@@ -267,6 +267,21 @@ TEST(FlatHashMap, EraseIf) {
   }
 }
 
+// This test requires std::launder for mutable key access in node handles.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+TEST(FlatHashMap, NodeHandleMutableKeyAccess) {
+  flat_hash_map<std::string, std::string> map;
+
+  map["key1"] = "mapped";
+
+  auto nh = map.extract(map.begin());
+  nh.key().resize(3);
+  map.insert(std::move(nh));
+
+  EXPECT_THAT(map, testing::ElementsAre(Pair("key", "mapped")));
+}
+#endif
+
 }  // namespace
 }  // namespace container_internal
 ABSL_NAMESPACE_END
diff --git a/third_party/abseil_cpp/absl/container/flat_hash_set.h b/third_party/abseil_cpp/absl/container/flat_hash_set.h
index 94be6e3d13f1..6b89da65714c 100644
--- a/third_party/abseil_cpp/absl/container/flat_hash_set.h
+++ b/third_party/abseil_cpp/absl/container/flat_hash_set.h
@@ -227,7 +227,8 @@ class flat_hash_set
   //
   // size_type erase(const key_type& key):
   //
-  //   Erases the element with the matching key, if it exists.
+  //   Erases the element with the matching key, if it exists, returning the
+  //   number of elements erased (0 or 1).
   using Base::erase;
 
   // flat_hash_set::insert()
@@ -323,7 +324,7 @@ class flat_hash_set
 
   // flat_hash_set::merge()
   //
-  // Extracts elements from a given `source` flat hash map into this
+  // Extracts elements from a given `source` flat hash set into this
   // `flat_hash_set`. If the destination `flat_hash_set` already contains an
   // element with an equivalent key, that element is not extracted.
   using Base::merge;
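
A short sketch of the merge behavior the corrected comment describes: elements whose keys already exist in the destination are left behind in the source.

#include "absl/container/flat_hash_set.h"

void MergeKeepsExistingElements() {
  absl::flat_hash_set<int> dst = {1, 2};
  absl::flat_hash_set<int> src = {2, 3};

  dst.merge(src);  // 3 moves into dst; 2 stays in src because dst has it.
  // dst == {1, 2, 3}, src == {2}.
}
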
diff --git a/third_party/abseil_cpp/absl/container/inlined_vector.h b/third_party/abseil_cpp/absl/container/inlined_vector.h
index f18dd4c78583..90bb96e8325c 100644
--- a/third_party/abseil_cpp/absl/container/inlined_vector.h
+++ b/third_party/abseil_cpp/absl/container/inlined_vector.h
@@ -64,7 +64,7 @@ ABSL_NAMESPACE_BEGIN
 // `std::vector` for use cases where the vector's size is sufficiently small
 // that it can be inlined. If the inlined vector does grow beyond its estimated
 // capacity, it will trigger an initial allocation on the heap, and will behave
-// as a `std:vector`. The API of the `absl::InlinedVector` within this file is
+// as a `std::vector`. The API of the `absl::InlinedVector` within this file is
 // designed to cover the same API footprint as covered by `std::vector`.
 template <typename T, size_t N, typename A = std::allocator<T>>
 class InlinedVector {
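
For reference, a minimal sketch of the behavior the fixed comment describes: storage stays inline up to N elements, then spills to the heap and behaves like std::vector.

#include "absl/container/inlined_vector.h"

void InlineThenHeap() {
  absl::InlinedVector<int, 4> v = {1, 2, 3, 4};  // Stored inline; no heap allocation.
  v.push_back(5);  // Exceeds the inlined capacity; triggers the initial
                   // heap allocation described above.
}
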
diff --git a/third_party/abseil_cpp/absl/container/inlined_vector_test.cc b/third_party/abseil_cpp/absl/container/inlined_vector_test.cc
index 415c60d9f1af..98aff33498b2 100644
--- a/third_party/abseil_cpp/absl/container/inlined_vector_test.cc
+++ b/third_party/abseil_cpp/absl/container/inlined_vector_test.cc
@@ -736,22 +736,26 @@ TEST(OverheadTest, Storage) {
   // In particular, ensure that std::allocator doesn't cost anything to store.
   // The union should be absorbing some of the allocation bookkeeping overhead
   // in the larger vectors, leaving only the size_ field as overhead.
-  EXPECT_EQ(2 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 1>) - 1 * sizeof(int*));
-  EXPECT_EQ(1 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 2>) - 2 * sizeof(int*));
-  EXPECT_EQ(1 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 3>) - 3 * sizeof(int*));
-  EXPECT_EQ(1 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 4>) - 4 * sizeof(int*));
-  EXPECT_EQ(1 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 5>) - 5 * sizeof(int*));
-  EXPECT_EQ(1 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 6>) - 6 * sizeof(int*));
-  EXPECT_EQ(1 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 7>) - 7 * sizeof(int*));
-  EXPECT_EQ(1 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 8>) - 8 * sizeof(int*));
+
+  struct T { void* val; };
+  size_t expected_overhead = sizeof(T);
+
+  EXPECT_EQ((2 * expected_overhead),
+            sizeof(absl::InlinedVector<T, 1>) - sizeof(T[1]));
+  EXPECT_EQ(expected_overhead,
+            sizeof(absl::InlinedVector<T, 2>) - sizeof(T[2]));
+  EXPECT_EQ(expected_overhead,
+            sizeof(absl::InlinedVector<T, 3>) - sizeof(T[3]));
+  EXPECT_EQ(expected_overhead,
+            sizeof(absl::InlinedVector<T, 4>) - sizeof(T[4]));
+  EXPECT_EQ(expected_overhead,
+            sizeof(absl::InlinedVector<T, 5>) - sizeof(T[5]));
+  EXPECT_EQ(expected_overhead,
+            sizeof(absl::InlinedVector<T, 6>) - sizeof(T[6]));
+  EXPECT_EQ(expected_overhead,
+            sizeof(absl::InlinedVector<T, 7>) - sizeof(T[7]));
+  EXPECT_EQ(expected_overhead,
+            sizeof(absl::InlinedVector<T, 8>) - sizeof(T[8]));
 }
 
 TEST(IntVec, Clear) {
diff --git a/third_party/abseil_cpp/absl/container/internal/btree.h b/third_party/abseil_cpp/absl/container/internal/btree.h
index f52fe235a2a6..f2fc31df8d41 100644
--- a/third_party/abseil_cpp/absl/container/internal/btree.h
+++ b/third_party/abseil_cpp/absl/container/internal/btree.h
@@ -137,15 +137,14 @@ struct StringBtreeDefaultGreater {
 };
 
 // A helper class to convert a boolean comparison into a three-way "compare-to"
-// comparison that returns a negative value to indicate less-than, zero to
-// indicate equality and a positive value to indicate greater-than. This helper
+// comparison that returns an `absl::weak_ordering`. This helper
 // class is specialized for less<std::string>, greater<std::string>,
 // less<string_view>, greater<string_view>, less<absl::Cord>, and
 // greater<absl::Cord>.
 //
 // key_compare_to_adapter is provided so that btree users
 // automatically get the more efficient compare-to code when using common
-// google string types with common comparison functors.
+// Abseil string types with common comparison functors.
 // These string-like specializations also turn on heterogeneous lookup by
 // default.
 template <typename Compare>
@@ -183,12 +182,47 @@ struct key_compare_to_adapter<std::greater<absl::Cord>> {
   using type = StringBtreeDefaultGreater;
 };
 
+// Detects an 'absl_btree_prefer_linear_node_search' member. This is
+// a protocol used as an opt-in or opt-out of linear search.
+//
+//  For example, this would be useful for key types that wrap an integer
+//  and define their own cheap operator<(). For example:
+//
+//   class K {
+//    public:
+//     using absl_btree_prefer_linear_node_search = std::true_type;
+//     ...
+//    private:
+//     friend bool operator<(K a, K b) { return a.k_ < b.k_; }
+//     int k_;
+//   };
+//
+//   btree_map<K, V> m;  // Uses linear search
+//
+// If T has the preference tag, then it has a preference.
+// Btree will use the tag's truth value.
+template <typename T, typename = void>
+struct has_linear_node_search_preference : std::false_type {};
+template <typename T, typename = void>
+struct prefers_linear_node_search : std::false_type {};
+template <typename T>
+struct has_linear_node_search_preference<
+    T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
+    : std::true_type {};
+template <typename T>
+struct prefers_linear_node_search<
+    T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
+    : T::absl_btree_prefer_linear_node_search {};
+
 template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
           bool Multi, typename SlotPolicy>
 struct common_params {
   // If Compare is a common comparator for a string-like type, then we adapt it
   // to use heterogeneous lookup and to be a key-compare-to comparator.
   using key_compare = typename key_compare_to_adapter<Compare>::type;
+  // True when key_compare has been adapted to StringBtreeDefault{Less,Greater}.
+  using is_key_compare_adapted =
+      absl::negation<std::is_same<key_compare, Compare>>;
   // A type which indicates if we have a key-compare-to functor or a plain old
   // key-compare functor.
   using is_key_compare_to = btree_is_key_compare_to<key_compare, Key>;
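
The absl_btree_prefer_linear_node_search protocol introduced above is exercised earlier in this patch by the LinearChoiceTree test; a sketch under the same rules (the comparator's preference takes precedence over the key type's). CheapIntKey and ForceBinaryCompare are illustrative names, not part of the patch:

#include <type_traits>
#include "absl/container/btree_map.h"

struct CheapIntKey {
  // Opt in: comparing this key is as cheap as comparing an int.
  using absl_btree_prefer_linear_node_search = std::true_type;
  int v;
  friend bool operator<(CheapIntKey a, CheapIntKey b) { return a.v < b.v; }
};

struct ForceBinaryCompare {
  // Opt out: the comparator's preference overrides the key's opt-in.
  using absl_btree_prefer_linear_node_search = std::false_type;
  bool operator()(CheapIntKey a, CheapIntKey b) const { return a.v < b.v; }
};

absl::btree_map<CheapIntKey, int> uses_linear_search;  // key opts in
absl::btree_map<CheapIntKey, int, ForceBinaryCompare>
    uses_binary_search;  // comparator wins
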
@@ -255,10 +289,6 @@ struct common_params {
   static void move(Alloc *alloc, slot_type *src, slot_type *dest) {
     slot_policy::move(alloc, src, dest);
   }
-  static void move(Alloc *alloc, slot_type *first, slot_type *last,
-                   slot_type *result) {
-    slot_policy::move(alloc, first, last, result);
-  }
 };
 
 // A parameters structure for holding the type parameters for a btree_map.
@@ -290,9 +320,17 @@ struct map_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
   };
   using is_map_container = std::true_type;
 
-  static const Key &key(const value_type &value) { return value.first; }
-  static const Key &key(const init_type &init) { return init.first; }
+  template <typename V>
+  static auto key(const V &value) -> decltype(value.first) {
+    return value.first;
+  }
   static const Key &key(const slot_type *s) { return slot_policy::key(s); }
+  static const Key &key(slot_type *s) { return slot_policy::key(s); }
+  // For use in node handle.
+  static auto mutable_key(slot_type *s)
+      -> decltype(slot_policy::mutable_key(s)) {
+    return slot_policy::mutable_key(s);
+  }
   static mapped_type &value(value_type *value) { return value->second; }
 };
 
@@ -333,13 +371,6 @@ struct set_slot_policy {
   static void move(Alloc * /*alloc*/, slot_type *src, slot_type *dest) {
     *dest = std::move(*src);
   }
-
-  template <typename Alloc>
-  static void move(Alloc *alloc, slot_type *first, slot_type *last,
-                   slot_type *result) {
-    for (slot_type *src = first, *dest = result; src != last; ++src, ++dest)
-      move(alloc, src, dest);
-  }
 };
 
 // A parameters structure for holding the type parameters for a btree_set.
@@ -353,8 +384,10 @@ struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
   using value_compare = typename set_params::common_params::key_compare;
   using is_map_container = std::false_type;
 
-  static const Key &key(const value_type &value) { return value; }
+  template <typename V>
+  static const V &key(const V &value) { return value; }
   static const Key &key(const slot_type *slot) { return *slot; }
+  static const Key &key(slot_type *slot) { return *slot; }
 };
 
 // An adapter class that converts a lower-bound compare into an upper-bound
@@ -390,6 +423,10 @@ struct SearchResult {
 // useful information.
 template <typename V>
 struct SearchResult<V, false> {
+  SearchResult() {}
+  explicit SearchResult(V value) : value(value) {}
+  SearchResult(V value, MatchKind /*match*/) : value(value) {}
+
   V value;
 
   static constexpr bool HasMatch() { return false; }
@@ -420,15 +457,22 @@ class btree_node {
   using difference_type = typename Params::difference_type;
 
   // Btree decides whether to use linear node search as follows:
+  //   - If the comparator expresses a preference, use that.
+  //   - If the key expresses a preference, use that.
   //   - If the key is arithmetic and the comparator is std::less or
   //     std::greater, choose linear.
   //   - Otherwise, choose binary.
   // TODO(ezb): Might make sense to add condition(s) based on node-size.
   using use_linear_search = std::integral_constant<
       bool,
-                std::is_arithmetic<key_type>::value &&
-                    (std::is_same<std::less<key_type>, key_compare>::value ||
-                     std::is_same<std::greater<key_type>, key_compare>::value)>;
+      has_linear_node_search_preference<key_compare>::value
+          ? prefers_linear_node_search<key_compare>::value
+          : has_linear_node_search_preference<key_type>::value
+                ? prefers_linear_node_search<key_type>::value
+                : std::is_arithmetic<key_type>::value &&
+                      (std::is_same<std::less<key_type>, key_compare>::value ||
+                       std::is_same<std::greater<key_type>,
+                                    key_compare>::value)>;
 
   // This class is organized by gtl::Layout as if it had the following
   // structure:
@@ -671,7 +715,7 @@ class btree_node {
       }
       ++s;
     }
-    return {s};
+    return SearchResult<int, false>{s};
   }
 
   // Returns the position of the first value whose key is not less than k using
@@ -706,7 +750,7 @@ class btree_node {
         e = mid;
       }
     }
-    return {s};
+    return SearchResult<int, false>{s};
   }
 
   // Returns the position of the first value whose key is not less than k using
@@ -754,14 +798,10 @@ class btree_node {
   template <typename... Args>
   void emplace_value(size_type i, allocator_type *alloc, Args &&... args);
 
-  // Removes the value at position i, shifting all existing values and children
-  // at positions > i to the left by 1.
-  void remove_value(int i, allocator_type *alloc);
-
-  // Removes the values at positions [i, i + to_erase), shifting all values
-  // after that range to the left by to_erase. Does not change children at all.
-  void remove_values_ignore_children(int i, int to_erase,
-                                     allocator_type *alloc);
+  // Removes the values at positions [i, i + to_erase), shifting all existing
+  // values and children after that range to the left by to_erase. Clears all
+  // children between [i, i + to_erase).
+  void remove_values(field_type i, field_type to_erase, allocator_type *alloc);
 
   // Rebalances a node with its right sibling.
   void rebalance_right_to_left(int to_move, btree_node *right,
@@ -773,7 +813,7 @@ class btree_node {
   void split(int insert_position, btree_node *dest, allocator_type *alloc);
 
   // Merges a node with its right sibling, moving all of the values and the
-  // delimiting key in the parent node onto itself.
+  // delimiting key in the parent node onto itself, and deleting the src node.
   void merge(btree_node *src, allocator_type *alloc);
 
   // Node allocation/deletion routines.
@@ -794,35 +834,43 @@ class btree_node {
     absl::container_internal::SanitizerPoisonMemoryRegion(
         &mutable_child(start()), (kNodeValues + 1) * sizeof(btree_node *));
   }
-  void destroy(allocator_type *alloc) {
-    for (int i = start(); i < finish(); ++i) {
-      value_destroy(i, alloc);
-    }
-  }
 
- public:
-  // Exposed only for tests.
-  static bool testonly_uses_linear_node_search() {
-    return use_linear_search::value;
+  static void deallocate(const size_type size, btree_node *node,
+                         allocator_type *alloc) {
+    absl::container_internal::Deallocate<Alignment()>(alloc, node, size);
   }
 
+  // Deletes a node and all of its children.
+  static void clear_and_delete(btree_node *node, allocator_type *alloc);
+
  private:
   template <typename... Args>
-  void value_init(const size_type i, allocator_type *alloc, Args &&... args) {
+  void value_init(const field_type i, allocator_type *alloc, Args &&... args) {
     absl::container_internal::SanitizerUnpoisonObject(slot(i));
     params_type::construct(alloc, slot(i), std::forward<Args>(args)...);
   }
-  void value_destroy(const size_type i, allocator_type *alloc) {
+  void value_destroy(const field_type i, allocator_type *alloc) {
     params_type::destroy(alloc, slot(i));
     absl::container_internal::SanitizerPoisonObject(slot(i));
   }
+  void value_destroy_n(const field_type i, const field_type n,
+                       allocator_type *alloc) {
+    for (slot_type *s = slot(i), *end = slot(i + n); s != end; ++s) {
+      params_type::destroy(alloc, s);
+      absl::container_internal::SanitizerPoisonObject(s);
+    }
+  }
+
+  static void transfer(slot_type *dest, slot_type *src, allocator_type *alloc) {
+    absl::container_internal::SanitizerUnpoisonObject(dest);
+    params_type::transfer(alloc, dest, src);
+    absl::container_internal::SanitizerPoisonObject(src);
+  }
 
   // Transfers value from slot `src_i` in `src_node` to slot `dest_i` in `this`.
   void transfer(const size_type dest_i, const size_type src_i,
                 btree_node *src_node, allocator_type *alloc) {
-    absl::container_internal::SanitizerUnpoisonObject(slot(dest_i));
-    params_type::transfer(alloc, slot(dest_i), src_node->slot(src_i));
-    absl::container_internal::SanitizerPoisonObject(src_node->slot(src_i));
+    transfer(slot(dest_i), src_node->slot(src_i), alloc);
   }
 
   // Transfers `n` values starting at value `src_i` in `src_node` into the
@@ -830,19 +878,11 @@ class btree_node {
   void transfer_n(const size_type n, const size_type dest_i,
                   const size_type src_i, btree_node *src_node,
                   allocator_type *alloc) {
-    absl::container_internal::SanitizerUnpoisonMemoryRegion(
-        slot(dest_i), n * sizeof(slot_type));
     for (slot_type *src = src_node->slot(src_i), *end = src + n,
                    *dest = slot(dest_i);
          src != end; ++src, ++dest) {
-      params_type::transfer(alloc, dest, src);
+      transfer(dest, src, alloc);
     }
-    // We take care to avoid poisoning transferred-to nodes in case of overlap.
-    const size_type overlap =
-        this == src_node ? (std::max)(src_i, dest_i + n) - src_i : 0;
-    assert(n >= overlap);
-    absl::container_internal::SanitizerPoisonMemoryRegion(
-        src_node->slot(src_i + overlap), (n - overlap) * sizeof(slot_type));
   }
 
   // Same as above, except that we start at the end and work our way to the
@@ -850,19 +890,11 @@ class btree_node {
   void transfer_n_backward(const size_type n, const size_type dest_i,
                            const size_type src_i, btree_node *src_node,
                            allocator_type *alloc) {
-    absl::container_internal::SanitizerUnpoisonMemoryRegion(
-        slot(dest_i), n * sizeof(slot_type));
     for (slot_type *src = src_node->slot(src_i + n - 1), *end = src - n,
                    *dest = slot(dest_i + n - 1);
          src != end; --src, --dest) {
-      params_type::transfer(alloc, dest, src);
+      transfer(dest, src, alloc);
     }
-    // We take care to avoid poisoning transferred-to nodes in case of overlap.
-    assert(this != src_node || dest_i >= src_i);
-    const size_type num_to_poison =
-        this == src_node ? (std::min)(n, dest_i - src_i) : n;
-    absl::container_internal::SanitizerPoisonMemoryRegion(
-        src_node->slot(src_i), num_to_poison * sizeof(slot_type));
   }
 
   template <typename P>
@@ -1020,6 +1052,10 @@ template <typename Params>
 class btree {
   using node_type = btree_node<Params>;
   using is_key_compare_to = typename Params::is_key_compare_to;
+  using init_type = typename Params::init_type;
+  using field_type = typename node_type::field_type;
+  using is_multi_container = typename Params::is_multi_container;
+  using is_key_compare_adapted = typename Params::is_key_compare_adapted;
 
   // We use a static empty node for the root/leftmost/rightmost of empty btrees
   // in order to avoid branching in begin()/end().
@@ -1054,7 +1090,7 @@ class btree {
 #endif
   }
 
-  enum {
+  enum : uint32_t {
     kNodeValues = node_type::kNodeValues,
     kMinNodeValues = kNodeValues / 2,
   };
@@ -1106,21 +1142,35 @@ class btree {
   // before this method is called. This method is used in copy construction,
   // copy assignment, and move assignment.
   template <typename Btree>
-  void copy_or_move_values_in_order(Btree *other);
+  void copy_or_move_values_in_order(Btree &other);
 
   // Validates that various assumptions/requirements are true at compile time.
   constexpr static bool static_assert_validation();
 
  public:
-  btree(const key_compare &comp, const allocator_type &alloc);
+  btree(const key_compare &comp, const allocator_type &alloc)
+      : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {}
 
-  btree(const btree &other);
+  btree(const btree &other) : btree(other, other.allocator()) {}
+  btree(const btree &other, const allocator_type &alloc)
+      : btree(other.key_comp(), alloc) {
+    copy_or_move_values_in_order(other);
+  }
   btree(btree &&other) noexcept
       : root_(std::move(other.root_)),
         rightmost_(absl::exchange(other.rightmost_, EmptyNode())),
         size_(absl::exchange(other.size_, 0)) {
     other.mutable_root() = EmptyNode();
   }
+  btree(btree &&other, const allocator_type &alloc)
+      : btree(other.key_comp(), alloc) {
+    if (alloc == other.allocator()) {
+      swap(other);
+    } else {
+      // Move values from `other` one at a time when allocators are different.
+      copy_or_move_values_in_order(other);
+    }
+  }
 
   ~btree() {
     // Put static_asserts in destructor to avoid triggering them before the type
@@ -1151,11 +1201,11 @@ class btree {
   // Finds the first element whose key is not less than key.
   template <typename K>
   iterator lower_bound(const K &key) {
-    return internal_end(internal_lower_bound(key));
+    return internal_end(internal_lower_bound(key).value);
   }
   template <typename K>
   const_iterator lower_bound(const K &key) const {
-    return internal_end(internal_lower_bound(key));
+    return internal_end(internal_lower_bound(key).value);
   }
 
   // Finds the first element whose key is greater than key.
@@ -1169,23 +1219,21 @@ class btree {
   }
 
   // Finds the range of values which compare equal to key. The first member of
-  // the returned pair is equal to lower_bound(key). The second member pair of
-  // the pair is equal to upper_bound(key).
+  // the returned pair is equal to lower_bound(key). The second member of the
+  // pair is equal to upper_bound(key).
   template <typename K>
-  std::pair<iterator, iterator> equal_range(const K &key) {
-    return {lower_bound(key), upper_bound(key)};
-  }
+  std::pair<iterator, iterator> equal_range(const K &key);
   template <typename K>
   std::pair<const_iterator, const_iterator> equal_range(const K &key) const {
-    return {lower_bound(key), upper_bound(key)};
+    return const_cast<btree *>(this)->equal_range(key);
   }
 
   // Inserts a value into the btree only if it does not already exist. The
   // boolean return value indicates whether insertion succeeded or failed.
   // Requirement: if `key` already exists in the btree, does not consume `args`.
   // Requirement: `key` is never referenced after consuming `args`.
-  template <typename... Args>
-  std::pair<iterator, bool> insert_unique(const key_type &key, Args &&... args);
+  template <typename K, typename... Args>
+  std::pair<iterator, bool> insert_unique(const K &key, Args &&... args);
 
   // Inserts with hint. Checks to see if the value should be placed immediately
   // before `position` in the tree. If so, then the insertion will take
@@ -1193,14 +1241,23 @@ class btree {
   // logarithmic time as if a call to insert_unique() were made.
   // Requirement: if `key` already exists in the btree, does not consume `args`.
   // Requirement: `key` is never referenced after consuming `args`.
-  template <typename... Args>
+  template <typename K, typename... Args>
   std::pair<iterator, bool> insert_hint_unique(iterator position,
-                                               const key_type &key,
+                                               const K &key,
                                                Args &&... args);
 
   // Insert a range of values into the btree.
+  // Note: the first overload avoids constructing a value_type if the key
+  // already exists in the btree.
+  template <typename InputIterator,
+            typename = decltype(std::declval<const key_compare &>()(
+                params_type::key(*std::declval<InputIterator>()),
+                std::declval<const key_type &>()))>
+  void insert_iterator_unique(InputIterator b, InputIterator e, int);
+  // We need the second overload for cases in which we need to construct a
+  // value_type in order to compare it with the keys already in the btree.
   template <typename InputIterator>
-  void insert_iterator_unique(InputIterator b, InputIterator e);
+  void insert_iterator_unique(InputIterator b, InputIterator e, char);
 
   // Inserts a value into the btree.
   template <typename ValueType>
@@ -1233,16 +1290,6 @@ class btree {
   // to the element after the last erased element.
   std::pair<size_type, iterator> erase_range(iterator begin, iterator end);
 
-  // Erases the specified key from the btree. Returns 1 if an element was
-  // erased and 0 otherwise.
-  template <typename K>
-  size_type erase_unique(const K &key);
-
-  // Erases all of the entries matching the specified key from the
-  // btree. Returns the number of elements erased.
-  template <typename K>
-  size_type erase_multi(const K &key);
-
   // Finds the iterator corresponding to a key or returns end() if the key is
   // not present.
   template <typename K>
@@ -1254,23 +1301,6 @@ class btree {
     return internal_end(internal_find(key));
   }
 
-  // Returns a count of the number of times the key appears in the btree.
-  template <typename K>
-  size_type count_unique(const K &key) const {
-    const iterator begin = internal_find(key);
-    if (begin.node == nullptr) {
-      // The key doesn't exist in the tree.
-      return 0;
-    }
-    return 1;
-  }
-  // Returns a count of the number of times the key appears in the btree.
-  template <typename K>
-  size_type count_multi(const K &key) const {
-    const auto range = equal_range(key);
-    return std::distance(range.first, range.second);
-  }
-
   // Clear the btree, deleting all of the values it contains.
   void clear();
 
@@ -1408,25 +1438,8 @@ class btree {
   }
 
   // Deletion helper routines.
-  void erase_same_node(iterator begin, iterator end);
-  iterator erase_from_leaf_node(iterator begin, size_type to_erase);
   iterator rebalance_after_delete(iterator iter);
 
-  // Deallocates a node of a certain size in bytes using the allocator.
-  void deallocate(const size_type size, node_type *node) {
-    absl::container_internal::Deallocate<node_type::Alignment()>(
-        mutable_allocator(), node, size);
-  }
-
-  void delete_internal_node(node_type *node) {
-    node->destroy(mutable_allocator());
-    deallocate(node_type::InternalSize(), node);
-  }
-  void delete_leaf_node(node_type *node) {
-    node->destroy(mutable_allocator());
-    deallocate(node_type::LeafSize(node->max_count()), node);
-  }
-
   // Rebalances or splits the node iter points to.
   void rebalance_or_split(iterator *iter);
 
@@ -1464,28 +1477,19 @@ class btree {
   static IterType internal_last(IterType iter);
 
   // Returns an iterator pointing to the leaf position at which key would
-  // reside in the tree. We provide 2 versions of internal_locate. The first
-  // version uses a less-than comparator and is incapable of distinguishing when
-  // there is an exact match. The second version is for the key-compare-to
-  // specialization and distinguishes exact matches. The key-compare-to
-  // specialization allows the caller to avoid a subsequent comparison to
-  // determine if an exact match was made, which is important for keys with
-  // expensive comparison, such as strings.
+  // reside in the tree, unless there is an exact match, in which case the
+  // result may not be on a leaf. When there's a three-way comparator, we can
+  // return whether there was an exact match. This allows the caller to avoid a
+  // subsequent comparison to determine if an exact match was made, which is
+  // important for keys with expensive comparison, such as strings.
   template <typename K>
   SearchResult<iterator, is_key_compare_to::value> internal_locate(
       const K &key) const;
 
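
// A sketch of the "key-compare-to" idea described above, assuming only the
// comparator shape (absl's actual detection machinery is more involved): a
// comparator returning an int (<0, ==0, >0) lets one call both order the
// keys and detect an exact match, where a boolean less-than would need a
// second comparison.
#include <string>

struct StringCompareTo {
  int operator()(const std::string &a, const std::string &b) const {
    return a.compare(b);  // one call yields less/equal/greater
  }
};

int main() {
  StringCompareTo cmp;
  return cmp("apple", "apple") == 0 ? 0 : 1;  // exact match, no second call
}
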
-  template <typename K>
-  SearchResult<iterator, false> internal_locate_impl(
-      const K &key, std::false_type /* IsCompareTo */) const;
-
-  template <typename K>
-  SearchResult<iterator, true> internal_locate_impl(
-      const K &key, std::true_type /* IsCompareTo */) const;
-
   // Internal routine which implements lower_bound().
   template <typename K>
-  iterator internal_lower_bound(const K &key) const;
+  SearchResult<iterator, is_key_compare_to::value> internal_lower_bound(
+      const K &key) const;
 
   // Internal routine which implements upper_bound().
   template <typename K>
@@ -1495,9 +1499,6 @@ class btree {
   template <typename K>
   iterator internal_find(const K &key) const;
 
-  // Deletes a node and all of its children.
-  void internal_clear(node_type *node);
-
   // Verifies the tree structure of node.
   int internal_verify(const node_type *node, const key_type *lo,
                       const key_type *hi) const;
@@ -1517,13 +1518,6 @@ class btree {
     return res;
   }
 
- public:
-  // Exposed only for tests.
-  static bool testonly_uses_linear_node_search() {
-    return node_type::testonly_uses_linear_node_search();
-  }
-
- private:
   // We use compressed tuple in order to save space because key_compare and
   // allocator_type are usually empty.
   absl::container_internal::CompressedTuple<key_compare, allocator_type,
@@ -1565,26 +1559,27 @@ inline void btree_node<P>::emplace_value(const size_type i,
 }
 
 template <typename P>
-inline void btree_node<P>::remove_value(const int i, allocator_type *alloc) {
-  if (!leaf() && finish() > i + 1) {
-    assert(child(i + 1)->count() == 0);
-    for (size_type j = i + 1; j < finish(); ++j) {
-      set_child(j, child(j + 1));
-    }
-    clear_child(finish());
-  }
-
-  remove_values_ignore_children(i, /*to_erase=*/1, alloc);
-}
+inline void btree_node<P>::remove_values(const field_type i,
+                                         const field_type to_erase,
+                                         allocator_type *alloc) {
+  // Destroy the removed values, then transfer the values after the removed
+  // range into their new places.
+  value_destroy_n(i, to_erase, alloc);
+  const field_type orig_finish = finish();
+  const field_type src_i = i + to_erase;
+  transfer_n(orig_finish - src_i, i, src_i, this, alloc);
 
-template <typename P>
-inline void btree_node<P>::remove_values_ignore_children(
-    const int i, const int to_erase, allocator_type *alloc) {
-  params_type::move(alloc, slot(i + to_erase), finish_slot(), slot(i));
-  for (int j = finish() - to_erase; j < finish(); ++j) {
-    value_destroy(j, alloc);
+  if (!leaf()) {
+    // Delete the children that followed the removed values.
+    for (int j = 0; j < to_erase; ++j) {
+      clear_and_delete(child(i + j + 1), alloc);
+    }
+    // Rotate the remaining children into their new positions.
+    for (int j = i + to_erase + 1; j <= orig_finish; ++j) {
+      set_child(j - to_erase, child(j));
+      clear_child(j);
+    }
   }
-  set_finish(finish() - to_erase);
+  set_finish(orig_finish - to_erase);
 }
 
 template <typename P>
@@ -1736,8 +1731,59 @@ void btree_node<P>::merge(btree_node *src, allocator_type *alloc) {
   set_finish(start() + 1 + count() + src->count());
   src->set_finish(src->start());
 
-  // Remove the value on the parent node.
-  parent()->remove_value(position(), alloc);
+  // Remove the value on the parent node and delete the src node.
+  parent()->remove_values(position(), /*to_erase=*/1, alloc);
+}
+
+template <typename P>
+void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
+  if (node->leaf()) {
+    node->value_destroy_n(node->start(), node->count(), alloc);
+    deallocate(LeafSize(node->max_count()), node, alloc);
+    return;
+  }
+  if (node->count() == 0) {
+    deallocate(InternalSize(), node, alloc);
+    return;
+  }
+
+  // The parent of the root of the subtree we are deleting.
+  btree_node *delete_root_parent = node->parent();
+
+  // Navigate to the leftmost leaf under node, and then delete upwards.
+  while (!node->leaf()) node = node->start_child();
+  // Use `int` because `pos` needs to be able to hold `kNodeValues+1`, which
+  // isn't guaranteed to be a valid `field_type`.
+  int pos = node->position();
+  btree_node *parent = node->parent();
+  for (;;) {
+    // In each iteration of the next loop, we delete one leaf node and go right.
+    assert(pos <= parent->finish());
+    do {
+      node = parent->child(pos);
+      if (!node->leaf()) {
+        // Navigate to the leftmost leaf under node.
+        while (!node->leaf()) node = node->start_child();
+        pos = node->position();
+        parent = node->parent();
+      }
+      node->value_destroy_n(node->start(), node->count(), alloc);
+      deallocate(LeafSize(node->max_count()), node, alloc);
+      ++pos;
+    } while (pos <= parent->finish());
+
+    // Once we've deleted all children of parent, delete parent and go up/right.
+    assert(pos > parent->finish());
+    do {
+      node = parent;
+      pos = node->position();
+      parent = node->parent();
+      node->value_destroy_n(node->start(), node->count(), alloc);
+      deallocate(InternalSize(), node, alloc);
+      if (parent == delete_root_parent) return;
+      ++pos;
+    } while (pos > parent->finish());
+  }
 }
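
// A rough analogue (hypothetical Node type) of the deletion strategy above:
// freeing a whole subtree without recursion, so call-stack depth stays
// bounded on tall trees. This sketch uses an explicit stack instead of the
// parent-pointer walk in the btree code.
#include <vector>

struct Node {
  std::vector<Node *> children;
};

void clear_and_delete(Node *root) {
  std::vector<Node *> stack = {root};
  while (!stack.empty()) {
    Node *n = stack.back();
    stack.pop_back();
    for (Node *c : n->children) stack.push_back(c);
    delete n;  // children are already queued, so freeing the parent is safe
  }
}

int main() {
  Node *root = new Node;
  root->children = {new Node, new Node};
  root->children[0]->children = {new Node};
  clear_and_delete(root);
}
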
 
 ////
@@ -1794,7 +1840,7 @@ void btree_iterator<N, R, P>::decrement_slow() {
 // btree methods
 template <typename P>
 template <typename Btree>
-void btree<P>::copy_or_move_values_in_order(Btree *other) {
+void btree<P>::copy_or_move_values_in_order(Btree &other) {
   static_assert(std::is_same<btree, Btree>::value ||
                     std::is_same<const btree, Btree>::value,
                 "Btree type must be same or const.");
@@ -1802,11 +1848,11 @@ void btree<P>::copy_or_move_values_in_order(Btree *other) {
 
   // We can avoid key comparisons because we know the order of the
   // values is the same order we'll store them in.
-  auto iter = other->begin();
-  if (iter == other->end()) return;
+  auto iter = other.begin();
+  if (iter == other.end()) return;
   insert_multi(maybe_move_from_iterator(iter));
   ++iter;
-  for (; iter != other->end(); ++iter) {
+  for (; iter != other.end(); ++iter) {
     // If the btree is not empty, we can just insert the new value at the end
     // of the tree.
     internal_emplace(end(), maybe_move_from_iterator(iter));
@@ -1845,25 +1891,52 @@ constexpr bool btree<P>::static_assert_validation() {
 }
 
 template <typename P>
-btree<P>::btree(const key_compare &comp, const allocator_type &alloc)
-    : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {}
-
-template <typename P>
-btree<P>::btree(const btree &other)
-    : btree(other.key_comp(), other.allocator()) {
-  copy_or_move_values_in_order(&other);
+template <typename K>
+auto btree<P>::equal_range(const K &key) -> std::pair<iterator, iterator> {
+  const SearchResult<iterator, is_key_compare_to::value> res =
+      internal_lower_bound(key);
+  const iterator lower = internal_end(res.value);
+  if (res.HasMatch() ? !res.IsEq()
+                     : lower == end() || compare_keys(key, lower.key())) {
+    return {lower, lower};
+  }
+
+  const iterator next = std::next(lower);
+  // When the comparator is heterogeneous, we can't assume that comparison with
+  // a non-`key_type` will be equivalent to `key_type` comparisons, so there
+  // could be multiple equivalent keys even in a unique-container. But for
+  // heterogeneous comparisons from the default string adapted comparators, we
+  // don't need to worry about this.
+  if (!is_multi_container::value &&
+      (std::is_same<K, key_type>::value || is_key_compare_adapted::value)) {
+    // The next iterator after lower must point to a key greater than `key`.
+    // Note: if this assert fails, then it may indicate that the comparator does
+    // not meet the equivalence requirements for Compare
+    // (see https://en.cppreference.com/w/cpp/named_req/Compare).
+    assert(next == end() || compare_keys(key, next.key()));
+    return {lower, next};
+  }
+  // Try once more to avoid the call to upper_bound() if there's only one
+  // equivalent key. This should prevent all calls to upper_bound() in cases of
+  // unique-containers with heterogeneous comparators in which all comparison
+  // operators have the same equivalence classes.
+  if (next == end() || compare_keys(key, next.key())) return {lower, next};
+
+  // In this case, we need to call upper_bound() to avoid worst case O(N)
+  // behavior if we were to iterate over equal keys.
+  return {lower, upper_bound(key)};
 }
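
// A sketch of the hazard the comments above guard against, using a
// hypothetical transparent comparator on std::set: when the lookup type has
// coarser equivalence classes than key_type, several distinct keys in a
// unique container can be equivalent to one lookup value.
#include <cassert>
#include <iterator>
#include <set>
#include <string>

struct CompareFirstChar {
  using is_transparent = void;
  bool operator()(const std::string &a, const std::string &b) const {
    return a < b;
  }
  bool operator()(const std::string &a, char b) const { return a[0] < b; }
  bool operator()(char a, const std::string &b) const { return a < b[0]; }
};

int main() {
  std::set<std::string, CompareFirstChar> s = {"apple", "apricot", "banana"};
  // A unique container, yet two elements are equivalent to the lookup 'a'.
  auto range = s.equal_range('a');
  assert(std::distance(range.first, range.second) == 2);
}
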
 
 template <typename P>
-template <typename... Args>
-auto btree<P>::insert_unique(const key_type &key, Args &&... args)
+template <typename K, typename... Args>
+auto btree<P>::insert_unique(const K &key, Args &&... args)
     -> std::pair<iterator, bool> {
   if (empty()) {
     mutable_root() = rightmost_ = new_leaf_root_node(1);
   }
 
-  auto res = internal_locate(key);
-  iterator &iter = res.value;
+  SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
+  iterator iter = res.value;
 
   if (res.HasMatch()) {
     if (res.IsEq()) {
@@ -1881,8 +1954,8 @@ auto btree<P>::insert_unique(const key_type &key, Args &&... args)
 }
 
 template <typename P>
-template <typename... Args>
-inline auto btree<P>::insert_hint_unique(iterator position, const key_type &key,
+template <typename K, typename... Args>
+inline auto btree<P>::insert_hint_unique(iterator position, const K &key,
                                          Args &&... args)
     -> std::pair<iterator, bool> {
   if (!empty()) {
@@ -1906,14 +1979,23 @@ inline auto btree<P>::insert_hint_unique(iterator position, const key_type &key,
 }
 
 template <typename P>
-template <typename InputIterator>
-void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e) {
+template <typename InputIterator, typename>
+void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, int) {
   for (; b != e; ++b) {
     insert_hint_unique(end(), params_type::key(*b), *b);
   }
 }
 
 template <typename P>
+template <typename InputIterator>
+void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, char) {
+  for (; b != e; ++b) {
+    init_type value(*b);
+    insert_hint_unique(end(), params_type::key(value), std::move(value));
+  }
+}
+
+template <typename P>
 template <typename ValueType>
 auto btree<P>::insert_multi(const key_type &key, ValueType &&v) -> iterator {
   if (empty()) {
@@ -1968,7 +2050,7 @@ auto btree<P>::operator=(const btree &other) -> btree & {
       *mutable_allocator() = other.allocator();
     }
 
-    copy_or_move_values_in_order(&other);
+    copy_or_move_values_in_order(other);
   }
   return *this;
 }
@@ -1998,7 +2080,7 @@ auto btree<P>::operator=(btree &&other) noexcept -> btree & {
         // comparator while moving the values so we can't swap the key
         // comparators.
         *mutable_key_comp() = other.key_comp();
-        copy_or_move_values_in_order(&other);
+        copy_or_move_values_in_order(other);
       }
     }
   }
@@ -2010,7 +2092,7 @@ auto btree<P>::erase(iterator iter) -> iterator {
   bool internal_delete = false;
   if (!iter.node->leaf()) {
     // Deletion of a value on an internal node. First, move the largest value
-    // from our left child here, then delete that position (in remove_value()
+    // from our left child here, then delete that position (in remove_values()
     // below). We can get to the largest value from our left child by
     // decrementing iter.
     iterator internal_iter(iter);
@@ -2022,7 +2104,7 @@ auto btree<P>::erase(iterator iter) -> iterator {
   }
 
   // Delete the key from the leaf.
-  iter.node->remove_value(iter.position, mutable_allocator());
+  iter.node->remove_values(iter.position, /*to_erase=*/1, mutable_allocator());
   --size_;
 
   // We want to return the next value after the one we just erased. If we
@@ -2097,7 +2179,9 @@ auto btree<P>::erase_range(iterator begin, iterator end)
   }
 
   if (begin.node == end.node) {
-    erase_same_node(begin, end);
+    assert(end.position > begin.position);
+    begin.node->remove_values(begin.position, end.position - begin.position,
+                              mutable_allocator());
     size_ -= count;
     return {count, rebalance_after_delete(begin)};
   }
@@ -2107,8 +2191,11 @@ auto btree<P>::erase_range(iterator begin, iterator end)
     if (begin.node->leaf()) {
       const size_type remaining_to_erase = size_ - target_size;
       const size_type remaining_in_node = begin.node->finish() - begin.position;
-      begin = erase_from_leaf_node(
-          begin, (std::min)(remaining_to_erase, remaining_in_node));
+      const size_type to_erase =
+          (std::min)(remaining_to_erase, remaining_in_node);
+      begin.node->remove_values(begin.position, to_erase, mutable_allocator());
+      size_ -= to_erase;
+      begin = rebalance_after_delete(begin);
     } else {
       begin = erase(begin);
     }
@@ -2117,79 +2204,9 @@ auto btree<P>::erase_range(iterator begin, iterator end)
 }
 
 template <typename P>
-void btree<P>::erase_same_node(iterator begin, iterator end) {
-  assert(begin.node == end.node);
-  assert(end.position > begin.position);
-
-  node_type *node = begin.node;
-  size_type to_erase = end.position - begin.position;
-  if (!node->leaf()) {
-    // Delete all children between begin and end.
-    for (size_type i = 0; i < to_erase; ++i) {
-      internal_clear(node->child(begin.position + i + 1));
-    }
-    // Rotate children after end into new positions.
-    for (size_type i = begin.position + to_erase + 1; i <= node->finish();
-         ++i) {
-      node->set_child(i - to_erase, node->child(i));
-      node->clear_child(i);
-    }
-  }
-  node->remove_values_ignore_children(begin.position, to_erase,
-                                      mutable_allocator());
-
-  // Do not need to update rightmost_, because
-  // * either end == this->end(), and therefore node == rightmost_, and still
-  //   exists
-  // * or end != this->end(), and therefore rightmost_ hasn't been erased, since
-  //   it wasn't covered in [begin, end)
-}
-
-template <typename P>
-auto btree<P>::erase_from_leaf_node(iterator begin, size_type to_erase)
-    -> iterator {
-  node_type *node = begin.node;
-  assert(node->leaf());
-  assert(node->finish() > begin.position);
-  assert(begin.position + to_erase <= node->finish());
-
-  node->remove_values_ignore_children(begin.position, to_erase,
-                                      mutable_allocator());
-
-  size_ -= to_erase;
-
-  return rebalance_after_delete(begin);
-}
-
-template <typename P>
-template <typename K>
-auto btree<P>::erase_unique(const K &key) -> size_type {
-  const iterator iter = internal_find(key);
-  if (iter.node == nullptr) {
-    // The key doesn't exist in the tree, return nothing done.
-    return 0;
-  }
-  erase(iter);
-  return 1;
-}
-
-template <typename P>
-template <typename K>
-auto btree<P>::erase_multi(const K &key) -> size_type {
-  const iterator begin = internal_lower_bound(key);
-  if (begin.node == nullptr) {
-    // The key doesn't exist in the tree, return nothing done.
-    return 0;
-  }
-  // Delete all of the keys between begin and upper_bound(key).
-  const iterator end = internal_end(internal_upper_bound(key));
-  return erase_range(begin, end).first;
-}
-
-template <typename P>
 void btree<P>::clear() {
   if (!empty()) {
-    internal_clear(root());
+    node_type::clear_and_delete(root(), mutable_allocator());
   }
   mutable_root() = EmptyNode();
   rightmost_ = EmptyNode();
@@ -2244,11 +2261,11 @@ void btree<P>::rebalance_or_split(iterator *iter) {
         // inserting at the end of the right node then we bias rebalancing to
         // fill up the left node.
         int to_move = (kNodeValues - left->count()) /
-                      (1 + (insert_position < kNodeValues));
+                      (1 + (insert_position < static_cast<int>(kNodeValues)));
         to_move = (std::max)(1, to_move);
 
         if (insert_position - to_move >= node->start() ||
-            left->count() + to_move < kNodeValues) {
+            left->count() + to_move < static_cast<int>(kNodeValues)) {
           left->rebalance_right_to_left(to_move, node, mutable_allocator());
 
           assert(node->max_count() - node->count() == to_move);
@@ -2272,12 +2289,12 @@ void btree<P>::rebalance_or_split(iterator *iter) {
         // We bias rebalancing based on the position being inserted. If we're
         // inserting at the beginning of the left node then we bias rebalancing
         // to fill up the right node.
-        int to_move = (kNodeValues - right->count()) /
+        int to_move = (static_cast<int>(kNodeValues) - right->count()) /
                       (1 + (insert_position > node->start()));
         to_move = (std::max)(1, to_move);
 
         if (insert_position <= node->finish() - to_move ||
-            right->count() + to_move < kNodeValues) {
+            right->count() + to_move < static_cast<int>(kNodeValues)) {
           node->rebalance_left_to_right(to_move, right, mutable_allocator());
 
           if (insert_position > node->finish()) {
@@ -2330,12 +2347,7 @@ void btree<P>::rebalance_or_split(iterator *iter) {
 template <typename P>
 void btree<P>::merge_nodes(node_type *left, node_type *right) {
   left->merge(right, mutable_allocator());
-  if (right->leaf()) {
-    if (rightmost_ == right) rightmost_ = left;
-    delete_leaf_node(right);
-  } else {
-    delete_internal_node(right);
-  }
+  if (rightmost_ == right) rightmost_ = left;
 }
 
 template <typename P>
@@ -2345,7 +2357,7 @@ bool btree<P>::try_merge_or_rebalance(iterator *iter) {
     // Try merging with our left sibling.
     node_type *left = parent->child(iter->node->position() - 1);
     assert(left->max_count() == kNodeValues);
-    if (1 + left->count() + iter->node->count() <= kNodeValues) {
+    if (1U + left->count() + iter->node->count() <= kNodeValues) {
       iter->position += 1 + left->count();
       merge_nodes(left, iter->node);
       iter->node = left;
@@ -2356,7 +2368,7 @@ bool btree<P>::try_merge_or_rebalance(iterator *iter) {
     // Try merging with our right sibling.
     node_type *right = parent->child(iter->node->position() + 1);
     assert(right->max_count() == kNodeValues);
-    if (1 + iter->node->count() + right->count() <= kNodeValues) {
+    if (1U + iter->node->count() + right->count() <= kNodeValues) {
       merge_nodes(iter->node, right);
       return true;
     }
@@ -2392,20 +2404,20 @@ bool btree<P>::try_merge_or_rebalance(iterator *iter) {
 
 template <typename P>
 void btree<P>::try_shrink() {
-  if (root()->count() > 0) {
+  node_type *orig_root = root();
+  if (orig_root->count() > 0) {
     return;
   }
   // Deleted the last item on the root node, shrink the height of the tree.
-  if (root()->leaf()) {
+  if (orig_root->leaf()) {
     assert(size() == 0);
-    delete_leaf_node(root());
     mutable_root() = rightmost_ = EmptyNode();
   } else {
-    node_type *child = root()->start_child();
+    node_type *child = orig_root->start_child();
     child->make_root();
-    delete_internal_node(root());
     mutable_root() = child;
   }
+  node_type::clear_and_delete(orig_root, mutable_allocator());
 }
 
 template <typename P>
@@ -2433,7 +2445,7 @@ inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
     --iter;
     ++iter.position;
   }
-  const int max_count = iter.node->max_count();
+  const field_type max_count = iter.node->max_count();
   allocator_type *alloc = mutable_allocator();
   if (iter.node->count() == max_count) {
     // Make room in the leaf for the new item.
@@ -2450,7 +2462,7 @@ inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
                            old_root->start(), old_root, alloc);
       new_root->set_finish(old_root->finish());
       old_root->set_finish(old_root->start());
-      delete_leaf_node(old_root);
+      node_type::clear_and_delete(old_root, alloc);
       mutable_root() = rightmost_ = new_root;
     } else {
       rebalance_or_split(&iter);
@@ -2465,61 +2477,48 @@ template <typename P>
 template <typename K>
 inline auto btree<P>::internal_locate(const K &key) const
     -> SearchResult<iterator, is_key_compare_to::value> {
-  return internal_locate_impl(key, is_key_compare_to());
-}
-
-template <typename P>
-template <typename K>
-inline auto btree<P>::internal_locate_impl(
-    const K &key, std::false_type /* IsCompareTo */) const
-    -> SearchResult<iterator, false> {
   iterator iter(const_cast<node_type *>(root()));
   for (;;) {
-    iter.position = iter.node->lower_bound(key, key_comp()).value;
-    // NOTE: we don't need to walk all the way down the tree if the keys are
-    // equal, but determining equality would require doing an extra comparison
-    // on each node on the way down, and we will need to go all the way to the
-    // leaf node in the expected case.
-    if (iter.node->leaf()) {
-      break;
-    }
-    iter.node = iter.node->child(iter.position);
-  }
-  return {iter};
-}
-
-template <typename P>
-template <typename K>
-inline auto btree<P>::internal_locate_impl(
-    const K &key, std::true_type /* IsCompareTo */) const
-    -> SearchResult<iterator, true> {
-  iterator iter(const_cast<node_type *>(root()));
-  for (;;) {
-    SearchResult<int, true> res = iter.node->lower_bound(key, key_comp());
+    SearchResult<int, is_key_compare_to::value> res =
+        iter.node->lower_bound(key, key_comp());
     iter.position = res.value;
-    if (res.match == MatchKind::kEq) {
+    if (res.IsEq()) {
       return {iter, MatchKind::kEq};
     }
+    // Note: in the non-key-compare-to case, we don't need to walk all the way
+    // down the tree if the keys are equal, but determining equality would
+    // require doing an extra comparison on each node on the way down, and we
+    // will need to go all the way to the leaf node in the expected case.
     if (iter.node->leaf()) {
       break;
     }
     iter.node = iter.node->child(iter.position);
   }
+  // Note: in the non-key-compare-to case, the key may actually be equivalent
+  // here (and the MatchKind::kNe is ignored).
   return {iter, MatchKind::kNe};
 }
 
 template <typename P>
 template <typename K>
-auto btree<P>::internal_lower_bound(const K &key) const -> iterator {
+auto btree<P>::internal_lower_bound(const K &key) const
+    -> SearchResult<iterator, is_key_compare_to::value> {
   iterator iter(const_cast<node_type *>(root()));
+  SearchResult<int, is_key_compare_to::value> res;
+  bool seen_eq = false;
   for (;;) {
-    iter.position = iter.node->lower_bound(key, key_comp()).value;
+    res = iter.node->lower_bound(key, key_comp());
+    iter.position = res.value;
+    // TODO(ezb): we should be able to terminate early on IsEq() if there can't
+    // be multiple equivalent keys in the container for this lookup type.
     if (iter.node->leaf()) {
       break;
     }
+    seen_eq = seen_eq || res.IsEq();
     iter.node = iter.node->child(iter.position);
   }
-  return internal_last(iter);
+  if (res.IsEq()) return {iter, MatchKind::kEq};
+  return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe};
 }
 
 template <typename P>
@@ -2539,7 +2538,7 @@ auto btree<P>::internal_upper_bound(const K &key) const -> iterator {
 template <typename P>
 template <typename K>
 auto btree<P>::internal_find(const K &key) const -> iterator {
-  auto res = internal_locate(key);
+  SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
   if (res.HasMatch()) {
     if (res.IsEq()) {
       return res.value;
@@ -2554,18 +2553,6 @@ auto btree<P>::internal_find(const K &key) const -> iterator {
 }
 
 template <typename P>
-void btree<P>::internal_clear(node_type *node) {
-  if (!node->leaf()) {
-    for (int i = node->start(); i <= node->finish(); ++i) {
-      internal_clear(node->child(i));
-    }
-    delete_internal_node(node);
-  } else {
-    delete_leaf_node(node);
-  }
-}
-
-template <typename P>
 int btree<P>::internal_verify(const node_type *node, const key_type *lo,
                               const key_type *hi) const {
   assert(node->count() > 0);
diff --git a/third_party/abseil_cpp/absl/container/internal/btree_container.h b/third_party/abseil_cpp/absl/container/internal/btree_container.h
index 734c90ef3d9c..887eda4122f8 100644
--- a/third_party/abseil_cpp/absl/container/internal/btree_container.h
+++ b/third_party/abseil_cpp/absl/container/internal/btree_container.h
@@ -23,6 +23,7 @@
 #include "absl/base/internal/throw_delegate.h"
 #include "absl/container/internal/btree.h"  // IWYU pragma: export
 #include "absl/container/internal/common.h"
+#include "absl/memory/memory.h"
 #include "absl/meta/type_traits.h"
 
 namespace absl {
@@ -68,8 +69,21 @@ class btree_container {
   explicit btree_container(const key_compare &comp,
                            const allocator_type &alloc = allocator_type())
       : tree_(comp, alloc) {}
-  btree_container(const btree_container &other) = default;
-  btree_container(btree_container &&other) noexcept = default;
+  explicit btree_container(const allocator_type &alloc)
+      : tree_(key_compare(), alloc) {}
+
+  btree_container(const btree_container &other)
+      : btree_container(other, absl::allocator_traits<allocator_type>::
+                                   select_on_container_copy_construction(
+                                       other.get_allocator())) {}
+  btree_container(const btree_container &other, const allocator_type &alloc)
+      : tree_(other.tree_, alloc) {}
+
+  btree_container(btree_container &&other) noexcept(
+      std::is_nothrow_move_constructible<Tree>::value) = default;
+  btree_container(btree_container &&other, const allocator_type &alloc)
+      : tree_(std::move(other.tree_), alloc) {}
+
   btree_container &operator=(const btree_container &other) = default;
   btree_container &operator=(btree_container &&other) noexcept(
       std::is_nothrow_move_assignable<Tree>::value) = default;
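
// A small sketch of what select_on_container_copy_construction (used by the
// copy constructor above) decides: for std::allocator it returns a copy of
// the source allocator, while a stateful allocator may hand back a
// default-constructed one for the new container instead.
#include <memory>

int main() {
  std::allocator<int> src;
  auto chosen = std::allocator_traits<std::allocator<int>>::
      select_on_container_copy_construction(src);
  (void)chosen;  // the copy is built with `chosen`, not with `src` directly
}
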
@@ -90,6 +104,11 @@ class btree_container {
 
   // Lookup routines.
   template <typename K = key_type>
+  size_type count(const key_arg<K> &key) const {
+    auto equal_range = this->equal_range(key);
+    return std::distance(equal_range.first, equal_range.second);
+  }
+  template <typename K = key_type>
   iterator find(const key_arg<K> &key) {
     return tree_.find(key);
   }
@@ -138,6 +157,11 @@ class btree_container {
   iterator erase(const_iterator first, const_iterator last) {
     return tree_.erase_range(iterator(first), iterator(last)).second;
   }
+  template <typename K = key_type>
+  size_type erase(const key_arg<K> &key) {
+    auto equal_range = this->equal_range(key);
+    return tree_.erase_range(equal_range.first, equal_range.second).first;
+  }
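
// A usage sketch of the count()/erase(key) semantics implemented above in
// terms of equal_range, assuming the absl btree headers are available.
#include <cassert>

#include "absl/container/btree_set.h"

int main() {
  absl::btree_multiset<int> s = {1, 2, 2, 3};
  assert(s.count(2) == 2);         // std::distance over equal_range(2)
  const auto erased = s.erase(2);  // erase_range over equal_range(2)
  (void)erased;
  assert(erased == 2 && s.count(2) == 0);
}
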
 
   // Extract routines.
   node_type extract(iterator position) {
@@ -151,7 +175,6 @@ class btree_container {
     return extract(iterator(position));
   }
 
- public:
   // Utility routines.
   void clear() { tree_.clear(); }
   void swap(btree_container &other) { tree_.swap(other.tree_); }
@@ -235,7 +258,7 @@ class btree_set_container : public btree_container<Tree> {
   using super_type::super_type;
   btree_set_container() {}
 
-  // Range constructor.
+  // Range constructors.
   template <class InputIterator>
   btree_set_container(InputIterator b, InputIterator e,
                       const key_compare &comp = key_compare(),
@@ -243,18 +266,19 @@ class btree_set_container : public btree_container<Tree> {
       : super_type(comp, alloc) {
     insert(b, e);
   }
+  template <class InputIterator>
+  btree_set_container(InputIterator b, InputIterator e,
+                      const allocator_type &alloc)
+      : btree_set_container(b, e, key_compare(), alloc) {}
 
-  // Initializer list constructor.
+  // Initializer list constructors.
   btree_set_container(std::initializer_list<init_type> init,
                       const key_compare &comp = key_compare(),
                       const allocator_type &alloc = allocator_type())
       : btree_set_container(init.begin(), init.end(), comp, alloc) {}
-
-  // Lookup routines.
-  template <typename K = key_type>
-  size_type count(const key_arg<K> &key) const {
-    return this->tree_.count_unique(key);
-  }
+  btree_set_container(std::initializer_list<init_type> init,
+                      const allocator_type &alloc)
+      : btree_set_container(init.begin(), init.end(), alloc) {}
 
   // Insertion routines.
   std::pair<iterator, bool> insert(const value_type &v) {
@@ -268,31 +292,29 @@ class btree_set_container : public btree_container<Tree> {
     init_type v(std::forward<Args>(args)...);
     return this->tree_.insert_unique(params_type::key(v), std::move(v));
   }
-  iterator insert(const_iterator position, const value_type &v) {
+  iterator insert(const_iterator hint, const value_type &v) {
     return this->tree_
-        .insert_hint_unique(iterator(position), params_type::key(v), v)
+        .insert_hint_unique(iterator(hint), params_type::key(v), v)
         .first;
   }
-  iterator insert(const_iterator position, value_type &&v) {
+  iterator insert(const_iterator hint, value_type &&v) {
     return this->tree_
-        .insert_hint_unique(iterator(position), params_type::key(v),
-                            std::move(v))
+        .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
         .first;
   }
   template <typename... Args>
-  iterator emplace_hint(const_iterator position, Args &&... args) {
+  iterator emplace_hint(const_iterator hint, Args &&... args) {
     init_type v(std::forward<Args>(args)...);
     return this->tree_
-        .insert_hint_unique(iterator(position), params_type::key(v),
-                            std::move(v))
+        .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
         .first;
   }
   template <typename InputIterator>
   void insert(InputIterator b, InputIterator e) {
-    this->tree_.insert_iterator_unique(b, e);
+    this->tree_.insert_iterator_unique(b, e, 0);
   }
   void insert(std::initializer_list<init_type> init) {
-    this->tree_.insert_iterator_unique(init.begin(), init.end());
+    this->tree_.insert_iterator_unique(init.begin(), init.end(), 0);
   }
   insert_return_type insert(node_type &&node) {
     if (!node) return {this->end(), false, node_type()};
@@ -315,14 +337,10 @@ class btree_set_container : public btree_container<Tree> {
     return res.first;
   }
 
-  // Deletion routines.
-  template <typename K = key_type>
-  size_type erase(const key_arg<K> &key) {
-    return this->tree_.erase_unique(key);
-  }
-  using super_type::erase;
-
   // Node extraction routines.
+  // TODO(ezb): when the comparator is heterogeneous and has different
+  // equivalence classes for different lookup types, we should extract the first
+  // equivalent value if there are multiple.
   template <typename K = key_type>
   node_type extract(const key_arg<K> &key) {
     auto it = this->find(key);
@@ -344,7 +362,7 @@ class btree_set_container : public btree_container<Tree> {
           int> = 0>
   void merge(btree_container<T> &src) {  // NOLINT
     for (auto src_it = src.begin(); src_it != src.end();) {
-      if (insert(std::move(*src_it)).second) {
+      if (insert(std::move(params_type::element(src_it.slot()))).second) {
         src_it = src.erase(src_it);
       } else {
         ++src_it;
@@ -371,6 +389,7 @@ template <typename Tree>
 class btree_map_container : public btree_set_container<Tree> {
   using super_type = btree_set_container<Tree>;
   using params_type = typename Tree::params_type;
+  friend class BtreeNodePeer;
 
  private:
   template <class K>
@@ -392,111 +411,72 @@ class btree_map_container : public btree_set_container<Tree> {
   // Insertion routines.
   // Note: the nullptr template arguments and extra `const M&` overloads allow
   // supporting bitfield arguments.
-  // Note: when we call `std::forward<M>(obj)` twice, it's safe because
-  // insert_unique/insert_hint_unique are guaranteed to not consume `obj` when
-  // `ret.second` is false.
-  template <class M>
-  std::pair<iterator, bool> insert_or_assign(const key_type &k, const M &obj) {
-    const std::pair<iterator, bool> ret = this->tree_.insert_unique(k, k, obj);
-    if (!ret.second) ret.first->second = obj;
-    return ret;
+  template <typename K = key_type, class M>
+  std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k,
+                                             const M &obj) {
+    return insert_or_assign_impl(k, obj);
   }
-  template <class M, key_type * = nullptr>
-  std::pair<iterator, bool> insert_or_assign(key_type &&k, const M &obj) {
-    const std::pair<iterator, bool> ret =
-        this->tree_.insert_unique(k, std::move(k), obj);
-    if (!ret.second) ret.first->second = obj;
-    return ret;
+  template <typename K = key_type, class M, K * = nullptr>
+  std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, const M &obj) {
+    return insert_or_assign_impl(std::forward<K>(k), obj);
   }
-  template <class M, M * = nullptr>
-  std::pair<iterator, bool> insert_or_assign(const key_type &k, M &&obj) {
-    const std::pair<iterator, bool> ret =
-        this->tree_.insert_unique(k, k, std::forward<M>(obj));
-    if (!ret.second) ret.first->second = std::forward<M>(obj);
-    return ret;
+  template <typename K = key_type, class M, M * = nullptr>
+  std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k, M &&obj) {
+    return insert_or_assign_impl(k, std::forward<M>(obj));
   }
-  template <class M, key_type * = nullptr, M * = nullptr>
-  std::pair<iterator, bool> insert_or_assign(key_type &&k, M &&obj) {
-    const std::pair<iterator, bool> ret =
-        this->tree_.insert_unique(k, std::move(k), std::forward<M>(obj));
-    if (!ret.second) ret.first->second = std::forward<M>(obj);
-    return ret;
+  template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
+  std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, M &&obj) {
+    return insert_or_assign_impl(std::forward<K>(k), std::forward<M>(obj));
   }
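
// A sketch of why the extra `const M&` overloads above matter: an lvalue
// bit-field cannot bind to the non-const reference deduced for a forwarding
// parameter, but it can bind to `const M&` (via a temporary copy).
struct Flags {
  unsigned verbose : 1;
};

template <class M>
void forwarding(M &&) {}         // deduces M = unsigned& for an lvalue

template <class M>
void by_const_ref(const M &) {}  // deduces M = unsigned; accepts a bit-field

int main() {
  Flags f{1};
  // forwarding(f.verbose);      // ill-formed: reference to a bit-field
  by_const_ref(f.verbose);       // OK
}
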
-  template <class M>
-  iterator insert_or_assign(const_iterator position, const key_type &k,
+  template <typename K = key_type, class M>
+  iterator insert_or_assign(const_iterator hint, const key_arg<K> &k,
                             const M &obj) {
-    const std::pair<iterator, bool> ret =
-        this->tree_.insert_hint_unique(iterator(position), k, k, obj);
-    if (!ret.second) ret.first->second = obj;
-    return ret.first;
+    return insert_or_assign_hint_impl(hint, k, obj);
   }
-  template <class M, key_type * = nullptr>
-  iterator insert_or_assign(const_iterator position, key_type &&k,
-                            const M &obj) {
-    const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
-        iterator(position), k, std::move(k), obj);
-    if (!ret.second) ret.first->second = obj;
-    return ret.first;
+  template <typename K = key_type, class M, K * = nullptr>
+  iterator insert_or_assign(const_iterator hint, key_arg<K> &&k, const M &obj) {
+    return insert_or_assign_hint_impl(hint, std::forward<K>(k), obj);
   }
-  template <class M, M * = nullptr>
-  iterator insert_or_assign(const_iterator position, const key_type &k,
-                            M &&obj) {
-    const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
-        iterator(position), k, k, std::forward<M>(obj));
-    if (!ret.second) ret.first->second = std::forward<M>(obj);
-    return ret.first;
+  template <typename K = key_type, class M, M * = nullptr>
+  iterator insert_or_assign(const_iterator hint, const key_arg<K> &k, M &&obj) {
+    return insert_or_assign_hint_impl(hint, k, std::forward<M>(obj));
   }
-  template <class M, key_type * = nullptr, M * = nullptr>
-  iterator insert_or_assign(const_iterator position, key_type &&k, M &&obj) {
-    const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
-        iterator(position), k, std::move(k), std::forward<M>(obj));
-    if (!ret.second) ret.first->second = std::forward<M>(obj);
-    return ret.first;
+  template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
+  iterator insert_or_assign(const_iterator hint, key_arg<K> &&k, M &&obj) {
+    return insert_or_assign_hint_impl(hint, std::forward<K>(k),
+                                      std::forward<M>(obj));
   }
-  template <typename... Args>
-  std::pair<iterator, bool> try_emplace(const key_type &k, Args &&... args) {
-    return this->tree_.insert_unique(
-        k, std::piecewise_construct, std::forward_as_tuple(k),
-        std::forward_as_tuple(std::forward<Args>(args)...));
+
+  template <typename K = key_type, typename... Args,
+            typename absl::enable_if_t<
+                !std::is_convertible<K, const_iterator>::value, int> = 0>
+  std::pair<iterator, bool> try_emplace(const key_arg<K> &k, Args &&... args) {
+    return try_emplace_impl(k, std::forward<Args>(args)...);
   }
-  template <typename... Args>
-  std::pair<iterator, bool> try_emplace(key_type &&k, Args &&... args) {
-    // Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k`
-    // and then using `k` unsequenced. This is safe because the move is into a
-    // forwarding reference and insert_unique guarantees that `key` is never
-    // referenced after consuming `args`.
-    const key_type &key_ref = k;
-    return this->tree_.insert_unique(
-        key_ref, std::piecewise_construct, std::forward_as_tuple(std::move(k)),
-        std::forward_as_tuple(std::forward<Args>(args)...));
+  template <typename K = key_type, typename... Args,
+            typename absl::enable_if_t<
+                !std::is_convertible<K, const_iterator>::value, int> = 0>
+  std::pair<iterator, bool> try_emplace(key_arg<K> &&k, Args &&... args) {
+    return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
   }
-  template <typename... Args>
-  iterator try_emplace(const_iterator hint, const key_type &k,
+  template <typename K = key_type, typename... Args>
+  iterator try_emplace(const_iterator hint, const key_arg<K> &k,
                        Args &&... args) {
-    return this->tree_
-        .insert_hint_unique(iterator(hint), k, std::piecewise_construct,
-                            std::forward_as_tuple(k),
-                            std::forward_as_tuple(std::forward<Args>(args)...))
-        .first;
+    return try_emplace_hint_impl(hint, k, std::forward<Args>(args)...);
   }
-  template <typename... Args>
-  iterator try_emplace(const_iterator hint, key_type &&k, Args &&... args) {
-    // Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k`
-    // and then using `k` unsequenced. This is safe because the move is into a
-    // forwarding reference and insert_hint_unique guarantees that `key` is
-    // never referenced after consuming `args`.
-    const key_type &key_ref = k;
-    return this->tree_
-        .insert_hint_unique(iterator(hint), key_ref, std::piecewise_construct,
-                            std::forward_as_tuple(std::move(k)),
-                            std::forward_as_tuple(std::forward<Args>(args)...))
-        .first;
+  template <typename K = key_type, typename... Args>
+  iterator try_emplace(const_iterator hint, key_arg<K> &&k, Args &&... args) {
+    return try_emplace_hint_impl(hint, std::forward<K>(k),
+                                 std::forward<Args>(args)...);
   }
-  mapped_type &operator[](const key_type &k) {
+
+  template <typename K = key_type>
+  mapped_type &operator[](const key_arg<K> &k) {
     return try_emplace(k).first->second;
   }
-  mapped_type &operator[](key_type &&k) {
-    return try_emplace(std::move(k)).first->second;
+  template <typename K = key_type>
+  mapped_type &operator[](key_arg<K> &&k) {
+    return try_emplace(std::forward<K>(k)).first->second;
   }
 
   template <typename K = key_type>
@@ -513,6 +493,40 @@ class btree_map_container : public btree_set_container<Tree> {
       base_internal::ThrowStdOutOfRange("absl::btree_map::at");
     return it->second;
   }
+
+ private:
+  // Note: when we call `std::forward<M>(obj)` twice, it's safe because
+  // insert_unique/insert_hint_unique are guaranteed to not consume `obj` when
+  // `ret.second` is false.
+  template <class K, class M>
+  std::pair<iterator, bool> insert_or_assign_impl(K &&k, M &&obj) {
+    const std::pair<iterator, bool> ret =
+        this->tree_.insert_unique(k, std::forward<K>(k), std::forward<M>(obj));
+    if (!ret.second) ret.first->second = std::forward<M>(obj);
+    return ret;
+  }
+  template <class K, class M>
+  iterator insert_or_assign_hint_impl(const_iterator hint, K &&k, M &&obj) {
+    const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
+        iterator(hint), k, std::forward<K>(k), std::forward<M>(obj));
+    if (!ret.second) ret.first->second = std::forward<M>(obj);
+    return ret.first;
+  }
+
+  template <class K, class... Args>
+  std::pair<iterator, bool> try_emplace_impl(K &&k, Args &&... args) {
+    return this->tree_.insert_unique(
+        k, std::piecewise_construct, std::forward_as_tuple(std::forward<K>(k)),
+        std::forward_as_tuple(std::forward<Args>(args)...));
+  }
+  template <class K, class... Args>
+  iterator try_emplace_hint_impl(const_iterator hint, K &&k, Args &&... args) {
+    return this->tree_
+        .insert_hint_unique(iterator(hint), k, std::piecewise_construct,
+                            std::forward_as_tuple(std::forward<K>(k)),
+                            std::forward_as_tuple(std::forward<Args>(args)...))
+        .first;
+  }
 };
 
 // A common base class for btree_multiset and btree_multimap.
@@ -540,7 +554,7 @@ class btree_multiset_container : public btree_container<Tree> {
   using super_type::super_type;
   btree_multiset_container() {}
 
-  // Range constructor.
+  // Range constructors.
   template <class InputIterator>
   btree_multiset_container(InputIterator b, InputIterator e,
                            const key_compare &comp = key_compare(),
@@ -548,29 +562,30 @@ class btree_multiset_container : public btree_container<Tree> {
       : super_type(comp, alloc) {
     insert(b, e);
   }
+  template <class InputIterator>
+  btree_multiset_container(InputIterator b, InputIterator e,
+                           const allocator_type &alloc)
+      : btree_multiset_container(b, e, key_compare(), alloc) {}
 
-  // Initializer list constructor.
+  // Initializer list constructors.
   btree_multiset_container(std::initializer_list<init_type> init,
                            const key_compare &comp = key_compare(),
                            const allocator_type &alloc = allocator_type())
       : btree_multiset_container(init.begin(), init.end(), comp, alloc) {}
-
-  // Lookup routines.
-  template <typename K = key_type>
-  size_type count(const key_arg<K> &key) const {
-    return this->tree_.count_multi(key);
-  }
+  btree_multiset_container(std::initializer_list<init_type> init,
+                           const allocator_type &alloc)
+      : btree_multiset_container(init.begin(), init.end(), alloc) {}
 
   // Insertion routines.
   iterator insert(const value_type &v) { return this->tree_.insert_multi(v); }
   iterator insert(value_type &&v) {
     return this->tree_.insert_multi(std::move(v));
   }
-  iterator insert(const_iterator position, const value_type &v) {
-    return this->tree_.insert_hint_multi(iterator(position), v);
+  iterator insert(const_iterator hint, const value_type &v) {
+    return this->tree_.insert_hint_multi(iterator(hint), v);
   }
-  iterator insert(const_iterator position, value_type &&v) {
-    return this->tree_.insert_hint_multi(iterator(position), std::move(v));
+  iterator insert(const_iterator hint, value_type &&v) {
+    return this->tree_.insert_hint_multi(iterator(hint), std::move(v));
   }
   template <typename InputIterator>
   void insert(InputIterator b, InputIterator e) {
@@ -584,9 +599,9 @@ class btree_multiset_container : public btree_container<Tree> {
     return this->tree_.insert_multi(init_type(std::forward<Args>(args)...));
   }
   template <typename... Args>
-  iterator emplace_hint(const_iterator position, Args &&... args) {
+  iterator emplace_hint(const_iterator hint, Args &&... args) {
     return this->tree_.insert_hint_multi(
-        iterator(position), init_type(std::forward<Args>(args)...));
+        iterator(hint), init_type(std::forward<Args>(args)...));
   }
   iterator insert(node_type &&node) {
     if (!node) return this->end();
@@ -605,14 +620,9 @@ class btree_multiset_container : public btree_container<Tree> {
     return res;
   }
 
-  // Deletion routines.
-  template <typename K = key_type>
-  size_type erase(const key_arg<K> &key) {
-    return this->tree_.erase_multi(key);
-  }
-  using super_type::erase;
-
   // Node extraction routines.
+  // TODO(ezb): we are supposed to extract the first equivalent key if there are
+  // multiple, but this isn't guaranteed to extract the first one.
   template <typename K = key_type>
   node_type extract(const key_arg<K> &key) {
     auto it = this->find(key);
@@ -632,8 +642,9 @@ class btree_multiset_container : public btree_container<Tree> {
                            typename T::params_type::is_map_container>>::value,
           int> = 0>
   void merge(btree_container<T> &src) {  // NOLINT
-    insert(std::make_move_iterator(src.begin()),
-           std::make_move_iterator(src.end()));
+    for (auto src_it = src.begin(), end = src.end(); src_it != end; ++src_it) {
+      insert(std::move(params_type::element(src_it.slot())));
+    }
     src.clear();
   }
 
diff --git a/third_party/abseil_cpp/absl/container/internal/common.h b/third_party/abseil_cpp/absl/container/internal/common.h
index 8990f2947273..030e9d4ab07d 100644
--- a/third_party/abseil_cpp/absl/container/internal/common.h
+++ b/third_party/abseil_cpp/absl/container/internal/common.h
@@ -146,8 +146,11 @@ class node_handle<Policy, PolicyTraits, Alloc,
 
   constexpr node_handle() {}
 
-  auto key() const -> decltype(PolicyTraits::key(std::declval<slot_type*>())) {
-    return PolicyTraits::key(this->slot());
+  // When C++17 is available, we can use std::launder to provide mutable
+  // access to the key. Otherwise, we provide const access.
+  auto key() const
+      -> decltype(PolicyTraits::mutable_key(std::declval<slot_type*>())) {
+    return PolicyTraits::mutable_key(this->slot());
   }
 
   mapped_type& mapped() const {
diff --git a/third_party/abseil_cpp/absl/container/internal/compressed_tuple.h b/third_party/abseil_cpp/absl/container/internal/compressed_tuple.h
index 02bfd03f6ce5..5ebe16494249 100644
--- a/third_party/abseil_cpp/absl/container/internal/compressed_tuple.h
+++ b/third_party/abseil_cpp/absl/container/internal/compressed_tuple.h
@@ -257,7 +257,7 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
 
   template <int I>
   ElemT<I>& get() & {
-    return internal_compressed_tuple::Storage<ElemT<I>, I>::get();
+    return StorageT<I>::get();
   }
 
   template <int I>
diff --git a/third_party/abseil_cpp/absl/container/internal/container_memory.h b/third_party/abseil_cpp/absl/container/internal/container_memory.h
index 536ea398eb10..e67529ecb6e6 100644
--- a/third_party/abseil_cpp/absl/container/internal/container_memory.h
+++ b/third_party/abseil_cpp/absl/container/internal/container_memory.h
@@ -15,25 +15,27 @@
 #ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
 #define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
 
-#ifdef ADDRESS_SANITIZER
-#include <sanitizer/asan_interface.h>
-#endif
-
-#ifdef MEMORY_SANITIZER
-#include <sanitizer/msan_interface.h>
-#endif
-
 #include <cassert>
 #include <cstddef>
 #include <memory>
+#include <new>
 #include <tuple>
 #include <type_traits>
 #include <utility>
 
+#include "absl/base/config.h"
 #include "absl/memory/memory.h"
 #include "absl/meta/type_traits.h"
 #include "absl/utility/utility.h"
 
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif
+
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+#include <sanitizer/msan_interface.h>
+#endif
+
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace container_internal {
@@ -55,8 +57,11 @@ void* Allocate(Alloc* alloc, size_t n) {
   using M = AlignedType<Alignment>;
   using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
   using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
-  A mem_alloc(*alloc);
-  void* p = AT::allocate(mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
+  // On macOS, "mem_alloc" is a #define with one argument defined in
+  // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
+  // with the "foo(bar)" syntax.
+  A my_mem_alloc(*alloc);
+  void* p = AT::allocate(my_mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
   assert(reinterpret_cast<uintptr_t>(p) % Alignment == 0 &&
          "allocator does not respect alignment");
   return p;
@@ -71,8 +76,11 @@ void Deallocate(Alloc* alloc, void* p, size_t n) {
   using M = AlignedType<Alignment>;
   using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
   using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
-  A mem_alloc(*alloc);
-  AT::deallocate(mem_alloc, static_cast<M*>(p),
+  // On macOS, "mem_alloc" is a #define with one argument defined in
+  // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
+  // with the "foo(bar)" syntax.
+  A my_mem_alloc(*alloc);
+  AT::deallocate(my_mem_alloc, static_cast<M*>(p),
                  (n + sizeof(M) - 1) / sizeof(M));
 }
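
// A condensed sketch of the rebind trick in Allocate/Deallocate above: the
// incoming allocator is rebound to a suitably aligned chunk type so the raw
// bytes come back with the requested alignment (hypothetical helper name).
#include <cstddef>
#include <memory>
#include <type_traits>

template <std::size_t Alignment, class Alloc>
void *allocate_aligned(Alloc *alloc, std::size_t n) {
  using M = typename std::aligned_storage<Alignment, Alignment>::type;
  using A = typename std::allocator_traits<Alloc>::template rebind_alloc<M>;
  using AT = typename std::allocator_traits<Alloc>::template rebind_traits<M>;
  A chunk_alloc(*alloc);
  return AT::allocate(chunk_alloc, (n + sizeof(M) - 1) / sizeof(M));
}

int main() {
  std::allocator<char> a;
  void *p = allocate_aligned<16>(&a, 100);
  (void)p;  // a real caller deallocates through the mirrored rebind
}
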
 
@@ -209,10 +217,10 @@ DecomposeValue(F&& f, Arg&& arg) {
 
 // Helper functions for asan and msan.
 inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
   ASAN_POISON_MEMORY_REGION(m, s);
 #endif
-#ifdef MEMORY_SANITIZER
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
   __msan_poison(m, s);
 #endif
   (void)m;
@@ -220,10 +228,10 @@ inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
 }
 
 inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) {
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
   ASAN_UNPOISON_MEMORY_REGION(m, s);
 #endif
-#ifdef MEMORY_SANITIZER
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
   __msan_unpoison(m, s);
 #endif
   (void)m;
@@ -351,6 +359,20 @@ struct map_slot_policy {
     return slot->value;
   }
 
+  // When C++17 is available, we can use std::launder to provide mutable
+  // access to the key for use in node handle.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+  static K& mutable_key(slot_type* slot) {
+    // Still check kMutableKeys so that we avoid calling std::launder unless
+    // necessary, because laundering can interfere with optimizations.
+    return kMutableKeys::value ? slot->key
+                               : *std::launder(const_cast<K*>(
+                                     std::addressof(slot->value.first)));
+  }
+#else  // !(defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606)
+  static const K& mutable_key(slot_type* slot) { return key(slot); }
+#endif
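
// A sketch of why std::launder appears above: once placement-new reuses
// storage that held an object with a const member, the old pointer must be
// laundered (C++17) before the new object may be read through it.
#include <new>

struct Slot {
  const int key;
};

int main() {
  Slot s{1};
  new (&s) Slot{2};  // reuse the storage; `s` has a const member
  return std::launder(&s)->key == 2 ? 0 : 1;  // laundered access sees 2
}
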
+
   static const K& key(const slot_type* slot) {
     return kMutableKeys::value ? slot->key : slot->value.first;
   }
@@ -429,13 +451,6 @@ struct map_slot_policy {
                                                    std::move(src->value));
     }
   }
-
-  template <class Allocator>
-  static void move(Allocator* alloc, slot_type* first, slot_type* last,
-                   slot_type* result) {
-    for (slot_type *src = first, *dest = result; src != last; ++src, ++dest)
-      move(alloc, src, dest);
-  }
 };
 
 }  // namespace container_internal
diff --git a/third_party/abseil_cpp/absl/container/internal/hash_function_defaults_test.cc b/third_party/abseil_cpp/absl/container/internal/hash_function_defaults_test.cc
index 2d05a0b72a0d..59576b8edebc 100644
--- a/third_party/abseil_cpp/absl/container/internal/hash_function_defaults_test.cc
+++ b/third_party/abseil_cpp/absl/container/internal/hash_function_defaults_test.cc
@@ -337,11 +337,11 @@ ABSL_NAMESPACE_END
 }  // namespace absl
 
 enum Hash : size_t {
-  kStd = 0x2,       // std::hash
+  kStd = 0x1,       // std::hash
 #ifdef _MSC_VER
   kExtension = kStd,  // In MSVC, std::hash == ::hash
 #else                 // _MSC_VER
-  kExtension = 0x4,  // ::hash (GCC extension)
+  kExtension = 0x2,  // ::hash (GCC extension)
 #endif                // _MSC_VER
 };
 
diff --git a/third_party/abseil_cpp/absl/container/internal/hash_generator_testing.cc b/third_party/abseil_cpp/absl/container/internal/hash_generator_testing.cc
index 75c4db6c3661..59cc5aac7ab8 100644
--- a/third_party/abseil_cpp/absl/container/internal/hash_generator_testing.cc
+++ b/third_party/abseil_cpp/absl/container/internal/hash_generator_testing.cc
@@ -41,8 +41,10 @@ class RandomDeviceSeedSeq {
 }  // namespace
 
 std::mt19937_64* GetSharedRng() {
-  RandomDeviceSeedSeq seed_seq;
-  static auto* rng = new std::mt19937_64(seed_seq);
+  static auto* rng = [] {
+    RandomDeviceSeedSeq seed_seq;
+    return new std::mt19937_64(seed_seq);
+  }();
   return rng;
 }
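
// A sketch of the idiom adopted above: initializing a function-local static
// through an immediately-invoked lambda scopes the helper object (here the
// seed source) to the one-time, thread-safe initialization. This sketch
// seeds from a single value for brevity; the real code uses a full seed
// sequence.
#include <random>

std::mt19937_64 *shared_rng() {
  static auto *rng = [] {
    std::random_device seed;
    return new std::mt19937_64(seed());
  }();
  return rng;
}

int main() {
  (void)(*shared_rng())();  // generate one value from the shared engine
}
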
 
diff --git a/third_party/abseil_cpp/absl/container/internal/hash_policy_traits.h b/third_party/abseil_cpp/absl/container/internal/hash_policy_traits.h
index 3e1209c6ebec..46c97b18a227 100644
--- a/third_party/abseil_cpp/absl/container/internal/hash_policy_traits.h
+++ b/third_party/abseil_cpp/absl/container/internal/hash_policy_traits.h
@@ -17,6 +17,7 @@
 
 #include <cstddef>
 #include <memory>
+#include <new>
 #include <type_traits>
 #include <utility>
 
@@ -29,15 +30,34 @@ namespace container_internal {
 // Defines how slots are initialized/destroyed/moved.
 template <class Policy, class = void>
 struct hash_policy_traits {
+  // The type of the keys stored in the hashtable.
+  using key_type = typename Policy::key_type;
+
  private:
   struct ReturnKey {
-    // We return `Key` here.
+    // When C++17 is available, we can use std::launder to provide mutable
+    // access to the key for use in node handle.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+    template <class Key,
+              absl::enable_if_t<std::is_lvalue_reference<Key>::value, int> = 0>
+    static key_type& Impl(Key&& k, int) {
+      return *std::launder(
+          const_cast<key_type*>(std::addressof(std::forward<Key>(k))));
+    }
+#endif
+
+    template <class Key>
+    static Key Impl(Key&& k, char) {
+      return std::forward<Key>(k);
+    }
+
     // When Key=T&, we forward the lvalue reference.
     // When Key=T, we return by value to avoid a dangling reference.
     // e.g., for string_hash_map.
     template <class Key, class... Args>
-    Key operator()(Key&& k, const Args&...) const {
-      return std::forward<Key>(k);
+    auto operator()(Key&& k, const Args&...) const
+        -> decltype(Impl(std::forward<Key>(k), 0)) {
+      return Impl(std::forward<Key>(k), 0);
     }
   };
 
@@ -52,9 +72,6 @@ struct hash_policy_traits {
   // The actual object stored in the hash table.
   using slot_type = typename Policy::slot_type;
 
-  // The type of the keys stored in the hashtable.
-  using key_type = typename Policy::key_type;
-
   // The argument type for insertions into the hashtable. This is different
   // from value_type for increased performance. See initializer_list constructor
   // and insert() member functions for more details.
@@ -156,7 +173,7 @@ struct hash_policy_traits {
   // Returns the "key" portion of the slot.
   // Used for node handle manipulation.
   template <class P = Policy>
-  static auto key(slot_type* slot)
+  static auto mutable_key(slot_type* slot)
       -> decltype(P::apply(ReturnKey(), element(slot))) {
     return P::apply(ReturnKey(), element(slot));
   }
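
The new ReturnKey::Impl overload is the interesting part of this hunk: node
handles need a mutable key, but the table can only reach the key through a
const reference. Under C++17, std::launder plus const_cast legally recovers a
writable pointer, provided the storage really holds a non-const object --
which it does, because map slots keep the pair in a union (see
container_memory.h). A reduced sketch, assuming the const/non-const pair types
are layout-compatible (the containers verify this statically):

    #include <new>      // std::launder, C++17
    #include <utility>

    union KeySlotSketch {
      KeySlotSketch() : mutable_pair{1, 2} {}
      ~KeySlotSketch() {}
      std::pair<int, int> mutable_pair;  // the member the table stores
      std::pair<const int, int> pair;    // the const-key view users see
    };

    int& MutableKey(const int& key) {
      // Legal only because the storage holds a non-const int; std::launder
      // stops the compiler from assuming const-ness.
      return *std::launder(const_cast<int*>(&key));
    }

    // KeySlotSketch s;
    // MutableKey(s.pair.first) = 42;  // writes mutable_pair.first

The rename from key() to mutable_key() makes that mutability explicit at the
call sites that rely on it.
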
diff --git a/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler.cc b/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler.cc
index 886524f18070..e4484fbb1b43 100644
--- a/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler.cc
+++ b/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler.cc
@@ -67,6 +67,7 @@ void HashtablezInfo::PrepareForSampling() {
   capacity.store(0, std::memory_order_relaxed);
   size.store(0, std::memory_order_relaxed);
   num_erases.store(0, std::memory_order_relaxed);
+  num_rehashes.store(0, std::memory_order_relaxed);
   max_probe_length.store(0, std::memory_order_relaxed);
   total_probe_length.store(0, std::memory_order_relaxed);
   hashes_bitwise_or.store(0, std::memory_order_relaxed);
diff --git a/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler.h b/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler.h
index 308119cf17cf..394348da58f5 100644
--- a/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler.h
+++ b/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler.h
@@ -73,6 +73,7 @@ struct HashtablezInfo {
   std::atomic<size_t> capacity;
   std::atomic<size_t> size;
   std::atomic<size_t> num_erases;
+  std::atomic<size_t> num_rehashes;
   std::atomic<size_t> max_probe_length;
   std::atomic<size_t> total_probe_length;
   std::atomic<size_t> hashes_bitwise_or;
@@ -105,6 +106,11 @@ inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
 #endif
   info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
   info->num_erases.store(0, std::memory_order_relaxed);
+  // There is only one concurrent writer, so `load` then `store` is sufficient
+  // instead of using `fetch_add`.
+  info->num_rehashes.store(
+      1 + info->num_rehashes.load(std::memory_order_relaxed),
+      std::memory_order_relaxed);
 }
 
 inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
@@ -113,7 +119,8 @@ inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
   info->capacity.store(capacity, std::memory_order_relaxed);
   if (size == 0) {
     // This is a clear, reset the total/num_erases too.
-    RecordRehashSlow(info, 0);
+    info->total_probe_length.store(0, std::memory_order_relaxed);
+    info->num_erases.store(0, std::memory_order_relaxed);
   }
 }
 
@@ -122,12 +129,21 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
 
 inline void RecordEraseSlow(HashtablezInfo* info) {
   info->size.fetch_sub(1, std::memory_order_relaxed);
-  info->num_erases.fetch_add(1, std::memory_order_relaxed);
+  // There is only one concurrent writer, so `load` then `store` is sufficient
+  // instead of using `fetch_add`.
+  info->num_erases.store(
+      1 + info->num_erases.load(std::memory_order_relaxed),
+      std::memory_order_relaxed);
 }
 
 HashtablezInfo* SampleSlow(int64_t* next_sample);
 void UnsampleSlow(HashtablezInfo* info);
 
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
+#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 class HashtablezInfoHandle {
  public:
   explicit HashtablezInfoHandle() : info_(nullptr) {}
@@ -179,14 +195,27 @@ class HashtablezInfoHandle {
   friend class HashtablezInfoHandlePeer;
   HashtablezInfo* info_;
 };
+#else
+// Ensure that when Hashtablez is turned off at compile time, HashtablezInfo can
+// be removed by the linker, in order to reduce the binary size.
+class HashtablezInfoHandle {
+ public:
+  explicit HashtablezInfoHandle() = default;
+  explicit HashtablezInfoHandle(std::nullptr_t) {}
 
-#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
+  inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {}
+  inline void RecordRehash(size_t /*total_probe_length*/) {}
+  inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {}
+  inline void RecordErase() {}
+
+  friend inline void swap(HashtablezInfoHandle& /*lhs*/,
+                          HashtablezInfoHandle& /*rhs*/) {}
+};
 #endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
-#endif  // ABSL_PER_THREAD_TLS
+#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 
 // Returns an RAII sampling handle that manages registration and unregistration
 // with the global sampler.
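
The load-then-store pattern introduced above for num_erases and num_rehashes
is a deliberate trade: fetch_add is an atomic read-modify-write (a locked
instruction on typical hardware), while a relaxed load followed by a relaxed
store compiles to plain moves. It is only correct because each HashtablezInfo
has a single writer -- the owning table -- and readers of the sample tolerate
a momentarily stale count. A sketch of the pattern:

    #include <atomic>
    #include <cstddef>

    struct SingleWriterCounterSketch {
      std::atomic<size_t> n{0};

      // Called only from the one owning thread.
      void Increment() {
        // Cheaper than n.fetch_add(1): no read-modify-write is needed when
        // no other thread can write between the load and the store.
        n.store(1 + n.load(std::memory_order_relaxed),
                std::memory_order_relaxed);
      }

      // Concurrent readers (the sampler) may see a slightly stale value.
      size_t Read() const { return n.load(std::memory_order_relaxed); }
    };
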
diff --git a/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler_test.cc b/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler_test.cc
index b4c4ff92e75a..8d10a1e94030 100644
--- a/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler_test.cc
+++ b/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler_test.cc
@@ -38,6 +38,7 @@ constexpr int kProbeLength = 8;
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace container_internal {
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 class HashtablezInfoHandlePeer {
  public:
   static bool IsSampled(const HashtablezInfoHandle& h) {
@@ -46,6 +47,13 @@ class HashtablezInfoHandlePeer {
 
   static HashtablezInfo* GetInfo(HashtablezInfoHandle* h) { return h->info_; }
 };
+#else
+class HashtablezInfoHandlePeer {
+ public:
+  static bool IsSampled(const HashtablezInfoHandle&) { return false; }
+  static HashtablezInfo* GetInfo(HashtablezInfoHandle*) { return nullptr; }
+};
+#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 
 namespace {
 using ::absl::synchronization_internal::ThreadPool;
@@ -76,6 +84,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   EXPECT_EQ(info.capacity.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 0);
+  EXPECT_EQ(info.num_rehashes.load(), 0);
   EXPECT_EQ(info.max_probe_length.load(), 0);
   EXPECT_EQ(info.total_probe_length.load(), 0);
   EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
@@ -95,6 +104,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   EXPECT_EQ(info.capacity.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 0);
+  EXPECT_EQ(info.num_rehashes.load(), 0);
   EXPECT_EQ(info.max_probe_length.load(), 0);
   EXPECT_EQ(info.total_probe_length.load(), 0);
   EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
@@ -167,9 +177,10 @@ TEST(HashtablezInfoTest, RecordRehash) {
   EXPECT_EQ(info.size.load(), 2);
   EXPECT_EQ(info.total_probe_length.load(), 3);
   EXPECT_EQ(info.num_erases.load(), 0);
+  EXPECT_EQ(info.num_rehashes.load(), 1);
 }
 
-#if defined(ABSL_HASHTABLEZ_SAMPLE)
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 TEST(HashtablezSamplerTest, SmallSampleParameter) {
   SetHashtablezEnabled(true);
   SetHashtablezSampleParameter(100);
@@ -213,7 +224,6 @@ TEST(HashtablezSamplerTest, Sample) {
   }
   EXPECT_NEAR(sample_rate, 0.01, 0.005);
 }
-#endif
 
 TEST(HashtablezSamplerTest, Handle) {
   auto& sampler = HashtablezSampler::Global();
@@ -243,6 +253,8 @@ TEST(HashtablezSamplerTest, Handle) {
   });
   EXPECT_FALSE(found);
 }
+#endif
+
 
 TEST(HashtablezSamplerTest, Registration) {
   HashtablezSampler sampler;
diff --git a/third_party/abseil_cpp/absl/container/internal/inlined_vector.h b/third_party/abseil_cpp/absl/container/internal/inlined_vector.h
index 4d80b727bf4c..c98c25c44221 100644
--- a/third_party/abseil_cpp/absl/container/internal/inlined_vector.h
+++ b/third_party/abseil_cpp/absl/container/internal/inlined_vector.h
@@ -462,6 +462,9 @@ class Storage {
     Inlined inlined;
   };
 
+  template <typename... Args>
+  ABSL_ATTRIBUTE_NOINLINE reference EmplaceBackSlow(Args&&... args);
+
   Metadata metadata_;
   Data data_;
 };
@@ -542,48 +545,42 @@ template <typename T, size_t N, typename A>
 template <typename ValueAdapter>
 auto Storage<T, N, A>::Resize(ValueAdapter values, size_type new_size) -> void {
   StorageView storage_view = MakeStorageView();
-
-  IteratorValueAdapter<MoveIterator> move_values(
-      MoveIterator(storage_view.data));
-
-  AllocationTransaction allocation_tx(GetAllocPtr());
-  ConstructionTransaction construction_tx(GetAllocPtr());
-
-  absl::Span<value_type> construct_loop;
-  absl::Span<value_type> move_construct_loop;
-  absl::Span<value_type> destroy_loop;
-
-  if (new_size > storage_view.capacity) {
+  auto* const base = storage_view.data;
+  const size_type size = storage_view.size;
+  auto* alloc = GetAllocPtr();
+  if (new_size <= size) {
+    // Destroy extra old elements.
+    inlined_vector_internal::DestroyElements(alloc, base + new_size,
+                                             size - new_size);
+  } else if (new_size <= storage_view.capacity) {
+    // Construct new elements in place.
+    inlined_vector_internal::ConstructElements(alloc, base + size, &values,
+                                               new_size - size);
+  } else {
+    // Steps:
+    //  a. Allocate new backing store.
+    //  b. Construct new elements in new backing store.
+    //  c. Move existing elements from old backing store to new backing store.
+    //  d. Destroy all elements in old backing store.
+    // Use transactional wrappers for the first two steps so we can roll
+    // back if necessary due to exceptions.
+    AllocationTransaction allocation_tx(alloc);
     size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
     pointer new_data = allocation_tx.Allocate(new_capacity);
-    construct_loop = {new_data + storage_view.size,
-                      new_size - storage_view.size};
-    move_construct_loop = {new_data, storage_view.size};
-    destroy_loop = {storage_view.data, storage_view.size};
-  } else if (new_size > storage_view.size) {
-    construct_loop = {storage_view.data + storage_view.size,
-                      new_size - storage_view.size};
-  } else {
-    destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
-  }
-
-  construction_tx.Construct(construct_loop.data(), &values,
-                            construct_loop.size());
 
-  inlined_vector_internal::ConstructElements(
-      GetAllocPtr(), move_construct_loop.data(), &move_values,
-      move_construct_loop.size());
+    ConstructionTransaction construction_tx(alloc);
+    construction_tx.Construct(new_data + size, &values, new_size - size);
 
-  inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(),
-                                           destroy_loop.size());
+    IteratorValueAdapter<MoveIterator> move_values((MoveIterator(base)));
+    inlined_vector_internal::ConstructElements(alloc, new_data, &move_values,
+                                               size);
 
-  construction_tx.Commit();
-  if (allocation_tx.DidAllocate()) {
+    inlined_vector_internal::DestroyElements(alloc, base, size);
+    construction_tx.Commit();
     DeallocateIfAllocated();
     AcquireAllocatedData(&allocation_tx);
     SetIsAllocated();
   }
-
   SetSize(new_size);
 }
 
@@ -684,44 +681,50 @@ template <typename T, size_t N, typename A>
 template <typename... Args>
 auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference {
   StorageView storage_view = MakeStorageView();
+  const auto n = storage_view.size;
+  if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) {
+    // Fast path; new element fits.
+    pointer last_ptr = storage_view.data + n;
+    AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
+                               std::forward<Args>(args)...);
+    AddSize(1);
+    return *last_ptr;
+  }
+  // TODO(b/173712035): Annotate with musttail attribute to prevent regression.
+  return EmplaceBackSlow(std::forward<Args>(args)...);
+}
 
+template <typename T, size_t N, typename A>
+template <typename... Args>
+auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> reference {
+  StorageView storage_view = MakeStorageView();
   AllocationTransaction allocation_tx(GetAllocPtr());
-
   IteratorValueAdapter<MoveIterator> move_values(
       MoveIterator(storage_view.data));
-
-  pointer construct_data;
-  if (storage_view.size == storage_view.capacity) {
-    size_type new_capacity = NextCapacity(storage_view.capacity);
-    construct_data = allocation_tx.Allocate(new_capacity);
-  } else {
-    construct_data = storage_view.data;
-  }
-
+  size_type new_capacity = NextCapacity(storage_view.capacity);
+  pointer construct_data = allocation_tx.Allocate(new_capacity);
   pointer last_ptr = construct_data + storage_view.size;
 
+  // Construct new element.
   AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
                              std::forward<Args>(args)...);
-
-  if (allocation_tx.DidAllocate()) {
-    ABSL_INTERNAL_TRY {
-      inlined_vector_internal::ConstructElements(
-          GetAllocPtr(), allocation_tx.GetData(), &move_values,
-          storage_view.size);
-    }
-    ABSL_INTERNAL_CATCH_ANY {
-      AllocatorTraits::destroy(*GetAllocPtr(), last_ptr);
-      ABSL_INTERNAL_RETHROW;
-    }
-
-    inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
-                                             storage_view.size);
-
-    DeallocateIfAllocated();
-    AcquireAllocatedData(&allocation_tx);
-    SetIsAllocated();
+  // Move elements from old backing store to new backing store.
+  ABSL_INTERNAL_TRY {
+    inlined_vector_internal::ConstructElements(
+        GetAllocPtr(), allocation_tx.GetData(), &move_values,
+        storage_view.size);
   }
+  ABSL_INTERNAL_CATCH_ANY {
+    AllocatorTraits::destroy(*GetAllocPtr(), last_ptr);
+    ABSL_INTERNAL_RETHROW;
+  }
+  // Destroy elements in old backing store.
+  inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+                                           storage_view.size);
 
+  DeallocateIfAllocated();
+  AcquireAllocatedData(&allocation_tx);
+  SetIsAllocated();
   AddSize(1);
   return *last_ptr;
 }
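
The EmplaceBack split above is a code-layout optimization: the
capacity-available branch stays small enough to inline at every call site,
while the allocate-and-relocate logic is pinned out of line with
ABSL_ATTRIBUTE_NOINLINE so it cannot bloat callers. A generic sketch of the
shape (hypothetical type; it omits the transactional exception-safety
machinery the real Storage keeps):

    #include <cstddef>
    #include <new>
    #include <utility>

    template <typename T>
    class SmallVecSketch {
     public:
      // Hot path: intended to inline.
      T& push_back(T v) {
        if (size_ != capacity_) {
          T* p = new (data_ + size_) T(std::move(v));
          ++size_;
          return *p;
        }
        return PushBackSlow(std::move(v));  // cold path, out of line
      }

     private:
      // GCC/Clang spelling; ABSL_ATTRIBUTE_NOINLINE abstracts this.
      __attribute__((noinline)) T& PushBackSlow(T v) {
        std::size_t new_cap = capacity_ ? 2 * capacity_ : 4;
        T* new_data = static_cast<T*>(::operator new(new_cap * sizeof(T)));
        for (std::size_t i = 0; i < size_; ++i) {  // relocate old elements
          new (new_data + i) T(std::move(data_[i]));
          data_[i].~T();
        }
        ::operator delete(data_);
        data_ = new_data;
        capacity_ = new_cap;
        T* p = new (data_ + size_) T(std::move(v));
        ++size_;
        return *p;
      }

      T* data_ = nullptr;
      std::size_t size_ = 0;
      std::size_t capacity_ = 0;
    };

The rewritten Resize follows the same philosophy: the shrink and
grow-in-place cases are now straight-line code, and only the reallocation
case pays for the transaction objects.
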
diff --git a/third_party/abseil_cpp/absl/container/internal/layout.h b/third_party/abseil_cpp/absl/container/internal/layout.h
index 69cc85dd6679..233678331543 100644
--- a/third_party/abseil_cpp/absl/container/internal/layout.h
+++ b/third_party/abseil_cpp/absl/container/internal/layout.h
@@ -163,6 +163,7 @@
 #include <assert.h>
 #include <stddef.h>
 #include <stdint.h>
+
 #include <ostream>
 #include <string>
 #include <tuple>
@@ -170,15 +171,16 @@
 #include <typeinfo>
 #include <utility>
 
-#ifdef ADDRESS_SANITIZER
-#include <sanitizer/asan_interface.h>
-#endif
-
+#include "absl/base/config.h"
 #include "absl/meta/type_traits.h"
 #include "absl/strings/str_cat.h"
 #include "absl/types/span.h"
 #include "absl/utility/utility.h"
 
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif
+
 #if defined(__GXX_RTTI)
 #define ABSL_INTERNAL_HAS_CXA_DEMANGLE
 #endif
@@ -614,7 +616,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
   void PoisonPadding(const Char* p) const {
     static_assert(N < NumOffsets, "Index out of bounds");
     (void)p;
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
     PoisonPadding<Char, N - 1>(p);
     // The `if` is an optimization. It doesn't affect the observable behaviour.
     if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {
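
The layout.h hunks swap the ad-hoc ADDRESS_SANITIZER macro for Abseil's
ABSL_HAVE_ADDRESS_SANITIZER from absl/base/config.h, which is detected
uniformly across compilers. What PoisonPadding buys, in a minimal sketch
(only meaningful when built with -fsanitize=address):

    #include <cstddef>
    #include "absl/base/config.h"
    #ifdef ABSL_HAVE_ADDRESS_SANITIZER
    #include <sanitizer/asan_interface.h>
    #endif

    // Poison a byte range so any later access trips an ASan report; layout.h
    // applies this to the padding between consecutive arrays.
    void PoisonGapSketch(void* p, std::size_t n) {
    #ifdef ABSL_HAVE_ADDRESS_SANITIZER
      __asan_poison_memory_region(p, n);
    #else
      (void)p;
      (void)n;  // no-op outside ASan builds
    #endif
    }
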
diff --git a/third_party/abseil_cpp/absl/container/internal/layout_test.cc b/third_party/abseil_cpp/absl/container/internal/layout_test.cc
index 8f3628a1f1a5..1d7158ffc0b9 100644
--- a/third_party/abseil_cpp/absl/container/internal/layout_test.cc
+++ b/third_party/abseil_cpp/absl/container/internal/layout_test.cc
@@ -17,6 +17,7 @@
 // We need ::max_align_t because some libstdc++ versions don't provide
 // std::max_align_t
 #include <stddef.h>
+
 #include <cstdint>
 #include <memory>
 #include <sstream>
@@ -24,6 +25,7 @@
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
+#include "absl/base/config.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/types/span.h"
 
@@ -126,8 +128,10 @@ TEST(Layout, ElementTypes) {
   {
     using L = Layout<int32_t, int32_t>;
     SameType<std::tuple<int32_t, int32_t>, L::ElementTypes>();
-    SameType<std::tuple<int32_t, int32_t>, decltype(L::Partial())::ElementTypes>();
-    SameType<std::tuple<int32_t, int32_t>, decltype(L::Partial(0))::ElementTypes>();
+    SameType<std::tuple<int32_t, int32_t>,
+             decltype(L::Partial())::ElementTypes>();
+    SameType<std::tuple<int32_t, int32_t>,
+             decltype(L::Partial(0))::ElementTypes>();
   }
   {
     using L = Layout<int8_t, int32_t, Int128>;
@@ -366,18 +370,21 @@ TEST(Layout, PointerByIndex) {
   {
     using L = Layout<int32_t>;
     EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
-    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
     EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<0>(p))));
   }
   {
     using L = Layout<int32_t, int32_t>;
     EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
-    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
-    EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<1>(p))));
     EXPECT_EQ(0,
-              Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<0>(p))));
+              Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
     EXPECT_EQ(12,
-              Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<1>(p))));
+              Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<1>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<0>(p))));
+    EXPECT_EQ(
+        12, Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<1>(p))));
     EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<0>(p))));
     EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<1>(p))));
   }
@@ -385,39 +392,44 @@ TEST(Layout, PointerByIndex) {
     using L = Layout<int8_t, int32_t, Int128>;
     EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<0>(p))));
     EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<0>(p))));
-    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<1>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<1>(p))));
     EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<0>(p))));
-    EXPECT_EQ(4, Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<1>(p))));
+    EXPECT_EQ(4,
+              Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<1>(p))));
     EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<0>(p))));
-    EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<1>(p))));
+    EXPECT_EQ(8,
+              Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<1>(p))));
     EXPECT_EQ(0,
               Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<0>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<1>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<1>(p))));
     EXPECT_EQ(0,
               Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<2>(p))));
     EXPECT_EQ(0,
               Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<0>(p))));
-    EXPECT_EQ(4,
-              Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<1>(p))));
+    EXPECT_EQ(
+        4, Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<1>(p))));
     EXPECT_EQ(8,
               Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<2>(p))));
     EXPECT_EQ(0,
               Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<0>(p))));
-    EXPECT_EQ(8,
-              Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
+    EXPECT_EQ(
+        8, Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
     EXPECT_EQ(24,
               Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<2>(p))));
     EXPECT_EQ(
         0, Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
     EXPECT_EQ(
-        0, Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
+        0,
+        Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
     EXPECT_EQ(
         0, Distance(p, Type<const Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
     EXPECT_EQ(
         0, Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
     EXPECT_EQ(
-        4, Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
+        4,
+        Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
     EXPECT_EQ(
         8, Distance(p, Type<const Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
     EXPECT_EQ(
@@ -426,7 +438,8 @@ TEST(Layout, PointerByIndex) {
         24,
         Distance(p, Type<const Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
     EXPECT_EQ(
-        8, Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
+        8,
+        Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
     EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L(5, 3, 1).Pointer<0>(p))));
     EXPECT_EQ(24, Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<2>(p))));
     EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<1>(p))));
@@ -437,75 +450,78 @@ TEST(Layout, PointerByType) {
   alignas(max_align_t) const unsigned char p[100] = {};
   {
     using L = Layout<int32_t>;
-    EXPECT_EQ(0,
-              Distance(p, Type<const int32_t*>(L::Partial().Pointer<int32_t>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
     EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<int32_t>(p))));
   }
   {
     using L = Layout<int8_t, int32_t, Int128>;
-    EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<int8_t>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
-    EXPECT_EQ(4,
-              Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
-    EXPECT_EQ(8,
-              Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
     EXPECT_EQ(
-        0, Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
+        0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<int8_t>(p))));
     EXPECT_EQ(
-        0, Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
+        0, Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
     EXPECT_EQ(
         0,
-        Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
-    EXPECT_EQ(
-        0, Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
+        Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
     EXPECT_EQ(
-        4, Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
+        0, Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
     EXPECT_EQ(
-        8,
-        Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
+        4,
+        Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
     EXPECT_EQ(
-        0, Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
+        0, Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
     EXPECT_EQ(
-        8, Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
+        8,
+        Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
     EXPECT_EQ(
-        24,
-        Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
+        0,
+        Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(
+                                 L::Partial(0, 0).Pointer<int32_t>(p))));
     EXPECT_EQ(
         0,
-        Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
+        Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
     EXPECT_EQ(
         0,
-        Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
-    EXPECT_EQ(0, Distance(p, Type<const Int128*>(
-                                 L::Partial(0, 0, 0).Pointer<Int128>(p))));
+        Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(4, Distance(p, Type<const int32_t*>(
+                                 L::Partial(1, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        8,
+        Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
     EXPECT_EQ(
         0,
-        Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
+        Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
+    EXPECT_EQ(8, Distance(p, Type<const int32_t*>(
+                                 L::Partial(5, 3).Pointer<int32_t>(p))));
     EXPECT_EQ(
-        4,
-        Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
+        24,
+        Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
+                                 L::Partial(0, 0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(
+                                 L::Partial(0, 0, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const Int128*>(
+                                 L::Partial(0, 0, 0).Pointer<Int128>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
+                                 L::Partial(1, 0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(4, Distance(p, Type<const int32_t*>(
+                                 L::Partial(1, 0, 0).Pointer<int32_t>(p))));
     EXPECT_EQ(8, Distance(p, Type<const Int128*>(
                                  L::Partial(1, 0, 0).Pointer<Int128>(p))));
-    EXPECT_EQ(
-        0,
-        Distance(p, Type<const int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
+                                 L::Partial(5, 3, 1).Pointer<int8_t>(p))));
     EXPECT_EQ(24, Distance(p, Type<const Int128*>(
                                   L::Partial(5, 3, 1).Pointer<Int128>(p))));
-    EXPECT_EQ(
-        8,
-        Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
+    EXPECT_EQ(8, Distance(p, Type<const int32_t*>(
+                                 L::Partial(5, 3, 1).Pointer<int32_t>(p))));
     EXPECT_EQ(24,
               Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
-    EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
   }
 }
 
@@ -546,15 +562,18 @@ TEST(Layout, MutablePointerByIndex) {
     EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
     EXPECT_EQ(24, Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<2>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
-    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
     EXPECT_EQ(0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
-    EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
+    EXPECT_EQ(4,
+              Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
     EXPECT_EQ(8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<0>(p))));
     EXPECT_EQ(24,
               Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
-    EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
+    EXPECT_EQ(8,
+              Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<0>(p))));
     EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<2>(p))));
     EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<1>(p))));
@@ -566,48 +585,61 @@ TEST(Layout, MutablePointerByType) {
   {
     using L = Layout<int32_t>;
     EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<int32_t>(p))));
-    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
     EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3).Pointer<int32_t>(p))));
   }
   {
     using L = Layout<int8_t, int32_t, Int128>;
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial().Pointer<int8_t>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
-    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
-    EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
+    EXPECT_EQ(4,
+              Distance(p, Type<int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
-    EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
-    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
-    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(8,
+              Distance(p, Type<int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
     EXPECT_EQ(0,
               Distance(p, Type<Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
-    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
-    EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
     EXPECT_EQ(8,
               Distance(p, Type<Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
-    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
-    EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
     EXPECT_EQ(24,
               Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
     EXPECT_EQ(
         0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<Int128>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
-    EXPECT_EQ(4,
-              Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        4,
+        Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
     EXPECT_EQ(
         8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<Int128>(p))));
-    EXPECT_EQ(0,
-              Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
     EXPECT_EQ(
         24, Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<Int128>(p))));
-    EXPECT_EQ(8,
-              Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        8,
+        Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
     EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<int8_t>(p))));
     EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
     EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
@@ -788,67 +820,72 @@ TEST(Layout, SliceByIndexData) {
   {
     using L = Layout<int32_t>;
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<const int32_t>>(L::Partial(0).Slice<0>(p)).data()));
+        0, Distance(
+               p, Type<Span<const int32_t>>(L::Partial(0).Slice<0>(p)).data()));
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
-    EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(L(3).Slice<0>(p)).data()));
+        0, Distance(
+               p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+    EXPECT_EQ(0,
+              Distance(p, Type<Span<const int32_t>>(L(3).Slice<0>(p)).data()));
   }
   {
     using L = Layout<int32_t, int32_t>;
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+        0, Distance(
+               p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
     EXPECT_EQ(
         0,
-        Distance(p,
-                 Type<Span<const int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
+        Distance(
+            p, Type<Span<const int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
     EXPECT_EQ(
         12,
-        Distance(p,
-                 Type<Span<const int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<0>(p)).data()));
-    EXPECT_EQ(12,
-              Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<1>(p)).data()));
+        Distance(
+            p, Type<Span<const int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        12, Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<1>(p)).data()));
   }
   {
     using L = Layout<int8_t, int32_t, Int128>;
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<const int8_t>>(L::Partial(0).Slice<0>(p)).data()));
-    EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<const int8_t>>(L::Partial(1).Slice<0>(p)).data()));
+        0, Distance(
+               p, Type<Span<const int8_t>>(L::Partial(0).Slice<0>(p)).data()));
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<const int8_t>>(L::Partial(5).Slice<0>(p)).data()));
+        0, Distance(
+               p, Type<Span<const int8_t>>(L::Partial(1).Slice<0>(p)).data()));
     EXPECT_EQ(
         0, Distance(
-               p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
+               p, Type<Span<const int8_t>>(L::Partial(5).Slice<0>(p)).data()));
     EXPECT_EQ(
         0,
-        Distance(p,
-                 Type<Span<const int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
+        Distance(
+            p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
     EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
+        0,
+        Distance(
+            p, Type<Span<const int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
     EXPECT_EQ(
         4,
-        Distance(p,
-                 Type<Span<const int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
+        Distance(
+            p, Type<Span<const int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
     EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
+        0,
+        Distance(
+            p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
     EXPECT_EQ(
         8,
-        Distance(p,
-                 Type<Span<const int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
+        Distance(
+            p, Type<Span<const int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
     EXPECT_EQ(
         0,
         Distance(
-            p, Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
+            p,
+            Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
     EXPECT_EQ(
         0,
         Distance(
@@ -862,7 +899,8 @@ TEST(Layout, SliceByIndexData) {
     EXPECT_EQ(
         0,
         Distance(
-            p, Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
+            p,
+            Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
     EXPECT_EQ(
         4,
         Distance(
@@ -876,7 +914,8 @@ TEST(Layout, SliceByIndexData) {
     EXPECT_EQ(
         0,
         Distance(
-            p, Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
+            p,
+            Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
     EXPECT_EQ(
         24,
         Distance(
@@ -888,12 +927,14 @@ TEST(Layout, SliceByIndexData) {
             p,
             Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
     EXPECT_EQ(
-        0, Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
+        0,
+        Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
     EXPECT_EQ(
         24,
         Distance(p, Type<Span<const Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
     EXPECT_EQ(
-        8, Distance(p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
+        8,
+        Distance(p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
   }
 }
 
@@ -904,98 +945,94 @@ TEST(Layout, SliceByTypeData) {
     EXPECT_EQ(
         0,
         Distance(
-            p, Type<Span<const int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
+            p,
+            Type<Span<const int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
     EXPECT_EQ(
         0,
         Distance(
-            p, Type<Span<const int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
+            p,
+            Type<Span<const int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
     EXPECT_EQ(
-        0, Distance(p, Type<Span<const int32_t>>(L(3).Slice<int32_t>(p)).data()));
+        0,
+        Distance(p, Type<Span<const int32_t>>(L(3).Slice<int32_t>(p)).data()));
   }
   {
     using L = Layout<int8_t, int32_t, Int128>;
     EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<const int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<const int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<const int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        0,
-        Distance(
-            p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
         0,
         Distance(
             p,
-            Type<Span<const int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
+            Type<Span<const int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         0,
         Distance(
-            p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        4,
-        Distance(
             p,
-            Type<Span<const int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
+            Type<Span<const int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         0,
         Distance(
-            p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        8,
-        Distance(
             p,
-            Type<Span<const int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
+            Type<Span<const int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         0,
-        Distance(
-            p,
-            Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
+        Distance(p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<int8_t>(p))
+                        .data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(0, 0).Slice<int32_t>(p))
+                                 .data()));
     EXPECT_EQ(
         0,
-        Distance(p, Type<Span<const int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p))
+        Distance(p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<int8_t>(p))
                         .data()));
-    EXPECT_EQ(0, Distance(p, Type<Span<const Int128>>(
-                                 L::Partial(0, 0, 0).Slice<Int128>(p))
+    EXPECT_EQ(4, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(1, 0).Slice<int32_t>(p))
                                  .data()));
     EXPECT_EQ(
         0,
-        Distance(
-            p,
-            Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
-    EXPECT_EQ(
-        4,
-        Distance(p, Type<Span<const int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p))
+        Distance(p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<int8_t>(p))
                         .data()));
+    EXPECT_EQ(8, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(5, 3).Slice<int32_t>(p))
+                                 .data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
+                                 L::Partial(0, 0, 0).Slice<int8_t>(p))
+                                 .data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(0, 0, 0).Slice<int32_t>(p))
+                                 .data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const Int128>>(
+                                 L::Partial(0, 0, 0).Slice<Int128>(p))
+                                 .data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
+                                 L::Partial(1, 0, 0).Slice<int8_t>(p))
+                                 .data()));
+    EXPECT_EQ(4, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(1, 0, 0).Slice<int32_t>(p))
+                                 .data()));
     EXPECT_EQ(8, Distance(p, Type<Span<const Int128>>(
                                  L::Partial(1, 0, 0).Slice<Int128>(p))
                                  .data()));
-    EXPECT_EQ(
-        0,
-        Distance(
-            p,
-            Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
+                                 L::Partial(5, 3, 1).Slice<int8_t>(p))
+                                 .data()));
     EXPECT_EQ(24, Distance(p, Type<Span<const Int128>>(
                                   L::Partial(5, 3, 1).Slice<Int128>(p))
                                   .data()));
-    EXPECT_EQ(
-        8,
-        Distance(p, Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p))
-                        .data()));
+    EXPECT_EQ(8, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(5, 3, 1).Slice<int32_t>(p))
+                                 .data()));
     EXPECT_EQ(
         0,
-        Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
+        Distance(p,
+                 Type<Span<const int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         24,
         Distance(p,
                  Type<Span<const Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
     EXPECT_EQ(
-        8, Distance(
-               p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
+        8,
+        Distance(
+            p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
   }
 }
 
@@ -1003,18 +1040,19 @@ TEST(Layout, MutableSliceByIndexData) {
   alignas(max_align_t) unsigned char p[100];
   {
     using L = Layout<int32_t>;
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<0>(p)).data()));
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
     EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<0>(p)).data()));
   }
   {
     using L = Layout<int32_t, int32_t>;
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
     EXPECT_EQ(
-        0, Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
+        0, Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
     EXPECT_EQ(
         12,
         Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
@@ -1023,55 +1061,63 @@ TEST(Layout, MutableSliceByIndexData) {
   }
   {
     using L = Layout<int8_t, int32_t, Int128>;
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<0>(p)).data()));
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<0>(p)).data()));
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<0>(p)).data()));
-    EXPECT_EQ(
-        0, Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
     EXPECT_EQ(
-        0, Distance(p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
+        0, Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<0>(p)).data()));
     EXPECT_EQ(
-        0, Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
+        0, Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<0>(p)).data()));
     EXPECT_EQ(
-        4, Distance(p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
+        0, Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<0>(p)).data()));
     EXPECT_EQ(
-        0, Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
+        0,
+        Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
     EXPECT_EQ(
-        8, Distance(p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
+        0,
+        Distance(p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
     EXPECT_EQ(
         0,
-        Distance(p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
+        Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        4,
+        Distance(p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
     EXPECT_EQ(
         0,
-        Distance(p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data()));
+        Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        8,
+        Distance(p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(
+               p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(
+               p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data()));
     EXPECT_EQ(
         0, Distance(
                p, Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<2>(p)).data()));
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
+        0, Distance(
+               p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
     EXPECT_EQ(
-        4,
-        Distance(p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data()));
+        4, Distance(
+               p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data()));
     EXPECT_EQ(
         8, Distance(
                p, Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<2>(p)).data()));
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
+        0, Distance(
+               p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
     EXPECT_EQ(
         24, Distance(
                 p, Type<Span<Int128>>(L::Partial(5, 3, 1).Slice<2>(p)).data()));
     EXPECT_EQ(
-        8,
-        Distance(p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
-    EXPECT_EQ(0, Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
+        8, Distance(
+               p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
+    EXPECT_EQ(0,
+              Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
     EXPECT_EQ(24,
               Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
-    EXPECT_EQ(8, Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
+    EXPECT_EQ(8,
+              Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
   }
 }
 
@@ -1080,66 +1126,84 @@ TEST(Layout, MutableSliceByTypeData) {
   {
     using L = Layout<int32_t>;
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
+        0, Distance(
+               p, Type<Span<int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
     EXPECT_EQ(
-        0,
-        Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
-    EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<int32_t>(p)).data()));
+        0, Distance(
+               p, Type<Span<int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
+    EXPECT_EQ(0,
+              Distance(p, Type<Span<int32_t>>(L(3).Slice<int32_t>(p)).data()));
   }
   {
     using L = Layout<int8_t, int32_t, Int128>;
     EXPECT_EQ(
-        0, Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
+        0,
+        Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
-        0, Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
+        0,
+        Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
-        0, Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
+        0,
+        Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         0,
-        Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
+        Distance(p,
+                 Type<Span<int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
+        0,
+        Distance(
+            p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
     EXPECT_EQ(
         0,
-        Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
+        Distance(p,
+                 Type<Span<int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
-        4, Distance(
-               p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
+        4,
+        Distance(
+            p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
     EXPECT_EQ(
         0,
-        Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
+        Distance(p,
+                 Type<Span<int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
-        8, Distance(
-               p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
+        8,
+        Distance(
+            p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
     EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
+        0,
+        Distance(
+            p,
+            Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         0,
         Distance(
-            p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p)).data()));
+            p,
+            Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p)).data()));
     EXPECT_EQ(
         0,
         Distance(
             p,
             Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<Int128>(p)).data()));
     EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
+        0,
+        Distance(
+            p,
+            Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         4,
         Distance(
-            p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p)).data()));
+            p,
+            Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p)).data()));
     EXPECT_EQ(
         8,
         Distance(
             p,
             Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<Int128>(p)).data()));
     EXPECT_EQ(
-        0, Distance(
-               p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
+        0,
+        Distance(
+            p,
+            Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         24,
         Distance(
@@ -1148,14 +1212,16 @@ TEST(Layout, MutableSliceByTypeData) {
     EXPECT_EQ(
         8,
         Distance(
-            p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p)).data()));
-    EXPECT_EQ(0,
-              Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
+            p,
+            Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
     EXPECT_EQ(
         24,
         Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
     EXPECT_EQ(
-        8, Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
+        8,
+        Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
   }
 }
 
@@ -1254,17 +1320,17 @@ TEST(Layout, MutableSlices) {
   }
   {
     const auto x = L::Partial(1, 2, 3);
-    EXPECT_THAT(
-        (Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(x.Slices(p))),
-        Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
-              IsSameSlice(x.Slice<2>(p))));
+    EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
+                    x.Slices(p))),
+                Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+                      IsSameSlice(x.Slice<2>(p))));
   }
   {
     const L x(1, 2, 3);
-    EXPECT_THAT(
-        (Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(x.Slices(p))),
-        Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
-              IsSameSlice(x.Slice<2>(p))));
+    EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
+                    x.Slices(p))),
+                Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+                      IsSameSlice(x.Slice<2>(p))));
   }
 }
 
@@ -1314,7 +1380,7 @@ struct Region {
 };
 
 void ExpectRegionPoisoned(const unsigned char* p, size_t n, bool poisoned) {
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
   for (size_t i = 0; i != n; ++i) {
     EXPECT_EQ(poisoned, __asan_address_is_poisoned(p + i));
   }
@@ -1396,7 +1462,8 @@ TEST(Layout, DebugString) {
               x.DebugString());
   }
   {
-    constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3);
+    constexpr auto x =
+        Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3);
     EXPECT_EQ(
         "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
         "@16" +
@@ -1404,7 +1471,8 @@ TEST(Layout, DebugString) {
         x.DebugString());
   }
   {
-    constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3, 4);
+    constexpr auto x =
+        Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3, 4);
     EXPECT_EQ(
         "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
         "@16" +
diff --git a/third_party/abseil_cpp/absl/container/internal/raw_hash_set.cc b/third_party/abseil_cpp/absl/container/internal/raw_hash_set.cc
index 919ac0740573..bfef071f29e6 100644
--- a/third_party/abseil_cpp/absl/container/internal/raw_hash_set.cc
+++ b/third_party/abseil_cpp/absl/container/internal/raw_hash_set.cc
@@ -27,7 +27,7 @@ constexpr size_t Group::kWidth;
 
 // Returns "random" seed.
 inline size_t RandomSeed() {
-#if ABSL_HAVE_THREAD_LOCAL
+#ifdef ABSL_HAVE_THREAD_LOCAL
   static thread_local size_t counter = 0;
   size_t value = ++counter;
 #else   // ABSL_HAVE_THREAD_LOCAL
@@ -43,6 +43,18 @@ bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl) {
   return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
 }
 
+void ConvertDeletedToEmptyAndFullToDeleted(
+    ctrl_t* ctrl, size_t capacity) {
+  assert(ctrl[capacity] == kSentinel);
+  assert(IsValidCapacity(capacity));
+  for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
+    Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
+  }
+  // Copy the cloned ctrl bytes.
+  std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
+  ctrl[capacity] = kSentinel;
+}
+
 }  // namespace container_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
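
ConvertDeletedToEmptyAndFullToDeleted moves out of line here; it implements the ctrl-byte rewrite that drop_deletes_without_resize relies on (DELETED -> EMPTY, EMPTY -> EMPTY, FULL -> DELETED). A scalar sketch of the per-byte transition, assuming the usual ctrl encodings (kEmpty = -128, kDeleted = -2, kSentinel = -1, FULL >= 0) rather than the vectorized Group implementation above:

    #include <cstdint>
    #include <cstdio>

    using ctrl_t = int8_t;
    constexpr ctrl_t kEmpty = -128, kDeleted = -2, kSentinel = -1;

    ctrl_t ConvertOne(ctrl_t c) {
      if (c >= 0) return kDeleted;   // FULL -> DELETED
      if (c == kSentinel) return c;  // the caller restores the sentinel
      return kEmpty;                 // DELETED and EMPTY -> EMPTY
    }

    int main() {
      ctrl_t ctrl[] = {5, kDeleted, kEmpty, 17};
      for (ctrl_t& c : ctrl) c = ConvertOne(c);
      for (ctrl_t c : ctrl) std::printf("%d ", c);  // prints: -2 -128 -128 -2
    }
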
diff --git a/third_party/abseil_cpp/absl/container/internal/raw_hash_set.h b/third_party/abseil_cpp/absl/container/internal/raw_hash_set.h
index df0f2b2b54be..02158c4e0886 100644
--- a/third_party/abseil_cpp/absl/container/internal/raw_hash_set.h
+++ b/third_party/abseil_cpp/absl/container/internal/raw_hash_set.h
@@ -122,6 +122,16 @@ namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 
+template <typename AllocType>
+void SwapAlloc(AllocType& lhs, AllocType& rhs,
+               std::true_type /* propagate_on_container_swap */) {
+  using std::swap;
+  swap(lhs, rhs);
+}
+template <typename AllocType>
+void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
+               std::false_type /* propagate_on_container_swap */) {}
+
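
SwapAlloc dispatches on propagate_on_container_swap at compile time, so the non-propagating overload never even names swap() for the allocator type. A self-contained sketch of the idiom; SwapContainers is a hypothetical caller standing in for raw_hash_set::swap:

    #include <memory>
    #include <type_traits>
    #include <utility>

    template <typename AllocType>
    void SwapAlloc(AllocType& lhs, AllocType& rhs,
                   std::true_type /* propagate_on_container_swap */) {
      using std::swap;
      swap(lhs, rhs);
    }
    template <typename AllocType>
    void SwapAlloc(AllocType&, AllocType&,
                   std::false_type /* propagate_on_container_swap */) {}

    template <typename Alloc>
    void SwapContainers(Alloc& a, Alloc& b) {
      // The allocator's trait selects the overload; no runtime branch.
      SwapAlloc(a, b, typename std::allocator_traits<
                          Alloc>::propagate_on_container_swap{});
    }

    int main() {
      std::allocator<int> a, b;
      // std::allocator does not propagate on swap, so the no-op overload is
      // chosen (harmless: all std::allocator instances compare equal).
      SwapContainers(a, b);
    }
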
 template <size_t Width>
 class probe_seq {
  public:
@@ -169,10 +179,14 @@ struct IsDecomposable<
 
 // TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
 template <class T>
-constexpr bool IsNoThrowSwappable() {
+constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
   using std::swap;
   return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
 }
+template <class T>
+constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
+  return false;
+}
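
The new std::false_type overload lets the noexcept specification of swap() further down ask about the allocator without instantiating a swap expression for types that are not swappable at all. A standalone sketch of the pair; NotSwappable is a hypothetical type used only for illustration:

    #include <type_traits>
    #include <utility>

    template <class T>
    constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
      using std::swap;
      return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
    }
    template <class T>
    constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
      return false;
    }

    // Deleting the copy operations also suppresses the implicit moves, so
    // std::swap does not apply to this type.
    struct NotSwappable {
      NotSwappable() = default;
      NotSwappable(const NotSwappable&) = delete;
      NotSwappable& operator=(const NotSwappable&) = delete;
    };

    static_assert(IsNoThrowSwappable<int>(), "int swap is noexcept");
    // The false_type path returns false without ever naming
    // swap(NotSwappable&, NotSwappable&):
    static_assert(!IsNoThrowSwappable<NotSwappable>(std::false_type{}), "");

    int main() {}
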
 
 template <typename T>
 int TrailingZeros(T x) {
@@ -458,17 +472,7 @@ inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
 //   DELETED -> EMPTY
 //   EMPTY -> EMPTY
 //   FULL -> DELETED
-inline void ConvertDeletedToEmptyAndFullToDeleted(
-    ctrl_t* ctrl, size_t capacity) {
-  assert(ctrl[capacity] == kSentinel);
-  assert(IsValidCapacity(capacity));
-  for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
-    Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
-  }
-  // Copy the cloned ctrl bytes.
-  std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
-  ctrl[capacity] = kSentinel;
-}
+void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
 
 // Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
 inline size_t NormalizeCapacity(size_t n) {
@@ -497,6 +501,76 @@ inline size_t GrowthToLowerboundCapacity(size_t growth) {
   return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
 }
 
+inline void AssertIsFull(ctrl_t* ctrl) {
+  ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) &&
+                        "Invalid operation on iterator. The element might have "
+                        "been erased, or the table might have rehashed.");
+}
+
+inline void AssertIsValid(ctrl_t* ctrl) {
+  ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) &&
+                        "Invalid operation on iterator. The element might have "
+                        "been erased, or the table might have rehashed.");
+}
+
+struct FindInfo {
+  size_t offset;
+  size_t probe_length;
+};
+
+// The representation of the table has two modes:
+//  - small: For capacities < kWidth - 1
+//  - large: For the rest.
+//
+// Differences:
+//  - In small mode we are able to use the whole capacity. The extra control
+//  bytes give us at least one "empty" control byte to stop the iteration.
+//  This is important to make 1 a valid capacity.
+//
+//  - In small mode only the first `capacity()` control bytes after the
+//  sentinel are valid. The rest contain dummy kEmpty values that do not
+//  represent a real slot. This is important to take into account in
+//  find_first_non_full(), where we never try ShouldInsertBackwards() for
+//  small tables.
+inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
+
+inline probe_seq<Group::kWidth> probe(ctrl_t* ctrl, size_t hash,
+                                      size_t capacity) {
+  return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
+}
+
+// Probes the raw_hash_set with the probe sequence for hash and returns a
+// FindInfo with the offset of the first empty or deleted slot and the probe
+// length.
+// NOTE: this function must work with tables having both kEmpty and kDeleted
+// in one group. Such tables appear during drop_deletes_without_resize.
+// This function is useful on insertion paths where:
+// - the input is already a set
+// - there are enough slots
+// - the element with the hash is not in the table
+inline FindInfo find_first_non_full(ctrl_t* ctrl, size_t hash,
+                                    size_t capacity) {
+  auto seq = probe(ctrl, hash, capacity);
+  while (true) {
+    Group g{ctrl + seq.offset()};
+    auto mask = g.MatchEmptyOrDeleted();
+    if (mask) {
+#if !defined(NDEBUG)
+      // We want to add entropy even when ASLR is not enabled.
+      // In debug builds we randomly insert at either the front or the back
+      // of the group.
+      // TODO(kfm,sbenza): revisit after we do unconditional mixing
+      if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) {
+        return {seq.offset(mask.HighestBitSet()), seq.index()};
+      }
+#endif
+      return {seq.offset(mask.LowestBitSet()), seq.index()};
+    }
+    seq.next();
+    assert(seq.index() < capacity && "full table!");
+  }
+}
+
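
find_first_non_full() walks the probe_seq defined near the top of this header: the first group sits at H1(hash) & capacity, and each step advances by one more Group::kWidth than the last (triangular offsets), which visits every group exactly once when capacity + 1 is a power of two. A small standalone trace of that schedule, assuming the 16-byte SSE2 group width:

    #include <cstddef>
    #include <cstdio>

    int main() {
      constexpr size_t kWidth = 16;  // Group::kWidth on SSE2 targets
      const size_t capacity = 63;    // 2^6 - 1, a valid capacity
      size_t offset = 5 & capacity;  // pretend H1(hash) == 5
      for (size_t index = 0, stride = kWidth; index < 4;
           ++index, stride += kWidth) {
        std::printf("probe %zu looks at group offset %zu\n", index, offset);
        offset = (offset + stride) & capacity;  // probe_seq::next()
      }
    }
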
 // Policy: a policy defines how to perform different operations on
 // the slots of the hashtable (see hash_policy_traits.h for the full interface
 // of policy).
@@ -511,7 +585,8 @@ inline size_t GrowthToLowerboundCapacity(size_t growth) {
 // if they are equal, false if they are not. If two keys compare equal, then
 // their hash values as defined by Hash MUST be equal.
 //
-// Allocator: an Allocator [https://devdocs.io/cpp/concept/allocator] with which
+// Allocator: an Allocator
+// [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
 // the storage of the hashtable will be allocated and the elements will be
 // constructed and destroyed.
 template <class Policy, class Hash, class Eq, class Alloc>
@@ -617,7 +692,7 @@ class raw_hash_set {
 
     // PRECONDITION: not an end() iterator.
     reference operator*() const {
-      assert_is_full();
+      AssertIsFull(ctrl_);
       return PolicyTraits::element(slot_);
     }
 
@@ -626,7 +701,7 @@ class raw_hash_set {
 
     // PRECONDITION: not an end() iterator.
     iterator& operator++() {
-      assert_is_full();
+      AssertIsFull(ctrl_);
       ++ctrl_;
       ++slot_;
       skip_empty_or_deleted();
@@ -640,8 +715,8 @@ class raw_hash_set {
     }
 
     friend bool operator==(const iterator& a, const iterator& b) {
-      a.assert_is_valid();
-      b.assert_is_valid();
+      AssertIsValid(a.ctrl_);
+      AssertIsValid(b.ctrl_);
       return a.ctrl_ == b.ctrl_;
     }
     friend bool operator!=(const iterator& a, const iterator& b) {
@@ -655,13 +730,6 @@ class raw_hash_set {
       ABSL_INTERNAL_ASSUME(ctrl != nullptr);
     }
 
-    void assert_is_full() const {
-      ABSL_HARDENING_ASSERT(ctrl_ != nullptr && IsFull(*ctrl_));
-    }
-    void assert_is_valid() const {
-      ABSL_HARDENING_ASSERT(ctrl_ == nullptr || IsFull(*ctrl_));
-    }
-
     void skip_empty_or_deleted() {
       while (IsEmptyOrDeleted(*ctrl_)) {
         uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
@@ -730,7 +798,6 @@ class raw_hash_set {
       : ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) {
     if (bucket_count) {
       capacity_ = NormalizeCapacity(bucket_count);
-      reset_growth_left();
       initialize_slots();
     }
   }
@@ -836,7 +903,7 @@ class raw_hash_set {
     // than a full `insert`.
     for (const auto& v : that) {
       const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
-      auto target = find_first_non_full(hash);
+      auto target = find_first_non_full(ctrl_, hash, capacity_);
       set_ctrl(target.offset, H2(hash));
       emplace_at(target.offset, v);
       infoz_.RecordInsert(hash, target.probe_length);
@@ -1045,7 +1112,9 @@ class raw_hash_set {
   }
 
   iterator insert(const_iterator, node_type&& node) {
-    return insert(std::move(node)).first;
+    auto res = insert(std::move(node));
+    node = std::move(res.node);
+    return res.position;
   }
 
   // This overload kicks in if we can deduce the key from args. This enables us
@@ -1174,7 +1243,7 @@ class raw_hash_set {
   // This overload is necessary because otherwise erase<K>(const K&) would be
   // a better match if non-const iterator is passed as an argument.
   void erase(iterator it) {
-    it.assert_is_full();
+    AssertIsFull(it.ctrl_);
     PolicyTraits::destroy(&alloc_ref(), it.slot_);
     erase_meta_only(it);
   }
@@ -1208,7 +1277,7 @@ class raw_hash_set {
   }
 
   node_type extract(const_iterator position) {
-    position.inner_.assert_is_full();
+    AssertIsFull(position.inner_.ctrl_);
     auto node =
         CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
     erase_meta_only(position);
@@ -1225,8 +1294,8 @@ class raw_hash_set {
 
   void swap(raw_hash_set& that) noexcept(
       IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
-      (!AllocTraits::propagate_on_container_swap::value ||
-       IsNoThrowSwappable<allocator_type>())) {
+      IsNoThrowSwappable<allocator_type>(
+          typename AllocTraits::propagate_on_container_swap{})) {
     using std::swap;
     swap(ctrl_, that.ctrl_);
     swap(slots_, that.slots_);
@@ -1236,12 +1305,8 @@ class raw_hash_set {
     swap(hash_ref(), that.hash_ref());
     swap(eq_ref(), that.eq_ref());
     swap(infoz_, that.infoz_);
-    if (AllocTraits::propagate_on_container_swap::value) {
-      swap(alloc_ref(), that.alloc_ref());
-    } else {
-      // If the allocators do not compare equal it is officially undefined
-      // behavior. We choose to do nothing.
-    }
+    SwapAlloc(alloc_ref(), that.alloc_ref(),
+              typename AllocTraits::propagate_on_container_swap{});
   }
 
   void rehash(size_t n) {
@@ -1260,7 +1325,12 @@ class raw_hash_set {
     }
   }
 
-  void reserve(size_t n) { rehash(GrowthToLowerboundCapacity(n)); }
+  void reserve(size_t n) {
+    size_t m = GrowthToLowerboundCapacity(n);
+    if (m > capacity_) {
+      resize(NormalizeCapacity(m));
+    }
+  }
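
reserve() is now a pure grow operation: it computes the lower-bound capacity for n and resizes only when that exceeds the current capacity, whereas the old rehash()-based version could shrink the table. A hedged usage sketch against absl::flat_hash_set, which is built on raw_hash_set:

    #include <cassert>
    #include <cstddef>

    #include "absl/container/flat_hash_set.h"

    int main() {
      absl::flat_hash_set<int> s;
      s.reserve(100);  // grows to hold at least 100 elements
      const size_t cap = s.bucket_count();
      s.reserve(1);    // no-op now: the requested capacity is not larger
      assert(s.bucket_count() == cap);
    }
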
 
   // Extension API: support for heterogeneous keys.
   //
@@ -1285,7 +1355,7 @@ class raw_hash_set {
   void prefetch(const key_arg<K>& key) const {
     (void)key;
 #if defined(__GNUC__)
-    auto seq = probe(hash_ref()(key));
+    auto seq = probe(ctrl_, hash_ref()(key), capacity_);
     __builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
     __builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
 #endif  // __GNUC__
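
prefetch() now routes through the free probe() helper with explicit ctrl and capacity arguments. A short usage sketch, assuming absl::flat_hash_map (also built on raw_hash_set) and a GCC or Clang toolchain where the __builtin_prefetch path is compiled in:

    #include "absl/container/flat_hash_map.h"

    int main() {
      absl::flat_hash_map<int, int> m = {{42, 1}};
      m.prefetch(42);        // warm the ctrl and slot cache lines for key 42
      auto it = m.find(42);  // the lookup then probes already-cached lines
      return it != m.end() ? 0 : 1;
    }
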
@@ -1300,7 +1370,7 @@ class raw_hash_set {
   // called heterogeneous key support.
   template <class K = key_type>
   iterator find(const key_arg<K>& key, size_t hash) {
-    auto seq = probe(hash);
+    auto seq = probe(ctrl_, hash, capacity_);
     while (true) {
       Group g{ctrl_ + seq.offset()};
       for (int i : g.Match(H2(hash))) {
@@ -1311,6 +1381,7 @@ class raw_hash_set {
       }
       if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
       seq.next();
+      assert(seq.index() < capacity_ && "full table!");
     }
   }
   template <class K = key_type>
@@ -1521,7 +1592,7 @@ class raw_hash_set {
       if (IsFull(old_ctrl[i])) {
         size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
                                           PolicyTraits::element(old_slots + i));
-        auto target = find_first_non_full(hash);
+        auto target = find_first_non_full(ctrl_, hash, capacity_);
         size_t new_i = target.offset;
         total_probe_length += target.probe_length;
         set_ctrl(new_i, H2(hash));
@@ -1540,7 +1611,7 @@ class raw_hash_set {
 
   void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
     assert(IsValidCapacity(capacity_));
-    assert(!is_small());
+    assert(!is_small(capacity_));
     // Algorithm:
     // - mark all DELETED slots as EMPTY
     // - mark all FULL slots as DELETED
@@ -1565,7 +1636,7 @@ class raw_hash_set {
       if (!IsDeleted(ctrl_[i])) continue;
       size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
                                         PolicyTraits::element(slots_ + i));
-      auto target = find_first_non_full(hash);
+      auto target = find_first_non_full(ctrl_, hash, capacity_);
       size_t new_i = target.offset;
       total_probe_length += target.probe_length;
 
@@ -1573,7 +1644,8 @@ class raw_hash_set {
       // If they do, we don't need to move the object as it falls already in the
       // best probe we can.
       const auto probe_index = [&](size_t pos) {
-        return ((pos - probe(hash).offset()) & capacity_) / Group::kWidth;
+        return ((pos - probe(ctrl_, hash, capacity_).offset()) & capacity_) /
+               Group::kWidth;
       };
 
       // Element doesn't move.
@@ -1617,7 +1689,7 @@ class raw_hash_set {
 
   bool has_element(const value_type& elem) const {
     size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
-    auto seq = probe(hash);
+    auto seq = probe(ctrl_, hash, capacity_);
     while (true) {
       Group g{ctrl_ + seq.offset()};
       for (int i : g.Match(H2(hash))) {
@@ -1632,41 +1704,6 @@ class raw_hash_set {
     return false;
   }
 
-  // Probes the raw_hash_set with the probe sequence for hash and returns the
-  // pointer to the first empty or deleted slot.
-  // NOTE: this function must work with tables having both kEmpty and kDelete
-  // in one group. Such tables appears during drop_deletes_without_resize.
-  //
-  // This function is very useful when insertions happen and:
-  // - the input is already a set
-  // - there are enough slots
-  // - the element with the hash is not in the table
-  struct FindInfo {
-    size_t offset;
-    size_t probe_length;
-  };
-  FindInfo find_first_non_full(size_t hash) {
-    auto seq = probe(hash);
-    while (true) {
-      Group g{ctrl_ + seq.offset()};
-      auto mask = g.MatchEmptyOrDeleted();
-      if (mask) {
-#if !defined(NDEBUG)
-        // We want to add entropy even when ASLR is not enabled.
-        // In debug build we will randomly insert in either the front or back of
-        // the group.
-        // TODO(kfm,sbenza): revisit after we do unconditional mixing
-        if (!is_small() && ShouldInsertBackwards(hash, ctrl_)) {
-          return {seq.offset(mask.HighestBitSet()), seq.index()};
-        }
-#endif
-        return {seq.offset(mask.LowestBitSet()), seq.index()};
-      }
-      assert(seq.index() < capacity_ && "full table!");
-      seq.next();
-    }
-  }
-
   // TODO(alkis): Optimize this assuming *this and that don't overlap.
   raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
     raw_hash_set tmp(std::move(that));
@@ -1683,7 +1720,7 @@ class raw_hash_set {
   template <class K>
   std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
     auto hash = hash_ref()(key);
-    auto seq = probe(hash);
+    auto seq = probe(ctrl_, hash, capacity_);
     while (true) {
       Group g{ctrl_ + seq.offset()};
       for (int i : g.Match(H2(hash))) {
@@ -1694,16 +1731,17 @@ class raw_hash_set {
       }
       if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
       seq.next();
+      assert(seq.index() < capacity_ && "full table!");
     }
     return {prepare_insert(hash), true};
   }
 
   size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
-    auto target = find_first_non_full(hash);
+    auto target = find_first_non_full(ctrl_, hash, capacity_);
     if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
                            !IsDeleted(ctrl_[target.offset]))) {
       rehash_and_grow_if_necessary();
-      target = find_first_non_full(hash);
+      target = find_first_non_full(ctrl_, hash, capacity_);
     }
     ++size_;
     growth_left() -= IsEmpty(ctrl_[target.offset]);
@@ -1736,10 +1774,6 @@ class raw_hash_set {
  private:
   friend struct RawHashSetTestOnlyAccess;
 
-  probe_seq<Group::kWidth> probe(size_t hash) const {
-    return probe_seq<Group::kWidth>(H1(hash, ctrl_), capacity_);
-  }
-
   // Reset all ctrl bytes back to kEmpty, except the sentinel.
   void reset_ctrl() {
     std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth);
@@ -1769,22 +1803,6 @@ class raw_hash_set {
 
   size_t& growth_left() { return settings_.template get<0>(); }
 
-  // The representation of the object has two modes:
-  //  - small: For capacities < kWidth-1
-  //  - large: For the rest.
-  //
-  // Differences:
-  //  - In small mode we are able to use the whole capacity. The extra control
-  //  bytes give us at least one "empty" control byte to stop the iteration.
-  //  This is important to make 1 a valid capacity.
-  //
-  //  - In small mode only the first `capacity()` control bytes after the
-  //  sentinel are valid. The rest contain dummy kEmpty values that do not
-  //  represent a real slot. This is important to take into account on
-  //  find_first_non_full(), where we never try ShouldInsertBackwards() for
-  //  small tables.
-  bool is_small() const { return capacity_ < Group::kWidth - 1; }
-
   hasher& hash_ref() { return settings_.template get<1>(); }
   const hasher& hash_ref() const { return settings_.template get<1>(); }
   key_equal& eq_ref() { return settings_.template get<2>(); }
@@ -1828,7 +1846,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
                              const typename Set::key_type& key) {
     size_t num_probes = 0;
     size_t hash = set.hash_ref()(key);
-    auto seq = set.probe(hash);
+    auto seq = probe(set.ctrl_, hash, set.capacity_);
     while (true) {
       container_internal::Group g{set.ctrl_ + seq.offset()};
       for (int i : g.Match(container_internal::H2(hash))) {
diff --git a/third_party/abseil_cpp/absl/container/internal/raw_hash_set_allocator_test.cc b/third_party/abseil_cpp/absl/container/internal/raw_hash_set_allocator_test.cc
index 7ac4b9f7dfc5..e73f53fd637b 100644
--- a/third_party/abseil_cpp/absl/container/internal/raw_hash_set_allocator_test.cc
+++ b/third_party/abseil_cpp/absl/container/internal/raw_hash_set_allocator_test.cc
@@ -424,6 +424,81 @@ TEST_F(PropagateOnAll, Swap) {
   EXPECT_EQ(0, it->num_copies());
 }
 
+// This allocator is similar to std::pmr::polymorphic_allocator.
+// Note the deleted copy assignment.
+template <class T>
+class PAlloc {
+  template <class>
+  friend class PAlloc;
+
+ public:
+  // types
+  using value_type = T;
+
+  // traits
+  using propagate_on_container_swap = std::false_type;
+
+  PAlloc() noexcept = default;
+  explicit PAlloc(size_t id) noexcept : id_(id) {}
+  PAlloc(const PAlloc&) noexcept = default;
+  PAlloc& operator=(const PAlloc&) noexcept = delete;
+
+  template <class U>
+  PAlloc(const PAlloc<U>& that) noexcept : id_(that.id_) {}  // NOLINT
+
+  template <class U>
+  struct rebind {
+    using other = PAlloc<U>;
+  };
+
+  constexpr PAlloc select_on_container_copy_construction() const { return {}; }
+
+  // public member functions
+  T* allocate(size_t) { return new T; }
+  void deallocate(T* p, size_t) { delete p; }
+
+  friend bool operator==(const PAlloc& a, const PAlloc& b) {
+    return a.id_ == b.id_;
+  }
+  friend bool operator!=(const PAlloc& a, const PAlloc& b) { return !(a == b); }
+
+ private:
+  size_t id_ = std::numeric_limits<size_t>::max();
+};
+
+// GCC 5.4 and 5.5 fail to compile this due to a bug in noexcept handling.
+#if !defined(__GNUC__) || __GNUC__ != 5 || (__GNUC_MINOR__ != 4 && \
+    __GNUC_MINOR__ != 5)
+TEST(NoPropagateOn, Swap) {
+  using PA = PAlloc<char>;
+  using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
+
+  Table t1(PA{1}), t2(PA{2});
+  swap(t1, t2);
+  EXPECT_EQ(t1.get_allocator(), PA(1));
+  EXPECT_EQ(t2.get_allocator(), PA(2));
+}
+#endif
+
+TEST(NoPropagateOn, CopyConstruct) {
+  using PA = PAlloc<char>;
+  using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
+
+  Table t1(PA{1}), t2(t1);
+  EXPECT_EQ(t1.get_allocator(), PA(1));
+  EXPECT_EQ(t2.get_allocator(), PA());
+}
+
+TEST(NoPropagateOn, Assignment) {
+  using PA = PAlloc<char>;
+  using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
+
+  Table t1(PA{1}), t2(PA{2});
+  t1 = t2;
+  EXPECT_EQ(t1.get_allocator(), PA(1));
+  EXPECT_EQ(t2.get_allocator(), PA(2));
+}
+
 }  // namespace
 }  // namespace container_internal
 ABSL_NAMESPACE_END
diff --git a/third_party/abseil_cpp/absl/container/internal/raw_hash_set_test.cc b/third_party/abseil_cpp/absl/container/internal/raw_hash_set_test.cc
index 2fc85591ca72..33d2773de302 100644
--- a/third_party/abseil_cpp/absl/container/internal/raw_hash_set_test.cc
+++ b/third_party/abseil_cpp/absl/container/internal/raw_hash_set_test.cc
@@ -26,6 +26,7 @@
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 #include "absl/base/attributes.h"
+#include "absl/base/config.h"
 #include "absl/base/internal/cycleclock.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/container/internal/container_memory.h"
@@ -846,7 +847,8 @@ TEST(Table, EraseMaintainsValidIterator) {
 std::vector<int64_t> CollectBadMergeKeys(size_t N) {
   static constexpr int kGroupSize = Group::kWidth - 1;
 
-  auto topk_range = [](size_t b, size_t e, IntTable* t) -> std::vector<int64_t> {
+  auto topk_range = [](size_t b, size_t e,
+                       IntTable* t) -> std::vector<int64_t> {
     for (size_t i = b; i != e; ++i) {
       t->emplace(i);
     }
@@ -1000,8 +1002,8 @@ using ProbeStatsPerSize = std::map<size_t, ProbeStats>;
 // 1. Create new table and reserve it to keys.size() * 2
 // 2. Insert all keys xored with seed
 // 3. Collect ProbeStats from final table.
-ProbeStats CollectProbeStatsOnKeysXoredWithSeed(const std::vector<int64_t>& keys,
-                                                size_t num_iters) {
+ProbeStats CollectProbeStatsOnKeysXoredWithSeed(
+    const std::vector<int64_t>& keys, size_t num_iters) {
   const size_t reserve_size = keys.size() * 2;
 
   ProbeStats stats;
@@ -1709,6 +1711,26 @@ TEST(Nodes, ExtractInsert) {
   EXPECT_FALSE(node);
 }
 
+TEST(Nodes, HintInsert) {
+  IntTable t = {1, 2, 3};
+  auto node = t.extract(1);
+  EXPECT_THAT(t, UnorderedElementsAre(2, 3));
+  auto it = t.insert(t.begin(), std::move(node));
+  EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
+  EXPECT_EQ(*it, 1);
+  EXPECT_FALSE(node);
+
+  node = t.extract(2);
+  EXPECT_THAT(t, UnorderedElementsAre(1, 3));
+  // Reinsert 2 to make the next insert fail.
+  t.insert(2);
+  EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
+  it = t.insert(t.begin(), std::move(node));
+  EXPECT_EQ(*it, 2);
+  // The node was not emptied by the insert call.
+  EXPECT_TRUE(node);
+}
+
 IntTable MakeSimpleTable(size_t size) {
   IntTable t;
   while (t.size() < size) t.insert(t.size());
@@ -1791,11 +1813,11 @@ TEST(TableDeathTest, EraseOfEndAsserts) {
 
   IntTable t;
   // Extra simple "regexp" as regexp support is highly varied across platforms.
-  constexpr char kDeathMsg[] = "IsFull";
+  constexpr char kDeathMsg[] = "Invalid operation on iterator";
   EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
 }
 
-#if defined(ABSL_HASHTABLEZ_SAMPLE)
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 TEST(RawHashSamplerTest, Sample) {
   // Enable the feature even if the prod default is off.
   SetHashtablezEnabled(true);
@@ -1816,7 +1838,7 @@ TEST(RawHashSamplerTest, Sample) {
   EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
               0.01, 0.005);
 }
-#endif  // ABSL_HASHTABLEZ_SAMPLER
+#endif  // ABSL_INTERNAL_HASHTABLEZ_SAMPLE
 
 TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
   // Enable the feature even if the prod default is off.
@@ -1839,7 +1861,7 @@ TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
               0.00, 0.001);
 }
 
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
 TEST(Sanitizer, PoisoningUnused) {
   IntTable t;
   t.reserve(5);
@@ -1863,7 +1885,7 @@ TEST(Sanitizer, PoisoningOnErase) {
   t.erase(0);
   EXPECT_TRUE(__asan_address_is_poisoned(&v));
 }
-#endif  // ADDRESS_SANITIZER
+#endif  // ABSL_HAVE_ADDRESS_SANITIZER
 
 }  // namespace
 }  // namespace container_internal
diff --git a/third_party/abseil_cpp/absl/container/node_hash_map.h b/third_party/abseil_cpp/absl/container/node_hash_map.h
index 174b971e99ce..7a39f6284cf5 100644
--- a/third_party/abseil_cpp/absl/container/node_hash_map.h
+++ b/third_party/abseil_cpp/absl/container/node_hash_map.h
@@ -225,7 +225,8 @@ class node_hash_map
   //
   // size_type erase(const key_type& key):
   //
-  //   Erases the element with the matching key, if it exists.
+  //   Erases the element with the matching key, if it exists, returning the
+  //   number of elements erased (0 or 1).
   using Base::erase;
 
   // node_hash_map::insert()
@@ -374,6 +375,11 @@ class node_hash_map
   //   key value and returns a node handle owning that extracted data. If the
   //   `node_hash_map` does not contain an element with a matching key, this
   //   function returns an empty node handle.
+  //
+  // NOTE: when compiled with a C++ standard earlier than C++17,
+  // `node_type::key()` returns a const reference to the key instead of a
+  // mutable reference. We cannot safely return a mutable reference without
+  // std::launder (which is not available before C++17).
   using Base::extract;
 
   // node_hash_map::merge()
diff --git a/third_party/abseil_cpp/absl/container/node_hash_map_test.cc b/third_party/abseil_cpp/absl/container/node_hash_map_test.cc
index 5d74b814b584..8f59a1e4a210 100644
--- a/third_party/abseil_cpp/absl/container/node_hash_map_test.cc
+++ b/third_party/abseil_cpp/absl/container/node_hash_map_test.cc
@@ -254,6 +254,21 @@ TEST(NodeHashMap, EraseIf) {
   }
 }
 
+// This test requires std::launder for mutable key access in node handles.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+TEST(NodeHashMap, NodeHandleMutableKeyAccess) {
+  node_hash_map<std::string, std::string> map;
+
+  map["key1"] = "mapped";
+
+  auto nh = map.extract(map.begin());
+  nh.key().resize(3);
+  map.insert(std::move(nh));
+
+  EXPECT_THAT(map, testing::ElementsAre(Pair("key", "mapped")));
+}
+#endif
+
 }  // namespace
 }  // namespace container_internal
 ABSL_NAMESPACE_END
diff --git a/third_party/abseil_cpp/absl/container/node_hash_set.h b/third_party/abseil_cpp/absl/container/node_hash_set.h
index 56bab5c2c001..56ce3b66c0be 100644
--- a/third_party/abseil_cpp/absl/container/node_hash_set.h
+++ b/third_party/abseil_cpp/absl/container/node_hash_set.h
@@ -217,7 +217,8 @@ class node_hash_set
   //
   // size_type erase(const key_type& key):
   //
-  //   Erases the element with the matching key, if it exists.
+  //   Erases the element with the matching key, if it exists, returning the
+  //   number of elements erased (0 or 1).
   using Base::erase;
 
   // node_hash_set::insert()