Diffstat (limited to 'third_party/abseil_cpp/absl/container/internal')
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/btree.h | 2587
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/btree_container.h | 683
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/common.h | 206
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/compressed_tuple.h | 290
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/compressed_tuple_test.cc | 409
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/container_memory.h | 460
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/container_memory_test.cc | 256
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/counting_allocator.h | 114
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/hash_function_defaults.h | 161
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/hash_function_defaults_test.cc | 383
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/hash_generator_testing.cc | 76
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/hash_generator_testing.h | 161
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/hash_policy_testing.h | 184
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/hash_policy_testing_test.cc | 45
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/hash_policy_traits.h | 208
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/hash_policy_traits_test.cc | 144
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/hashtable_debug.h | 110
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/hashtable_debug_hooks.h | 85
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/hashtablez_sampler.cc | 270
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/hashtablez_sampler.h | 321
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc | 30
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/hashtablez_sampler_test.cc | 371
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/have_sse.h | 50
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/inlined_vector.h | 895
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/layout.h | 743
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/layout_test.cc | 1635
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/node_hash_policy.h | 92
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/node_hash_policy_test.cc | 69
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/raw_hash_map.h | 197
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/raw_hash_set.cc | 61
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/raw_hash_set.h | 1903
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/raw_hash_set_allocator_test.cc | 505
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/raw_hash_set_test.cc | 1893
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/test_instance_tracker.cc | 29
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/test_instance_tracker.h | 274
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/test_instance_tracker_test.cc | 184
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/tracked.h | 83
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/unordered_map_constructor_test.h | 489
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/unordered_map_lookup_test.h | 117
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/unordered_map_members_test.h | 87
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/unordered_map_modifiers_test.h | 318
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/unordered_map_test.cc | 50
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/unordered_set_constructor_test.h | 496
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/unordered_set_lookup_test.h | 91
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/unordered_set_members_test.h | 86
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/unordered_set_modifiers_test.h | 190
-rw-r--r--  third_party/abseil_cpp/absl/container/internal/unordered_set_test.cc | 41
47 files changed, 18132 insertions(+), 0 deletions(-)
diff --git a/third_party/abseil_cpp/absl/container/internal/btree.h b/third_party/abseil_cpp/absl/container/internal/btree.h
new file mode 100644
index 000000000000..f2fc31df8d41
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/btree.h
@@ -0,0 +1,2587 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A btree implementation of the STL set and map interfaces. A btree is smaller
+// and generally also faster than STL set/map (refer to the benchmarks below).
+// The red-black tree implementation of STL set/map has an overhead of 3
+// pointers (left, right and parent) plus the node color information for each
+// stored value. So a set<int32_t> consumes 40 bytes for each value stored in
+// 64-bit mode. This btree implementation stores multiple values on fixed
+// size nodes (usually 256 bytes) and doesn't store child pointers for leaf
+// nodes. The result is that a btree_set<int32_t> may use much less memory per
+// stored value. For the random insertion benchmark in btree_bench.cc, a
+// btree_set<int32_t> with node-size of 256 uses 5.1 bytes per stored value.
+//
+// The packing of multiple values on to each node of a btree has another effect
+// besides better space utilization: better cache locality due to fewer cache
+// lines being accessed. Better cache locality translates into faster
+// operations.
+//
+// CAVEATS
+//
+// Insertions and deletions on a btree can cause splitting, merging or
+// rebalancing of btree nodes. And even without these operations, insertions
+// and deletions on a btree will move values around within a node. In both
+// cases, the result is that insertions and deletions can invalidate iterators
+// pointing to values other than the one being inserted/deleted. Therefore, this
+// container does not provide pointer stability. This is notably different from
+// STL set/map, which take care not to invalidate iterators on insert/erase
+// except, of course, for iterators pointing to the value being erased. A
+// partial workaround is available when erasing: erase() returns an iterator
+// pointing to the item just after the one that was erased (or end() if none
+// exists).
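+//
+// For illustration only (this loop is not part of this header), the erase()
+// return value makes erase-while-iterating straightforward:
+//
+//   absl::btree_set<int32_t> s = {1, 2, 3, 4};
+//   for (auto it = s.begin(); it != s.end();) {
+//     if (*it % 2 == 0) {
+//       it = s.erase(it);  // erase() returns the iterator past the erased
+//                          // element, so `it` stays valid.
+//     } else {
+//       ++it;
+//     }
+//   }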
+
+#ifndef ABSL_CONTAINER_INTERNAL_BTREE_H_
+#define ABSL_CONTAINER_INTERNAL_BTREE_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <functional>
+#include <iterator>
+#include <limits>
+#include <new>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/macros.h"
+#include "absl/container/internal/common.h"
+#include "absl/container/internal/compressed_tuple.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/layout.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/cord.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/compare.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// A helper class that indicates if the Compare parameter is a key-compare-to
+// comparator.
+template <typename Compare, typename T>
+using btree_is_key_compare_to =
+    std::is_convertible<absl::result_of_t<Compare(const T &, const T &)>,
+                        absl::weak_ordering>;
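+
+// For example (illustrative only), a hand-written three-way comparator
+// returning absl::weak_ordering satisfies this trait:
+//
+//   struct IntCompareTo {
+//     absl::weak_ordering operator()(int a, int b) const {
+//       if (a < b) return absl::weak_ordering::less;
+//       if (a > b) return absl::weak_ordering::greater;
+//       return absl::weak_ordering::equivalent;
+//     }
+//   };
+//   static_assert(btree_is_key_compare_to<IntCompareTo, int>::value, "");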
+
+struct StringBtreeDefaultLess {
+  using is_transparent = void;
+
+  StringBtreeDefaultLess() = default;
+
+  // Compatibility constructors.
+  StringBtreeDefaultLess(std::less<std::string>) {}  // NOLINT
+  StringBtreeDefaultLess(std::less<string_view>) {}  // NOLINT
+  StringBtreeDefaultLess(std::less<absl::Cord>) {}   // NOLINT
+
+  absl::weak_ordering operator()(absl::string_view lhs,
+                                 absl::string_view rhs) const {
+    return compare_internal::compare_result_as_ordering(lhs.compare(rhs));
+  }
+  absl::weak_ordering operator()(const absl::Cord &lhs,
+                                 const absl::Cord &rhs) const {
+    return compare_internal::compare_result_as_ordering(lhs.Compare(rhs));
+  }
+  absl::weak_ordering operator()(const absl::Cord &lhs,
+                                 absl::string_view rhs) const {
+    return compare_internal::compare_result_as_ordering(lhs.Compare(rhs));
+  }
+  absl::weak_ordering operator()(absl::string_view lhs,
+                                 const absl::Cord &rhs) const {
+    return compare_internal::compare_result_as_ordering(-rhs.Compare(lhs));
+  }
+};
+
+struct StringBtreeDefaultGreater {
+  using is_transparent = void;
+
+  StringBtreeDefaultGreater() = default;
+
+  StringBtreeDefaultGreater(std::greater<std::string>) {}  // NOLINT
+  StringBtreeDefaultGreater(std::greater<string_view>) {}  // NOLINT
+  StringBtreeDefaultGreater(std::greater<absl::Cord>) {}   // NOLINT
+
+  absl::weak_ordering operator()(absl::string_view lhs,
+                                 absl::string_view rhs) const {
+    return compare_internal::compare_result_as_ordering(rhs.compare(lhs));
+  }
+  absl::weak_ordering operator()(const absl::Cord &lhs,
+                                 const absl::Cord &rhs) const {
+    return compare_internal::compare_result_as_ordering(rhs.Compare(lhs));
+  }
+  absl::weak_ordering operator()(const absl::Cord &lhs,
+                                 absl::string_view rhs) const {
+    return compare_internal::compare_result_as_ordering(-lhs.Compare(rhs));
+  }
+  absl::weak_ordering operator()(absl::string_view lhs,
+                                 const absl::Cord &rhs) const {
+    return compare_internal::compare_result_as_ordering(rhs.Compare(lhs));
+  }
+};
+
+// A helper class to convert a boolean comparison into a three-way "compare-to"
+// comparison that returns an `absl::weak_ordering`. This helper
+// class is specialized for less<std::string>, greater<std::string>,
+// less<string_view>, greater<string_view>, less<absl::Cord>, and
+// greater<absl::Cord>.
+//
+// key_compare_to_adapter is provided so that btree users
+// automatically get the more efficient compare-to code when using common
+// Abseil string types with common comparison functors.
+// These string-like specializations also turn on heterogeneous lookup by
+// default.
+template <typename Compare>
+struct key_compare_to_adapter {
+  using type = Compare;
+};
+
+template <>
+struct key_compare_to_adapter<std::less<std::string>> {
+  using type = StringBtreeDefaultLess;
+};
+
+template <>
+struct key_compare_to_adapter<std::greater<std::string>> {
+  using type = StringBtreeDefaultGreater;
+};
+
+template <>
+struct key_compare_to_adapter<std::less<absl::string_view>> {
+  using type = StringBtreeDefaultLess;
+};
+
+template <>
+struct key_compare_to_adapter<std::greater<absl::string_view>> {
+  using type = StringBtreeDefaultGreater;
+};
+
+template <>
+struct key_compare_to_adapter<std::less<absl::Cord>> {
+  using type = StringBtreeDefaultLess;
+};
+
+template <>
+struct key_compare_to_adapter<std::greater<absl::Cord>> {
+  using type = StringBtreeDefaultGreater;
+};
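+
+// For instance (illustrative), the adapter maps a common functor to its
+// compare-to equivalent:
+//
+//   static_assert(
+//       std::is_same<key_compare_to_adapter<std::less<std::string>>::type,
+//                    StringBtreeDefaultLess>::value,
+//       "std::less<std::string> adapts to a compare-to comparator");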
+
+// Detects an 'absl_btree_prefer_linear_node_search' member. This is
+// a protocol used to opt in to, or out of, linear node search.
+//
+// This can be useful for key types that wrap an integer and define their own
+// cheap operator<(). For example:
+//
+//   class K {
+//    public:
+//     using absl_btree_prefer_linear_node_search = std::true_type;
+//     ...
+//    private:
+//     friend bool operator<(K a, K b) { return a.k_ < b.k_; }
+//     int k_;
+//   };
+//
+//   btree_map<K, V> m;  // Uses linear search
+//
+// If T has the preference tag, then it has a preference.
+// Btree will use the tag's truth value.
+template <typename T, typename = void>
+struct has_linear_node_search_preference : std::false_type {};
+template <typename T, typename = void>
+struct prefers_linear_node_search : std::false_type {};
+template <typename T>
+struct has_linear_node_search_preference<
+    T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
+    : std::true_type {};
+template <typename T>
+struct prefers_linear_node_search<
+    T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
+    : T::absl_btree_prefer_linear_node_search {};
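+
+// E.g. (illustrative), for the class K above:
+//
+//   static_assert(has_linear_node_search_preference<K>::value, "");
+//   static_assert(prefers_linear_node_search<K>::value, "");
+//
+// while a type with no tag, such as plain int, expresses no preference:
+//
+//   static_assert(!has_linear_node_search_preference<int>::value, "");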
+
+template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
+          bool Multi, typename SlotPolicy>
+struct common_params {
+  // If Compare is a common comparator for a string-like type, then we adapt it
+  // to use heterogeneous lookup and to be a key-compare-to comparator.
+  using key_compare = typename key_compare_to_adapter<Compare>::type;
+  // True when key_compare has been adapted to StringBtreeDefault{Less,Greater}.
+  using is_key_compare_adapted =
+      absl::negation<std::is_same<key_compare, Compare>>;
+  // A type which indicates if we have a key-compare-to functor or a plain old
+  // key-compare functor.
+  using is_key_compare_to = btree_is_key_compare_to<key_compare, Key>;
+
+  using allocator_type = Alloc;
+  using key_type = Key;
+  using size_type = std::make_signed<size_t>::type;
+  using difference_type = ptrdiff_t;
+
+  // True if this is a multiset or multimap.
+  using is_multi_container = std::integral_constant<bool, Multi>;
+
+  using slot_policy = SlotPolicy;
+  using slot_type = typename slot_policy::slot_type;
+  using value_type = typename slot_policy::value_type;
+  using init_type = typename slot_policy::mutable_value_type;
+  using pointer = value_type *;
+  using const_pointer = const value_type *;
+  using reference = value_type &;
+  using const_reference = const value_type &;
+
+  enum {
+    kTargetNodeSize = TargetNodeSize,
+
+    // Upper bound for the available space for values. This is largest for leaf
+    // nodes, which have overhead of at least a pointer + 4 bytes (for storing
+    // four 1-byte field_types: position, start, finish, and max_count).
+    kNodeValueSpace =
+        TargetNodeSize - /*minimum overhead=*/(sizeof(void *) + 4),
+  };
+
+  // This is an integral type large enough to hold as many values as will fit
+  // in a node of TargetNodeSize bytes.
+  using node_count_type =
+      absl::conditional_t<(kNodeValueSpace / sizeof(value_type) >
+                           (std::numeric_limits<uint8_t>::max)()),
+                          uint16_t, uint8_t>;  // NOLINT
+
+  // The following methods are necessary for passing this struct as PolicyTraits
+  // for node_handle and/or are used within btree.
+  static value_type &element(slot_type *slot) {
+    return slot_policy::element(slot);
+  }
+  static const value_type &element(const slot_type *slot) {
+    return slot_policy::element(slot);
+  }
+  template <class... Args>
+  static void construct(Alloc *alloc, slot_type *slot, Args &&... args) {
+    slot_policy::construct(alloc, slot, std::forward<Args>(args)...);
+  }
+  static void construct(Alloc *alloc, slot_type *slot, slot_type *other) {
+    slot_policy::construct(alloc, slot, other);
+  }
+  static void destroy(Alloc *alloc, slot_type *slot) {
+    slot_policy::destroy(alloc, slot);
+  }
+  static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) {
+    construct(alloc, new_slot, old_slot);
+    destroy(alloc, old_slot);
+  }
+  static void swap(Alloc *alloc, slot_type *a, slot_type *b) {
+    slot_policy::swap(alloc, a, b);
+  }
+  static void move(Alloc *alloc, slot_type *src, slot_type *dest) {
+    slot_policy::move(alloc, src, dest);
+  }
+};
+
+// A parameters structure for holding the type parameters for a btree_map.
+// Compare and Alloc should be nothrow copy-constructible.
+template <typename Key, typename Data, typename Compare, typename Alloc,
+          int TargetNodeSize, bool Multi>
+struct map_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
+                                  map_slot_policy<Key, Data>> {
+  using super_type = typename map_params::common_params;
+  using mapped_type = Data;
+  // This type allows us to move keys when it is safe to do so. It is safe
+  // for maps in which value_type and mutable_value_type are layout compatible.
+  using slot_policy = typename super_type::slot_policy;
+  using slot_type = typename super_type::slot_type;
+  using value_type = typename super_type::value_type;
+  using init_type = typename super_type::init_type;
+
+  using key_compare = typename super_type::key_compare;
+  // Inherit from key_compare for empty base class optimization.
+  struct value_compare : private key_compare {
+    value_compare() = default;
+    explicit value_compare(const key_compare &cmp) : key_compare(cmp) {}
+
+    template <typename T, typename U>
+    auto operator()(const T &left, const U &right) const
+        -> decltype(std::declval<key_compare>()(left.first, right.first)) {
+      return key_compare::operator()(left.first, right.first);
+    }
+  };
+  using is_map_container = std::true_type;
+
+  template <typename V>
+  static auto key(const V &value) -> decltype(value.first) {
+    return value.first;
+  }
+  static const Key &key(const slot_type *s) { return slot_policy::key(s); }
+  static const Key &key(slot_type *s) { return slot_policy::key(s); }
+  // For use in node handle.
+  static auto mutable_key(slot_type *s)
+      -> decltype(slot_policy::mutable_key(s)) {
+    return slot_policy::mutable_key(s);
+  }
+  static mapped_type &value(value_type *value) { return value->second; }
+};
+
+// This type implements the necessary functions from the
+// absl::container_internal::slot_type interface.
+template <typename Key>
+struct set_slot_policy {
+  using slot_type = Key;
+  using value_type = Key;
+  using mutable_value_type = Key;
+
+  static value_type &element(slot_type *slot) { return *slot; }
+  static const value_type &element(const slot_type *slot) { return *slot; }
+
+  template <typename Alloc, class... Args>
+  static void construct(Alloc *alloc, slot_type *slot, Args &&... args) {
+    absl::allocator_traits<Alloc>::construct(*alloc, slot,
+                                             std::forward<Args>(args)...);
+  }
+
+  template <typename Alloc>
+  static void construct(Alloc *alloc, slot_type *slot, slot_type *other) {
+    absl::allocator_traits<Alloc>::construct(*alloc, slot, std::move(*other));
+  }
+
+  template <typename Alloc>
+  static void destroy(Alloc *alloc, slot_type *slot) {
+    absl::allocator_traits<Alloc>::destroy(*alloc, slot);
+  }
+
+  template <typename Alloc>
+  static void swap(Alloc * /*alloc*/, slot_type *a, slot_type *b) {
+    using std::swap;
+    swap(*a, *b);
+  }
+
+  template <typename Alloc>
+  static void move(Alloc * /*alloc*/, slot_type *src, slot_type *dest) {
+    *dest = std::move(*src);
+  }
+};
+
+// A parameters structure for holding the type parameters for a btree_set.
+// Compare and Alloc should be nothrow copy-constructible.
+template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
+          bool Multi>
+struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
+                                  set_slot_policy<Key>> {
+  using value_type = Key;
+  using slot_type = typename set_params::common_params::slot_type;
+  using value_compare = typename set_params::common_params::key_compare;
+  using is_map_container = std::false_type;
+
+  template <typename V>
+  static const V &key(const V &value) { return value; }
+  static const Key &key(const slot_type *slot) { return *slot; }
+  static const Key &key(slot_type *slot) { return *slot; }
+};
+
+// An adapter class that converts a lower-bound compare into an upper-bound
+// compare. Note: there is no need to make a version of this adapter specialized
+// for key-compare-to functors because the upper-bound (the first value greater
+// than the input) is never an exact match.
+template <typename Compare>
+struct upper_bound_adapter {
+  explicit upper_bound_adapter(const Compare &c) : comp(c) {}
+  template <typename K1, typename K2>
+  bool operator()(const K1 &a, const K2 &b) const {
+    // Returns true when a is not greater than b.
+    return !compare_internal::compare_result_as_less_than(comp(b, a));
+  }
+
+ private:
+  Compare comp;
+};
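+
+// E.g. (illustrative): wrapping std::less<int> yields a predicate that is
+// true iff `a` is not greater than `b`, which is what an upper-bound search
+// needs:
+//
+//   upper_bound_adapter<std::less<int>> ub((std::less<int>()));
+//   assert(ub(3, 3));   // !(3 < 3)
+//   assert(!ub(4, 3));  // !(3 < 4)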
+
+enum class MatchKind : uint8_t { kEq, kNe };
+
+template <typename V, bool IsCompareTo>
+struct SearchResult {
+  V value;
+  MatchKind match;
+
+  static constexpr bool HasMatch() { return true; }
+  bool IsEq() const { return match == MatchKind::kEq; }
+};
+
+// When we don't use CompareTo, `match` is not present.
+// This ensures that callers can't use it accidentally when it provides no
+// useful information.
+template <typename V>
+struct SearchResult<V, false> {
+  SearchResult() {}
+  explicit SearchResult(V value) : value(value) {}
+  SearchResult(V value, MatchKind /*match*/) : value(value) {}
+
+  V value;
+
+  static constexpr bool HasMatch() { return false; }
+  static constexpr bool IsEq() { return false; }
+};
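+
+// E.g. (illustrative): with a compare-to comparator, a node search can
+// report an exact match along with the position:
+//
+//   SearchResult<int, true> res = node->binary_search(key, comp);
+//   if (res.IsEq()) { /* exact match at position res.value */ }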
+
+// A node in the btree. The same node type is used for both internal
+// and leaf nodes in the btree, though the nodes are allocated in such a way
+// that the children array is only valid in internal nodes.
+template <typename Params>
+class btree_node {
+  using is_key_compare_to = typename Params::is_key_compare_to;
+  using is_multi_container = typename Params::is_multi_container;
+  using field_type = typename Params::node_count_type;
+  using allocator_type = typename Params::allocator_type;
+  using slot_type = typename Params::slot_type;
+
+ public:
+  using params_type = Params;
+  using key_type = typename Params::key_type;
+  using value_type = typename Params::value_type;
+  using pointer = typename Params::pointer;
+  using const_pointer = typename Params::const_pointer;
+  using reference = typename Params::reference;
+  using const_reference = typename Params::const_reference;
+  using key_compare = typename Params::key_compare;
+  using size_type = typename Params::size_type;
+  using difference_type = typename Params::difference_type;
+
+  // Btree decides whether to use linear node search as follows:
+  //   - If the comparator expresses a preference, use that.
+  //   - If the key expresses a preference, use that.
+  //   - If the key is arithmetic and the comparator is std::less or
+  //     std::greater, choose linear.
+  //   - Otherwise, choose binary.
+  // TODO(ezb): Might make sense to add condition(s) based on node-size.
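+  // For example (illustrative): with the default comparator, btree_set<int>
+  // uses linear search (int is arithmetic and the comparator is
+  // std::less<int>), while btree_set<std::string> uses binary search (the
+  // adapted comparator expresses no preference and std::string is not
+  // arithmetic).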
+  using use_linear_search = std::integral_constant<
+      bool,
+      has_linear_node_search_preference<key_compare>::value
+          ? prefers_linear_node_search<key_compare>::value
+          : has_linear_node_search_preference<key_type>::value
+                ? prefers_linear_node_search<key_type>::value
+                : std::is_arithmetic<key_type>::value &&
+                      (std::is_same<std::less<key_type>, key_compare>::value ||
+                       std::is_same<std::greater<key_type>,
+                                    key_compare>::value)>;
+
+  // This class is organized by gtl::Layout as if it had the following
+  // structure:
+  //   // A pointer to the node's parent.
+  //   btree_node *parent;
+  //
+  //   // The position of the node in the node's parent.
+  //   field_type position;
+  //   // The index of the first populated value in `values`.
+  //   // TODO(ezb): right now, `start` is always 0. Update insertion/merge
+  //   // logic to allow for floating storage within nodes.
+  //   field_type start;
+  //   // The index after the last populated value in `values`. Currently, this
+  //   // is the same as the count of values.
+  //   field_type finish;
+  //   // The maximum number of values the node can hold. This is an integer in
+  //   // [1, kNodeValues] for root leaf nodes, kNodeValues for non-root leaf
+  //   // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal
+  //   // nodes (even though there are still kNodeValues values in the node).
+  //   // TODO(ezb): make max_count use only 4 bits and record log2(capacity)
+  //   // to free extra bits for is_root, etc.
+  //   field_type max_count;
+  //
+  //   // The array of values. The capacity is `max_count` for leaf nodes and
+  //   // kNodeValues for internal nodes. Only the values in
+  //   // [start, finish) have been initialized and are valid.
+  //   slot_type values[max_count];
+  //
+  //   // The array of child pointers. The keys in children[i] are all less
+  //   // than key(i). The keys in children[i + 1] are all greater than key(i).
+  //   // There are 0 children for leaf nodes and kNodeValues + 1 children for
+  //   // internal nodes.
+  //   btree_node *children[kNodeValues + 1];
+  //
+  // This class is only constructed by EmptyNodeType. Normally, pointers to the
+  // layout above are allocated, cast to btree_node*, and de-allocated within
+  // the btree implementation.
+  ~btree_node() = default;
+  btree_node(btree_node const &) = delete;
+  btree_node &operator=(btree_node const &) = delete;
+
+  // Public for EmptyNodeType.
+  constexpr static size_type Alignment() {
+    static_assert(LeafLayout(1).Alignment() == InternalLayout().Alignment(),
+                  "Alignment of all nodes must be equal.");
+    return InternalLayout().Alignment();
+  }
+
+ protected:
+  btree_node() = default;
+
+ private:
+  using layout_type = absl::container_internal::Layout<btree_node *, field_type,
+                                                       slot_type, btree_node *>;
+  constexpr static size_type SizeWithNValues(size_type n) {
+    return layout_type(/*parent*/ 1,
+                       /*position, start, finish, max_count*/ 4,
+                       /*values*/ n,
+                       /*children*/ 0)
+        .AllocSize();
+  }
+  // A lower bound for the overhead of fields other than values in a leaf node.
+  constexpr static size_type MinimumOverhead() {
+    return SizeWithNValues(1) - sizeof(value_type);
+  }
+
+  // Compute how many values we can fit onto a leaf node taking into account
+  // padding.
+  constexpr static size_type NodeTargetValues(const int begin, const int end) {
+    return begin == end ? begin
+                        : SizeWithNValues((begin + end) / 2 + 1) >
+                                  params_type::kTargetNodeSize
+                              ? NodeTargetValues(begin, (begin + end) / 2)
+                              : NodeTargetValues((begin + end) / 2 + 1, end);
+  }
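+  // As a rough worked example (illustrative, typical 64-bit build): with
+  // kTargetNodeSize = 256 and 4-byte values, leaf overhead is a parent
+  // pointer (8 bytes) plus four 1-byte field_types, so about
+  // (256 - 12) / 4 = 61 values fit per leaf node.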
+
+  enum {
+    kTargetNodeSize = params_type::kTargetNodeSize,
+    kNodeTargetValues = NodeTargetValues(0, params_type::kTargetNodeSize),
+
+    // We need a minimum of 3 values per internal node in order to perform
+    // splitting (1 value for the two nodes involved in the split and 1 value
+    // propagated to the parent as the delimiter for the split).
+    kNodeValues = kNodeTargetValues >= 3 ? kNodeTargetValues : 3,
+
+    // The node is internal (i.e. is not a leaf node) if and only if `max_count`
+    // has this value.
+    kInternalNodeMaxCount = 0,
+  };
+
+  // Leaves can have fewer than kNodeValues values.
+  constexpr static layout_type LeafLayout(const int max_values = kNodeValues) {
+    return layout_type(/*parent*/ 1,
+                       /*position, start, finish, max_count*/ 4,
+                       /*values*/ max_values,
+                       /*children*/ 0);
+  }
+  constexpr static layout_type InternalLayout() {
+    return layout_type(/*parent*/ 1,
+                       /*position, start, finish, max_count*/ 4,
+                       /*values*/ kNodeValues,
+                       /*children*/ kNodeValues + 1);
+  }
+  constexpr static size_type LeafSize(const int max_values = kNodeValues) {
+    return LeafLayout(max_values).AllocSize();
+  }
+  constexpr static size_type InternalSize() {
+    return InternalLayout().AllocSize();
+  }
+
+  // N is the index of the type in the Layout definition.
+  // ElementType<N> is the Nth type in the Layout definition.
+  template <size_type N>
+  inline typename layout_type::template ElementType<N> *GetField() {
+    // We assert that we don't read from values that aren't there.
+    assert(N < 3 || !leaf());
+    return InternalLayout().template Pointer<N>(reinterpret_cast<char *>(this));
+  }
+  template <size_type N>
+  inline const typename layout_type::template ElementType<N> *GetField() const {
+    assert(N < 3 || !leaf());
+    return InternalLayout().template Pointer<N>(
+        reinterpret_cast<const char *>(this));
+  }
+  void set_parent(btree_node *p) { *GetField<0>() = p; }
+  field_type &mutable_finish() { return GetField<1>()[2]; }
+  slot_type *slot(int i) { return &GetField<2>()[i]; }
+  slot_type *start_slot() { return slot(start()); }
+  slot_type *finish_slot() { return slot(finish()); }
+  const slot_type *slot(int i) const { return &GetField<2>()[i]; }
+  void set_position(field_type v) { GetField<1>()[0] = v; }
+  void set_start(field_type v) { GetField<1>()[1] = v; }
+  void set_finish(field_type v) { GetField<1>()[2] = v; }
+  // This method is only called by the node init methods.
+  void set_max_count(field_type v) { GetField<1>()[3] = v; }
+
+ public:
+  // Whether this is a leaf node or not. This value doesn't change after the
+  // node is created.
+  bool leaf() const { return GetField<1>()[3] != kInternalNodeMaxCount; }
+
+  // Getter for the position of this node in its parent.
+  field_type position() const { return GetField<1>()[0]; }
+
+  // Getter for the offset of the first value in the `values` array.
+  field_type start() const {
+    // TODO(ezb): when floating storage is implemented, return GetField<1>()[1];
+    assert(GetField<1>()[1] == 0);
+    return 0;
+  }
+
+  // Getter for the offset after the last value in the `values` array.
+  field_type finish() const { return GetField<1>()[2]; }
+
+  // Getters for the number of values stored in this node.
+  field_type count() const {
+    assert(finish() >= start());
+    return finish() - start();
+  }
+  field_type max_count() const {
+    // Internal nodes have max_count==kInternalNodeMaxCount.
+    // Leaf nodes have max_count in [1, kNodeValues].
+    const field_type max_count = GetField<1>()[3];
+    return max_count == field_type{kInternalNodeMaxCount}
+               ? field_type{kNodeValues}
+               : max_count;
+  }
+
+  // Getter for the parent of this node.
+  btree_node *parent() const { return *GetField<0>(); }
+  // Getter for whether the node is the root of the tree. The parent of the
+  // root of the tree is the leftmost node in the tree which is guaranteed to
+  // be a leaf.
+  bool is_root() const { return parent()->leaf(); }
+  void make_root() {
+    assert(parent()->is_root());
+    set_parent(parent()->parent());
+  }
+
+  // Getters for the key/value at position i in the node.
+  const key_type &key(int i) const { return params_type::key(slot(i)); }
+  reference value(int i) { return params_type::element(slot(i)); }
+  const_reference value(int i) const { return params_type::element(slot(i)); }
+
+  // Getters/setter for the child at position i in the node.
+  btree_node *child(int i) const { return GetField<3>()[i]; }
+  btree_node *start_child() const { return child(start()); }
+  btree_node *&mutable_child(int i) { return GetField<3>()[i]; }
+  void clear_child(int i) {
+    absl::container_internal::SanitizerPoisonObject(&mutable_child(i));
+  }
+  void set_child(int i, btree_node *c) {
+    absl::container_internal::SanitizerUnpoisonObject(&mutable_child(i));
+    mutable_child(i) = c;
+    c->set_position(i);
+  }
+  void init_child(int i, btree_node *c) {
+    set_child(i, c);
+    c->set_parent(this);
+  }
+
+  // Returns the position of the first value whose key is not less than k.
+  template <typename K>
+  SearchResult<int, is_key_compare_to::value> lower_bound(
+      const K &k, const key_compare &comp) const {
+    return use_linear_search::value ? linear_search(k, comp)
+                                    : binary_search(k, comp);
+  }
+  // Returns the position of the first value whose key is greater than k.
+  template <typename K>
+  int upper_bound(const K &k, const key_compare &comp) const {
+    auto upper_compare = upper_bound_adapter<key_compare>(comp);
+    return use_linear_search::value ? linear_search(k, upper_compare).value
+                                    : binary_search(k, upper_compare).value;
+  }
+
+  template <typename K, typename Compare>
+  SearchResult<int, btree_is_key_compare_to<Compare, key_type>::value>
+  linear_search(const K &k, const Compare &comp) const {
+    return linear_search_impl(k, start(), finish(), comp,
+                              btree_is_key_compare_to<Compare, key_type>());
+  }
+
+  template <typename K, typename Compare>
+  SearchResult<int, btree_is_key_compare_to<Compare, key_type>::value>
+  binary_search(const K &k, const Compare &comp) const {
+    return binary_search_impl(k, start(), finish(), comp,
+                              btree_is_key_compare_to<Compare, key_type>());
+  }
+
+  // Returns the position of the first value whose key is not less than k using
+  // linear search performed using plain compare.
+  template <typename K, typename Compare>
+  SearchResult<int, false> linear_search_impl(
+      const K &k, int s, const int e, const Compare &comp,
+      std::false_type /* IsCompareTo */) const {
+    while (s < e) {
+      if (!comp(key(s), k)) {
+        break;
+      }
+      ++s;
+    }
+    return SearchResult<int, false>{s};
+  }
+
+  // Returns the position of the first value whose key is not less than k using
+  // linear search performed using compare-to.
+  template <typename K, typename Compare>
+  SearchResult<int, true> linear_search_impl(
+      const K &k, int s, const int e, const Compare &comp,
+      std::true_type /* IsCompareTo */) const {
+    while (s < e) {
+      const absl::weak_ordering c = comp(key(s), k);
+      if (c == 0) {
+        return {s, MatchKind::kEq};
+      } else if (c > 0) {
+        break;
+      }
+      ++s;
+    }
+    return {s, MatchKind::kNe};
+  }
+
+  // Returns the position of the first value whose key is not less than k using
+  // binary search performed using plain compare.
+  template <typename K, typename Compare>
+  SearchResult<int, false> binary_search_impl(
+      const K &k, int s, int e, const Compare &comp,
+      std::false_type /* IsCompareTo */) const {
+    while (s != e) {
+      const int mid = (s + e) >> 1;
+      if (comp(key(mid), k)) {
+        s = mid + 1;
+      } else {
+        e = mid;
+      }
+    }
+    return SearchResult<int, false>{s};
+  }
+
+  // Returns the position of the first value whose key is not less than k using
+  // binary search performed using compare-to.
+  template <typename K, typename CompareTo>
+  SearchResult<int, true> binary_search_impl(
+      const K &k, int s, int e, const CompareTo &comp,
+      std::true_type /* IsCompareTo */) const {
+    if (is_multi_container::value) {
+      MatchKind exact_match = MatchKind::kNe;
+      while (s != e) {
+        const int mid = (s + e) >> 1;
+        const absl::weak_ordering c = comp(key(mid), k);
+        if (c < 0) {
+          s = mid + 1;
+        } else {
+          e = mid;
+          if (c == 0) {
+            // Need to return the first value whose key is not less than k,
+            // which requires continuing the binary search if this is a
+            // multi-container.
+            exact_match = MatchKind::kEq;
+          }
+        }
+      }
+      return {s, exact_match};
+    } else {  // Not a multi-container.
+      while (s != e) {
+        const int mid = (s + e) >> 1;
+        const absl::weak_ordering c = comp(key(mid), k);
+        if (c < 0) {
+          s = mid + 1;
+        } else if (c > 0) {
+          e = mid;
+        } else {
+          return {mid, MatchKind::kEq};
+        }
+      }
+      return {s, MatchKind::kNe};
+    }
+  }
+
+  // Emplaces a value at position i, shifting all existing values and
+  // children at positions >= i to the right by 1.
+  template <typename... Args>
+  void emplace_value(size_type i, allocator_type *alloc, Args &&... args);
+
+  // Removes the values at positions [i, i + to_erase), shifting all existing
+  // values and children after that range to the left by to_erase. Clears all
+  // children between [i, i + to_erase).
+  void remove_values(field_type i, field_type to_erase, allocator_type *alloc);
+
+  // Rebalances a node with its right sibling.
+  void rebalance_right_to_left(int to_move, btree_node *right,
+                               allocator_type *alloc);
+  void rebalance_left_to_right(int to_move, btree_node *right,
+                               allocator_type *alloc);
+
+  // Splits a node, moving a portion of the node's values to its right sibling.
+  void split(int insert_position, btree_node *dest, allocator_type *alloc);
+
+  // Merges a node with its right sibling, moving all of the values and the
+  // delimiting key in the parent node onto itself, and deleting the src node.
+  void merge(btree_node *src, allocator_type *alloc);
+
+  // Node allocation/deletion routines.
+  void init_leaf(btree_node *parent, int max_count) {
+    set_parent(parent);
+    set_position(0);
+    set_start(0);
+    set_finish(0);
+    set_max_count(max_count);
+    absl::container_internal::SanitizerPoisonMemoryRegion(
+        start_slot(), max_count * sizeof(slot_type));
+  }
+  void init_internal(btree_node *parent) {
+    init_leaf(parent, kNodeValues);
+    // Set `max_count` to a sentinel value to indicate that this node is
+    // internal.
+    set_max_count(kInternalNodeMaxCount);
+    absl::container_internal::SanitizerPoisonMemoryRegion(
+        &mutable_child(start()), (kNodeValues + 1) * sizeof(btree_node *));
+  }
+
+  static void deallocate(const size_type size, btree_node *node,
+                         allocator_type *alloc) {
+    absl::container_internal::Deallocate<Alignment()>(alloc, node, size);
+  }
+
+  // Deletes a node and all of its children.
+  static void clear_and_delete(btree_node *node, allocator_type *alloc);
+
+ private:
+  template <typename... Args>
+  void value_init(const field_type i, allocator_type *alloc, Args &&... args) {
+    absl::container_internal::SanitizerUnpoisonObject(slot(i));
+    params_type::construct(alloc, slot(i), std::forward<Args>(args)...);
+  }
+  void value_destroy(const field_type i, allocator_type *alloc) {
+    params_type::destroy(alloc, slot(i));
+    absl::container_internal::SanitizerPoisonObject(slot(i));
+  }
+  void value_destroy_n(const field_type i, const field_type n,
+                       allocator_type *alloc) {
+    for (slot_type *s = slot(i), *end = slot(i + n); s != end; ++s) {
+      params_type::destroy(alloc, s);
+      absl::container_internal::SanitizerPoisonObject(s);
+    }
+  }
+
+  static void transfer(slot_type *dest, slot_type *src, allocator_type *alloc) {
+    absl::container_internal::SanitizerUnpoisonObject(dest);
+    params_type::transfer(alloc, dest, src);
+    absl::container_internal::SanitizerPoisonObject(src);
+  }
+
+  // Transfers value from slot `src_i` in `src_node` to slot `dest_i` in `this`.
+  void transfer(const size_type dest_i, const size_type src_i,
+                btree_node *src_node, allocator_type *alloc) {
+    transfer(slot(dest_i), src_node->slot(src_i), alloc);
+  }
+
+  // Transfers `n` values starting at value `src_i` in `src_node` into the
+  // values starting at value `dest_i` in `this`.
+  void transfer_n(const size_type n, const size_type dest_i,
+                  const size_type src_i, btree_node *src_node,
+                  allocator_type *alloc) {
+    for (slot_type *src = src_node->slot(src_i), *end = src + n,
+                   *dest = slot(dest_i);
+         src != end; ++src, ++dest) {
+      transfer(dest, src, alloc);
+    }
+  }
+
+  // Same as above, except that we start at the end and work our way to the
+  // beginning.
+  void transfer_n_backward(const size_type n, const size_type dest_i,
+                           const size_type src_i, btree_node *src_node,
+                           allocator_type *alloc) {
+    for (slot_type *src = src_node->slot(src_i + n - 1), *end = src - n,
+                   *dest = slot(dest_i + n - 1);
+         src != end; --src, --dest) {
+      transfer(dest, src, alloc);
+    }
+  }
+
+  template <typename P>
+  friend class btree;
+  template <typename N, typename R, typename P>
+  friend struct btree_iterator;
+  friend class BtreeNodePeer;
+};
+
+template <typename Node, typename Reference, typename Pointer>
+struct btree_iterator {
+ private:
+  using key_type = typename Node::key_type;
+  using size_type = typename Node::size_type;
+  using params_type = typename Node::params_type;
+
+  using node_type = Node;
+  using normal_node = typename std::remove_const<Node>::type;
+  using const_node = const Node;
+  using normal_pointer = typename params_type::pointer;
+  using normal_reference = typename params_type::reference;
+  using const_pointer = typename params_type::const_pointer;
+  using const_reference = typename params_type::const_reference;
+  using slot_type = typename params_type::slot_type;
+
+  using iterator =
+      btree_iterator<normal_node, normal_reference, normal_pointer>;
+  using const_iterator =
+      btree_iterator<const_node, const_reference, const_pointer>;
+
+ public:
+  // These aliases are public for std::iterator_traits.
+  using difference_type = typename Node::difference_type;
+  using value_type = typename params_type::value_type;
+  using pointer = Pointer;
+  using reference = Reference;
+  using iterator_category = std::bidirectional_iterator_tag;
+
+  btree_iterator() : node(nullptr), position(-1) {}
+  explicit btree_iterator(Node *n) : node(n), position(n->start()) {}
+  btree_iterator(Node *n, int p) : node(n), position(p) {}
+
+  // NOTE: this SFINAE allows for implicit conversions from iterator to
+  // const_iterator, but it specifically avoids defining copy constructors so
+  // that btree_iterator can be trivially copyable. This is for performance and
+  // binary size reasons.
+  template <typename N, typename R, typename P,
+            absl::enable_if_t<
+                std::is_same<btree_iterator<N, R, P>, iterator>::value &&
+                    std::is_same<btree_iterator, const_iterator>::value,
+                int> = 0>
+  btree_iterator(const btree_iterator<N, R, P> &other)  // NOLINT
+      : node(other.node), position(other.position) {}
+
+ private:
+  // This SFINAE allows explicit conversions from const_iterator to
+  // iterator, but also avoids defining a copy constructor.
+  // NOTE: the const_cast is safe because this constructor is only called by
+  // non-const methods and the container owns the nodes.
+  template <typename N, typename R, typename P,
+            absl::enable_if_t<
+                std::is_same<btree_iterator<N, R, P>, const_iterator>::value &&
+                    std::is_same<btree_iterator, iterator>::value,
+                int> = 0>
+  explicit btree_iterator(const btree_iterator<N, R, P> &other)
+      : node(const_cast<node_type *>(other.node)), position(other.position) {}
+
+  // Increment/decrement the iterator.
+  void increment() {
+    if (node->leaf() && ++position < node->finish()) {
+      return;
+    }
+    increment_slow();
+  }
+  void increment_slow();
+
+  void decrement() {
+    if (node->leaf() && --position >= node->start()) {
+      return;
+    }
+    decrement_slow();
+  }
+  void decrement_slow();
+
+ public:
+  bool operator==(const iterator &other) const {
+    return node == other.node && position == other.position;
+  }
+  bool operator==(const const_iterator &other) const {
+    return node == other.node && position == other.position;
+  }
+  bool operator!=(const iterator &other) const {
+    return node != other.node || position != other.position;
+  }
+  bool operator!=(const const_iterator &other) const {
+    return node != other.node || position != other.position;
+  }
+
+  // Accessors for the key/value the iterator is pointing at.
+  reference operator*() const {
+    ABSL_HARDENING_ASSERT(node != nullptr);
+    ABSL_HARDENING_ASSERT(node->start() <= position);
+    ABSL_HARDENING_ASSERT(node->finish() > position);
+    return node->value(position);
+  }
+  pointer operator->() const { return &operator*(); }
+
+  btree_iterator &operator++() {
+    increment();
+    return *this;
+  }
+  btree_iterator &operator--() {
+    decrement();
+    return *this;
+  }
+  btree_iterator operator++(int) {
+    btree_iterator tmp = *this;
+    ++*this;
+    return tmp;
+  }
+  btree_iterator operator--(int) {
+    btree_iterator tmp = *this;
+    --*this;
+    return tmp;
+  }
+
+ private:
+  template <typename Params>
+  friend class btree;
+  template <typename Tree>
+  friend class btree_container;
+  template <typename Tree>
+  friend class btree_set_container;
+  template <typename Tree>
+  friend class btree_map_container;
+  template <typename Tree>
+  friend class btree_multiset_container;
+  template <typename N, typename R, typename P>
+  friend struct btree_iterator;
+  template <typename TreeType, typename CheckerType>
+  friend class base_checker;
+
+  const key_type &key() const { return node->key(position); }
+  slot_type *slot() { return node->slot(position); }
+
+  // The node in the tree the iterator is pointing at.
+  Node *node;
+  // The position within the node of the tree the iterator is pointing at.
+  // NOTE: this is an int rather than a field_type because iterators can point
+  // to invalid positions (such as -1) in certain circumstances.
+  int position;
+};
+
+template <typename Params>
+class btree {
+  using node_type = btree_node<Params>;
+  using is_key_compare_to = typename Params::is_key_compare_to;
+  using init_type = typename Params::init_type;
+  using field_type = typename node_type::field_type;
+  using is_multi_container = typename Params::is_multi_container;
+  using is_key_compare_adapted = typename Params::is_key_compare_adapted;
+
+  // We use a static empty node for the root/leftmost/rightmost of empty btrees
+  // in order to avoid branching in begin()/end().
+  struct alignas(node_type::Alignment()) EmptyNodeType : node_type {
+    using field_type = typename node_type::field_type;
+    node_type *parent;
+    field_type position = 0;
+    field_type start = 0;
+    field_type finish = 0;
+    // max_count must be != kInternalNodeMaxCount (so that this node is regarded
+    // as a leaf node). max_count() is never called when the tree is empty.
+    field_type max_count = node_type::kInternalNodeMaxCount + 1;
+
+#ifdef _MSC_VER
+    // MSVC has constexpr code generation bugs here.
+    EmptyNodeType() : parent(this) {}
+#else
+    constexpr EmptyNodeType(node_type *p) : parent(p) {}
+#endif
+  };
+
+  static node_type *EmptyNode() {
+#ifdef _MSC_VER
+    static EmptyNodeType *empty_node = new EmptyNodeType;
+    // This assert fails if some other construction method is used.
+    assert(empty_node->parent == empty_node);
+    return empty_node;
+#else
+    static constexpr EmptyNodeType empty_node(
+        const_cast<EmptyNodeType *>(&empty_node));
+    return const_cast<EmptyNodeType *>(&empty_node);
+#endif
+  }
+
+  enum : uint32_t {
+    kNodeValues = node_type::kNodeValues,
+    kMinNodeValues = kNodeValues / 2,
+  };
+
+  struct node_stats {
+    using size_type = typename Params::size_type;
+
+    node_stats(size_type l, size_type i) : leaf_nodes(l), internal_nodes(i) {}
+
+    node_stats &operator+=(const node_stats &other) {
+      leaf_nodes += other.leaf_nodes;
+      internal_nodes += other.internal_nodes;
+      return *this;
+    }
+
+    size_type leaf_nodes;
+    size_type internal_nodes;
+  };
+
+ public:
+  using key_type = typename Params::key_type;
+  using value_type = typename Params::value_type;
+  using size_type = typename Params::size_type;
+  using difference_type = typename Params::difference_type;
+  using key_compare = typename Params::key_compare;
+  using value_compare = typename Params::value_compare;
+  using allocator_type = typename Params::allocator_type;
+  using reference = typename Params::reference;
+  using const_reference = typename Params::const_reference;
+  using pointer = typename Params::pointer;
+  using const_pointer = typename Params::const_pointer;
+  using iterator = btree_iterator<node_type, reference, pointer>;
+  using const_iterator = typename iterator::const_iterator;
+  using reverse_iterator = std::reverse_iterator<iterator>;
+  using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+  using node_handle_type = node_handle<Params, Params, allocator_type>;
+
+  // Internal types made public for use by btree_container types.
+  using params_type = Params;
+  using slot_type = typename Params::slot_type;
+
+ private:
+  // For use in copy_or_move_values_in_order.
+  const value_type &maybe_move_from_iterator(const_iterator it) { return *it; }
+  value_type &&maybe_move_from_iterator(iterator it) { return std::move(*it); }
+
+  // Copies or moves (depending on the template parameter) the values in
+  // `other` into this btree, preserving their order. This btree must be empty
+  // before this method is called. This method is used in copy construction,
+  // copy assignment, and move assignment.
+  template <typename Btree>
+  void copy_or_move_values_in_order(Btree &other);
+
+  // Validates that various assumptions/requirements are true at compile time.
+  constexpr static bool static_assert_validation();
+
+ public:
+  btree(const key_compare &comp, const allocator_type &alloc)
+      : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {}
+
+  btree(const btree &other) : btree(other, other.allocator()) {}
+  btree(const btree &other, const allocator_type &alloc)
+      : btree(other.key_comp(), alloc) {
+    copy_or_move_values_in_order(other);
+  }
+  btree(btree &&other) noexcept
+      : root_(std::move(other.root_)),
+        rightmost_(absl::exchange(other.rightmost_, EmptyNode())),
+        size_(absl::exchange(other.size_, 0)) {
+    other.mutable_root() = EmptyNode();
+  }
+  btree(btree &&other, const allocator_type &alloc)
+      : btree(other.key_comp(), alloc) {
+    if (alloc == other.allocator()) {
+      swap(other);
+    } else {
+      // Move values from `other` one at a time when allocators are different.
+      copy_or_move_values_in_order(other);
+    }
+  }
+
+  ~btree() {
+    // Put static_asserts in destructor to avoid triggering them before the type
+    // is complete.
+    static_assert(static_assert_validation(), "This call must be elided.");
+    clear();
+  }
+
+  // Assign the contents of other to *this.
+  btree &operator=(const btree &other);
+  btree &operator=(btree &&other) noexcept;
+
+  iterator begin() { return iterator(leftmost()); }
+  const_iterator begin() const { return const_iterator(leftmost()); }
+  iterator end() { return iterator(rightmost_, rightmost_->finish()); }
+  const_iterator end() const {
+    return const_iterator(rightmost_, rightmost_->finish());
+  }
+  reverse_iterator rbegin() { return reverse_iterator(end()); }
+  const_reverse_iterator rbegin() const {
+    return const_reverse_iterator(end());
+  }
+  reverse_iterator rend() { return reverse_iterator(begin()); }
+  const_reverse_iterator rend() const {
+    return const_reverse_iterator(begin());
+  }
+
+  // Finds the first element whose key is not less than key.
+  template <typename K>
+  iterator lower_bound(const K &key) {
+    return internal_end(internal_lower_bound(key).value);
+  }
+  template <typename K>
+  const_iterator lower_bound(const K &key) const {
+    return internal_end(internal_lower_bound(key).value);
+  }
+
+  // Finds the first element whose key is greater than key.
+  template <typename K>
+  iterator upper_bound(const K &key) {
+    return internal_end(internal_upper_bound(key));
+  }
+  template <typename K>
+  const_iterator upper_bound(const K &key) const {
+    return internal_end(internal_upper_bound(key));
+  }
+
+  // Finds the range of values which compare equal to key. The first member of
+  // the returned pair is equal to lower_bound(key). The second member of the
+  // pair is equal to upper_bound(key).
+  template <typename K>
+  std::pair<iterator, iterator> equal_range(const K &key);
+  template <typename K>
+  std::pair<const_iterator, const_iterator> equal_range(const K &key) const {
+    return const_cast<btree *>(this)->equal_range(key);
+  }
+
+  // Inserts a value into the btree only if it does not already exist. The
+  // boolean return value indicates whether insertion succeeded or failed.
+  // Requirement: if `key` already exists in the btree, does not consume `args`.
+  // Requirement: `key` is never referenced after consuming `args`.
+  template <typename K, typename... Args>
+  std::pair<iterator, bool> insert_unique(const K &key, Args &&... args);
+
+  // Inserts with hint. Checks to see if the value should be placed immediately
+  // before `position` in the tree. If so, then the insertion will take
+  // amortized constant time. If not, the insertion will take amortized
+  // logarithmic time as if a call to insert_unique() were made.
+  // Requirement: if `key` already exists in the btree, does not consume `args`.
+  // Requirement: `key` is never referenced after consuming `args`.
+  template <typename K, typename... Args>
+  std::pair<iterator, bool> insert_hint_unique(iterator position,
+                                               const K &key,
+                                               Args &&... args);
+
+  // Insert a range of values into the btree.
+  // Note: the first overload avoids constructing a value_type if the key
+  // already exists in the btree.
+  template <typename InputIterator,
+            typename = decltype(std::declval<const key_compare &>()(
+                params_type::key(*std::declval<InputIterator>()),
+                std::declval<const key_type &>()))>
+  void insert_iterator_unique(InputIterator b, InputIterator e, int);
+  // We need the second overload for cases in which we need to construct a
+  // value_type in order to compare it with the keys already in the btree.
+  template <typename InputIterator>
+  void insert_iterator_unique(InputIterator b, InputIterator e, char);
+
+  // Inserts a value into the btree.
+  template <typename ValueType>
+  iterator insert_multi(const key_type &key, ValueType &&v);
+
+  // Inserts a value into the btree.
+  template <typename ValueType>
+  iterator insert_multi(ValueType &&v) {
+    return insert_multi(params_type::key(v), std::forward<ValueType>(v));
+  }
+
+  // Inserts with hint. Checks to see if the value should be placed
+  // immediately before `position` in the tree. If so, the insertion will take
+  // amortized constant time. If not, the insertion will take amortized
+  // logarithmic time as if a call to insert_multi(v) were made.
+  template <typename ValueType>
+  iterator insert_hint_multi(iterator position, ValueType &&v);
+
+  // Insert a range of values into the btree.
+  template <typename InputIterator>
+  void insert_iterator_multi(InputIterator b, InputIterator e);
+
+  // Erases the element referred to by the specified iterator from the btree.
+  // The iterator must be valid (i.e. not equal to end()). Returns an iterator
+  // pointing to the element after the one that was erased (or end() if none
+  // exists).
+  // Requirement: does not read the value at `*iter`.
+  iterator erase(iterator iter);
+
+  // Erases range. Returns the number of keys erased and an iterator pointing
+  // to the element after the last erased element.
+  std::pair<size_type, iterator> erase_range(iterator begin, iterator end);
+
+  // Finds the iterator corresponding to a key or returns end() if the key is
+  // not present.
+  template <typename K>
+  iterator find(const K &key) {
+    return internal_end(internal_find(key));
+  }
+  template <typename K>
+  const_iterator find(const K &key) const {
+    return internal_end(internal_find(key));
+  }
+
+  // Clear the btree, deleting all of the values it contains.
+  void clear();
+
+  // Swaps the contents of `this` and `other`.
+  void swap(btree &other);
+
+  const key_compare &key_comp() const noexcept {
+    return root_.template get<0>();
+  }
+  template <typename K1, typename K2>
+  bool compare_keys(const K1 &a, const K2 &b) const {
+    return compare_internal::compare_result_as_less_than(key_comp()(a, b));
+  }
+
+  value_compare value_comp() const { return value_compare(key_comp()); }
+
+  // Verifies the structure of the btree.
+  void verify() const;
+
+  // Size routines.
+  size_type size() const { return size_; }
+  size_type max_size() const { return (std::numeric_limits<size_type>::max)(); }
+  bool empty() const { return size_ == 0; }
+
+  // The height of the btree. An empty tree will have height 0.
+  size_type height() const {
+    size_type h = 0;
+    if (!empty()) {
+      // Count the length of the chain from the leftmost node up to the
+      // root. We actually count from the root back around to the level below
+      // the root, but the calculation is the same because of the circularity
+      // of that traversal.
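+      // For example (illustrative): a root with leaf children yields h == 2:
+      // one step from the root to the leftmost leaf (which is stored as the
+      // root's parent), and one step from that leaf back up to the root.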
+      const node_type *n = root();
+      do {
+        ++h;
+        n = n->parent();
+      } while (n != root());
+    }
+    return h;
+  }
+
+  // The number of internal, leaf and total nodes used by the btree.
+  size_type leaf_nodes() const { return internal_stats(root()).leaf_nodes; }
+  size_type internal_nodes() const {
+    return internal_stats(root()).internal_nodes;
+  }
+  size_type nodes() const {
+    node_stats stats = internal_stats(root());
+    return stats.leaf_nodes + stats.internal_nodes;
+  }
+
+  // The total number of bytes used by the btree.
+  size_type bytes_used() const {
+    node_stats stats = internal_stats(root());
+    if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) {
+      return sizeof(*this) + node_type::LeafSize(root()->max_count());
+    } else {
+      return sizeof(*this) + stats.leaf_nodes * node_type::LeafSize() +
+             stats.internal_nodes * node_type::InternalSize();
+    }
+  }
+
+  // The average number of bytes used per value stored in the btree.
+  static double average_bytes_per_value() {
+    // Returns the number of bytes per value on a leaf node that is 75%
+    // full. Experimentally, this matches up nicely with the computed number of
+    // bytes per value in trees that had their values inserted in random order.
+    return node_type::LeafSize() / (kNodeValues * 0.75);
+  }
+
+  // The fullness of the btree. Computed as the number of elements in the btree
+  // divided by the maximum number of elements a tree with the current number
+  // of nodes could hold. A value of 1 indicates perfect space
+  // utilization. Smaller values indicate space wastage.
+  // Returns 0 for empty trees.
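+  // For example (illustrative): 100 values stored in 3 nodes that can each
+  // hold kNodeValues == 61 values gives fullness 100 / 183 ~= 0.55.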
+  double fullness() const {
+    if (empty()) return 0.0;
+    return static_cast<double>(size()) / (nodes() * kNodeValues);
+  }
+  // The overhead of the btree structure in bytes per node. Computed as the
+  // total number of bytes used by the btree minus the number of bytes used for
+  // storing elements divided by the number of elements.
+  // Returns 0 for empty trees.
+  double overhead() const {
+    if (empty()) return 0.0;
+    return (bytes_used() - size() * sizeof(value_type)) /
+           static_cast<double>(size());
+  }
+
+  // The allocator used by the btree.
+  allocator_type get_allocator() const { return allocator(); }
+
+ private:
+  // Internal accessor routines.
+  node_type *root() { return root_.template get<2>(); }
+  const node_type *root() const { return root_.template get<2>(); }
+  node_type *&mutable_root() noexcept { return root_.template get<2>(); }
+  key_compare *mutable_key_comp() noexcept { return &root_.template get<0>(); }
+
+  // The leftmost node is stored as the parent of the root node.
+  node_type *leftmost() { return root()->parent(); }
+  const node_type *leftmost() const { return root()->parent(); }
+
+  // Allocator routines.
+  allocator_type *mutable_allocator() noexcept {
+    return &root_.template get<1>();
+  }
+  const allocator_type &allocator() const noexcept {
+    return root_.template get<1>();
+  }
+
+  // Allocates a correctly aligned node of at least size bytes using the
+  // allocator.
+  node_type *allocate(const size_type size) {
+    return reinterpret_cast<node_type *>(
+        absl::container_internal::Allocate<node_type::Alignment()>(
+            mutable_allocator(), size));
+  }
+
+  // Node creation/deletion routines.
+  node_type *new_internal_node(node_type *parent) {
+    node_type *n = allocate(node_type::InternalSize());
+    n->init_internal(parent);
+    return n;
+  }
+  node_type *new_leaf_node(node_type *parent) {
+    node_type *n = allocate(node_type::LeafSize());
+    n->init_leaf(parent, kNodeValues);
+    return n;
+  }
+  node_type *new_leaf_root_node(const int max_count) {
+    node_type *n = allocate(node_type::LeafSize(max_count));
+    n->init_leaf(/*parent=*/n, max_count);
+    return n;
+  }
+
+  // Deletion helper routines.
+  iterator rebalance_after_delete(iterator iter);
+
+  // Rebalances or splits the node iter points to.
+  void rebalance_or_split(iterator *iter);
+
+  // Merges the values of left, right and the delimiting key on their parent
+  // onto left, removing the delimiting key and deleting right.
+  void merge_nodes(node_type *left, node_type *right);
+
+  // Tries to merge node with its left or right sibling, and failing that,
+  // rebalance with its left or right sibling. Returns true if a merge
+  // occurred, at which point it is no longer valid to access node. Returns
+  // false if no merging took place.
+  bool try_merge_or_rebalance(iterator *iter);
+
+  // Tries to shrink the height of the tree by 1.
+  void try_shrink();
+
+  iterator internal_end(iterator iter) {
+    return iter.node != nullptr ? iter : end();
+  }
+  const_iterator internal_end(const_iterator iter) const {
+    return iter.node != nullptr ? iter : end();
+  }
+
+  // Emplaces a value into the btree immediately before iter. Requires that
+  // key(v) <= iter.key() and (--iter).key() <= key(v).
+  template <typename... Args>
+  iterator internal_emplace(iterator iter, Args &&... args);
+
+  // Returns an iterator pointing to the first value >= the value "iter" is
+  // pointing at. Note that "iter" might be pointing to an invalid location such
+  // as iter.position == iter.node->finish(). This routine simply moves iter up
+  // in the tree to a valid location.
+  // Requires: iter.node is non-null.
+  template <typename IterType>
+  static IterType internal_last(IterType iter);
+
+  // Returns an iterator pointing to the leaf position at which key would
+  // reside in the tree, unless there is an exact match - in which case, the
+  // result may not be on a leaf. When there's a three-way comparator, we can
+  // return whether there was an exact match. This allows the caller to avoid a
+  // subsequent comparison to determine if an exact match was made, which is
+  // important for keys with expensive comparison, such as strings.
+  template <typename K>
+  SearchResult<iterator, is_key_compare_to::value> internal_locate(
+      const K &key) const;
+
+  // Internal routine which implements lower_bound().
+  template <typename K>
+  SearchResult<iterator, is_key_compare_to::value> internal_lower_bound(
+      const K &key) const;
+
+  // Internal routine which implements upper_bound().
+  template <typename K>
+  iterator internal_upper_bound(const K &key) const;
+
+  // Internal routine which implements find().
+  template <typename K>
+  iterator internal_find(const K &key) const;
+
+  // Verifies the tree structure of node.
+  int internal_verify(const node_type *node, const key_type *lo,
+                      const key_type *hi) const;
+
+  node_stats internal_stats(const node_type *node) const {
+    // The root can be a static empty node.
+    if (node == nullptr || (node == root() && empty())) {
+      return node_stats(0, 0);
+    }
+    if (node->leaf()) {
+      return node_stats(1, 0);
+    }
+    node_stats res(0, 1);
+    for (int i = node->start(); i <= node->finish(); ++i) {
+      res += internal_stats(node->child(i));
+    }
+    return res;
+  }
+
+  // We use compressed tuple in order to save space because key_compare and
+  // allocator_type are usually empty.
+  absl::container_internal::CompressedTuple<key_compare, allocator_type,
+                                            node_type *>
+      root_;
+
+  // A pointer to the rightmost node. Note that the leftmost node is stored as
+  // the root's parent.
+  node_type *rightmost_;
+
+  // Number of values.
+  size_type size_;
+};
+
+////
+// btree_node methods
+template <typename P>
+template <typename... Args>
+inline void btree_node<P>::emplace_value(const size_type i,
+                                         allocator_type *alloc,
+                                         Args &&... args) {
+  assert(i >= start());
+  assert(i <= finish());
+  // Shift old values to create space for new value and then construct it in
+  // place.
+  if (i < finish()) {
+    transfer_n_backward(finish() - i, /*dest_i=*/i + 1, /*src_i=*/i, this,
+                        alloc);
+  }
+  value_init(i, alloc, std::forward<Args>(args)...);
+  set_finish(finish() + 1);
+
+  if (!leaf() && finish() > i + 1) {
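+    // Shift the child pointers one position to the right to open a slot for
+    // the new value's right child at i + 1.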
+    for (int j = finish(); j > i + 1; --j) {
+      set_child(j, child(j - 1));
+    }
+    clear_child(i + 1);
+  }
+}
+
+template <typename P>
+inline void btree_node<P>::remove_values(const field_type i,
+                                         const field_type to_erase,
+                                         allocator_type *alloc) {
+  // Transfer values after the removed range into their new places.
+  value_destroy_n(i, to_erase, alloc);
+  const field_type orig_finish = finish();
+  const field_type src_i = i + to_erase;
+  transfer_n(orig_finish - src_i, i, src_i, this, alloc);
+
+  if (!leaf()) {
+    // Delete all children between begin and end.
+    for (int j = 0; j < to_erase; ++j) {
+      clear_and_delete(child(i + j + 1), alloc);
+    }
+    // Rotate children after end into new positions.
+    for (int j = i + to_erase + 1; j <= orig_finish; ++j) {
+      set_child(j - to_erase, child(j));
+      clear_child(j);
+    }
+  }
+  set_finish(orig_finish - to_erase);
+}
+
+template <typename P>
+void btree_node<P>::rebalance_right_to_left(const int to_move,
+                                            btree_node *right,
+                                            allocator_type *alloc) {
+  assert(parent() == right->parent());
+  assert(position() + 1 == right->position());
+  assert(right->count() >= count());
+  assert(to_move >= 1);
+  assert(to_move <= right->count());
+
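+  // Worked sketch (illustrative, assuming to_move == 2): left == [a b],
+  // parent delimiter == d, right == [e f g h]. Steps 1-4 below produce
+  // left == [a b d e], parent delimiter == f, right == [g h].
+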
+  // 1) Move the delimiting value in the parent to the left node.
+  transfer(finish(), position(), parent(), alloc);
+
+  // 2) Move the (to_move - 1) values from the right node to the left node.
+  transfer_n(to_move - 1, finish() + 1, right->start(), right, alloc);
+
+  // 3) Move the new delimiting value to the parent from the right node.
+  parent()->transfer(position(), right->start() + to_move - 1, right, alloc);
+
+  // 4) Shift the values in the right node to their correct positions.
+  right->transfer_n(right->count() - to_move, right->start(),
+                    right->start() + to_move, right, alloc);
+
+  if (!leaf()) {
+    // Move the child pointers from the right to the left node.
+    for (int i = 0; i < to_move; ++i) {
+      init_child(finish() + i + 1, right->child(i));
+    }
+    for (int i = right->start(); i <= right->finish() - to_move; ++i) {
+      assert(i + to_move <= right->max_count());
+      right->init_child(i, right->child(i + to_move));
+      right->clear_child(i + to_move);
+    }
+  }
+
+  // Fixup `finish` on the left and right nodes.
+  set_finish(finish() + to_move);
+  right->set_finish(right->finish() - to_move);
+}
+
+template <typename P>
+void btree_node<P>::rebalance_left_to_right(const int to_move,
+                                            btree_node *right,
+                                            allocator_type *alloc) {
+  assert(parent() == right->parent());
+  assert(position() + 1 == right->position());
+  assert(count() >= right->count());
+  assert(to_move >= 1);
+  assert(to_move <= count());
+
+  // Values in the right node are shifted to the right to make room for the
+  // new to_move values. Then, the delimiting value in the parent and the
+  // other (to_move - 1) values in the left node are moved into the right node.
+  // Lastly, a new delimiting value is moved from the left node into the
+  // parent, and the remaining empty left node entries are destroyed.
+
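+  // Worked sketch (illustrative, assuming to_move == 2): left == [a b c d],
+  // parent delimiter == e, right == [f g]. Steps 1-4 below produce
+  // left == [a b], parent delimiter == c, right == [d e f g].
+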
+  // 1) Shift existing values in the right node to their correct positions.
+  right->transfer_n_backward(right->count(), right->start() + to_move,
+                             right->start(), right, alloc);
+
+  // 2) Move the delimiting value in the parent to the right node.
+  right->transfer(right->start() + to_move - 1, position(), parent(), alloc);
+
+  // 3) Move the (to_move - 1) values from the left node to the right node.
+  right->transfer_n(to_move - 1, right->start(), finish() - (to_move - 1), this,
+                    alloc);
+
+  // 4) Move the new delimiting value to the parent from the left node.
+  parent()->transfer(position(), finish() - to_move, this, alloc);
+
+  if (!leaf()) {
+    // Move the child pointers from the left to the right node.
+    for (int i = right->finish(); i >= right->start(); --i) {
+      right->init_child(i + to_move, right->child(i));
+      right->clear_child(i);
+    }
+    for (int i = 1; i <= to_move; ++i) {
+      right->init_child(i - 1, child(finish() - to_move + i));
+      clear_child(finish() - to_move + i);
+    }
+  }
+
+  // Fixup the counts on the left and right nodes.
+  set_finish(finish() - to_move);
+  right->set_finish(right->finish() + to_move);
+}
+
+template <typename P>
+void btree_node<P>::split(const int insert_position, btree_node *dest,
+                          allocator_type *alloc) {
+  assert(dest->count() == 0);
+  assert(max_count() == kNodeValues);
+
+  // We bias the split based on the position being inserted. If we're
+  // inserting at the beginning of the left node then bias the split to put
+  // more values on the right node. If we're inserting at the end of the
+  // right node then bias the split to put more values on the left node.
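+  // For example (illustrative): with kNodeValues == 4, a full node [a b c d],
+  // and a middle insert_position, dest receives count() / 2 == 2 values
+  // ([c d]), `b` is emplaced in the parent as the split key, and this node
+  // keeps [a].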
+  if (insert_position == start()) {
+    dest->set_finish(dest->start() + finish() - 1);
+  } else if (insert_position == kNodeValues) {
+    dest->set_finish(dest->start());
+  } else {
+    dest->set_finish(dest->start() + count() / 2);
+  }
+  set_finish(finish() - dest->count());
+  assert(count() >= 1);
+
+  // Move values from the left sibling to the right sibling.
+  dest->transfer_n(dest->count(), dest->start(), finish(), this, alloc);
+
+  // The split key is the largest value in the left sibling.
+  --mutable_finish();
+  parent()->emplace_value(position(), alloc, finish_slot());
+  value_destroy(finish(), alloc);
+  parent()->init_child(position() + 1, dest);
+
+  if (!leaf()) {
+    for (int i = dest->start(), j = finish() + 1; i <= dest->finish();
+         ++i, ++j) {
+      assert(child(j) != nullptr);
+      dest->init_child(i, child(j));
+      clear_child(j);
+    }
+  }
+}
+
+template <typename P>
+void btree_node<P>::merge(btree_node *src, allocator_type *alloc) {
+  assert(parent() == src->parent());
+  assert(position() + 1 == src->position());
+
+  // Move the delimiting value to the left node.
+  value_init(finish(), alloc, parent()->slot(position()));
+
+  // Move the values from the right to the left node.
+  transfer_n(src->count(), finish() + 1, src->start(), src, alloc);
+
+  if (!leaf()) {
+    // Move the child pointers from the right to the left node.
+    for (int i = src->start(), j = finish() + 1; i <= src->finish(); ++i, ++j) {
+      init_child(j, src->child(i));
+      src->clear_child(i);
+    }
+  }
+
+  // Fixup `finish` on the src and dest nodes.
+  set_finish(start() + 1 + count() + src->count());
+  src->set_finish(src->start());
+
+  // Remove the value on the parent node and delete the src node.
+  parent()->remove_values(position(), /*to_erase=*/1, alloc);
+}
+
+template <typename P>
+void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
+  if (node->leaf()) {
+    node->value_destroy_n(node->start(), node->count(), alloc);
+    deallocate(LeafSize(node->max_count()), node, alloc);
+    return;
+  }
+  if (node->count() == 0) {
+    deallocate(InternalSize(), node, alloc);
+    return;
+  }
+
+  // The parent of the root of the subtree we are deleting.
+  btree_node *delete_root_parent = node->parent();
+
+  // Navigate to the leftmost leaf under node, and then delete upwards.
+  while (!node->leaf()) node = node->start_child();
+  // Use `int` because `pos` needs to be able to hold `kNodeValues+1`, which
+  // isn't guaranteed to be a valid `field_type`.
+  int pos = node->position();
+  btree_node *parent = node->parent();
+  for (;;) {
+    // In each iteration of the next loop, we delete one leaf node and go right.
+    assert(pos <= parent->finish());
+    do {
+      node = parent->child(pos);
+      if (!node->leaf()) {
+        // Navigate to the leftmost leaf under node.
+        while (!node->leaf()) node = node->start_child();
+        pos = node->position();
+        parent = node->parent();
+      }
+      node->value_destroy_n(node->start(), node->count(), alloc);
+      deallocate(LeafSize(node->max_count()), node, alloc);
+      ++pos;
+    } while (pos <= parent->finish());
+
+    // Once we've deleted all children of parent, delete parent and go up/right.
+    assert(pos > parent->finish());
+    do {
+      node = parent;
+      pos = node->position();
+      parent = node->parent();
+      node->value_destroy_n(node->start(), node->count(), alloc);
+      deallocate(InternalSize(), node, alloc);
+      if (parent == delete_root_parent) return;
+      ++pos;
+    } while (pos > parent->finish());
+  }
+}
+
+////
+// btree_iterator methods
+template <typename N, typename R, typename P>
+void btree_iterator<N, R, P>::increment_slow() {
+  if (node->leaf()) {
+    assert(position >= node->finish());
+    btree_iterator save(*this);
+    while (position == node->finish() && !node->is_root()) {
+      assert(node->parent()->child(node->position()) == node);
+      position = node->position();
+      node = node->parent();
+    }
+    // TODO(ezb): assert we aren't incrementing end() instead of handling.
+    if (position == node->finish()) {
+      *this = save;
+    }
+  } else {
+    assert(position < node->finish());
+    node = node->child(position + 1);
+    while (!node->leaf()) {
+      node = node->start_child();
+    }
+    position = node->start();
+  }
+}
+
+template <typename N, typename R, typename P>
+void btree_iterator<N, R, P>::decrement_slow() {
+  if (node->leaf()) {
+    assert(position <= -1);
+    btree_iterator save(*this);
+    while (position < node->start() && !node->is_root()) {
+      assert(node->parent()->child(node->position()) == node);
+      position = node->position() - 1;
+      node = node->parent();
+    }
+    // TODO(ezb): assert we aren't decrementing begin() instead of handling.
+    if (position < node->start()) {
+      *this = save;
+    }
+  } else {
+    assert(position >= node->start());
+    node = node->child(position);
+    while (!node->leaf()) {
+      node = node->child(node->finish());
+    }
+    position = node->finish() - 1;
+  }
+}
+
+////
+// btree methods
+template <typename P>
+template <typename Btree>
+void btree<P>::copy_or_move_values_in_order(Btree &other) {
+  static_assert(std::is_same<btree, Btree>::value ||
+                    std::is_same<const btree, Btree>::value,
+                "Btree type must be same or const.");
+  assert(empty());
+
+  // We can avoid key comparisons because we know the order of the
+  // values is the same order we'll store them in.
+  auto iter = other.begin();
+  if (iter == other.end()) return;
+  insert_multi(maybe_move_from_iterator(iter));
+  ++iter;
+  for (; iter != other.end(); ++iter) {
+    // If the btree is not empty, we can just insert the new value at the end
+    // of the tree.
+    internal_emplace(end(), maybe_move_from_iterator(iter));
+  }
+}
+
+template <typename P>
+constexpr bool btree<P>::static_assert_validation() {
+  static_assert(std::is_nothrow_copy_constructible<key_compare>::value,
+                "Key comparison must be nothrow copy constructible");
+  static_assert(std::is_nothrow_copy_constructible<allocator_type>::value,
+                "Allocator must be nothrow copy constructible");
+  static_assert(type_traits_internal::is_trivially_copyable<iterator>::value,
+                "iterator not trivially copyable.");
+
+  // Note: We assert that kNodeValues, which is computed from
+  // Params::kTargetNodeSize, must fit in node_type::field_type.
+  static_assert(
+      kNodeValues < (1 << (8 * sizeof(typename node_type::field_type))),
+      "target node size too large");
+
+  // Verify that key_compare returns an absl::{weak,strong}_ordering or bool.
+  using compare_result_type =
+      absl::result_of_t<key_compare(key_type, key_type)>;
+  static_assert(
+      std::is_same<compare_result_type, bool>::value ||
+          std::is_convertible<compare_result_type, absl::weak_ordering>::value,
+      "key comparison function must return absl::{weak,strong}_ordering or "
+      "bool.");
+
+  // Test the assumption made in setting kNodeValueSpace.
+  static_assert(node_type::MinimumOverhead() >= sizeof(void *) + 4,
+                "node space assumption incorrect");
+
+  return true;
+}
+
+template <typename P>
+template <typename K>
+auto btree<P>::equal_range(const K &key) -> std::pair<iterator, iterator> {
+  const SearchResult<iterator, is_key_compare_to::value> res =
+      internal_lower_bound(key);
+  const iterator lower = internal_end(res.value);
+  if (res.HasMatch() ? !res.IsEq()
+                     : lower == end() || compare_keys(key, lower.key())) {
+    return {lower, lower};
+  }
+
+  const iterator next = std::next(lower);
+  // When the comparator is heterogeneous, we can't assume that comparison with
+  // non-`key_type` will be equivalent to `key_type` comparisons so there
+  // could be multiple equivalent keys even in a unique-container. But for
+  // heterogeneous comparisons from the default string adapted comparators, we
+  // don't need to worry about this.
+  if (!is_multi_container::value &&
+      (std::is_same<K, key_type>::value || is_key_compare_adapted::value)) {
+    // The next iterator after lower must point to a key greater than `key`.
+    // Note: if this assert fails, then it may indicate that the comparator does
+    // not meet the equivalence requirements for Compare
+    // (see https://en.cppreference.com/w/cpp/named_req/Compare).
+    assert(next == end() || compare_keys(key, next.key()));
+    return {lower, next};
+  }
+  // Try once more to avoid the call to upper_bound() if there's only one
+  // equivalent key. This should prevent all calls to upper_bound() in cases of
+  // unique-containers with heterogeneous comparators in which all comparison
+  // operators have the same equivalence classes.
+  if (next == end() || compare_keys(key, next.key())) return {lower, next};
+
+  // In this case, we need to call upper_bound() to avoid worst case O(N)
+  // behavior if we were to iterate over equal keys.
+  return {lower, upper_bound(key)};
+}
+
+template <typename P>
+template <typename K, typename... Args>
+auto btree<P>::insert_unique(const K &key, Args &&... args)
+    -> std::pair<iterator, bool> {
+  if (empty()) {
+    mutable_root() = rightmost_ = new_leaf_root_node(1);
+  }
+
+  SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
+  iterator iter = res.value;
+
+  if (res.HasMatch()) {
+    if (res.IsEq()) {
+      // The key already exists in the tree, do nothing.
+      return {iter, false};
+    }
+  } else {
+    iterator last = internal_last(iter);
+    if (last.node && !compare_keys(key, last.key())) {
+      // The key already exists in the tree, do nothing.
+      return {last, false};
+    }
+  }
+  return {internal_emplace(iter, std::forward<Args>(args)...), true};
+}
+
+template <typename P>
+template <typename K, typename... Args>
+inline auto btree<P>::insert_hint_unique(iterator position, const K &key,
+                                         Args &&... args)
+    -> std::pair<iterator, bool> {
+  if (!empty()) {
+    if (position == end() || compare_keys(key, position.key())) {
+      if (position == begin() || compare_keys(std::prev(position).key(), key)) {
+        // prev.key() < key < position.key()
+        return {internal_emplace(position, std::forward<Args>(args)...), true};
+      }
+    } else if (compare_keys(position.key(), key)) {
+      ++position;
+      if (position == end() || compare_keys(key, position.key())) {
+        // {original `position`}.key() < key < {current `position`}.key()
+        return {internal_emplace(position, std::forward<Args>(args)...), true};
+      }
+    } else {
+      // position.key() == key
+      return {position, false};
+    }
+  }
+  return insert_unique(key, std::forward<Args>(args)...);
+}
+
+template <typename P>
+template <typename InputIterator, typename>
+void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, int) {
+  for (; b != e; ++b) {
+    insert_hint_unique(end(), params_type::key(*b), *b);
+  }
+}
+
+template <typename P>
+template <typename InputIterator>
+void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, char) {
+  for (; b != e; ++b) {
+    init_type value(*b);
+    insert_hint_unique(end(), params_type::key(value), std::move(value));
+  }
+}
+
+template <typename P>
+template <typename ValueType>
+auto btree<P>::insert_multi(const key_type &key, ValueType &&v) -> iterator {
+  if (empty()) {
+    mutable_root() = rightmost_ = new_leaf_root_node(1);
+  }
+
+  iterator iter = internal_upper_bound(key);
+  if (iter.node == nullptr) {
+    iter = end();
+  }
+  return internal_emplace(iter, std::forward<ValueType>(v));
+}
+
+template <typename P>
+template <typename ValueType>
+auto btree<P>::insert_hint_multi(iterator position, ValueType &&v) -> iterator {
+  if (!empty()) {
+    const key_type &key = params_type::key(v);
+    if (position == end() || !compare_keys(position.key(), key)) {
+      if (position == begin() ||
+          !compare_keys(key, std::prev(position).key())) {
+        // prev.key() <= key <= position.key()
+        return internal_emplace(position, std::forward<ValueType>(v));
+      }
+    } else {
+      ++position;
+      if (position == end() || !compare_keys(position.key(), key)) {
+        // {original `position`}.key() < key < {current `position`}.key()
+        return internal_emplace(position, std::forward<ValueType>(v));
+      }
+    }
+  }
+  return insert_multi(std::forward<ValueType>(v));
+}
+
+template <typename P>
+template <typename InputIterator>
+void btree<P>::insert_iterator_multi(InputIterator b, InputIterator e) {
+  for (; b != e; ++b) {
+    insert_hint_multi(end(), *b);
+  }
+}
+
+template <typename P>
+auto btree<P>::operator=(const btree &other) -> btree & {
+  if (this != &other) {
+    clear();
+
+    *mutable_key_comp() = other.key_comp();
+    if (absl::allocator_traits<
+            allocator_type>::propagate_on_container_copy_assignment::value) {
+      *mutable_allocator() = other.allocator();
+    }
+
+    copy_or_move_values_in_order(other);
+  }
+  return *this;
+}
+
+template <typename P>
+auto btree<P>::operator=(btree &&other) noexcept -> btree & {
+  if (this != &other) {
+    clear();
+
+    using std::swap;
+    if (absl::allocator_traits<
+            allocator_type>::propagate_on_container_move_assignment::value) {
+      // Note: `root_` also contains the allocator and the key comparator.
+      swap(root_, other.root_);
+      swap(rightmost_, other.rightmost_);
+      swap(size_, other.size_);
+    } else {
+      if (allocator() == other.allocator()) {
+        swap(mutable_root(), other.mutable_root());
+        swap(*mutable_key_comp(), *other.mutable_key_comp());
+        swap(rightmost_, other.rightmost_);
+        swap(size_, other.size_);
+      } else {
+        // We aren't allowed to propagate the allocator, and the allocators
+        // differ, so we can't take over its memory. We must move each element
+        // individually. We need both `other` and `this` to have `other`'s key
+        // comparator while moving the values, so we can't swap the key
+        // comparators.
+        *mutable_key_comp() = other.key_comp();
+        copy_or_move_values_in_order(other);
+      }
+    }
+  }
+  return *this;
+}
+
+template <typename P>
+auto btree<P>::erase(iterator iter) -> iterator {
+  bool internal_delete = false;
+  if (!iter.node->leaf()) {
+    // Deletion of a value on an internal node. First, move the largest value
+    // from our left child here, then delete that position (in remove_values()
+    // below). We can get to the largest value from our left child by
+    // decrementing iter.
+    iterator internal_iter(iter);
+    --iter;
+    assert(iter.node->leaf());
+    params_type::move(mutable_allocator(), iter.node->slot(iter.position),
+                      internal_iter.node->slot(internal_iter.position));
+    internal_delete = true;
+  }
+
+  // Delete the key from the leaf.
+  iter.node->remove_values(iter.position, /*to_erase=*/1, mutable_allocator());
+  --size_;
+
+  // We want to return the next value after the one we just erased. If we
+  // erased from an internal node (internal_delete == true), then the next
+  // value is ++(++iter). If we erased from a leaf node (internal_delete ==
+  // false) then the next value is ++iter. Note that ++iter may point to an
+  // internal node and the value in the internal node may move to a leaf node
+  // (iter.node) when rebalancing is performed at the leaf level.
+
+  iterator res = rebalance_after_delete(iter);
+
+  // If we erased from an internal node, advance the iterator.
+  if (internal_delete) {
+    ++res;
+  }
+  return res;
+}
+
+template <typename P>
+auto btree<P>::rebalance_after_delete(iterator iter) -> iterator {
+  // Merge/rebalance as we walk back up the tree.
+  iterator res(iter);
+  bool first_iteration = true;
+  for (;;) {
+    if (iter.node == root()) {
+      try_shrink();
+      if (empty()) {
+        return end();
+      }
+      break;
+    }
+    if (iter.node->count() >= kMinNodeValues) {
+      break;
+    }
+    bool merged = try_merge_or_rebalance(&iter);
+    // On the first iteration, we should update `res` with `iter` because `res`
+    // may have been invalidated.
+    if (first_iteration) {
+      res = iter;
+      first_iteration = false;
+    }
+    if (!merged) {
+      break;
+    }
+    iter.position = iter.node->position();
+    iter.node = iter.node->parent();
+  }
+
+  // Adjust our return value. If we're pointing at the end of a node, advance
+  // the iterator.
+  if (res.position == res.node->finish()) {
+    res.position = res.node->finish() - 1;
+    ++res;
+  }
+
+  return res;
+}
+
+template <typename P>
+auto btree<P>::erase_range(iterator begin, iterator end)
+    -> std::pair<size_type, iterator> {
+  difference_type count = std::distance(begin, end);
+  assert(count >= 0);
+
+  if (count == 0) {
+    return {0, begin};
+  }
+
+  if (count == size_) {
+    clear();
+    return {count, this->end()};
+  }
+
+  if (begin.node == end.node) {
+    assert(end.position > begin.position);
+    begin.node->remove_values(begin.position, end.position - begin.position,
+                              mutable_allocator());
+    size_ -= count;
+    return {count, rebalance_after_delete(begin)};
+  }
+
+  const size_type target_size = size_ - count;
+  while (size_ > target_size) {
+    if (begin.node->leaf()) {
+      const size_type remaining_to_erase = size_ - target_size;
+      const size_type remaining_in_node = begin.node->finish() - begin.position;
+      const size_type to_erase =
+          (std::min)(remaining_to_erase, remaining_in_node);
+      begin.node->remove_values(begin.position, to_erase, mutable_allocator());
+      size_ -= to_erase;
+      begin = rebalance_after_delete(begin);
+    } else {
+      begin = erase(begin);
+    }
+  }
+  return {count, begin};
+}
+
+template <typename P>
+void btree<P>::clear() {
+  if (!empty()) {
+    node_type::clear_and_delete(root(), mutable_allocator());
+  }
+  mutable_root() = EmptyNode();
+  rightmost_ = EmptyNode();
+  size_ = 0;
+}
+
+template <typename P>
+void btree<P>::swap(btree &other) {
+  using std::swap;
+  if (absl::allocator_traits<
+          allocator_type>::propagate_on_container_swap::value) {
+    // Note: `root_` also contains the allocator and the key comparator.
+    swap(root_, other.root_);
+  } else {
+    // It's undefined behavior if the allocators are unequal here.
+    assert(allocator() == other.allocator());
+    swap(mutable_root(), other.mutable_root());
+    swap(*mutable_key_comp(), *other.mutable_key_comp());
+  }
+  swap(rightmost_, other.rightmost_);
+  swap(size_, other.size_);
+}
+
+template <typename P>
+void btree<P>::verify() const {
+  assert(root() != nullptr);
+  assert(leftmost() != nullptr);
+  assert(rightmost_ != nullptr);
+  assert(empty() || size() == internal_verify(root(), nullptr, nullptr));
+  assert(leftmost() == (++const_iterator(root(), -1)).node);
+  assert(rightmost_ == (--const_iterator(root(), root()->finish())).node);
+  assert(leftmost()->leaf());
+  assert(rightmost_->leaf());
+}
+
+template <typename P>
+void btree<P>::rebalance_or_split(iterator *iter) {
+  node_type *&node = iter->node;
+  int &insert_position = iter->position;
+  assert(node->count() == node->max_count());
+  assert(kNodeValues == node->max_count());
+
+  // First try to make room on the node by rebalancing.
+  node_type *parent = node->parent();
+  if (node != root()) {
+    if (node->position() > parent->start()) {
+      // Try rebalancing with our left sibling.
+      node_type *left = parent->child(node->position() - 1);
+      assert(left->max_count() == kNodeValues);
+      if (left->count() < kNodeValues) {
+        // We bias rebalancing based on the position being inserted. If we're
+        // inserting at the end of the right node then we bias rebalancing to
+        // fill up the left node.
+        int to_move = (kNodeValues - left->count()) /
+                      (1 + (insert_position < static_cast<int>(kNodeValues)));
+        to_move = (std::max)(1, to_move);
+
+        if (insert_position - to_move >= node->start() ||
+            left->count() + to_move < static_cast<int>(kNodeValues)) {
+          left->rebalance_right_to_left(to_move, node, mutable_allocator());
+
+          assert(node->max_count() - node->count() == to_move);
+          insert_position = insert_position - to_move;
+          if (insert_position < node->start()) {
+            insert_position = insert_position + left->count() + 1;
+            node = left;
+          }
+
+          assert(node->count() < node->max_count());
+          return;
+        }
+      }
+    }
+
+    if (node->position() < parent->finish()) {
+      // Try rebalancing with our right sibling.
+      node_type *right = parent->child(node->position() + 1);
+      assert(right->max_count() == kNodeValues);
+      if (right->count() < kNodeValues) {
+        // We bias rebalancing based on the position being inserted. If we're
+        // inserting at the beginning of the left node then we bias rebalancing
+        // to fill up the right node.
+        int to_move = (static_cast<int>(kNodeValues) - right->count()) /
+                      (1 + (insert_position > node->start()));
+        to_move = (std::max)(1, to_move);
+
+        if (insert_position <= node->finish() - to_move ||
+            right->count() + to_move < static_cast<int>(kNodeValues)) {
+          node->rebalance_left_to_right(to_move, right, mutable_allocator());
+
+          if (insert_position > node->finish()) {
+            insert_position = insert_position - node->count() - 1;
+            node = right;
+          }
+
+          assert(node->count() < node->max_count());
+          return;
+        }
+      }
+    }
+
+    // Rebalancing failed, make sure there is room on the parent node for a new
+    // value.
+    assert(parent->max_count() == kNodeValues);
+    if (parent->count() == kNodeValues) {
+      iterator parent_iter(node->parent(), node->position());
+      rebalance_or_split(&parent_iter);
+    }
+  } else {
+    // Rebalancing not possible because this is the root node.
+    // Create a new root node and set the current root node as the child of the
+    // new root.
+    parent = new_internal_node(parent);
+    parent->init_child(parent->start(), root());
+    mutable_root() = parent;
+    // If the former root was a leaf node, then it's now the rightmost node.
+    assert(!parent->start_child()->leaf() ||
+           parent->start_child() == rightmost_);
+  }
+
+  // Split the node.
+  node_type *split_node;
+  if (node->leaf()) {
+    split_node = new_leaf_node(parent);
+    node->split(insert_position, split_node, mutable_allocator());
+    if (rightmost_ == node) rightmost_ = split_node;
+  } else {
+    split_node = new_internal_node(parent);
+    node->split(insert_position, split_node, mutable_allocator());
+  }
+
+  if (insert_position > node->finish()) {
+    insert_position = insert_position - node->count() - 1;
+    node = split_node;
+  }
+}
+
+template <typename P>
+void btree<P>::merge_nodes(node_type *left, node_type *right) {
+  left->merge(right, mutable_allocator());
+  if (rightmost_ == right) rightmost_ = left;
+}
+
+template <typename P>
+bool btree<P>::try_merge_or_rebalance(iterator *iter) {
+  node_type *parent = iter->node->parent();
+  if (iter->node->position() > parent->start()) {
+    // Try merging with our left sibling.
+    node_type *left = parent->child(iter->node->position() - 1);
+    assert(left->max_count() == kNodeValues);
+    if (1U + left->count() + iter->node->count() <= kNodeValues) {
+      iter->position += 1 + left->count();
+      merge_nodes(left, iter->node);
+      iter->node = left;
+      return true;
+    }
+  }
+  if (iter->node->position() < parent->finish()) {
+    // Try merging with our right sibling.
+    node_type *right = parent->child(iter->node->position() + 1);
+    assert(right->max_count() == kNodeValues);
+    if (1U + iter->node->count() + right->count() <= kNodeValues) {
+      merge_nodes(iter->node, right);
+      return true;
+    }
+    // Try rebalancing with our right sibling. We don't perform rebalancing if
+    // we deleted the first element from iter->node and the node is not
+    // empty. This is a small optimization for the common pattern of deleting
+    // from the front of the tree.
+    if (right->count() > kMinNodeValues &&
+        (iter->node->count() == 0 || iter->position > iter->node->start())) {
+      int to_move = (right->count() - iter->node->count()) / 2;
+      to_move = (std::min)(to_move, right->count() - 1);
+      iter->node->rebalance_right_to_left(to_move, right, mutable_allocator());
+      return false;
+    }
+  }
+  if (iter->node->position() > parent->start()) {
+    // Try rebalancing with our left sibling. We don't perform rebalancing if
+    // we deleted the last element from iter->node and the node is not
+    // empty. This is a small optimization for the common pattern of deleting
+    // from the back of the tree.
+    node_type *left = parent->child(iter->node->position() - 1);
+    if (left->count() > kMinNodeValues &&
+        (iter->node->count() == 0 || iter->position < iter->node->finish())) {
+      int to_move = (left->count() - iter->node->count()) / 2;
+      to_move = (std::min)(to_move, left->count() - 1);
+      left->rebalance_left_to_right(to_move, iter->node, mutable_allocator());
+      iter->position += to_move;
+      return false;
+    }
+  }
+  return false;
+}
+
+template <typename P>
+void btree<P>::try_shrink() {
+  node_type *orig_root = root();
+  if (orig_root->count() > 0) {
+    return;
+  }
+  // We deleted the last item on the root node, so shrink the height of the
+  // tree.
+  if (orig_root->leaf()) {
+    assert(size() == 0);
+    mutable_root() = rightmost_ = EmptyNode();
+  } else {
+    node_type *child = orig_root->start_child();
+    child->make_root();
+    mutable_root() = child;
+  }
+  node_type::clear_and_delete(orig_root, mutable_allocator());
+}
+
+template <typename P>
+template <typename IterType>
+inline IterType btree<P>::internal_last(IterType iter) {
+  assert(iter.node != nullptr);
+  while (iter.position == iter.node->finish()) {
+    iter.position = iter.node->position();
+    iter.node = iter.node->parent();
+    if (iter.node->leaf()) {
+      iter.node = nullptr;
+      break;
+    }
+  }
+  return iter;
+}
+
+template <typename P>
+template <typename... Args>
+inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
+    -> iterator {
+  if (!iter.node->leaf()) {
+    // We can't insert on an internal node. Instead, we'll insert after the
+    // previous value which is guaranteed to be on a leaf node.
+    --iter;
+    ++iter.position;
+  }
+  const field_type max_count = iter.node->max_count();
+  allocator_type *alloc = mutable_allocator();
+  if (iter.node->count() == max_count) {
+    // Make room in the leaf for the new item.
+    if (max_count < kNodeValues) {
+      // Insertion into the root where the root is smaller than the full node
+      // size. Simply grow the size of the root node.
+      assert(iter.node == root());
+      iter.node =
+          new_leaf_root_node((std::min<int>)(kNodeValues, 2 * max_count));
+      // Transfer the values from the old root to the new root.
+      node_type *old_root = root();
+      node_type *new_root = iter.node;
+      new_root->transfer_n(old_root->count(), new_root->start(),
+                           old_root->start(), old_root, alloc);
+      new_root->set_finish(old_root->finish());
+      old_root->set_finish(old_root->start());
+      node_type::clear_and_delete(old_root, alloc);
+      mutable_root() = rightmost_ = new_root;
+    } else {
+      rebalance_or_split(&iter);
+    }
+  }
+  iter.node->emplace_value(iter.position, alloc, std::forward<Args>(args)...);
+  ++size_;
+  return iter;
+}
+
+template <typename P>
+template <typename K>
+inline auto btree<P>::internal_locate(const K &key) const
+    -> SearchResult<iterator, is_key_compare_to::value> {
+  iterator iter(const_cast<node_type *>(root()));
+  for (;;) {
+    SearchResult<int, is_key_compare_to::value> res =
+        iter.node->lower_bound(key, key_comp());
+    iter.position = res.value;
+    if (res.IsEq()) {
+      return {iter, MatchKind::kEq};
+    }
+    // Note: in the non-key-compare-to case, we don't need to walk all the way
+    // down the tree if the keys are equal, but determining equality would
+    // require doing an extra comparison on each node on the way down, and we
+    // will need to go all the way to the leaf node in the expected case.
+    if (iter.node->leaf()) {
+      break;
+    }
+    iter.node = iter.node->child(iter.position);
+  }
+  // Note: in the non-key-compare-to case, the key may actually be equivalent
+  // here (and the MatchKind::kNe is ignored).
+  return {iter, MatchKind::kNe};
+}
+
+template <typename P>
+template <typename K>
+auto btree<P>::internal_lower_bound(const K &key) const
+    -> SearchResult<iterator, is_key_compare_to::value> {
+  iterator iter(const_cast<node_type *>(root()));
+  SearchResult<int, is_key_compare_to::value> res;
+  bool seen_eq = false;
+  for (;;) {
+    res = iter.node->lower_bound(key, key_comp());
+    iter.position = res.value;
+    // TODO(ezb): we should be able to terminate early on IsEq() if there can't
+    // be multiple equivalent keys in container for this lookup type.
+    if (iter.node->leaf()) {
+      break;
+    }
+    seen_eq = seen_eq || res.IsEq();
+    iter.node = iter.node->child(iter.position);
+  }
+  if (res.IsEq()) return {iter, MatchKind::kEq};
+  return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe};
+}
+
+template <typename P>
+template <typename K>
+auto btree<P>::internal_upper_bound(const K &key) const -> iterator {
+  iterator iter(const_cast<node_type *>(root()));
+  for (;;) {
+    iter.position = iter.node->upper_bound(key, key_comp());
+    if (iter.node->leaf()) {
+      break;
+    }
+    iter.node = iter.node->child(iter.position);
+  }
+  return internal_last(iter);
+}
+
+template <typename P>
+template <typename K>
+auto btree<P>::internal_find(const K &key) const -> iterator {
+  SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
+  if (res.HasMatch()) {
+    if (res.IsEq()) {
+      return res.value;
+    }
+  } else {
+    const iterator iter = internal_last(res.value);
+    if (iter.node != nullptr && !compare_keys(key, iter.key())) {
+      return iter;
+    }
+  }
+  return {nullptr, 0};
+}
+
+template <typename P>
+int btree<P>::internal_verify(const node_type *node, const key_type *lo,
+                              const key_type *hi) const {
+  assert(node->count() > 0);
+  assert(node->count() <= node->max_count());
+  if (lo) {
+    assert(!compare_keys(node->key(node->start()), *lo));
+  }
+  if (hi) {
+    assert(!compare_keys(*hi, node->key(node->finish() - 1)));
+  }
+  for (int i = node->start() + 1; i < node->finish(); ++i) {
+    assert(!compare_keys(node->key(i), node->key(i - 1)));
+  }
+  int count = node->count();
+  if (!node->leaf()) {
+    for (int i = node->start(); i <= node->finish(); ++i) {
+      assert(node->child(i) != nullptr);
+      assert(node->child(i)->parent() == node);
+      assert(node->child(i)->position() == i);
+      count += internal_verify(node->child(i),
+                               i == node->start() ? lo : &node->key(i - 1),
+                               i == node->finish() ? hi : &node->key(i));
+    }
+  }
+  return count;
+}
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_BTREE_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/btree_container.h b/third_party/abseil_cpp/absl/container/internal/btree_container.h
new file mode 100644
index 000000000000..887eda4122f8
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/btree_container.h
@@ -0,0 +1,683 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_
+#define ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_
+
+#include <algorithm>
+#include <initializer_list>
+#include <iterator>
+#include <utility>
+
+#include "absl/base/internal/throw_delegate.h"
+#include "absl/container/internal/btree.h"  // IWYU pragma: export
+#include "absl/container/internal/common.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// A common base class for btree_set, btree_map, btree_multiset, and
+// btree_multimap.
+template <typename Tree>
+class btree_container {
+  using params_type = typename Tree::params_type;
+
+ protected:
+  // Alias used for heterogeneous lookup functions.
+  // `key_arg<K>` evaluates to `K` when the functors are transparent and to
+  // `key_type` otherwise. It permits template argument deduction on `K` for the
+  // transparent case.
+  template <class K>
+  using key_arg =
+      typename KeyArg<IsTransparent<typename Tree::key_compare>::value>::
+          template type<K, typename Tree::key_type>;
+
+ public:
+  using key_type = typename Tree::key_type;
+  using value_type = typename Tree::value_type;
+  using size_type = typename Tree::size_type;
+  using difference_type = typename Tree::difference_type;
+  using key_compare = typename Tree::key_compare;
+  using value_compare = typename Tree::value_compare;
+  using allocator_type = typename Tree::allocator_type;
+  using reference = typename Tree::reference;
+  using const_reference = typename Tree::const_reference;
+  using pointer = typename Tree::pointer;
+  using const_pointer = typename Tree::const_pointer;
+  using iterator = typename Tree::iterator;
+  using const_iterator = typename Tree::const_iterator;
+  using reverse_iterator = typename Tree::reverse_iterator;
+  using const_reverse_iterator = typename Tree::const_reverse_iterator;
+  using node_type = typename Tree::node_handle_type;
+
+  // Constructors/assignments.
+  btree_container() : tree_(key_compare(), allocator_type()) {}
+  explicit btree_container(const key_compare &comp,
+                           const allocator_type &alloc = allocator_type())
+      : tree_(comp, alloc) {}
+  explicit btree_container(const allocator_type &alloc)
+      : tree_(key_compare(), alloc) {}
+
+  btree_container(const btree_container &other)
+      : btree_container(other, absl::allocator_traits<allocator_type>::
+                                   select_on_container_copy_construction(
+                                       other.get_allocator())) {}
+  btree_container(const btree_container &other, const allocator_type &alloc)
+      : tree_(other.tree_, alloc) {}
+
+  btree_container(btree_container &&other) noexcept(
+      std::is_nothrow_move_constructible<Tree>::value) = default;
+  btree_container(btree_container &&other, const allocator_type &alloc)
+      : tree_(std::move(other.tree_), alloc) {}
+
+  btree_container &operator=(const btree_container &other) = default;
+  btree_container &operator=(btree_container &&other) noexcept(
+      std::is_nothrow_move_assignable<Tree>::value) = default;
+
+  // Iterator routines.
+  iterator begin() { return tree_.begin(); }
+  const_iterator begin() const { return tree_.begin(); }
+  const_iterator cbegin() const { return tree_.begin(); }
+  iterator end() { return tree_.end(); }
+  const_iterator end() const { return tree_.end(); }
+  const_iterator cend() const { return tree_.end(); }
+  reverse_iterator rbegin() { return tree_.rbegin(); }
+  const_reverse_iterator rbegin() const { return tree_.rbegin(); }
+  const_reverse_iterator crbegin() const { return tree_.rbegin(); }
+  reverse_iterator rend() { return tree_.rend(); }
+  const_reverse_iterator rend() const { return tree_.rend(); }
+  const_reverse_iterator crend() const { return tree_.rend(); }
+
+  // Lookup routines.
+  template <typename K = key_type>
+  size_type count(const key_arg<K> &key) const {
+    auto equal_range = this->equal_range(key);
+    return std::distance(equal_range.first, equal_range.second);
+  }
+  template <typename K = key_type>
+  iterator find(const key_arg<K> &key) {
+    return tree_.find(key);
+  }
+  template <typename K = key_type>
+  const_iterator find(const key_arg<K> &key) const {
+    return tree_.find(key);
+  }
+  template <typename K = key_type>
+  bool contains(const key_arg<K> &key) const {
+    return find(key) != end();
+  }
+  template <typename K = key_type>
+  iterator lower_bound(const key_arg<K> &key) {
+    return tree_.lower_bound(key);
+  }
+  template <typename K = key_type>
+  const_iterator lower_bound(const key_arg<K> &key) const {
+    return tree_.lower_bound(key);
+  }
+  template <typename K = key_type>
+  iterator upper_bound(const key_arg<K> &key) {
+    return tree_.upper_bound(key);
+  }
+  template <typename K = key_type>
+  const_iterator upper_bound(const key_arg<K> &key) const {
+    return tree_.upper_bound(key);
+  }
+  template <typename K = key_type>
+  std::pair<iterator, iterator> equal_range(const key_arg<K> &key) {
+    return tree_.equal_range(key);
+  }
+  template <typename K = key_type>
+  std::pair<const_iterator, const_iterator> equal_range(
+      const key_arg<K> &key) const {
+    return tree_.equal_range(key);
+  }
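+
+  // Example (illustrative): when the comparator is transparent (e.g.
+  // std::less<>), `key_arg` allows these lookups to accept any type
+  // comparable with `key_type`:
+  //
+  //   absl::btree_set<std::string, std::less<>> s = {"a", "b"};
+  //   s.find(absl::string_view("a"));  // no temporary std::string is built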
+
+  // Deletion routines. Note that there is also a deletion routine that is
+  // specific to btree_set_container/btree_multiset_container.
+
+  // Erase the specified iterator from the btree. The iterator must be valid
+  // (i.e. not equal to end()).  Return an iterator pointing to the element
+  // after the one that was erased (or end() if none exists).
+  iterator erase(const_iterator iter) { return tree_.erase(iterator(iter)); }
+  iterator erase(iterator iter) { return tree_.erase(iter); }
+  iterator erase(const_iterator first, const_iterator last) {
+    return tree_.erase_range(iterator(first), iterator(last)).second;
+  }
+  template <typename K = key_type>
+  size_type erase(const key_arg<K> &key) {
+    auto equal_range = this->equal_range(key);
+    return tree_.erase_range(equal_range.first, equal_range.second).first;
+  }
+
+  // Extract routines.
+  node_type extract(iterator position) {
+    // Use Move instead of Transfer, because the rebalancing code expects to
+    // have a valid object to scribble metadata bits on top of.
+    auto node = CommonAccess::Move<node_type>(get_allocator(), position.slot());
+    erase(position);
+    return node;
+  }
+  node_type extract(const_iterator position) {
+    return extract(iterator(position));
+  }
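+
+  // Example (illustrative): node handles allow moving an element between
+  // containers without copying it:
+  //
+  //   absl::btree_set<std::string> a = {"x"}, b;
+  //   b.insert(a.extract(a.begin()));  // "x" moves from `a` to `b`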
+
+  // Utility routines.
+  void clear() { tree_.clear(); }
+  void swap(btree_container &other) { tree_.swap(other.tree_); }
+  void verify() const { tree_.verify(); }
+
+  // Size routines.
+  size_type size() const { return tree_.size(); }
+  size_type max_size() const { return tree_.max_size(); }
+  bool empty() const { return tree_.empty(); }
+
+  friend bool operator==(const btree_container &x, const btree_container &y) {
+    if (x.size() != y.size()) return false;
+    return std::equal(x.begin(), x.end(), y.begin());
+  }
+
+  friend bool operator!=(const btree_container &x, const btree_container &y) {
+    return !(x == y);
+  }
+
+  friend bool operator<(const btree_container &x, const btree_container &y) {
+    return std::lexicographical_compare(x.begin(), x.end(), y.begin(), y.end());
+  }
+
+  friend bool operator>(const btree_container &x, const btree_container &y) {
+    return y < x;
+  }
+
+  friend bool operator<=(const btree_container &x, const btree_container &y) {
+    return !(y < x);
+  }
+
+  friend bool operator>=(const btree_container &x, const btree_container &y) {
+    return !(x < y);
+  }
+
+  // The allocator used by the btree.
+  allocator_type get_allocator() const { return tree_.get_allocator(); }
+
+  // The key comparator used by the btree.
+  key_compare key_comp() const { return tree_.key_comp(); }
+  value_compare value_comp() const { return tree_.value_comp(); }
+
+  // Support absl::Hash.
+  template <typename State>
+  friend State AbslHashValue(State h, const btree_container &b) {
+    for (const auto &v : b) {
+      h = State::combine(std::move(h), v);
+    }
+    return State::combine(std::move(h), b.size());
+  }
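+
+  // For example (illustrative), this makes btree containers usable as keys of
+  // hashed containers:
+  //
+  //   absl::flat_hash_set<absl::btree_set<int>> set_of_sets;
+  //   set_of_sets.insert({1, 2, 3});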
+
+ protected:
+  Tree tree_;
+};
+
+// A common base class for btree_set and btree_map.
+template <typename Tree>
+class btree_set_container : public btree_container<Tree> {
+  using super_type = btree_container<Tree>;
+  using params_type = typename Tree::params_type;
+  using init_type = typename params_type::init_type;
+  using is_key_compare_to = typename params_type::is_key_compare_to;
+  friend class BtreeNodePeer;
+
+ protected:
+  template <class K>
+  using key_arg = typename super_type::template key_arg<K>;
+
+ public:
+  using key_type = typename Tree::key_type;
+  using value_type = typename Tree::value_type;
+  using size_type = typename Tree::size_type;
+  using key_compare = typename Tree::key_compare;
+  using allocator_type = typename Tree::allocator_type;
+  using iterator = typename Tree::iterator;
+  using const_iterator = typename Tree::const_iterator;
+  using node_type = typename super_type::node_type;
+  using insert_return_type = InsertReturnType<iterator, node_type>;
+
+  // Inherit constructors.
+  using super_type::super_type;
+  btree_set_container() {}
+
+  // Range constructors.
+  template <class InputIterator>
+  btree_set_container(InputIterator b, InputIterator e,
+                      const key_compare &comp = key_compare(),
+                      const allocator_type &alloc = allocator_type())
+      : super_type(comp, alloc) {
+    insert(b, e);
+  }
+  template <class InputIterator>
+  btree_set_container(InputIterator b, InputIterator e,
+                      const allocator_type &alloc)
+      : btree_set_container(b, e, key_compare(), alloc) {}
+
+  // Initializer list constructors.
+  btree_set_container(std::initializer_list<init_type> init,
+                      const key_compare &comp = key_compare(),
+                      const allocator_type &alloc = allocator_type())
+      : btree_set_container(init.begin(), init.end(), comp, alloc) {}
+  btree_set_container(std::initializer_list<init_type> init,
+                      const allocator_type &alloc)
+      : btree_set_container(init.begin(), init.end(), alloc) {}
+
+  // Insertion routines.
+  std::pair<iterator, bool> insert(const value_type &v) {
+    return this->tree_.insert_unique(params_type::key(v), v);
+  }
+  std::pair<iterator, bool> insert(value_type &&v) {
+    return this->tree_.insert_unique(params_type::key(v), std::move(v));
+  }
+  template <typename... Args>
+  std::pair<iterator, bool> emplace(Args &&... args) {
+    init_type v(std::forward<Args>(args)...);
+    return this->tree_.insert_unique(params_type::key(v), std::move(v));
+  }
+  iterator insert(const_iterator hint, const value_type &v) {
+    return this->tree_
+        .insert_hint_unique(iterator(hint), params_type::key(v), v)
+        .first;
+  }
+  iterator insert(const_iterator hint, value_type &&v) {
+    return this->tree_
+        .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
+        .first;
+  }
+  template <typename... Args>
+  iterator emplace_hint(const_iterator hint, Args &&... args) {
+    init_type v(std::forward<Args>(args)...);
+    return this->tree_
+        .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
+        .first;
+  }
+  template <typename InputIterator>
+  void insert(InputIterator b, InputIterator e) {
+    this->tree_.insert_iterator_unique(b, e, 0);
+  }
+  void insert(std::initializer_list<init_type> init) {
+    this->tree_.insert_iterator_unique(init.begin(), init.end(), 0);
+  }
+  insert_return_type insert(node_type &&node) {
+    if (!node) return {this->end(), false, node_type()};
+    std::pair<iterator, bool> res =
+        this->tree_.insert_unique(params_type::key(CommonAccess::GetSlot(node)),
+                                  CommonAccess::GetSlot(node));
+    if (res.second) {
+      CommonAccess::Destroy(&node);
+      return {res.first, true, node_type()};
+    } else {
+      return {res.first, false, std::move(node)};
+    }
+  }
+  iterator insert(const_iterator hint, node_type &&node) {
+    if (!node) return this->end();
+    std::pair<iterator, bool> res = this->tree_.insert_hint_unique(
+        iterator(hint), params_type::key(CommonAccess::GetSlot(node)),
+        CommonAccess::GetSlot(node));
+    if (res.second) CommonAccess::Destroy(&node);
+    return res.first;
+  }
+
+  // Node extraction routines.
+  // TODO(ezb): when the comparator is heterogeneous and has different
+  // equivalence classes for different lookup types, we should extract the first
+  // equivalent value if there are multiple.
+  template <typename K = key_type>
+  node_type extract(const key_arg<K> &key) {
+    auto it = this->find(key);
+    return it == this->end() ? node_type() : extract(it);
+  }
+  using super_type::extract;
+
+  // Merge routines.
+  // Moves elements from `src` into `this`. If the element already exists in
+  // `this`, it is left unmodified in `src`.
+  template <
+      typename T,
+      typename absl::enable_if_t<
+          absl::conjunction<
+              std::is_same<value_type, typename T::value_type>,
+              std::is_same<allocator_type, typename T::allocator_type>,
+              std::is_same<typename params_type::is_map_container,
+                           typename T::params_type::is_map_container>>::value,
+          int> = 0>
+  void merge(btree_container<T> &src) {  // NOLINT
+    for (auto src_it = src.begin(); src_it != src.end();) {
+      if (insert(std::move(params_type::element(src_it.slot()))).second) {
+        src_it = src.erase(src_it);
+      } else {
+        ++src_it;
+      }
+    }
+  }
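+
+  // Illustrative example (not part of the upstream header), assuming
+  // absl::btree_set<int> a = {1, 2} and b = {2, 3}:
+  //
+  //   a.merge(b);  // a == {1, 2, 3}; the duplicate 2 is left in b.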
+
+  template <
+      typename T,
+      typename absl::enable_if_t<
+          absl::conjunction<
+              std::is_same<value_type, typename T::value_type>,
+              std::is_same<allocator_type, typename T::allocator_type>,
+              std::is_same<typename params_type::is_map_container,
+                           typename T::params_type::is_map_container>>::value,
+          int> = 0>
+  void merge(btree_container<T> &&src) {
+    merge(src);
+  }
+};
+
+// Base class for btree_map.
+template <typename Tree>
+class btree_map_container : public btree_set_container<Tree> {
+  using super_type = btree_set_container<Tree>;
+  using params_type = typename Tree::params_type;
+  friend class BtreeNodePeer;
+
+ private:
+  template <class K>
+  using key_arg = typename super_type::template key_arg<K>;
+
+ public:
+  using key_type = typename Tree::key_type;
+  using mapped_type = typename params_type::mapped_type;
+  using value_type = typename Tree::value_type;
+  using key_compare = typename Tree::key_compare;
+  using allocator_type = typename Tree::allocator_type;
+  using iterator = typename Tree::iterator;
+  using const_iterator = typename Tree::const_iterator;
+
+  // Inherit constructors.
+  using super_type::super_type;
+  btree_map_container() {}
+
+  // Insertion routines.
+  // Note: the nullptr template arguments and extra `const M&` overloads allow
+  // for supporting bitfield arguments.
+  template <typename K = key_type, class M>
+  std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k,
+                                             const M &obj) {
+    return insert_or_assign_impl(k, obj);
+  }
+  template <typename K = key_type, class M, K * = nullptr>
+  std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, const M &obj) {
+    return insert_or_assign_impl(std::forward<K>(k), obj);
+  }
+  template <typename K = key_type, class M, M * = nullptr>
+  std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k, M &&obj) {
+    return insert_or_assign_impl(k, std::forward<M>(obj));
+  }
+  template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
+  std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, M &&obj) {
+    return insert_or_assign_impl(std::forward<K>(k), std::forward<M>(obj));
+  }
+  template <typename K = key_type, class M>
+  iterator insert_or_assign(const_iterator hint, const key_arg<K> &k,
+                            const M &obj) {
+    return insert_or_assign_hint_impl(hint, k, obj);
+  }
+  template <typename K = key_type, class M, K * = nullptr>
+  iterator insert_or_assign(const_iterator hint, key_arg<K> &&k, const M &obj) {
+    return insert_or_assign_hint_impl(hint, std::forward<K>(k), obj);
+  }
+  template <typename K = key_type, class M, M * = nullptr>
+  iterator insert_or_assign(const_iterator hint, const key_arg<K> &k, M &&obj) {
+    return insert_or_assign_hint_impl(hint, k, std::forward<M>(obj));
+  }
+  template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
+  iterator insert_or_assign(const_iterator hint, key_arg<K> &&k, M &&obj) {
+    return insert_or_assign_hint_impl(hint, std::forward<K>(k),
+                                      std::forward<M>(obj));
+  }
+
+  template <typename K = key_type, typename... Args,
+            typename absl::enable_if_t<
+                !std::is_convertible<K, const_iterator>::value, int> = 0>
+  std::pair<iterator, bool> try_emplace(const key_arg<K> &k, Args &&... args) {
+    return try_emplace_impl(k, std::forward<Args>(args)...);
+  }
+  template <typename K = key_type, typename... Args,
+            typename absl::enable_if_t<
+                !std::is_convertible<K, const_iterator>::value, int> = 0>
+  std::pair<iterator, bool> try_emplace(key_arg<K> &&k, Args &&... args) {
+    return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
+  }
+  template <typename K = key_type, typename... Args>
+  iterator try_emplace(const_iterator hint, const key_arg<K> &k,
+                       Args &&... args) {
+    return try_emplace_hint_impl(hint, k, std::forward<Args>(args)...);
+  }
+  template <typename K = key_type, typename... Args>
+  iterator try_emplace(const_iterator hint, key_arg<K> &&k, Args &&... args) {
+    return try_emplace_hint_impl(hint, std::forward<K>(k),
+                                 std::forward<Args>(args)...);
+  }
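+
+  // Illustrative usage (not part of the upstream header), assuming
+  // absl::btree_map<std::string, int> m:
+  //
+  //   m.try_emplace("a", 1);       // inserts {"a", 1} only if "a" is absent.
+  //   m.insert_or_assign("a", 2);  // inserts or overwrites; m["a"] == 2.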
+
+  template <typename K = key_type>
+  mapped_type &operator[](const key_arg<K> &k) {
+    return try_emplace(k).first->second;
+  }
+  template <typename K = key_type>
+  mapped_type &operator[](key_arg<K> &&k) {
+    return try_emplace(std::forward<K>(k)).first->second;
+  }
+
+  template <typename K = key_type>
+  mapped_type &at(const key_arg<K> &key) {
+    auto it = this->find(key);
+    if (it == this->end())
+      base_internal::ThrowStdOutOfRange("absl::btree_map::at");
+    return it->second;
+  }
+  template <typename K = key_type>
+  const mapped_type &at(const key_arg<K> &key) const {
+    auto it = this->find(key);
+    if (it == this->end())
+      base_internal::ThrowStdOutOfRange("absl::btree_map::at");
+    return it->second;
+  }
+
+ private:
+  // Note: when we call `std::forward<M>(obj)` twice, it's safe because
+  // insert_unique/insert_hint_unique are guaranteed to not consume `obj` when
+  // `ret.second` is false.
+  template <class K, class M>
+  std::pair<iterator, bool> insert_or_assign_impl(K &&k, M &&obj) {
+    const std::pair<iterator, bool> ret =
+        this->tree_.insert_unique(k, std::forward<K>(k), std::forward<M>(obj));
+    if (!ret.second) ret.first->second = std::forward<M>(obj);
+    return ret;
+  }
+  template <class K, class M>
+  iterator insert_or_assign_hint_impl(const_iterator hint, K &&k, M &&obj) {
+    const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
+        iterator(hint), k, std::forward<K>(k), std::forward<M>(obj));
+    if (!ret.second) ret.first->second = std::forward<M>(obj);
+    return ret.first;
+  }
+
+  template <class K, class... Args>
+  std::pair<iterator, bool> try_emplace_impl(K &&k, Args &&... args) {
+    return this->tree_.insert_unique(
+        k, std::piecewise_construct, std::forward_as_tuple(std::forward<K>(k)),
+        std::forward_as_tuple(std::forward<Args>(args)...));
+  }
+  template <class K, class... Args>
+  iterator try_emplace_hint_impl(const_iterator hint, K &&k, Args &&... args) {
+    return this->tree_
+        .insert_hint_unique(iterator(hint), k, std::piecewise_construct,
+                            std::forward_as_tuple(std::forward<K>(k)),
+                            std::forward_as_tuple(std::forward<Args>(args)...))
+        .first;
+  }
+};
+
+// A common base class for btree_multiset and btree_multimap.
+template <typename Tree>
+class btree_multiset_container : public btree_container<Tree> {
+  using super_type = btree_container<Tree>;
+  using params_type = typename Tree::params_type;
+  using init_type = typename params_type::init_type;
+  using is_key_compare_to = typename params_type::is_key_compare_to;
+
+  template <class K>
+  using key_arg = typename super_type::template key_arg<K>;
+
+ public:
+  using key_type = typename Tree::key_type;
+  using value_type = typename Tree::value_type;
+  using size_type = typename Tree::size_type;
+  using key_compare = typename Tree::key_compare;
+  using allocator_type = typename Tree::allocator_type;
+  using iterator = typename Tree::iterator;
+  using const_iterator = typename Tree::const_iterator;
+  using node_type = typename super_type::node_type;
+
+  // Inherit constructors.
+  using super_type::super_type;
+  btree_multiset_container() {}
+
+  // Range constructors.
+  template <class InputIterator>
+  btree_multiset_container(InputIterator b, InputIterator e,
+                           const key_compare &comp = key_compare(),
+                           const allocator_type &alloc = allocator_type())
+      : super_type(comp, alloc) {
+    insert(b, e);
+  }
+  template <class InputIterator>
+  btree_multiset_container(InputIterator b, InputIterator e,
+                           const allocator_type &alloc)
+      : btree_multiset_container(b, e, key_compare(), alloc) {}
+
+  // Initializer list constructors.
+  btree_multiset_container(std::initializer_list<init_type> init,
+                           const key_compare &comp = key_compare(),
+                           const allocator_type &alloc = allocator_type())
+      : btree_multiset_container(init.begin(), init.end(), comp, alloc) {}
+  btree_multiset_container(std::initializer_list<init_type> init,
+                           const allocator_type &alloc)
+      : btree_multiset_container(init.begin(), init.end(), alloc) {}
+
+  // Insertion routines.
+  iterator insert(const value_type &v) { return this->tree_.insert_multi(v); }
+  iterator insert(value_type &&v) {
+    return this->tree_.insert_multi(std::move(v));
+  }
+  iterator insert(const_iterator hint, const value_type &v) {
+    return this->tree_.insert_hint_multi(iterator(hint), v);
+  }
+  iterator insert(const_iterator hint, value_type &&v) {
+    return this->tree_.insert_hint_multi(iterator(hint), std::move(v));
+  }
+  template <typename InputIterator>
+  void insert(InputIterator b, InputIterator e) {
+    this->tree_.insert_iterator_multi(b, e);
+  }
+  void insert(std::initializer_list<init_type> init) {
+    this->tree_.insert_iterator_multi(init.begin(), init.end());
+  }
+  template <typename... Args>
+  iterator emplace(Args &&... args) {
+    return this->tree_.insert_multi(init_type(std::forward<Args>(args)...));
+  }
+  template <typename... Args>
+  iterator emplace_hint(const_iterator hint, Args &&... args) {
+    return this->tree_.insert_hint_multi(
+        iterator(hint), init_type(std::forward<Args>(args)...));
+  }
+  iterator insert(node_type &&node) {
+    if (!node) return this->end();
+    iterator res =
+        this->tree_.insert_multi(params_type::key(CommonAccess::GetSlot(node)),
+                                 CommonAccess::GetSlot(node));
+    CommonAccess::Destroy(&node);
+    return res;
+  }
+  iterator insert(const_iterator hint, node_type &&node) {
+    if (!node) return this->end();
+    iterator res = this->tree_.insert_hint_multi(
+        iterator(hint),
+        std::move(params_type::element(CommonAccess::GetSlot(node))));
+    CommonAccess::Destroy(&node);
+    return res;
+  }
+
+  // Node extraction routines.
+  // TODO(ezb): we are supposed to extract the first equivalent key if there are
+  // multiple, but this isn't guaranteed to extract the first one.
+  template <typename K = key_type>
+  node_type extract(const key_arg<K> &key) {
+    auto it = this->find(key);
+    return it == this->end() ? node_type() : extract(it);
+  }
+  using super_type::extract;
+
+  // Merge routines.
+  // Moves all elements from `src` into `this`.
+  template <
+      typename T,
+      typename absl::enable_if_t<
+          absl::conjunction<
+              std::is_same<value_type, typename T::value_type>,
+              std::is_same<allocator_type, typename T::allocator_type>,
+              std::is_same<typename params_type::is_map_container,
+                           typename T::params_type::is_map_container>>::value,
+          int> = 0>
+  void merge(btree_container<T> &src) {  // NOLINT
+    for (auto src_it = src.begin(), end = src.end(); src_it != end; ++src_it) {
+      insert(std::move(params_type::element(src_it.slot())));
+    }
+    src.clear();
+  }
+
+  template <
+      typename T,
+      typename absl::enable_if_t<
+          absl::conjunction<
+              std::is_same<value_type, typename T::value_type>,
+              std::is_same<allocator_type, typename T::allocator_type>,
+              std::is_same<typename params_type::is_map_container,
+                           typename T::params_type::is_map_container>>::value,
+          int> = 0>
+  void merge(btree_container<T> &&src) {
+    merge(src);
+  }
+};
+
+// A base class for btree_multimap.
+template <typename Tree>
+class btree_multimap_container : public btree_multiset_container<Tree> {
+  using super_type = btree_multiset_container<Tree>;
+  using params_type = typename Tree::params_type;
+
+ public:
+  using mapped_type = typename params_type::mapped_type;
+
+  // Inherit constructors.
+  using super_type::super_type;
+  btree_multimap_container() {}
+};
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/common.h b/third_party/abseil_cpp/absl/container/internal/common.h
new file mode 100644
index 000000000000..030e9d4ab07d
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/common.h
@@ -0,0 +1,206 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_H_
+#define ABSL_CONTAINER_INTERNAL_CONTAINER_H_
+
+#include <cassert>
+#include <type_traits>
+
+#include "absl/meta/type_traits.h"
+#include "absl/types/optional.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class, class = void>
+struct IsTransparent : std::false_type {};
+template <class T>
+struct IsTransparent<T, absl::void_t<typename T::is_transparent>>
+    : std::true_type {};
+
+template <bool is_transparent>
+struct KeyArg {
+  // Transparent. Forward `K`.
+  template <typename K, typename key_type>
+  using type = K;
+};
+
+template <>
+struct KeyArg<false> {
+  // Not transparent. Always use `key_type`.
+  template <typename K, typename key_type>
+  using type = key_type;
+};
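+
+// For example (illustrative): with a transparent comparator such as
+// std::less<>, KeyArg<true>::type<absl::string_view, std::string> is
+// absl::string_view, so heterogeneous lookups can avoid materializing a
+// temporary std::string key; KeyArg<false> always collapses to key_type.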
+
+// The node_handle concept from C++17.
+// We specialize node_handle for sets and maps. node_handle_base holds the
+// common API of both.
+template <typename PolicyTraits, typename Alloc>
+class node_handle_base {
+ protected:
+  using slot_type = typename PolicyTraits::slot_type;
+
+ public:
+  using allocator_type = Alloc;
+
+  constexpr node_handle_base() = default;
+  node_handle_base(node_handle_base&& other) noexcept {
+    *this = std::move(other);
+  }
+  ~node_handle_base() { destroy(); }
+  node_handle_base& operator=(node_handle_base&& other) noexcept {
+    destroy();
+    if (!other.empty()) {
+      alloc_ = other.alloc_;
+      PolicyTraits::transfer(alloc(), slot(), other.slot());
+      other.reset();
+    }
+    return *this;
+  }
+
+  bool empty() const noexcept { return !alloc_; }
+  explicit operator bool() const noexcept { return !empty(); }
+  allocator_type get_allocator() const { return *alloc_; }
+
+ protected:
+  friend struct CommonAccess;
+
+  struct transfer_tag_t {};
+  node_handle_base(transfer_tag_t, const allocator_type& a, slot_type* s)
+      : alloc_(a) {
+    PolicyTraits::transfer(alloc(), slot(), s);
+  }
+
+  struct move_tag_t {};
+  node_handle_base(move_tag_t, const allocator_type& a, slot_type* s)
+      : alloc_(a) {
+    PolicyTraits::construct(alloc(), slot(), s);
+  }
+
+  void destroy() {
+    if (!empty()) {
+      PolicyTraits::destroy(alloc(), slot());
+      reset();
+    }
+  }
+
+  void reset() {
+    assert(alloc_.has_value());
+    alloc_ = absl::nullopt;
+  }
+
+  slot_type* slot() const {
+    assert(!empty());
+    return reinterpret_cast<slot_type*>(std::addressof(slot_space_));
+  }
+  allocator_type* alloc() { return std::addressof(*alloc_); }
+
+ private:
+  absl::optional<allocator_type> alloc_ = {};
+  alignas(slot_type) mutable unsigned char slot_space_[sizeof(slot_type)] = {};
+};
+
+// For sets.
+template <typename Policy, typename PolicyTraits, typename Alloc,
+          typename = void>
+class node_handle : public node_handle_base<PolicyTraits, Alloc> {
+  using Base = node_handle_base<PolicyTraits, Alloc>;
+
+ public:
+  using value_type = typename PolicyTraits::value_type;
+
+  constexpr node_handle() {}
+
+  value_type& value() const { return PolicyTraits::element(this->slot()); }
+
+ private:
+  friend struct CommonAccess;
+
+  using Base::Base;
+};
+
+// For maps.
+template <typename Policy, typename PolicyTraits, typename Alloc>
+class node_handle<Policy, PolicyTraits, Alloc,
+                  absl::void_t<typename Policy::mapped_type>>
+    : public node_handle_base<PolicyTraits, Alloc> {
+  using Base = node_handle_base<PolicyTraits, Alloc>;
+  using slot_type = typename PolicyTraits::slot_type;
+
+ public:
+  using key_type = typename Policy::key_type;
+  using mapped_type = typename Policy::mapped_type;
+
+  constexpr node_handle() {}
+
+  // When C++17 is available, we can use std::launder to provide mutable
+  // access to the key. Otherwise, we provide const access.
+  auto key() const
+      -> decltype(PolicyTraits::mutable_key(std::declval<slot_type*>())) {
+    return PolicyTraits::mutable_key(this->slot());
+  }
+
+  mapped_type& mapped() const {
+    return PolicyTraits::value(&PolicyTraits::element(this->slot()));
+  }
+
+ private:
+  friend struct CommonAccess;
+
+  using Base::Base;
+};
+
+// Provide access to non-public node-handle functions.
+struct CommonAccess {
+  template <typename Node>
+  static auto GetSlot(const Node& node) -> decltype(node.slot()) {
+    return node.slot();
+  }
+
+  template <typename Node>
+  static void Destroy(Node* node) {
+    node->destroy();
+  }
+
+  template <typename Node>
+  static void Reset(Node* node) {
+    node->reset();
+  }
+
+  template <typename T, typename... Args>
+  static T Transfer(Args&&... args) {
+    return T(typename T::transfer_tag_t{}, std::forward<Args>(args)...);
+  }
+
+  template <typename T, typename... Args>
+  static T Move(Args&&... args) {
+    return T(typename T::move_tag_t{}, std::forward<Args>(args)...);
+  }
+};
+
+// Implement the insert_return_type<> concept of C++17.
+template <class Iterator, class NodeType>
+struct InsertReturnType {
+  Iterator position;
+  bool inserted;
+  NodeType node;
+};
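+
+// Illustrative usage (not part of the upstream header), for a set `s` and a
+// node handle `nh` previously obtained from extract():
+//
+//   auto res = s.insert(std::move(nh));
+//   if (!res.inserted) {
+//     // res.node still owns the element; res.position points at the
+//     // equivalent element already stored in `s`.
+//   }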
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_CONTAINER_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/compressed_tuple.h b/third_party/abseil_cpp/absl/container/internal/compressed_tuple.h
new file mode 100644
index 000000000000..5ebe16494249
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/compressed_tuple.h
@@ -0,0 +1,290 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Helper class to perform the Empty Base Optimization.
+// Ts can contain classes and non-classes, empty or not. For the ones that
+// are empty classes, we perform the optimization. If all types in Ts are empty
+// classes, then CompressedTuple<Ts...> is itself an empty class.
+//
+// To access the members, use the member get<N>() function.
+//
+// Eg:
+//   absl::container_internal::CompressedTuple<int, T1, T2, T3> value(7, t1, t2,
+//                                                                    t3);
+//   assert(value.get<0>() == 7);
+//   T1& t1 = value.get<1>();
+//   const T2& t2 = value.get<2>();
+//   ...
+//
+// https://en.cppreference.com/w/cpp/language/ebo
+
+#ifndef ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
+#define ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
+
+#include <initializer_list>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/utility/utility.h"
+
+#if defined(_MSC_VER) && !defined(__NVCC__)
+// We need to mark these classes with this declspec to ensure that the empty
+// base class optimization for CompressedTuple happens.
+#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC __declspec(empty_bases)
+#else
+#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <typename... Ts>
+class CompressedTuple;
+
+namespace internal_compressed_tuple {
+
+template <typename D, size_t I>
+struct Elem;
+template <typename... B, size_t I>
+struct Elem<CompressedTuple<B...>, I>
+    : std::tuple_element<I, std::tuple<B...>> {};
+template <typename D, size_t I>
+using ElemT = typename Elem<D, I>::type;
+
+// Use the __is_final intrinsic if available. Where it's not available, classes
+// declared with the 'final' specifier cannot be used as CompressedTuple
+// elements.
+// TODO(sbenza): Replace this with std::is_final in C++14.
+template <typename T>
+constexpr bool IsFinal() {
+#if defined(__clang__) || defined(__GNUC__)
+  return __is_final(T);
+#else
+  return false;
+#endif
+}
+
+// We can't use EBCO on other CompressedTuples because that would mean that we
+// derive from multiple Storage<> instantiations with the same I parameter,
+// and potentially from multiple identical Storage<> instantiations.  So anytime
+// we use type inheritance rather than encapsulation, we mark
+// CompressedTupleImpl, to make this easy to detect.
+struct uses_inheritance {};
+
+template <typename T>
+constexpr bool ShouldUseBase() {
+  return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>() &&
+         !std::is_base_of<uses_inheritance, T>::value;
+}
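+
+// For example (illustrative): ShouldUseBase<std::less<int>>() is true (an
+// empty, non-final class), while ShouldUseBase<int>() and
+// ShouldUseBase<std::string>() are false.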
+
+// The storage class provides two specializations:
+//  - For empty classes, it stores T as a base class.
+//  - For everything else, it stores T as a member.
+template <typename T, size_t I,
+#if defined(_MSC_VER)
+          bool UseBase =
+              ShouldUseBase<typename std::enable_if<true, T>::type>()>
+#else
+          bool UseBase = ShouldUseBase<T>()>
+#endif
+struct Storage {
+  T value;
+  constexpr Storage() = default;
+  template <typename V>
+  explicit constexpr Storage(absl::in_place_t, V&& v)
+      : value(absl::forward<V>(v)) {}
+  constexpr const T& get() const& { return value; }
+  T& get() & { return value; }
+  constexpr const T&& get() const&& { return absl::move(*this).value; }
+  T&& get() && { return std::move(*this).value; }
+};
+
+template <typename T, size_t I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<T, I, true> : T {
+  constexpr Storage() = default;
+
+  template <typename V>
+  explicit constexpr Storage(absl::in_place_t, V&& v)
+      : T(absl::forward<V>(v)) {}
+
+  constexpr const T& get() const& { return *this; }
+  T& get() & { return *this; }
+  constexpr const T&& get() const&& { return absl::move(*this); }
+  T&& get() && { return std::move(*this); }
+};
+
+template <typename D, typename I, bool ShouldAnyUseBase>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl;
+
+template <typename... Ts, size_t... I, bool ShouldAnyUseBase>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
+    CompressedTuple<Ts...>, absl::index_sequence<I...>, ShouldAnyUseBase>
+    // We use the dummy identity function through std::integral_constant to
+    // convince MSVC to accept and expand I in that context. Without it you
+    // would get:
+    //   error C3548: 'I': parameter pack cannot be used in this context
+    : uses_inheritance,
+      Storage<Ts, std::integral_constant<size_t, I>::value>... {
+  constexpr CompressedTupleImpl() = default;
+  template <typename... Vs>
+  explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args)
+      : Storage<Ts, I>(absl::in_place, absl::forward<Vs>(args))... {}
+  friend CompressedTuple<Ts...>;
+};
+
+template <typename... Ts, size_t... I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
+    CompressedTuple<Ts...>, absl::index_sequence<I...>, false>
+    // We use the dummy identity function as above...
+    : Storage<Ts, std::integral_constant<size_t, I>::value, false>... {
+  constexpr CompressedTupleImpl() = default;
+  template <typename... Vs>
+  explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args)
+      : Storage<Ts, I, false>(absl::in_place, absl::forward<Vs>(args))... {}
+  friend CompressedTuple<Ts...>;
+};
+
+std::false_type Or(std::initializer_list<std::false_type>);
+std::true_type Or(std::initializer_list<bool>);
+
+// MSVC requires this to be done separately rather than within the declaration
+// of CompressedTuple below.
+template <typename... Ts>
+constexpr bool ShouldAnyUseBase() {
+  return decltype(
+      Or({std::integral_constant<bool, ShouldUseBase<Ts>()>()...})){};
+}
+
+template <typename T, typename V>
+using TupleElementMoveConstructible =
+    typename std::conditional<std::is_reference<T>::value,
+                              std::is_convertible<V, T>,
+                              std::is_constructible<T, V&&>>::type;
+
+template <bool SizeMatches, class T, class... Vs>
+struct TupleMoveConstructible : std::false_type {};
+
+template <class... Ts, class... Vs>
+struct TupleMoveConstructible<true, CompressedTuple<Ts...>, Vs...>
+    : std::integral_constant<
+          bool, absl::conjunction<
+                    TupleElementMoveConstructible<Ts, Vs&&>...>::value> {};
+
+template <typename T>
+struct compressed_tuple_size;
+
+template <typename... Es>
+struct compressed_tuple_size<CompressedTuple<Es...>>
+    : public std::integral_constant<std::size_t, sizeof...(Es)> {};
+
+template <class T, class... Vs>
+struct TupleItemsMoveConstructible
+    : std::integral_constant<
+          bool, TupleMoveConstructible<compressed_tuple_size<T>::value ==
+                                           sizeof...(Vs),
+                                       T, Vs...>::value> {};
+
+}  // namespace internal_compressed_tuple
+
+// Helper class to perform the Empty Base Class Optimization.
+// Ts can contain classes and non-classes, empty or not. For the ones that
+// are empty classes, we perform the optimization. If all types in Ts are
+// empty classes, then CompressedTuple<Ts...> is itself an empty class.  (This
+// does not apply when one or more of those empty classes is itself an empty
+// CompressedTuple.)
+//
+// To access the members, use the member .get<N>() function.
+//
+// Eg:
+//   absl::container_internal::CompressedTuple<int, T1, T2, T3> value(7, t1, t2,
+//                                                                    t3);
+//   assert(value.get<0>() == 7);
+//   T1& t1 = value.get<1>();
+//   const T2& t2 = value.get<2>();
+//   ...
+//
+// https://en.cppreference.com/w/cpp/language/ebo
+template <typename... Ts>
+class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
+    : private internal_compressed_tuple::CompressedTupleImpl<
+          CompressedTuple<Ts...>, absl::index_sequence_for<Ts...>,
+          internal_compressed_tuple::ShouldAnyUseBase<Ts...>()> {
+ private:
+  template <int I>
+  using ElemT = internal_compressed_tuple::ElemT<CompressedTuple, I>;
+
+  template <int I>
+  using StorageT = internal_compressed_tuple::Storage<ElemT<I>, I>;
+
+ public:
+  // There seems to be a bug in MSVC in which using '=default' here causes the
+  // compiler to ignore the body of other constructors. The workaround is to
+  // explicitly implement the default constructor.
+#if defined(_MSC_VER)
+  constexpr CompressedTuple() : CompressedTuple::CompressedTupleImpl() {}
+#else
+  constexpr CompressedTuple() = default;
+#endif
+  explicit constexpr CompressedTuple(const Ts&... base)
+      : CompressedTuple::CompressedTupleImpl(absl::in_place, base...) {}
+
+  template <typename First, typename... Vs,
+            absl::enable_if_t<
+                absl::conjunction<
+                    // Ensure we are not hiding default copy/move constructors.
+                    absl::negation<std::is_same<void(CompressedTuple),
+                                                void(absl::decay_t<First>)>>,
+                    internal_compressed_tuple::TupleItemsMoveConstructible<
+                        CompressedTuple<Ts...>, First, Vs...>>::value,
+                bool> = true>
+  explicit constexpr CompressedTuple(First&& first, Vs&&... base)
+      : CompressedTuple::CompressedTupleImpl(absl::in_place,
+                                             absl::forward<First>(first),
+                                             absl::forward<Vs>(base)...) {}
+
+  template <int I>
+  ElemT<I>& get() & {
+    return StorageT<I>::get();
+  }
+
+  template <int I>
+  constexpr const ElemT<I>& get() const& {
+    return StorageT<I>::get();
+  }
+
+  template <int I>
+  ElemT<I>&& get() && {
+    return std::move(*this).StorageT<I>::get();
+  }
+
+  template <int I>
+  constexpr const ElemT<I>&& get() const&& {
+    return absl::move(*this).StorageT<I>::get();
+  }
+};
+
+// Explicit specialization for a zero-element tuple
+// (needed to avoid ambiguous overloads for the default constructor).
+template <>
+class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {};
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#undef ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
+
+#endif  // ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/compressed_tuple_test.cc b/third_party/abseil_cpp/absl/container/internal/compressed_tuple_test.cc
new file mode 100644
index 000000000000..62a7483ee311
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/compressed_tuple_test.cc
@@ -0,0 +1,409 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/compressed_tuple.h"
+
+#include <memory>
+#include <set>
+#include <string>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/test_instance_tracker.h"
+#include "absl/memory/memory.h"
+#include "absl/types/any.h"
+#include "absl/types/optional.h"
+#include "absl/utility/utility.h"
+
+// These are declared at global scope purely so that error messages
+// are smaller and easier to understand.
+enum class CallType { kConstRef, kConstMove };
+
+template <int>
+struct Empty {
+  constexpr CallType value() const& { return CallType::kConstRef; }
+  constexpr CallType value() const&& { return CallType::kConstMove; }
+};
+
+template <typename T>
+struct NotEmpty {
+  T value;
+};
+
+template <typename T, typename U>
+struct TwoValues {
+  T value1;
+  U value2;
+};
+
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using absl::test_internal::CopyableMovableInstance;
+using absl::test_internal::InstanceTracker;
+
+TEST(CompressedTupleTest, Sizeof) {
+  EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int>));
+  EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int, Empty<0>>));
+  EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int, Empty<0>, Empty<1>>));
+  EXPECT_EQ(sizeof(int),
+            sizeof(CompressedTuple<int, Empty<0>, Empty<1>, Empty<2>>));
+
+  EXPECT_EQ(sizeof(TwoValues<int, double>),
+            sizeof(CompressedTuple<int, NotEmpty<double>>));
+  EXPECT_EQ(sizeof(TwoValues<int, double>),
+            sizeof(CompressedTuple<int, Empty<0>, NotEmpty<double>>));
+  EXPECT_EQ(sizeof(TwoValues<int, double>),
+            sizeof(CompressedTuple<int, Empty<0>, NotEmpty<double>, Empty<1>>));
+}
+
+TEST(CompressedTupleTest, OneMoveOnRValueConstructionTemp) {
+  InstanceTracker tracker;
+  CompressedTuple<CopyableMovableInstance> x1(CopyableMovableInstance(1));
+  EXPECT_EQ(tracker.instances(), 1);
+  EXPECT_EQ(tracker.copies(), 0);
+  EXPECT_LE(tracker.moves(), 1);
+  EXPECT_EQ(x1.get<0>().value(), 1);
+}
+
+TEST(CompressedTupleTest, OneMoveOnRValueConstructionMove) {
+  InstanceTracker tracker;
+
+  CopyableMovableInstance i1(1);
+  CompressedTuple<CopyableMovableInstance> x1(std::move(i1));
+  EXPECT_EQ(tracker.instances(), 2);
+  EXPECT_EQ(tracker.copies(), 0);
+  EXPECT_LE(tracker.moves(), 1);
+  EXPECT_EQ(x1.get<0>().value(), 1);
+}
+
+TEST(CompressedTupleTest, OneMoveOnRValueConstructionMixedTypes) {
+  InstanceTracker tracker;
+  CopyableMovableInstance i1(1);
+  CopyableMovableInstance i2(2);
+  Empty<0> empty;
+  CompressedTuple<CopyableMovableInstance, CopyableMovableInstance&, Empty<0>>
+      x1(std::move(i1), i2, empty);
+  EXPECT_EQ(x1.get<0>().value(), 1);
+  EXPECT_EQ(x1.get<1>().value(), 2);
+  EXPECT_EQ(tracker.copies(), 0);
+  EXPECT_EQ(tracker.moves(), 1);
+}
+
+struct IncompleteType;
+CompressedTuple<CopyableMovableInstance, IncompleteType&, Empty<0>>
+MakeWithIncomplete(CopyableMovableInstance i1,
+                   IncompleteType& t,  // NOLINT
+                   Empty<0> empty) {
+  return CompressedTuple<CopyableMovableInstance, IncompleteType&, Empty<0>>{
+      std::move(i1), t, empty};
+}
+
+struct IncompleteType {};
+TEST(CompressedTupleTest, OneMoveOnRValueConstructionWithIncompleteType) {
+  InstanceTracker tracker;
+  CopyableMovableInstance i1(1);
+  Empty<0> empty;
+  struct DerivedType : IncompleteType { int value = 0; };
+  DerivedType fd;
+  fd.value = 7;
+
+  CompressedTuple<CopyableMovableInstance, IncompleteType&, Empty<0>> x1 =
+      MakeWithIncomplete(std::move(i1), fd, empty);
+
+  EXPECT_EQ(x1.get<0>().value(), 1);
+  EXPECT_EQ(static_cast<DerivedType&>(x1.get<1>()).value, 7);
+
+  EXPECT_EQ(tracker.copies(), 0);
+  EXPECT_EQ(tracker.moves(), 2);
+}
+
+TEST(CompressedTupleTest,
+     OneMoveOnRValueConstructionMixedTypes_BraceInitPoisonPillExpected) {
+  InstanceTracker tracker;
+  CopyableMovableInstance i1(1);
+  CopyableMovableInstance i2(2);
+  CompressedTuple<CopyableMovableInstance, CopyableMovableInstance&, Empty<0>>
+      x1(std::move(i1), i2, {});  // NOLINT
+  EXPECT_EQ(x1.get<0>().value(), 1);
+  EXPECT_EQ(x1.get<1>().value(), 2);
+  EXPECT_EQ(tracker.instances(), 3);
+  // We are forced into the `const Ts&...` constructor (invoking copies)
+  // because we need it to deduce the type of `{}`.
+  // std::tuple also has this behavior.
+  // Note, this test is proof that this is expected behavior, but it is not
+  // _desired_ behavior.
+  EXPECT_EQ(tracker.copies(), 1);
+  EXPECT_EQ(tracker.moves(), 0);
+}
+
+TEST(CompressedTupleTest, OneCopyOnLValueConstruction) {
+  InstanceTracker tracker;
+  CopyableMovableInstance i1(1);
+
+  CompressedTuple<CopyableMovableInstance> x1(i1);
+  EXPECT_EQ(tracker.copies(), 1);
+  EXPECT_EQ(tracker.moves(), 0);
+
+  tracker.ResetCopiesMovesSwaps();
+
+  CopyableMovableInstance i2(2);
+  const CopyableMovableInstance& i2_ref = i2;
+  CompressedTuple<CopyableMovableInstance> x2(i2_ref);
+  EXPECT_EQ(tracker.copies(), 1);
+  EXPECT_EQ(tracker.moves(), 0);
+}
+
+TEST(CompressedTupleTest, OneMoveOnRValueAccess) {
+  InstanceTracker tracker;
+  CopyableMovableInstance i1(1);
+  CompressedTuple<CopyableMovableInstance> x(std::move(i1));
+  tracker.ResetCopiesMovesSwaps();
+
+  CopyableMovableInstance i2 = std::move(x).get<0>();
+  EXPECT_EQ(tracker.copies(), 0);
+  EXPECT_EQ(tracker.moves(), 1);
+}
+
+TEST(CompressedTupleTest, OneCopyOnLValueAccess) {
+  InstanceTracker tracker;
+
+  CompressedTuple<CopyableMovableInstance> x(CopyableMovableInstance(0));
+  EXPECT_EQ(tracker.copies(), 0);
+  EXPECT_EQ(tracker.moves(), 1);
+
+  CopyableMovableInstance t = x.get<0>();
+  EXPECT_EQ(tracker.copies(), 1);
+  EXPECT_EQ(tracker.moves(), 1);
+}
+
+TEST(CompressedTupleTest, ZeroCopyOnRefAccess) {
+  InstanceTracker tracker;
+
+  CompressedTuple<CopyableMovableInstance> x(CopyableMovableInstance(0));
+  EXPECT_EQ(tracker.copies(), 0);
+  EXPECT_EQ(tracker.moves(), 1);
+
+  CopyableMovableInstance& t1 = x.get<0>();
+  const CopyableMovableInstance& t2 = x.get<0>();
+  EXPECT_EQ(tracker.copies(), 0);
+  EXPECT_EQ(tracker.moves(), 1);
+  EXPECT_EQ(t1.value(), 0);
+  EXPECT_EQ(t2.value(), 0);
+}
+
+TEST(CompressedTupleTest, Access) {
+  struct S {
+    std::string x;
+  };
+  CompressedTuple<int, Empty<0>, S> x(7, {}, S{"ABC"});
+  EXPECT_EQ(sizeof(x), sizeof(TwoValues<int, S>));
+  EXPECT_EQ(7, x.get<0>());
+  EXPECT_EQ("ABC", x.get<2>().x);
+}
+
+TEST(CompressedTupleTest, NonClasses) {
+  CompressedTuple<int, const char*> x(7, "ABC");
+  EXPECT_EQ(7, x.get<0>());
+  EXPECT_STREQ("ABC", x.get<1>());
+}
+
+TEST(CompressedTupleTest, MixClassAndNonClass) {
+  CompressedTuple<int, const char*, Empty<0>, NotEmpty<double>> x(7, "ABC", {},
+                                                                  {1.25});
+  struct Mock {
+    int v;
+    const char* p;
+    double d;
+  };
+  EXPECT_EQ(sizeof(x), sizeof(Mock));
+  EXPECT_EQ(7, x.get<0>());
+  EXPECT_STREQ("ABC", x.get<1>());
+  EXPECT_EQ(1.25, x.get<3>().value);
+}
+
+TEST(CompressedTupleTest, Nested) {
+  CompressedTuple<int, CompressedTuple<int>,
+                  CompressedTuple<int, CompressedTuple<int>>>
+      x(1, CompressedTuple<int>(2),
+        CompressedTuple<int, CompressedTuple<int>>(3, CompressedTuple<int>(4)));
+  EXPECT_EQ(1, x.get<0>());
+  EXPECT_EQ(2, x.get<1>().get<0>());
+  EXPECT_EQ(3, x.get<2>().get<0>());
+  EXPECT_EQ(4, x.get<2>().get<1>().get<0>());
+
+  CompressedTuple<Empty<0>, Empty<0>,
+                  CompressedTuple<Empty<0>, CompressedTuple<Empty<0>>>>
+      y;
+  std::set<Empty<0>*> empties{&y.get<0>(), &y.get<1>(), &y.get<2>().get<0>(),
+                              &y.get<2>().get<1>().get<0>()};
+#ifdef _MSC_VER
+  // MSVC has a bug where many instances of the same base class are laid out
+  // at the same address when using __declspec(empty_bases).
+  // This will be fixed in a future version of MSVC.
+  int expected = 1;
+#else
+  int expected = 4;
+#endif
+  EXPECT_EQ(expected, sizeof(y));
+  EXPECT_EQ(expected, empties.size());
+  EXPECT_EQ(sizeof(y), sizeof(Empty<0>) * empties.size());
+
+  EXPECT_EQ(4 * sizeof(char),
+            sizeof(CompressedTuple<CompressedTuple<char, char>,
+                                   CompressedTuple<char, char>>));
+  EXPECT_TRUE((std::is_empty<CompressedTuple<Empty<0>, Empty<1>>>::value));
+
+  // Make sure everything still works when things are nested.
+  struct CT_Empty : CompressedTuple<Empty<0>> {};
+  CompressedTuple<Empty<0>, CT_Empty> nested_empty;
+  auto contained = nested_empty.get<0>();
+  auto nested = nested_empty.get<1>().get<0>();
+  EXPECT_TRUE((std::is_same<decltype(contained), decltype(nested)>::value));
+}
+
+TEST(CompressedTupleTest, Reference) {
+  int i = 7;
+  std::string s = "Very long string that goes in the heap";
+  CompressedTuple<int, int&, std::string, std::string&> x(i, i, s, s);
+
+  // Sanity check. We should not have moved from `s`.
+  EXPECT_EQ(s, "Very long string that goes in the heap");
+
+  EXPECT_EQ(x.get<0>(), x.get<1>());
+  EXPECT_NE(&x.get<0>(), &x.get<1>());
+  EXPECT_EQ(&x.get<1>(), &i);
+
+  EXPECT_EQ(x.get<2>(), x.get<3>());
+  EXPECT_NE(&x.get<2>(), &x.get<3>());
+  EXPECT_EQ(&x.get<3>(), &s);
+}
+
+TEST(CompressedTupleTest, NoElements) {
+  CompressedTuple<> x;
+  static_cast<void>(x);  // Silence -Wunused-variable.
+  EXPECT_TRUE(std::is_empty<CompressedTuple<>>::value);
+}
+
+TEST(CompressedTupleTest, MoveOnlyElements) {
+  CompressedTuple<std::unique_ptr<std::string>> str_tup(
+      absl::make_unique<std::string>("str"));
+
+  CompressedTuple<CompressedTuple<std::unique_ptr<std::string>>,
+                  std::unique_ptr<int>>
+  x(std::move(str_tup), absl::make_unique<int>(5));
+
+  EXPECT_EQ(*x.get<0>().get<0>(), "str");
+  EXPECT_EQ(*x.get<1>(), 5);
+
+  std::unique_ptr<std::string> x0 = std::move(x.get<0>()).get<0>();
+  std::unique_ptr<int> x1 = std::move(x).get<1>();
+
+  EXPECT_EQ(*x0, "str");
+  EXPECT_EQ(*x1, 5);
+}
+
+TEST(CompressedTupleTest, MoveConstructionMoveOnlyElements) {
+  CompressedTuple<std::unique_ptr<std::string>> base(
+      absl::make_unique<std::string>("str"));
+  EXPECT_EQ(*base.get<0>(), "str");
+
+  CompressedTuple<std::unique_ptr<std::string>> copy(std::move(base));
+  EXPECT_EQ(*copy.get<0>(), "str");
+}
+
+TEST(CompressedTupleTest, AnyElements) {
+  any a(std::string("str"));
+  CompressedTuple<any, any&> x(any(5), a);
+  EXPECT_EQ(absl::any_cast<int>(x.get<0>()), 5);
+  EXPECT_EQ(absl::any_cast<std::string>(x.get<1>()), "str");
+
+  a = 0.5f;
+  EXPECT_EQ(absl::any_cast<float>(x.get<1>()), 0.5);
+}
+
+TEST(CompressedTupleTest, Constexpr) {
+  struct NonTrivialStruct {
+    constexpr NonTrivialStruct() = default;
+    constexpr int value() const { return v; }
+    int v = 5;
+  };
+  struct TrivialStruct {
+    TrivialStruct() = default;
+    constexpr int value() const { return v; }
+    int v;
+  };
+  constexpr CompressedTuple<int, double, CompressedTuple<int>, Empty<0>> x(
+      7, 1.25, CompressedTuple<int>(5), {});
+  constexpr int x0 = x.get<0>();
+  constexpr double x1 = x.get<1>();
+  constexpr int x2 = x.get<2>().get<0>();
+  constexpr CallType x3 = x.get<3>().value();
+
+  EXPECT_EQ(x0, 7);
+  EXPECT_EQ(x1, 1.25);
+  EXPECT_EQ(x2, 5);
+  EXPECT_EQ(x3, CallType::kConstRef);
+
+#if !defined(__GNUC__) || defined(__clang__) || __GNUC__ > 4
+  constexpr CompressedTuple<Empty<0>, TrivialStruct, int> trivial = {};
+  constexpr CallType trivial0 = trivial.get<0>().value();
+  constexpr int trivial1 = trivial.get<1>().value();
+  constexpr int trivial2 = trivial.get<2>();
+
+  EXPECT_EQ(trivial0, CallType::kConstRef);
+  EXPECT_EQ(trivial1, 0);
+  EXPECT_EQ(trivial2, 0);
+#endif
+
+  constexpr CompressedTuple<Empty<0>, NonTrivialStruct, absl::optional<int>>
+      non_trivial = {};
+  constexpr CallType non_trivial0 = non_trivial.get<0>().value();
+  constexpr int non_trivial1 = non_trivial.get<1>().value();
+  constexpr absl::optional<int> non_trivial2 = non_trivial.get<2>();
+
+  EXPECT_EQ(non_trivial0, CallType::kConstRef);
+  EXPECT_EQ(non_trivial1, 5);
+  EXPECT_EQ(non_trivial2, absl::nullopt);
+
+  static constexpr char data[] = "DEF";
+  constexpr CompressedTuple<const char*> z(data);
+  constexpr const char* z1 = z.get<0>();
+  EXPECT_EQ(std::string(z1), std::string(data));
+
+#if defined(__clang__)
+  // An apparent bug in earlier versions of gcc claims these are ambiguous.
+  constexpr int x2m = absl::move(x.get<2>()).get<0>();
+  constexpr CallType x3m = absl::move(x).get<3>().value();
+  EXPECT_EQ(x2m, 5);
+  EXPECT_EQ(x3m, CallType::kConstMove);
+#endif
+}
+
+#if defined(__clang__) || defined(__GNUC__)
+TEST(CompressedTupleTest, EmptyFinalClass) {
+  struct S final {
+    int f() const { return 5; }
+  };
+  CompressedTuple<S> x;
+  EXPECT_EQ(x.get<0>().f(), 5);
+}
+#endif
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/container/internal/container_memory.h b/third_party/abseil_cpp/absl/container/internal/container_memory.h
new file mode 100644
index 000000000000..e67529ecb6e6
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/container_memory.h
@@ -0,0 +1,460 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
+#define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <new>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/config.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/utility/utility.h"
+
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif
+
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+#include <sanitizer/msan_interface.h>
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <size_t Alignment>
+struct alignas(Alignment) AlignedType {};
+
+// Allocates at least n bytes aligned to the specified alignment.
+// Alignment must be a power of 2. It must be positive.
+//
+// Note that many allocators don't honor alignment requirements above a certain
+// threshold (usually either alignof(std::max_align_t) or alignof(void*)).
+// Allocate() doesn't apply alignment corrections. If the underlying allocator
+// returns an insufficiently aligned pointer, that's what you are going to get.
+template <size_t Alignment, class Alloc>
+void* Allocate(Alloc* alloc, size_t n) {
+  static_assert(Alignment > 0, "");
+  assert(n && "n must be positive");
+  using M = AlignedType<Alignment>;
+  using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
+  using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
+  // On macOS, "mem_alloc" is a #define with one argument defined in
+  // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
+  // with the "foo(bar)" syntax.
+  A my_mem_alloc(*alloc);
+  void* p = AT::allocate(my_mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
+  assert(reinterpret_cast<uintptr_t>(p) % Alignment == 0 &&
+         "allocator does not respect alignment");
+  return p;
+}
+
+// The pointer must have been previously obtained by calling
+// Allocate<Alignment>(alloc, n).
+template <size_t Alignment, class Alloc>
+void Deallocate(Alloc* alloc, void* p, size_t n) {
+  static_assert(Alignment > 0, "");
+  assert(n && "n must be positive");
+  using M = AlignedType<Alignment>;
+  using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
+  using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
+  // On macOS, "mem_alloc" is a #define with one argument defined in
+  // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
+  // with the "foo(bar)" syntax.
+  A my_mem_alloc(*alloc);
+  AT::deallocate(my_mem_alloc, static_cast<M*>(p),
+                 (n + sizeof(M) - 1) / sizeof(M));
+}
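+
+// A minimal usage sketch (illustrative): obtaining 16-byte-aligned raw
+// storage through a std::allocator<char>:
+//
+//   std::allocator<char> a;
+//   void* p = Allocate<16>(&a, 64);  // at least 64 bytes, 16-byte aligned.
+//   ...
+//   Deallocate<16>(&a, p, 64);       // alignment and size must match.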
+
+namespace memory_internal {
+
+// Constructs T into uninitialized storage pointed to by `ptr` using the args
+// specified in the tuple.
+template <class Alloc, class T, class Tuple, size_t... I>
+void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t,
+                            absl::index_sequence<I...>) {
+  absl::allocator_traits<Alloc>::construct(
+      *alloc, ptr, std::get<I>(std::forward<Tuple>(t))...);
+}
+
+template <class T, class F>
+struct WithConstructedImplF {
+  template <class... Args>
+  decltype(std::declval<F>()(std::declval<T>())) operator()(
+      Args&&... args) const {
+    return std::forward<F>(f)(T(std::forward<Args>(args)...));
+  }
+  F&& f;
+};
+
+template <class T, class Tuple, size_t... Is, class F>
+decltype(std::declval<F>()(std::declval<T>())) WithConstructedImpl(
+    Tuple&& t, absl::index_sequence<Is...>, F&& f) {
+  return WithConstructedImplF<T, F>{std::forward<F>(f)}(
+      std::get<Is>(std::forward<Tuple>(t))...);
+}
+
+template <class T, size_t... Is>
+auto TupleRefImpl(T&& t, absl::index_sequence<Is...>)
+    -> decltype(std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...)) {
+  return std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...);
+}
+
+// Returns a tuple of references to the elements of the input tuple. T must be a
+// tuple.
+template <class T>
+auto TupleRef(T&& t) -> decltype(
+    TupleRefImpl(std::forward<T>(t),
+                 absl::make_index_sequence<
+                     std::tuple_size<typename std::decay<T>::type>::value>())) {
+  return TupleRefImpl(
+      std::forward<T>(t),
+      absl::make_index_sequence<
+          std::tuple_size<typename std::decay<T>::type>::value>());
+}
+
+template <class F, class K, class V>
+decltype(std::declval<F>()(std::declval<const K&>(), std::piecewise_construct,
+                           std::declval<std::tuple<K>>(), std::declval<V>()))
+DecomposePairImpl(F&& f, std::pair<std::tuple<K>, V> p) {
+  const auto& key = std::get<0>(p.first);
+  return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
+                            std::move(p.second));
+}
+
+}  // namespace memory_internal
+
+// Constructs T into uninitialized storage pointed to by `ptr` using the args
+// specified in the tuple.
+template <class Alloc, class T, class Tuple>
+void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) {
+  memory_internal::ConstructFromTupleImpl(
+      alloc, ptr, std::forward<Tuple>(t),
+      absl::make_index_sequence<
+          std::tuple_size<typename std::decay<Tuple>::type>::value>());
+}
+
+// Constructs T using the args specified in the tuple and calls F with the
+// constructed value.
+template <class T, class Tuple, class F>
+decltype(std::declval<F>()(std::declval<T>())) WithConstructed(
+    Tuple&& t, F&& f) {
+  return memory_internal::WithConstructedImpl<T>(
+      std::forward<Tuple>(t),
+      absl::make_index_sequence<
+          std::tuple_size<typename std::decay<Tuple>::type>::value>(),
+      std::forward<F>(f));
+}
+
+// Given the arguments of a std::pair constructor, PairArgs() returns a pair
+// of tuples with references to the passed arguments. The tuples contain
+// constructor arguments for the first and the second elements of the pair.
+//
+// The following two snippets are equivalent.
+//
+// 1. std::pair<F, S> p(args...);
+//
+// 2. auto a = PairArgs(args...);
+//    std::pair<F, S> p(std::piecewise_construct,
+//                      std::move(a.first), std::move(a.second));
+inline std::pair<std::tuple<>, std::tuple<>> PairArgs() { return {}; }
+template <class F, class S>
+std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(F&& f, S&& s) {
+  return {std::piecewise_construct, std::forward_as_tuple(std::forward<F>(f)),
+          std::forward_as_tuple(std::forward<S>(s))};
+}
+template <class F, class S>
+std::pair<std::tuple<const F&>, std::tuple<const S&>> PairArgs(
+    const std::pair<F, S>& p) {
+  return PairArgs(p.first, p.second);
+}
+template <class F, class S>
+std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(std::pair<F, S>&& p) {
+  return PairArgs(std::forward<F>(p.first), std::forward<S>(p.second));
+}
+template <class F, class S>
+auto PairArgs(std::piecewise_construct_t, F&& f, S&& s)
+    -> decltype(std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
+                               memory_internal::TupleRef(std::forward<S>(s)))) {
+  return std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
+                        memory_internal::TupleRef(std::forward<S>(s)));
+}
+
+// A helper function for implementing apply() in map policies.
+template <class F, class... Args>
+auto DecomposePair(F&& f, Args&&... args)
+    -> decltype(memory_internal::DecomposePairImpl(
+        std::forward<F>(f), PairArgs(std::forward<Args>(args)...))) {
+  return memory_internal::DecomposePairImpl(
+      std::forward<F>(f), PairArgs(std::forward<Args>(args)...));
+}
+
+// A helper function for implementing apply() in set policies.
+template <class F, class Arg>
+decltype(std::declval<F>()(std::declval<const Arg&>(), std::declval<Arg>()))
+DecomposeValue(F&& f, Arg&& arg) {
+  const auto& key = arg;
+  return std::forward<F>(f)(key, std::forward<Arg>(arg));
+}
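+
+// For example (illustrative), DecomposeValue(f, 42) invokes f(key, 42), where
+// `key` is a const reference to the argument itself:
+//
+//   DecomposeValue([](const int& key, int&& v) { return key == 42; }, 42);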
+
+// Helper functions for ASan and MSan.
+inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+  ASAN_POISON_MEMORY_REGION(m, s);
+#endif
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+  __msan_poison(m, s);
+#endif
+  (void)m;
+  (void)s;
+}
+
+inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) {
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+  ASAN_UNPOISON_MEMORY_REGION(m, s);
+#endif
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+  __msan_unpoison(m, s);
+#endif
+  (void)m;
+  (void)s;
+}
+
+template <typename T>
+inline void SanitizerPoisonObject(const T* object) {
+  SanitizerPoisonMemoryRegion(object, sizeof(T));
+}
+
+template <typename T>
+inline void SanitizerUnpoisonObject(const T* object) {
+  SanitizerUnpoisonMemoryRegion(object, sizeof(T));
+}
+
+namespace memory_internal {
+
+// If Pair is a standard-layout type, OffsetOf<Pair>::kFirst and
+// OffsetOf<Pair>::kSecond are equivalent to offsetof(Pair, first) and
+// offsetof(Pair, second) respectively. Otherwise they are -1.
+//
+// The purpose of OffsetOf is to avoid calling offsetof() on a
+// non-standard-layout type, which is non-portable.
+template <class Pair, class = std::true_type>
+struct OffsetOf {
+  static constexpr size_t kFirst = static_cast<size_t>(-1);
+  static constexpr size_t kSecond = static_cast<size_t>(-1);
+};
+
+template <class Pair>
+struct OffsetOf<Pair, typename std::is_standard_layout<Pair>::type> {
+  static constexpr size_t kFirst = offsetof(Pair, first);
+  static constexpr size_t kSecond = offsetof(Pair, second);
+};
+
+template <class K, class V>
+struct IsLayoutCompatible {
+ private:
+  struct Pair {
+    K first;
+    V second;
+  };
+
+  // Is P layout-compatible with Pair?
+  template <class P>
+  static constexpr bool LayoutCompatible() {
+    return std::is_standard_layout<P>() && sizeof(P) == sizeof(Pair) &&
+           alignof(P) == alignof(Pair) &&
+           memory_internal::OffsetOf<P>::kFirst ==
+               memory_internal::OffsetOf<Pair>::kFirst &&
+           memory_internal::OffsetOf<P>::kSecond ==
+               memory_internal::OffsetOf<Pair>::kSecond;
+  }
+
+ public:
+  // Whether pair<const K, V> and pair<K, V> are layout-compatible. If they are,
+  // then it is safe to store them in a union and read from either.
+  static constexpr bool value = std::is_standard_layout<K>() &&
+                                std::is_standard_layout<Pair>() &&
+                                memory_internal::OffsetOf<Pair>::kFirst == 0 &&
+                                LayoutCompatible<std::pair<K, V>>() &&
+                                LayoutCompatible<std::pair<const K, V>>();
+};
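+
+// For most simple key/value types the check passes, e.g. (a non-normative
+// sanity check; true on common implementations):
+//
+//   static_assert(IsLayoutCompatible<int, double>::value, "");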
+
+}  // namespace memory_internal
+
+// The internal storage type for key-value containers like flat_hash_map.
+//
+// It is convenient for the value_type of a flat_hash_map<K, V> to be
+// pair<const K, V>; the "const K" prevents accidental modification of the key
+// when dealing with the reference returned from find() and similar methods.
+// However, this creates other problems; we want to be able to emplace(K, V)
+// efficiently with move operations, and similarly be able to move a
+// pair<K, V> in insert().
+//
+// The solution is this union, which aliases the const and non-const versions
+// of the pair. This also allows flat_hash_map<const K, V> to work, even though
+// that has the same efficiency issues with move in emplace() and insert() -
+// but people do it anyway.
+//
+// If kMutableKeys is false, only the value member can be accessed.
+//
+// If kMutableKeys is true, key can be accessed through all slots while value
+// and mutable_value must be accessed only via INITIALIZED slots. Slots are
+// created and destroyed via mutable_value so that the key can be moved later.
+//
+// Accessing one of the union fields while the other is active is safe as
+// long as they are layout-compatible, which is guaranteed by the definition of
+// kMutableKeys. For C++11, the relevant section of the standard is
+// https://timsong-cpp.github.io/cppwp/n3337/class.mem#19 (9.2.19)
+template <class K, class V>
+union map_slot_type {
+  map_slot_type() {}
+  ~map_slot_type() = delete;
+  using value_type = std::pair<const K, V>;
+  using mutable_value_type =
+      std::pair<absl::remove_const_t<K>, absl::remove_const_t<V>>;
+
+  value_type value;
+  mutable_value_type mutable_value;
+  absl::remove_const_t<K> key;
+};
+
+template <class K, class V>
+struct map_slot_policy {
+  using slot_type = map_slot_type<K, V>;
+  using value_type = std::pair<const K, V>;
+  using mutable_value_type = std::pair<K, V>;
+
+ private:
+  static void emplace(slot_type* slot) {
+    // The construction of the union doesn't do anything at runtime, but it
+    // allows us to access its members without violating aliasing rules.
+    new (slot) slot_type;
+  }
+  // If pair<const K, V> and pair<K, V> are layout-compatible, we can accept one
+  // or the other via slot_type. We are also free to access the key via
+  // slot_type::key in this case.
+  using kMutableKeys = memory_internal::IsLayoutCompatible<K, V>;
+
+ public:
+  static value_type& element(slot_type* slot) { return slot->value; }
+  static const value_type& element(const slot_type* slot) {
+    return slot->value;
+  }
+
+  // When C++17 is available, we can use std::launder to provide mutable
+  // access to the key for use in the node handle.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+  static K& mutable_key(slot_type* slot) {
+    // Still check for kMutableKeys so that we can avoid calling std::launder
+    // unless necessary because it can interfere with optimizations.
+    return kMutableKeys::value ? slot->key
+                               : *std::launder(const_cast<K*>(
+                                     std::addressof(slot->value.first)));
+  }
+#else  // !(defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606)
+  static const K& mutable_key(slot_type* slot) { return key(slot); }
+#endif
+
+  static const K& key(const slot_type* slot) {
+    return kMutableKeys::value ? slot->key : slot->value.first;
+  }
+
+  template <class Allocator, class... Args>
+  static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
+    emplace(slot);
+    if (kMutableKeys::value) {
+      absl::allocator_traits<Allocator>::construct(*alloc, &slot->mutable_value,
+                                                   std::forward<Args>(args)...);
+    } else {
+      absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
+                                                   std::forward<Args>(args)...);
+    }
+  }
+
+  // Construct this slot by moving from another slot.
+  template <class Allocator>
+  static void construct(Allocator* alloc, slot_type* slot, slot_type* other) {
+    emplace(slot);
+    if (kMutableKeys::value) {
+      absl::allocator_traits<Allocator>::construct(
+          *alloc, &slot->mutable_value, std::move(other->mutable_value));
+    } else {
+      absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
+                                                   std::move(other->value));
+    }
+  }
+
+  template <class Allocator>
+  static void destroy(Allocator* alloc, slot_type* slot) {
+    if (kMutableKeys::value) {
+      absl::allocator_traits<Allocator>::destroy(*alloc, &slot->mutable_value);
+    } else {
+      absl::allocator_traits<Allocator>::destroy(*alloc, &slot->value);
+    }
+  }
+
+  template <class Allocator>
+  static void transfer(Allocator* alloc, slot_type* new_slot,
+                       slot_type* old_slot) {
+    emplace(new_slot);
+    if (kMutableKeys::value) {
+      absl::allocator_traits<Allocator>::construct(
+          *alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value));
+    } else {
+      absl::allocator_traits<Allocator>::construct(*alloc, &new_slot->value,
+                                                   std::move(old_slot->value));
+    }
+    destroy(alloc, old_slot);
+  }
+
+  template <class Allocator>
+  static void swap(Allocator* alloc, slot_type* a, slot_type* b) {
+    if (kMutableKeys::value) {
+      using std::swap;
+      swap(a->mutable_value, b->mutable_value);
+    } else {
+      value_type tmp = std::move(a->value);
+      absl::allocator_traits<Allocator>::destroy(*alloc, &a->value);
+      absl::allocator_traits<Allocator>::construct(*alloc, &a->value,
+                                                   std::move(b->value));
+      absl::allocator_traits<Allocator>::destroy(*alloc, &b->value);
+      absl::allocator_traits<Allocator>::construct(*alloc, &b->value,
+                                                   std::move(tmp));
+    }
+  }
+
+  template <class Allocator>
+  static void move(Allocator* alloc, slot_type* src, slot_type* dest) {
+    if (kMutableKeys::value) {
+      dest->mutable_value = std::move(src->mutable_value);
+    } else {
+      absl::allocator_traits<Allocator>::destroy(*alloc, &dest->value);
+      absl::allocator_traits<Allocator>::construct(*alloc, &dest->value,
+                                                   std::move(src->value));
+    }
+  }
+};
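+
+// Example use of the policy (illustrative; this mirrors the unit test). The
+// slot lives in a union with user-provided special members because
+// map_slot_type has a deleted destructor:
+//
+//   using policy = map_slot_policy<int, std::string>;
+//   union Slot {
+//     Slot() {}
+//     ~Slot() {}
+//     policy::slot_type slot;
+//   } s;
+//   std::allocator<std::pair<const int, std::string>> alloc;
+//   policy::construct(&alloc, &s.slot, 1, "one");
+//   assert(policy::element(&s.slot).second == "one");
+//   policy::destroy(&alloc, &s.slot);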
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/container_memory_test.cc b/third_party/abseil_cpp/absl/container/internal/container_memory_test.cc
new file mode 100644
index 000000000000..6a7fcd29ba90
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/container_memory_test.cc
@@ -0,0 +1,256 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/container_memory.h"
+
+#include <cstdint>
+#include <cstring>
+#include <map>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <typeindex>
+#include <typeinfo>
+#include <utility>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/test_instance_tracker.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using ::absl::test_internal::CopyableMovableInstance;
+using ::absl::test_internal::InstanceTracker;
+using ::testing::_;
+using ::testing::ElementsAre;
+using ::testing::Gt;
+using ::testing::Pair;
+
+TEST(Memory, AlignmentLargerThanBase) {
+  std::allocator<int8_t> alloc;
+  void* mem = Allocate<2>(&alloc, 3);
+  EXPECT_EQ(0, reinterpret_cast<uintptr_t>(mem) % 2);
+  memcpy(mem, "abc", 3);
+  Deallocate<2>(&alloc, mem, 3);
+}
+
+TEST(Memory, AlignmentSmallerThanBase) {
+  std::allocator<int64_t> alloc;
+  void* mem = Allocate<2>(&alloc, 3);
+  EXPECT_EQ(0, reinterpret_cast<uintptr_t>(mem) % 2);
+  memcpy(mem, "abc", 3);
+  Deallocate<2>(&alloc, mem, 3);
+}
+
+std::map<std::type_index, int>& AllocationMap() {
+  static auto* map = new std::map<std::type_index, int>;
+  return *map;
+}
+
+template <typename T>
+struct TypeCountingAllocator {
+  TypeCountingAllocator() = default;
+  template <typename U>
+  TypeCountingAllocator(const TypeCountingAllocator<U>&) {}  // NOLINT
+
+  using value_type = T;
+
+  T* allocate(size_t n, const void* = nullptr) {
+    AllocationMap()[typeid(T)] += n;
+    return std::allocator<T>().allocate(n);
+  }
+  void deallocate(T* p, std::size_t n) {
+    AllocationMap()[typeid(T)] -= n;
+    std::allocator<T>().deallocate(p, n);
+  }
+};
+
+TEST(Memory, AllocateDeallocateMatchType) {
+  TypeCountingAllocator<int> alloc;
+  void* mem = Allocate<1>(&alloc, 1);
+  // Verify that it was allocated.
+  EXPECT_THAT(AllocationMap(), ElementsAre(Pair(_, Gt(0))));
+  Deallocate<1>(&alloc, mem, 1);
+  // Verify that the deallocation matched.
+  EXPECT_THAT(AllocationMap(), ElementsAre(Pair(_, 0)));
+}
+
+class Fixture : public ::testing::Test {
+  using Alloc = std::allocator<std::string>;
+
+ public:
+  Fixture() { ptr_ = std::allocator_traits<Alloc>::allocate(*alloc(), 1); }
+  ~Fixture() override {
+    std::allocator_traits<Alloc>::destroy(*alloc(), ptr_);
+    std::allocator_traits<Alloc>::deallocate(*alloc(), ptr_, 1);
+  }
+  std::string* ptr() { return ptr_; }
+  Alloc* alloc() { return &alloc_; }
+
+ private:
+  Alloc alloc_;
+  std::string* ptr_;
+};
+
+TEST_F(Fixture, ConstructNoArgs) {
+  ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple());
+  EXPECT_EQ(*ptr(), "");
+}
+
+TEST_F(Fixture, ConstructOneArg) {
+  ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple("abcde"));
+  EXPECT_EQ(*ptr(), "abcde");
+}
+
+TEST_F(Fixture, ConstructTwoArg) {
+  ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple(5, 'a'));
+  EXPECT_EQ(*ptr(), "aaaaa");
+}
+
+TEST(PairArgs, NoArgs) {
+  EXPECT_THAT(PairArgs(),
+              Pair(std::forward_as_tuple(), std::forward_as_tuple()));
+}
+
+TEST(PairArgs, TwoArgs) {
+  EXPECT_EQ(
+      std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')),
+      PairArgs(1, 'A'));
+}
+
+TEST(PairArgs, Pair) {
+  EXPECT_EQ(
+      std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')),
+      PairArgs(std::make_pair(1, 'A')));
+}
+
+TEST(PairArgs, Piecewise) {
+  EXPECT_EQ(
+      std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')),
+      PairArgs(std::piecewise_construct, std::forward_as_tuple(1),
+               std::forward_as_tuple('A')));
+}
+
+TEST(WithConstructed, Simple) {
+  EXPECT_EQ(1, WithConstructed<absl::string_view>(
+                   std::make_tuple(std::string("a")),
+                   [](absl::string_view str) { return str.size(); }));
+}
+
+template <class F, class Arg>
+decltype(DecomposeValue(std::declval<F>(), std::declval<Arg>()))
+DecomposeValueImpl(int, F&& f, Arg&& arg) {
+  return DecomposeValue(std::forward<F>(f), std::forward<Arg>(arg));
+}
+
+template <class F, class Arg>
+const char* DecomposeValueImpl(char, F&& f, Arg&& arg) {
+  return "not decomposable";
+}
+
+template <class F, class Arg>
+decltype(DecomposeValueImpl(0, std::declval<F>(), std::declval<Arg>()))
+TryDecomposeValue(F&& f, Arg&& arg) {
+  return DecomposeValueImpl(0, std::forward<F>(f), std::forward<Arg>(arg));
+}
+
+TEST(DecomposeValue, Decomposable) {
+  auto f = [](const int& x, int&& y) {
+    EXPECT_EQ(&x, &y);
+    EXPECT_EQ(42, x);
+    return 'A';
+  };
+  EXPECT_EQ('A', TryDecomposeValue(f, 42));
+}
+
+TEST(DecomposeValue, NotDecomposable) {
+  auto f = [](void*) {
+    ADD_FAILURE() << "Must not be called";
+    return 'A';
+  };
+  EXPECT_STREQ("not decomposable", TryDecomposeValue(f, 42));
+}
+
+template <class F, class... Args>
+decltype(DecomposePair(std::declval<F>(), std::declval<Args>()...))
+DecomposePairImpl(int, F&& f, Args&&... args) {
+  return DecomposePair(std::forward<F>(f), std::forward<Args>(args)...);
+}
+
+template <class F, class... Args>
+const char* DecomposePairImpl(char, F&& f, Args&&... args) {
+  return "not decomposable";
+}
+
+template <class F, class... Args>
+decltype(DecomposePairImpl(0, std::declval<F>(), std::declval<Args>()...))
+TryDecomposePair(F&& f, Args&&... args) {
+  return DecomposePairImpl(0, std::forward<F>(f), std::forward<Args>(args)...);
+}
+
+TEST(DecomposePair, Decomposable) {
+  auto f = [](const int& x, std::piecewise_construct_t, std::tuple<int&&> k,
+              std::tuple<double>&& v) {
+    EXPECT_EQ(&x, &std::get<0>(k));
+    EXPECT_EQ(42, x);
+    EXPECT_EQ(0.5, std::get<0>(v));
+    return 'A';
+  };
+  EXPECT_EQ('A', TryDecomposePair(f, 42, 0.5));
+  EXPECT_EQ('A', TryDecomposePair(f, std::make_pair(42, 0.5)));
+  EXPECT_EQ('A', TryDecomposePair(f, std::piecewise_construct,
+                                  std::make_tuple(42), std::make_tuple(0.5)));
+}
+
+TEST(DecomposePair, NotDecomposable) {
+  auto f = [](...) {
+    ADD_FAILURE() << "Must not be called";
+    return 'A';
+  };
+  EXPECT_STREQ("not decomposable",
+               TryDecomposePair(f));
+  EXPECT_STREQ("not decomposable",
+               TryDecomposePair(f, std::piecewise_construct, std::make_tuple(),
+                                std::make_tuple(0.5)));
+}
+
+TEST(MapSlotPolicy, ConstKeyAndValue) {
+  using slot_policy = map_slot_policy<const CopyableMovableInstance,
+                                      const CopyableMovableInstance>;
+  using slot_type = typename slot_policy::slot_type;
+
+  union Slots {
+    Slots() {}
+    ~Slots() {}
+    slot_type slots[100];
+  } slots;
+
+  std::allocator<
+      std::pair<const CopyableMovableInstance, const CopyableMovableInstance>>
+      alloc;
+  InstanceTracker tracker;
+  slot_policy::construct(&alloc, &slots.slots[0], CopyableMovableInstance(1),
+                         CopyableMovableInstance(1));
+  for (int i = 0; i < 99; ++i) {
+    slot_policy::transfer(&alloc, &slots.slots[i + 1], &slots.slots[i]);
+  }
+  slot_policy::destroy(&alloc, &slots.slots[99]);
+
+  EXPECT_EQ(tracker.copies(), 0);
+}
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/container/internal/counting_allocator.h b/third_party/abseil_cpp/absl/container/internal/counting_allocator.h
new file mode 100644
index 000000000000..927cf0825579
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/counting_allocator.h
@@ -0,0 +1,114 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
+#define ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
+
+#include <cstdint>
+#include <memory>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// This is a stateful allocator, but the state lives outside of the
+// allocator (in whatever test is using the allocator). This is odd
+// but helps in tests where the allocator is propagated into nested
+// containers - that chain of allocators uses the same state and is
+// thus easier to query for aggregate allocation information.
+template <typename T>
+class CountingAllocator {
+ public:
+  using Allocator = std::allocator<T>;
+  using AllocatorTraits = std::allocator_traits<Allocator>;
+  using value_type = typename AllocatorTraits::value_type;
+  using pointer = typename AllocatorTraits::pointer;
+  using const_pointer = typename AllocatorTraits::const_pointer;
+  using size_type = typename AllocatorTraits::size_type;
+  using difference_type = typename AllocatorTraits::difference_type;
+
+  CountingAllocator() = default;
+  explicit CountingAllocator(int64_t* bytes_used) : bytes_used_(bytes_used) {}
+  CountingAllocator(int64_t* bytes_used, int64_t* instance_count)
+      : bytes_used_(bytes_used), instance_count_(instance_count) {}
+
+  template <typename U>
+  CountingAllocator(const CountingAllocator<U>& x)
+      : bytes_used_(x.bytes_used_), instance_count_(x.instance_count_) {}
+
+  pointer allocate(
+      size_type n,
+      typename AllocatorTraits::const_void_pointer hint = nullptr) {
+    Allocator allocator;
+    pointer ptr = AllocatorTraits::allocate(allocator, n, hint);
+    if (bytes_used_ != nullptr) {
+      *bytes_used_ += n * sizeof(T);
+    }
+    return ptr;
+  }
+
+  void deallocate(pointer p, size_type n) {
+    Allocator allocator;
+    AllocatorTraits::deallocate(allocator, p, n);
+    if (bytes_used_ != nullptr) {
+      *bytes_used_ -= n * sizeof(T);
+    }
+  }
+
+  template <typename U, typename... Args>
+  void construct(U* p, Args&&... args) {
+    Allocator allocator;
+    AllocatorTraits::construct(allocator, p, std::forward<Args>(args)...);
+    if (instance_count_ != nullptr) {
+      *instance_count_ += 1;
+    }
+  }
+
+  template <typename U>
+  void destroy(U* p) {
+    Allocator allocator;
+    AllocatorTraits::destroy(allocator, p);
+    if (instance_count_ != nullptr) {
+      *instance_count_ -= 1;
+    }
+  }
+
+  template <typename U>
+  class rebind {
+   public:
+    using other = CountingAllocator<U>;
+  };
+
+  friend bool operator==(const CountingAllocator& a,
+                         const CountingAllocator& b) {
+    return a.bytes_used_ == b.bytes_used_ &&
+           a.instance_count_ == b.instance_count_;
+  }
+
+  friend bool operator!=(const CountingAllocator& a,
+                         const CountingAllocator& b) {
+    return !(a == b);
+  }
+
+  int64_t* bytes_used_ = nullptr;
+  int64_t* instance_count_ = nullptr;
+};
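+
+// For example (illustrative; `bytes` and `v` are hypothetical names):
+//
+//   int64_t bytes = 0;
+//   {
+//     std::vector<int, CountingAllocator<int>> v(
+//         CountingAllocator<int>(&bytes));
+//     v.reserve(16);  // bytes == 16 * sizeof(int)
+//   }                 // v is destroyed; bytes == 0 again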
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/hash_function_defaults.h b/third_party/abseil_cpp/absl/container/internal/hash_function_defaults.h
new file mode 100644
index 000000000000..0683422ad89b
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/hash_function_defaults.h
@@ -0,0 +1,161 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Define the default Hash and Eq functions for SwissTable containers.
+//
+// std::hash<T> and std::equal_to<T> are not appropriate hash and equal
+// functions for SwissTable containers. There are two reasons for this.
+//
+// SwissTable containers are power-of-2 sized containers:
+//
+// This means they use the lower bits of the hash value to find the slot for
+// each entry. The typical hash function for integral types is the identity,
+// which is a very weak hash function for SwissTable (or any power-of-2 sized
+// hashtable implementation) and will lead to excessive collisions. For
+// SwissTable we use murmur3-style mixing to reduce collisions to a minimum.
+//
+// SwissTable containers support heterogeneous lookup:
+//
+// In order to make heterogeneous lookup work, hash and equal functions must be
+// polymorphic. At the same time they have to satisfy the same requirements the
+// C++ standard imposes on hash functions and equality operators. That is:
+//
+//   if hash_default_eq<T>(a, b) returns true for any a and b of type T, then
+//   hash_default_hash<T>(a) must equal hash_default_hash<T>(b)
+//
+// For SwissTable containers this requirement is relaxed to allow a and b of
+// any, and possibly different, types. Note that, as in the standard, the hash
+// and equal functions are still bound to T. This is important because some
+// type U can be hashed by/tested for equality differently depending on T. A
+// notable example is `const char*`: it is treated as a C-style string when
+// the hash function is hash<std::string> but as a pointer when the hash
+// function is hash<void*>.
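+//
+// For example (illustrative), with these defaults a
+// flat_hash_set<std::string> can be probed with an absl::string_view without
+// materializing a temporary std::string:
+//
+//   absl::flat_hash_set<std::string> set = {"foo"};
+//   set.count(absl::string_view("foo"));  // == 1; no std::string is created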
+//
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
+
+#include <stdint.h>
+#include <cstddef>
+#include <memory>
+#include <string>
+#include <type_traits>
+
+#include "absl/base/config.h"
+#include "absl/hash/hash.h"
+#include "absl/strings/cord.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// The hash of an object of type T is computed by using absl::Hash.
+template <class T, class E = void>
+struct HashEq {
+  using Hash = absl::Hash<T>;
+  using Eq = std::equal_to<T>;
+};
+
+struct StringHash {
+  using is_transparent = void;
+
+  size_t operator()(absl::string_view v) const {
+    return absl::Hash<absl::string_view>{}(v);
+  }
+  size_t operator()(const absl::Cord& v) const {
+    return absl::Hash<absl::Cord>{}(v);
+  }
+};
+
+// Supports heterogeneous lookup for string-like elements.
+struct StringHashEq {
+  using Hash = StringHash;
+  struct Eq {
+    using is_transparent = void;
+    bool operator()(absl::string_view lhs, absl::string_view rhs) const {
+      return lhs == rhs;
+    }
+    bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const {
+      return lhs == rhs;
+    }
+    bool operator()(const absl::Cord& lhs, absl::string_view rhs) const {
+      return lhs == rhs;
+    }
+    bool operator()(absl::string_view lhs, const absl::Cord& rhs) const {
+      return lhs == rhs;
+    }
+  };
+};
+
+template <>
+struct HashEq<std::string> : StringHashEq {};
+template <>
+struct HashEq<absl::string_view> : StringHashEq {};
+template <>
+struct HashEq<absl::Cord> : StringHashEq {};
+
+// Supports heterogeneous lookup for pointers and smart pointers.
+template <class T>
+struct HashEq<T*> {
+  struct Hash {
+    using is_transparent = void;
+    template <class U>
+    size_t operator()(const U& ptr) const {
+      return absl::Hash<const T*>{}(HashEq::ToPtr(ptr));
+    }
+  };
+  struct Eq {
+    using is_transparent = void;
+    template <class A, class B>
+    bool operator()(const A& a, const B& b) const {
+      return HashEq::ToPtr(a) == HashEq::ToPtr(b);
+    }
+  };
+
+ private:
+  static const T* ToPtr(const T* ptr) { return ptr; }
+  template <class U, class D>
+  static const T* ToPtr(const std::unique_ptr<U, D>& ptr) {
+    return ptr.get();
+  }
+  template <class U>
+  static const T* ToPtr(const std::shared_ptr<U>& ptr) {
+    return ptr.get();
+  }
+};
+
+template <class T, class D>
+struct HashEq<std::unique_ptr<T, D>> : HashEq<T*> {};
+template <class T>
+struct HashEq<std::shared_ptr<T>> : HashEq<T*> {};
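+
+// For example (illustrative), a container of unique_ptrs can be probed with a
+// raw pointer thanks to the transparent Hash and Eq above:
+//
+//   absl::flat_hash_set<std::unique_ptr<int>> set;
+//   set.insert(absl::make_unique<int>(1));
+//   int* raw = set.begin()->get();
+//   set.count(raw);  // == 1; no temporary unique_ptr is constructed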
+
+// This header's visibility is restricted.  If you need to access the default
+// hasher please use the container's ::hasher alias instead.
+//
+// Example: typename Hash = typename absl::flat_hash_map<K, V>::hasher
+template <class T>
+using hash_default_hash = typename container_internal::HashEq<T>::Hash;
+
+// This header's visibility is restricted.  If you need to access the default
+// key equal please use the container's ::key_equal alias instead.
+//
+// Example: typename Eq = typename absl::flat_hash_map<K, V, Hash>::key_equal
+template <class T>
+using hash_default_eq = typename container_internal::HashEq<T>::Eq;
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/hash_function_defaults_test.cc b/third_party/abseil_cpp/absl/container/internal/hash_function_defaults_test.cc
new file mode 100644
index 000000000000..59576b8edebc
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/hash_function_defaults_test.cc
@@ -0,0 +1,383 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hash_function_defaults.h"
+
+#include <algorithm>
+#include <functional>
+#include <memory>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/random/random.h"
+#include "absl/strings/cord.h"
+#include "absl/strings/cord_test_helpers.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using ::testing::Types;
+
+TEST(Eq, Int32) {
+  hash_default_eq<int32_t> eq;
+  EXPECT_TRUE(eq(1, 1u));
+  EXPECT_TRUE(eq(1, char{1}));
+  EXPECT_TRUE(eq(1, true));
+  EXPECT_TRUE(eq(1, double{1.1}));
+  EXPECT_FALSE(eq(1, char{2}));
+  EXPECT_FALSE(eq(1, 2u));
+  EXPECT_FALSE(eq(1, false));
+  EXPECT_FALSE(eq(1, 2.));
+}
+
+TEST(Hash, Int32) {
+  hash_default_hash<int32_t> hash;
+  auto h = hash(1);
+  EXPECT_EQ(h, hash(1u));
+  EXPECT_EQ(h, hash(char{1}));
+  EXPECT_EQ(h, hash(true));
+  EXPECT_EQ(h, hash(double{1.1}));
+  EXPECT_NE(h, hash(2u));
+  EXPECT_NE(h, hash(char{2}));
+  EXPECT_NE(h, hash(false));
+  EXPECT_NE(h, hash(2.));
+}
+
+enum class MyEnum { A, B, C, D };
+
+TEST(Eq, Enum) {
+  hash_default_eq<MyEnum> eq;
+  EXPECT_TRUE(eq(MyEnum::A, MyEnum::A));
+  EXPECT_FALSE(eq(MyEnum::A, MyEnum::B));
+}
+
+TEST(Hash, Enum) {
+  hash_default_hash<MyEnum> hash;
+
+  for (MyEnum e : {MyEnum::A, MyEnum::B, MyEnum::C}) {
+    auto h = hash(e);
+    EXPECT_EQ(h, hash_default_hash<int>{}(static_cast<int>(e)));
+    EXPECT_NE(h, hash(MyEnum::D));
+  }
+}
+
+using StringTypes = ::testing::Types<std::string, absl::string_view>;
+
+template <class T>
+struct EqString : ::testing::Test {
+  hash_default_eq<T> key_eq;
+};
+
+TYPED_TEST_SUITE(EqString, StringTypes);
+
+template <class T>
+struct HashString : ::testing::Test {
+  hash_default_hash<T> hasher;
+};
+
+TYPED_TEST_SUITE(HashString, StringTypes);
+
+TYPED_TEST(EqString, Works) {
+  auto eq = this->key_eq;
+  EXPECT_TRUE(eq("a", "a"));
+  EXPECT_TRUE(eq("a", absl::string_view("a")));
+  EXPECT_TRUE(eq("a", std::string("a")));
+  EXPECT_FALSE(eq("a", "b"));
+  EXPECT_FALSE(eq("a", absl::string_view("b")));
+  EXPECT_FALSE(eq("a", std::string("b")));
+}
+
+TYPED_TEST(HashString, Works) {
+  auto hash = this->hasher;
+  auto h = hash("a");
+  EXPECT_EQ(h, hash(absl::string_view("a")));
+  EXPECT_EQ(h, hash(std::string("a")));
+  EXPECT_NE(h, hash(absl::string_view("b")));
+  EXPECT_NE(h, hash(std::string("b")));
+}
+
+struct NoDeleter {
+  template <class T>
+  void operator()(const T* ptr) const {}
+};
+
+using PointerTypes =
+    ::testing::Types<const int*, int*, std::unique_ptr<const int>,
+                     std::unique_ptr<const int, NoDeleter>,
+                     std::unique_ptr<int>, std::unique_ptr<int, NoDeleter>,
+                     std::shared_ptr<const int>, std::shared_ptr<int>>;
+
+template <class T>
+struct EqPointer : ::testing::Test {
+  hash_default_eq<T> key_eq;
+};
+
+TYPED_TEST_SUITE(EqPointer, PointerTypes);
+
+template <class T>
+struct HashPointer : ::testing::Test {
+  hash_default_hash<T> hasher;
+};
+
+TYPED_TEST_SUITE(HashPointer, PointerTypes);
+
+TYPED_TEST(EqPointer, Works) {
+  int dummy;
+  auto eq = this->key_eq;
+  auto sptr = std::make_shared<int>();
+  std::shared_ptr<const int> csptr = sptr;
+  int* ptr = sptr.get();
+  const int* cptr = ptr;
+  std::unique_ptr<int, NoDeleter> uptr(ptr);
+  std::unique_ptr<const int, NoDeleter> cuptr(ptr);
+
+  EXPECT_TRUE(eq(ptr, cptr));
+  EXPECT_TRUE(eq(ptr, sptr));
+  EXPECT_TRUE(eq(ptr, uptr));
+  EXPECT_TRUE(eq(ptr, csptr));
+  EXPECT_TRUE(eq(ptr, cuptr));
+  EXPECT_FALSE(eq(&dummy, cptr));
+  EXPECT_FALSE(eq(&dummy, sptr));
+  EXPECT_FALSE(eq(&dummy, uptr));
+  EXPECT_FALSE(eq(&dummy, csptr));
+  EXPECT_FALSE(eq(&dummy, cuptr));
+}
+
+TEST(Hash, DerivedAndBase) {
+  struct Base {};
+  struct Derived : Base {};
+
+  hash_default_hash<Base*> hasher;
+
+  Base base;
+  Derived derived;
+  EXPECT_NE(hasher(&base), hasher(&derived));
+  EXPECT_EQ(hasher(static_cast<Base*>(&derived)), hasher(&derived));
+
+  auto dp = std::make_shared<Derived>();
+  EXPECT_EQ(hasher(static_cast<Base*>(dp.get())), hasher(dp));
+}
+
+TEST(Hash, FunctionPointer) {
+  using Func = int (*)();
+  hash_default_hash<Func> hasher;
+  hash_default_eq<Func> eq;
+
+  Func p1 = [] { return 1; }, p2 = [] { return 2; };
+  EXPECT_EQ(hasher(p1), hasher(p1));
+  EXPECT_TRUE(eq(p1, p1));
+
+  EXPECT_NE(hasher(p1), hasher(p2));
+  EXPECT_FALSE(eq(p1, p2));
+}
+
+TYPED_TEST(HashPointer, Works) {
+  int dummy;
+  auto hash = this->hasher;
+  auto sptr = std::make_shared<int>();
+  std::shared_ptr<const int> csptr = sptr;
+  int* ptr = sptr.get();
+  const int* cptr = ptr;
+  std::unique_ptr<int, NoDeleter> uptr(ptr);
+  std::unique_ptr<const int, NoDeleter> cuptr(ptr);
+
+  EXPECT_EQ(hash(ptr), hash(cptr));
+  EXPECT_EQ(hash(ptr), hash(sptr));
+  EXPECT_EQ(hash(ptr), hash(uptr));
+  EXPECT_EQ(hash(ptr), hash(csptr));
+  EXPECT_EQ(hash(ptr), hash(cuptr));
+  EXPECT_NE(hash(&dummy), hash(cptr));
+  EXPECT_NE(hash(&dummy), hash(sptr));
+  EXPECT_NE(hash(&dummy), hash(uptr));
+  EXPECT_NE(hash(&dummy), hash(csptr));
+  EXPECT_NE(hash(&dummy), hash(cuptr));
+}
+
+TEST(EqCord, Works) {
+  hash_default_eq<absl::Cord> eq;
+  const absl::string_view a_string_view = "a";
+  const absl::Cord a_cord(a_string_view);
+  const absl::string_view b_string_view = "b";
+  const absl::Cord b_cord(b_string_view);
+
+  EXPECT_TRUE(eq(a_cord, a_cord));
+  EXPECT_TRUE(eq(a_cord, a_string_view));
+  EXPECT_TRUE(eq(a_string_view, a_cord));
+  EXPECT_FALSE(eq(a_cord, b_cord));
+  EXPECT_FALSE(eq(a_cord, b_string_view));
+  EXPECT_FALSE(eq(b_string_view, a_cord));
+}
+
+TEST(HashCord, Works) {
+  hash_default_hash<absl::Cord> hash;
+  const absl::string_view a_string_view = "a";
+  const absl::Cord a_cord(a_string_view);
+  const absl::string_view b_string_view = "b";
+  const absl::Cord b_cord(b_string_view);
+
+  EXPECT_EQ(hash(a_cord), hash(a_cord));
+  EXPECT_EQ(hash(b_cord), hash(b_cord));
+  EXPECT_EQ(hash(a_string_view), hash(a_cord));
+  EXPECT_EQ(hash(b_string_view), hash(b_cord));
+  EXPECT_EQ(hash(absl::Cord("")), hash(""));
+  EXPECT_EQ(hash(absl::Cord()), hash(absl::string_view()));
+
+  EXPECT_NE(hash(a_cord), hash(b_cord));
+  EXPECT_NE(hash(a_cord), hash(b_string_view));
+  EXPECT_NE(hash(a_string_view), hash(b_cord));
+  EXPECT_NE(hash(a_string_view), hash(b_string_view));
+}
+
+void NoOpReleaser(absl::string_view data, void* arg) {}
+
+TEST(HashCord, FragmentedCordWorks) {
+  hash_default_hash<absl::Cord> hash;
+  absl::Cord c = absl::MakeFragmentedCord({"a", "b", "c"});
+  EXPECT_FALSE(c.TryFlat().has_value());
+  EXPECT_EQ(hash(c), hash("abc"));
+}
+
+TEST(HashCord, FragmentedLongCordWorks) {
+  hash_default_hash<absl::Cord> hash;
+  // Create some large strings which do not fit on the stack.
+  std::string a(65536, 'a');
+  std::string b(65536, 'b');
+  absl::Cord c = absl::MakeFragmentedCord({a, b});
+  EXPECT_FALSE(c.TryFlat().has_value());
+  EXPECT_EQ(hash(c), hash(a + b));
+}
+
+TEST(HashCord, RandomCord) {
+  hash_default_hash<absl::Cord> hash;
+  auto bitgen = absl::BitGen();
+  for (int i = 0; i < 1000; ++i) {
+    const int number_of_segments = absl::Uniform(bitgen, 0, 10);
+    std::vector<std::string> pieces;
+    for (int s = 0; s < number_of_segments; ++s) {
+      std::string str;
+      str.resize(absl::Uniform(bitgen, 0, 4096));
+      // MSVC needed the explicit return type in the lambda.
+      std::generate(str.begin(), str.end(), [&]() -> char {
+        return static_cast<char>(absl::Uniform<unsigned char>(bitgen));
+      });
+      pieces.push_back(str);
+    }
+    absl::Cord c = absl::MakeFragmentedCord(pieces);
+    EXPECT_EQ(hash(c), hash(std::string(c)));
+  }
+}
+
+// Cartesian product of (std::string, absl::string_view)
+// with (std::string, absl::string_view, const char*, absl::Cord).
+using StringTypesCartesianProduct = Types<
+    // clang-format off
+    std::pair<absl::Cord, std::string>,
+    std::pair<absl::Cord, absl::string_view>,
+    std::pair<absl::Cord, absl::Cord>,
+    std::pair<absl::Cord, const char*>,
+
+    std::pair<std::string, absl::Cord>,
+    std::pair<absl::string_view, absl::Cord>,
+
+    std::pair<absl::string_view, std::string>,
+    std::pair<absl::string_view, absl::string_view>,
+    std::pair<absl::string_view, const char*>>;
+// clang-format on
+
+constexpr char kFirstString[] = "abc123";
+constexpr char kSecondString[] = "ijk456";
+
+template <typename T>
+struct StringLikeTest : public ::testing::Test {
+  typename T::first_type a1{kFirstString};
+  typename T::second_type b1{kFirstString};
+  typename T::first_type a2{kSecondString};
+  typename T::second_type b2{kSecondString};
+  hash_default_eq<typename T::first_type> eq;
+  hash_default_hash<typename T::first_type> hash;
+};
+
+TYPED_TEST_SUITE(StringLikeTest, StringTypesCartesianProduct);
+
+TYPED_TEST(StringLikeTest, Eq) {
+  EXPECT_TRUE(this->eq(this->a1, this->b1));
+  EXPECT_TRUE(this->eq(this->b1, this->a1));
+}
+
+TYPED_TEST(StringLikeTest, NotEq) {
+  EXPECT_FALSE(this->eq(this->a1, this->b2));
+  EXPECT_FALSE(this->eq(this->b2, this->a1));
+}
+
+TYPED_TEST(StringLikeTest, HashEq) {
+  EXPECT_EQ(this->hash(this->a1), this->hash(this->b1));
+  EXPECT_EQ(this->hash(this->a2), this->hash(this->b2));
+  // It would be a poor hash function which collides on these strings.
+  EXPECT_NE(this->hash(this->a1), this->hash(this->b2));
+}
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+enum Hash : size_t {
+  kStd = 0x1,       // std::hash
+#ifdef _MSC_VER
+  kExtension = kStd,  // In MSVC, std::hash == ::hash
+#else                 // _MSC_VER
+  kExtension = 0x2,  // ::hash (GCC extension)
+#endif                // _MSC_VER
+};
+
+// H is a bitmask of Hash enumerations.
+// Hashable<H> is hashable via all means specified in H.
+template <int H>
+struct Hashable {
+  static constexpr bool HashableBy(Hash h) { return h & H; }
+};
+
+namespace std {
+template <int H>
+struct hash<Hashable<H>> {
+  template <class E = Hashable<H>,
+            class = typename std::enable_if<E::HashableBy(kStd)>::type>
+  size_t operator()(E) const {
+    return kStd;
+  }
+};
+}  // namespace std
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+template <class T>
+size_t Hash(const T& v) {
+  return hash_default_hash<T>()(v);
+}
+
+TEST(Delegate, HashDispatch) {
+  EXPECT_EQ(Hash(kStd), Hash(Hashable<kStd>()));
+}
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/container/internal/hash_generator_testing.cc b/third_party/abseil_cpp/absl/container/internal/hash_generator_testing.cc
new file mode 100644
index 000000000000..59cc5aac7ab8
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/hash_generator_testing.cc
@@ -0,0 +1,76 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hash_generator_testing.h"
+
+#include <deque>
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace hash_internal {
+namespace {
+
+class RandomDeviceSeedSeq {
+ public:
+  using result_type = typename std::random_device::result_type;
+
+  template <class Iterator>
+  void generate(Iterator start, Iterator end) {
+    while (start != end) {
+      *start = gen_();
+      ++start;
+    }
+  }
+
+ private:
+  std::random_device gen_;
+};
+
+}  // namespace
+
+std::mt19937_64* GetSharedRng() {
+  static auto* rng = [] {
+    RandomDeviceSeedSeq seed_seq;
+    return new std::mt19937_64(seed_seq);
+  }();
+  return rng;
+}
+
+std::string Generator<std::string>::operator()() const {
+  // NOLINTNEXTLINE(runtime/int)
+  std::uniform_int_distribution<short> chars(0x20, 0x7E);
+  std::string res;
+  res.resize(32);
+  std::generate(res.begin(), res.end(),
+                [&]() { return chars(*GetSharedRng()); });
+  return res;
+}
+
+absl::string_view Generator<absl::string_view>::operator()() const {
+  static auto* arena = new std::deque<std::string>();
+  // NOLINTNEXTLINE(runtime/int)
+  std::uniform_int_distribution<short> chars(0x20, 0x7E);
+  arena->emplace_back();
+  auto& res = arena->back();
+  res.resize(32);
+  std::generate(res.begin(), res.end(),
+                [&]() { return chars(*GetSharedRng()); });
+  return res;
+}
+
+}  // namespace hash_internal
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/container/internal/hash_generator_testing.h b/third_party/abseil_cpp/absl/container/internal/hash_generator_testing.h
new file mode 100644
index 000000000000..6869fe45e8c8
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/hash_generator_testing.h
@@ -0,0 +1,161 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Generates random values for testing. Specialized only for the few types we
+// care about.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <ostream>
+#include <random>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/container/internal/hash_policy_testing.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace hash_internal {
+namespace generator_internal {
+
+template <class Container, class = void>
+struct IsMap : std::false_type {};
+
+template <class Map>
+struct IsMap<Map, absl::void_t<typename Map::mapped_type>> : std::true_type {};
+
+}  // namespace generator_internal
+
+std::mt19937_64* GetSharedRng();
+
+enum Enum {
+  kEnumEmpty,
+  kEnumDeleted,
+};
+
+enum class EnumClass : uint64_t {
+  kEmpty,
+  kDeleted,
+};
+
+inline std::ostream& operator<<(std::ostream& o, const EnumClass& ec) {
+  return o << static_cast<uint64_t>(ec);
+}
+
+template <class T, class E = void>
+struct Generator;
+
+template <class T>
+struct Generator<T, typename std::enable_if<std::is_integral<T>::value>::type> {
+  T operator()() const {
+    std::uniform_int_distribution<T> dist;
+    return dist(*GetSharedRng());
+  }
+};
+
+template <>
+struct Generator<Enum> {
+  Enum operator()() const {
+    std::uniform_int_distribution<typename std::underlying_type<Enum>::type>
+        dist;
+    while (true) {
+      auto variate = dist(*GetSharedRng());
+      if (variate != kEnumEmpty && variate != kEnumDeleted)
+        return static_cast<Enum>(variate);
+    }
+  }
+};
+
+template <>
+struct Generator<EnumClass> {
+  EnumClass operator()() const {
+    std::uniform_int_distribution<
+        typename std::underlying_type<EnumClass>::type>
+        dist;
+    while (true) {
+      EnumClass variate = static_cast<EnumClass>(dist(*GetSharedRng()));
+      if (variate != EnumClass::kEmpty && variate != EnumClass::kDeleted)
+        return variate;
+    }
+  }
+};
+
+template <>
+struct Generator<std::string> {
+  std::string operator()() const;
+};
+
+template <>
+struct Generator<absl::string_view> {
+  absl::string_view operator()() const;
+};
+
+template <>
+struct Generator<NonStandardLayout> {
+  NonStandardLayout operator()() const {
+    return NonStandardLayout(Generator<std::string>()());
+  }
+};
+
+template <class K, class V>
+struct Generator<std::pair<K, V>> {
+  std::pair<K, V> operator()() const {
+    return std::pair<K, V>(Generator<typename std::decay<K>::type>()(),
+                           Generator<typename std::decay<V>::type>()());
+  }
+};
+
+template <class... Ts>
+struct Generator<std::tuple<Ts...>> {
+  std::tuple<Ts...> operator()() const {
+    return std::tuple<Ts...>(Generator<typename std::decay<Ts>::type>()()...);
+  }
+};
+
+template <class T>
+struct Generator<std::unique_ptr<T>> {
+  std::unique_ptr<T> operator()() const {
+    return absl::make_unique<T>(Generator<T>()());
+  }
+};
+
+template <class U>
+struct Generator<U, absl::void_t<decltype(std::declval<U&>().key()),
+                                 decltype(std::declval<U&>().value())>>
+    : Generator<std::pair<
+          typename std::decay<decltype(std::declval<U&>().key())>::type,
+          typename std::decay<decltype(std::declval<U&>().value())>::type>> {};
+
+template <class Container>
+using GeneratedType = decltype(
+    std::declval<const Generator<
+        typename std::conditional<generator_internal::IsMap<Container>::value,
+                                  typename Container::value_type,
+                                  typename Container::key_type>::type>&>()());
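+
+// For example (illustrative):
+//
+//   using M = absl::flat_hash_map<int, std::string>;
+//   GeneratedType<M> kv = Generator<M::value_type>()();  // a random key/value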
+
+}  // namespace hash_internal
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/hash_policy_testing.h b/third_party/abseil_cpp/absl/container/internal/hash_policy_testing.h
new file mode 100644
index 000000000000..01c40d2e5cff
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/hash_policy_testing.h
@@ -0,0 +1,184 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Utilities to help tests verify that hash tables properly handle stateful
+// allocators and hash functions.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_
+
+#include <cstdlib>
+#include <limits>
+#include <memory>
+#include <ostream>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "absl/hash/hash.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace hash_testing_internal {
+
+template <class Derived>
+struct WithId {
+  WithId() : id_(next_id<Derived>()) {}
+  WithId(const WithId& that) : id_(that.id_) {}
+  WithId(WithId&& that) : id_(that.id_) { that.id_ = 0; }
+  WithId& operator=(const WithId& that) {
+    id_ = that.id_;
+    return *this;
+  }
+  WithId& operator=(WithId&& that) {
+    id_ = that.id_;
+    that.id_ = 0;
+    return *this;
+  }
+
+  size_t id() const { return id_; }
+
+  friend bool operator==(const WithId& a, const WithId& b) {
+    return a.id_ == b.id_;
+  }
+  friend bool operator!=(const WithId& a, const WithId& b) { return !(a == b); }
+
+ protected:
+  explicit WithId(size_t id) : id_(id) {}
+
+ private:
+  size_t id_;
+
+  template <class T>
+  static size_t next_id() {
+    // 0 is reserved for the moved-from state.
+    static size_t gId = 1;
+    return gId++;
+  }
+};
+
+}  // namespace hash_testing_internal
+
+struct NonStandardLayout {
+  NonStandardLayout() {}
+  explicit NonStandardLayout(std::string s) : value(std::move(s)) {}
+  virtual ~NonStandardLayout() {}
+
+  friend bool operator==(const NonStandardLayout& a,
+                         const NonStandardLayout& b) {
+    return a.value == b.value;
+  }
+  friend bool operator!=(const NonStandardLayout& a,
+                         const NonStandardLayout& b) {
+    return a.value != b.value;
+  }
+
+  template <typename H>
+  friend H AbslHashValue(H h, const NonStandardLayout& v) {
+    return H::combine(std::move(h), v.value);
+  }
+
+  std::string value;
+};
+
+struct StatefulTestingHash
+    : absl::container_internal::hash_testing_internal::WithId<
+          StatefulTestingHash> {
+  template <class T>
+  size_t operator()(const T& t) const {
+    return absl::Hash<T>{}(t);
+  }
+};
+
+struct StatefulTestingEqual
+    : absl::container_internal::hash_testing_internal::WithId<
+          StatefulTestingEqual> {
+  template <class T, class U>
+  bool operator()(const T& t, const U& u) const {
+    return t == u;
+  }
+};
+
+// It is expected that Alloc() == Alloc() for all allocators, so we cannot use
+// the WithId base. We need to explicitly assign ids.
+template <class T = int>
+struct Alloc : std::allocator<T> {
+  using propagate_on_container_swap = std::true_type;
+
+  // Using old paradigm for this to ensure compatibility.
+  explicit Alloc(size_t id = 0) : id_(id) {}
+
+  Alloc(const Alloc&) = default;
+  Alloc& operator=(const Alloc&) = default;
+
+  template <class U>
+  Alloc(const Alloc<U>& that) : std::allocator<T>(that), id_(that.id()) {}
+
+  template <class U>
+  struct rebind {
+    using other = Alloc<U>;
+  };
+
+  size_t id() const { return id_; }
+
+  friend bool operator==(const Alloc& a, const Alloc& b) {
+    return a.id_ == b.id_;
+  }
+  friend bool operator!=(const Alloc& a, const Alloc& b) { return !(a == b); }
+
+ private:
+  size_t id_ = (std::numeric_limits<size_t>::max)();
+};
+
+template <class Map>
+auto items(const Map& m) -> std::vector<
+    std::pair<typename Map::key_type, typename Map::mapped_type>> {
+  using std::get;
+  std::vector<std::pair<typename Map::key_type, typename Map::mapped_type>> res;
+  res.reserve(m.size());
+  for (const auto& v : m) res.emplace_back(get<0>(v), get<1>(v));
+  return res;
+}
+
+template <class Set>
+auto keys(const Set& s)
+    -> std::vector<typename std::decay<typename Set::key_type>::type> {
+  std::vector<typename std::decay<typename Set::key_type>::type> res;
+  res.reserve(s.size());
+  for (const auto& v : s) res.emplace_back(v);
+  return res;
+}
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+// ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS is false for glibcxx versions
+// where the unordered containers are missing certain constructors that
+// take allocator arguments. This test is defined ad-hoc for the platforms
+// we care about (notably Crosstool 17) because libstdcxx's useless
+// versioning scheme precludes a more principled solution.
+// From GCC-4.9 Changelog: (src: https://gcc.gnu.org/gcc-4.9/changes.html)
+// "the unordered associative containers in <unordered_map> and <unordered_set>
+// meet the allocator-aware container requirements;"
+#if (defined(__GLIBCXX__) && __GLIBCXX__ <= 20140425) || \
+    (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 9))
+#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 0
+#else
+#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 1
+#endif
+
+#endif  // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/hash_policy_testing_test.cc b/third_party/abseil_cpp/absl/container/internal/hash_policy_testing_test.cc
new file mode 100644
index 000000000000..f0b20fe345e2
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/hash_policy_testing_test.cc
@@ -0,0 +1,45 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hash_policy_testing.h"
+
+#include "gtest/gtest.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+TEST(_, Hash) {
+  StatefulTestingHash h1;
+  EXPECT_EQ(1, h1.id());
+  StatefulTestingHash h2;
+  EXPECT_EQ(2, h2.id());
+  StatefulTestingHash h1c(h1);
+  EXPECT_EQ(1, h1c.id());
+  StatefulTestingHash h2m(std::move(h2));
+  EXPECT_EQ(2, h2m.id());
+  EXPECT_EQ(0, h2.id());
+  StatefulTestingHash h3;
+  EXPECT_EQ(3, h3.id());
+  h3 = StatefulTestingHash();
+  EXPECT_EQ(4, h3.id());
+  h3 = std::move(h1);
+  EXPECT_EQ(1, h3.id());
+}
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/container/internal/hash_policy_traits.h b/third_party/abseil_cpp/absl/container/internal/hash_policy_traits.h
new file mode 100644
index 000000000000..46c97b18a227
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/hash_policy_traits.h
@@ -0,0 +1,208 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
+
+#include <cstddef>
+#include <memory>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// Defines how slots are initialized/destroyed/moved.
+template <class Policy, class = void>
+struct hash_policy_traits {
+  // The type of the keys stored in the hashtable.
+  using key_type = typename Policy::key_type;
+
+ private:
+  struct ReturnKey {
+    // When C++17 is available, we can use std::launder to provide mutable
+    // access to the key for use in the node handle.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+    template <class Key,
+              absl::enable_if_t<std::is_lvalue_reference<Key>::value, int> = 0>
+    static key_type& Impl(Key&& k, int) {
+      return *std::launder(
+          const_cast<key_type*>(std::addressof(std::forward<Key>(k))));
+    }
+#endif
+
+    template <class Key>
+    static Key Impl(Key&& k, char) {
+      return std::forward<Key>(k);
+    }
+
+    // When Key=T&, we forward the lvalue reference.
+    // When Key=T, we return by value to avoid a dangling reference,
+    // e.g., for string_hash_map.
+    template <class Key, class... Args>
+    auto operator()(Key&& k, const Args&...) const
+        -> decltype(Impl(std::forward<Key>(k), 0)) {
+      return Impl(std::forward<Key>(k), 0);
+    }
+  };
+
+  template <class P = Policy, class = void>
+  struct ConstantIteratorsImpl : std::false_type {};
+
+  template <class P>
+  struct ConstantIteratorsImpl<P, absl::void_t<typename P::constant_iterators>>
+      : P::constant_iterators {};
+
+ public:
+  // The actual object stored in the hash table.
+  using slot_type = typename Policy::slot_type;
+
+  // The argument type for insertions into the hashtable. This is different
+  // from value_type for increased performance. See initializer_list constructor
+  // and insert() member functions for more details.
+  using init_type = typename Policy::init_type;
+
+  using reference = decltype(Policy::element(std::declval<slot_type*>()));
+  using pointer = typename std::remove_reference<reference>::type*;
+  using value_type = typename std::remove_reference<reference>::type;
+
+  // Policies can set this variable to tell raw_hash_set that all iterators
+  // should be constant, even `iterator`. This is useful for set-like
+  // containers.
+  // Defaults to false if not provided by the policy.
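+  // For example, a set-like policy may declare
+  // `using constant_iterators = std::true_type;`.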
+  using constant_iterators = ConstantIteratorsImpl<>;
+
+  // PRECONDITION: `slot` is UNINITIALIZED
+  // POSTCONDITION: `slot` is INITIALIZED
+  template <class Alloc, class... Args>
+  static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
+    Policy::construct(alloc, slot, std::forward<Args>(args)...);
+  }
+
+  // PRECONDITION: `slot` is INITIALIZED
+  // POSTCONDITION: `slot` is UNINITIALIZED
+  template <class Alloc>
+  static void destroy(Alloc* alloc, slot_type* slot) {
+    Policy::destroy(alloc, slot);
+  }
+
+  // Transfers the contents of `old_slot` to `new_slot`. Memory allocated by
+  // the allocator and owned by `old_slot` may be transferred to `new_slot`
+  // rather than reallocated.
+  //
+  // OPTIONAL: defaults to:
+  //
+  //     construct(alloc, new_slot, std::move(element(old_slot)));
+  //     destroy(alloc, old_slot);
+  //
+  // PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED
+  // POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is
+  //                UNINITIALIZED
+  template <class Alloc>
+  static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) {
+    transfer_impl(alloc, new_slot, old_slot, 0);
+  }
+
+  // PRECONDITION: `slot` is INITIALIZED
+  // POSTCONDITION: `slot` is INITIALIZED
+  template <class P = Policy>
+  static auto element(slot_type* slot) -> decltype(P::element(slot)) {
+    return P::element(slot);
+  }
+
+  // Returns the amount of memory owned by `slot`, exclusive of `sizeof(*slot)`.
+  //
+  // If `slot` is nullptr, returns the constant amount of memory owned by any
+  // full slot or -1 if slots own variable amounts of memory.
+  //
+  // PRECONDITION: `slot` is INITIALIZED or nullptr
+  template <class P = Policy>
+  static size_t space_used(const slot_type* slot) {
+    return P::space_used(slot);
+  }
+
+  // Provides generalized access to the key for elements, both for elements in
+  // the table and for elements that have not yet been inserted (or even
+  // constructed).  We would like an API that allows us to say: `key(args...)`
+  // but we cannot do that for all cases, so we use this more general API that
+  // can be used for many things, including the following:
+  //
+  //   - Given an element in a table, get its key.
+  //   - Given an element initializer, get its key.
+  //   - Given `emplace()` arguments, get the element key.
+  //
+  // Implementations of this must adhere to a very strict technical
+  // specification around aliasing and consuming arguments:
+  //
+  // Let `value_type` be the result type of `element()` without ref- and
+  // cv-qualifiers. The first argument is a functor, the rest are constructor
+  // arguments for `value_type`. Returns `std::forward<F>(f)(k, xs...)`, where
+  // `k` is the element key, and `xs...` are the new constructor arguments for
+  // `value_type`. It's allowed for `k` to alias `xs...`, and for both to alias
+  // `ts...`. The key won't be touched once `xs...` are used to construct an
+  // element; `ts...` won't be touched at all, which allows `apply()` to consume
+  // any rvalues among them.
+  //
+  // If `value_type` is constructible from `Ts&&...`, `Policy::apply()` must not
+  // trigger a hard compile error unless it originates from `f`. In other words,
+  // `Policy::apply()` must be SFINAE-friendly. If `value_type` is not
+  // constructible from `Ts&&...`, either SFINAE or a hard compile error is OK.
+  //
+  // If `Ts...` is `[cv] value_type[&]` or `[cv] init_type[&]`,
+  // `Policy::apply()` must work. A compile error is not allowed, SFINAE or not.
+  template <class F, class... Ts, class P = Policy>
+  static auto apply(F&& f, Ts&&... ts)
+      -> decltype(P::apply(std::forward<F>(f), std::forward<Ts>(ts)...)) {
+    return P::apply(std::forward<F>(f), std::forward<Ts>(ts)...);
+  }
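+
+  // For example, a map-like policy whose `init_type` is `std::pair<K, V>`
+  // might implement `apply` along these lines (illustrative sketch only, not
+  // the implementation the Abseil containers actually use):
+  //
+  //   template <class F, class K, class V>
+  //   static auto apply(F&& f, std::pair<K, V> p)
+  //       -> decltype(std::forward<F>(f)(p.first, std::move(p))) {
+  //     return std::forward<F>(f)(p.first, std::move(p));
+  //   }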
+
+  // Returns the "key" portion of the slot.
+  // Used for node handle manipulation.
+  template <class P = Policy>
+  static auto mutable_key(slot_type* slot)
+      -> decltype(P::apply(ReturnKey(), element(slot))) {
+    return P::apply(ReturnKey(), element(slot));
+  }
+
+  // Returns the "value" (as opposed to the "key") portion of the element. Used
+  // by maps to implement `operator[]`, `at()` and `insert_or_assign()`.
+  template <class T, class P = Policy>
+  static auto value(T* elem) -> decltype(P::value(elem)) {
+    return P::value(elem);
+  }
+
+ private:
+  // Use auto -> decltype as a SFINAE enabler: prefer `P::transfer()` when the
+  // policy provides it; otherwise fall back to construct-and-destroy below.
+  template <class Alloc, class P = Policy>
+  static auto transfer_impl(Alloc* alloc, slot_type* new_slot,
+                            slot_type* old_slot, int)
+      -> decltype((void)P::transfer(alloc, new_slot, old_slot)) {
+    P::transfer(alloc, new_slot, old_slot);
+  }
+  template <class Alloc>
+  static void transfer_impl(Alloc* alloc, slot_type* new_slot,
+                            slot_type* old_slot, char) {
+    construct(alloc, new_slot, std::move(element(old_slot)));
+    destroy(alloc, old_slot);
+  }
+};
+
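+// A minimal policy for a set of `int`s might look like the following
+// (illustrative sketch only; the real policies live alongside the containers
+// that use them):
+//
+//   struct IntSetPolicy {
+//     using slot_type = int;
+//     using key_type = int;
+//     using init_type = int;
+//
+//     template <class Alloc, class... Args>
+//     static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
+//       std::allocator_traits<Alloc>::construct(*alloc, slot,
+//                                               std::forward<Args>(args)...);
+//     }
+//     template <class Alloc>
+//     static void destroy(Alloc* alloc, slot_type* slot) {
+//       std::allocator_traits<Alloc>::destroy(*alloc, slot);
+//     }
+//     static slot_type& element(slot_type* slot) { return *slot; }
+//
+//     // For a set, the key and the value are the same object.
+//     template <class F>
+//     static auto apply(F&& f, int v) -> decltype(std::forward<F>(f)(v, v)) {
+//       return std::forward<F>(f)(v, v);
+//     }
+//   };
+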
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/hash_policy_traits_test.cc b/third_party/abseil_cpp/absl/container/internal/hash_policy_traits_test.cc
new file mode 100644
index 000000000000..6ef8b9e05fb1
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/hash_policy_traits_test.cc
@@ -0,0 +1,144 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hash_policy_traits.h"
+
+#include <functional>
+#include <memory>
+#include <new>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using ::testing::MockFunction;
+using ::testing::Return;
+using ::testing::ReturnRef;
+
+using Alloc = std::allocator<int>;
+using Slot = int;
+
+struct PolicyWithoutOptionalOps {
+  using slot_type = Slot;
+  using key_type = Slot;
+  using init_type = Slot;
+
+  static std::function<void(void*, Slot*, Slot)> construct;
+  static std::function<void(void*, Slot*)> destroy;
+
+  static std::function<Slot&(Slot*)> element;
+  static int apply(int v) { return apply_impl(v); }
+  static std::function<int(int)> apply_impl;
+  static std::function<Slot&(Slot*)> value;
+};
+
+std::function<void(void*, Slot*, Slot)> PolicyWithoutOptionalOps::construct;
+std::function<void(void*, Slot*)> PolicyWithoutOptionalOps::destroy;
+
+std::function<Slot&(Slot*)> PolicyWithoutOptionalOps::element;
+std::function<int(int)> PolicyWithoutOptionalOps::apply_impl;
+std::function<Slot&(Slot*)> PolicyWithoutOptionalOps::value;
+
+struct PolicyWithOptionalOps : PolicyWithoutOptionalOps {
+  static std::function<void(void*, Slot*, Slot*)> transfer;
+};
+
+std::function<void(void*, Slot*, Slot*)> PolicyWithOptionalOps::transfer;
+
+struct Test : ::testing::Test {
+  Test() {
+    PolicyWithoutOptionalOps::construct = [&](void* a1, Slot* a2, Slot a3) {
+      construct.Call(a1, a2, std::move(a3));
+    };
+    PolicyWithoutOptionalOps::destroy = [&](void* a1, Slot* a2) {
+      destroy.Call(a1, a2);
+    };
+
+    PolicyWithoutOptionalOps::element = [&](Slot* a1) -> Slot& {
+      return element.Call(a1);
+    };
+    PolicyWithoutOptionalOps::apply_impl = [&](int a1) -> int {
+      return apply.Call(a1);
+    };
+    PolicyWithoutOptionalOps::value = [&](Slot* a1) -> Slot& {
+      return value.Call(a1);
+    };
+
+    PolicyWithOptionalOps::transfer = [&](void* a1, Slot* a2, Slot* a3) {
+      return transfer.Call(a1, a2, a3);
+    };
+  }
+
+  std::allocator<int> alloc;
+  int a = 53;
+
+  MockFunction<void(void*, Slot*, Slot)> construct;
+  MockFunction<void(void*, Slot*)> destroy;
+
+  MockFunction<Slot&(Slot*)> element;
+  MockFunction<int(int)> apply;
+  MockFunction<Slot&(Slot*)> value;
+
+  MockFunction<void(void*, Slot*, Slot*)> transfer;
+};
+
+TEST_F(Test, construct) {
+  EXPECT_CALL(construct, Call(&alloc, &a, 53));
+  hash_policy_traits<PolicyWithoutOptionalOps>::construct(&alloc, &a, 53);
+}
+
+TEST_F(Test, destroy) {
+  EXPECT_CALL(destroy, Call(&alloc, &a));
+  hash_policy_traits<PolicyWithoutOptionalOps>::destroy(&alloc, &a);
+}
+
+TEST_F(Test, element) {
+  int b = 0;
+  EXPECT_CALL(element, Call(&a)).WillOnce(ReturnRef(b));
+  EXPECT_EQ(&b, &hash_policy_traits<PolicyWithoutOptionalOps>::element(&a));
+}
+
+TEST_F(Test, apply) {
+  EXPECT_CALL(apply, Call(42)).WillOnce(Return(1337));
+  EXPECT_EQ(1337, (hash_policy_traits<PolicyWithoutOptionalOps>::apply(42)));
+}
+
+TEST_F(Test, value) {
+  int b = 0;
+  EXPECT_CALL(value, Call(&a)).WillOnce(ReturnRef(b));
+  EXPECT_EQ(&b, &hash_policy_traits<PolicyWithoutOptionalOps>::value(&a));
+}
+
+TEST_F(Test, without_transfer) {
+  int b = 42;
+  EXPECT_CALL(element, Call(&b)).WillOnce(ReturnRef(b));
+  EXPECT_CALL(construct, Call(&alloc, &a, b));
+  EXPECT_CALL(destroy, Call(&alloc, &b));
+  hash_policy_traits<PolicyWithoutOptionalOps>::transfer(&alloc, &a, &b);
+}
+
+TEST_F(Test, with_transfer) {
+  int b = 42;
+  EXPECT_CALL(transfer, Call(&alloc, &a, &b));
+  hash_policy_traits<PolicyWithOptionalOps>::transfer(&alloc, &a, &b);
+}
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/container/internal/hashtable_debug.h b/third_party/abseil_cpp/absl/container/internal/hashtable_debug.h
new file mode 100644
index 000000000000..19d52121d688
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/hashtable_debug.h
@@ -0,0 +1,110 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This library provides APIs to debug the probing behavior of hash tables.
+//
+// In general, the probing behavior is a black box for users and only the
+// side effects can be measured in the form of performance differences.
+// These APIs give a glimpse of the actual behavior of the probing algorithms in
+// these hashtables given a specified hash function and a set of elements.
+//
+// The probe count distribution can be used to assess the quality of the hash
+// function for that particular hash table. Note that a hash function that
+// performs well in one hash table implementation does not necessarily perform
+// well in a different one.
+//
+// This library supports std::unordered_{set,map}, dense_hash_{set,map} and
+// absl::{flat,node,string}_hash_{set,map}.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
+
+#include <cstddef>
+#include <algorithm>
+#include <type_traits>
+#include <vector>
+
+#include "absl/container/internal/hashtable_debug_hooks.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// Returns the number of probes required to look up `key`.  Returns 0 for a
+// search with no collisions.  Higher values mean more hash collisions occurred;
+// however, the exact meaning of this number varies according to the container
+// type.
+template <typename C>
+size_t GetHashtableDebugNumProbes(
+    const C& c, const typename C::key_type& key) {
+  return absl::container_internal::hashtable_debug_internal::
+      HashtableDebugAccess<C>::GetNumProbes(c, key);
+}
+
+// Gets a histogram of the number of probes for each element in the container.
+// The sum of all the values in the vector is equal to container.size().
+template <typename C>
+std::vector<size_t> GetHashtableDebugNumProbesHistogram(const C& container) {
+  std::vector<size_t> v;
+  for (auto it = container.begin(); it != container.end(); ++it) {
+    size_t num_probes = GetHashtableDebugNumProbes(
+        container,
+        absl::container_internal::hashtable_debug_internal::GetKey<C>(*it, 0));
+    v.resize((std::max)(v.size(), num_probes + 1));
+    v[num_probes]++;
+  }
+  return v;
+}
+
+struct HashtableDebugProbeSummary {
+  size_t total_elements;
+  size_t total_num_probes;
+  double mean;
+};
+
+// Gets a summary of the probe count distribution for the elements in the
+// container.
+template <typename C>
+HashtableDebugProbeSummary GetHashtableDebugProbeSummary(const C& container) {
+  auto probes = GetHashtableDebugNumProbesHistogram(container);
+  HashtableDebugProbeSummary summary = {};
+  for (size_t i = 0; i < probes.size(); ++i) {
+    summary.total_elements += probes[i];
+    summary.total_num_probes += probes[i] * i;
+  }
+  summary.mean = 1.0 * summary.total_num_probes / summary.total_elements;
+  return summary;
+}
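+
+// For example (illustrative sketch; `std::unordered_set` is one of the
+// supported container types listed above):
+//
+//   std::unordered_set<int> s = {1, 2, 3, 4};
+//   std::vector<size_t> hist = GetHashtableDebugNumProbesHistogram(s);
+//   HashtableDebugProbeSummary summary = GetHashtableDebugProbeSummary(s);
+//   // summary.mean is the average number of probes per element.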
+
+// Returns the number of bytes requested from the allocator by the container
+// and not freed.
+template <typename C>
+size_t AllocatedByteSize(const C& c) {
+  return absl::container_internal::hashtable_debug_internal::
+      HashtableDebugAccess<C>::AllocatedByteSize(c);
+}
+
+// Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type `C`
+// and `c.size()` is equal to `num_elements`.
+template <typename C>
+size_t LowerBoundAllocatedByteSize(size_t num_elements) {
+  return absl::container_internal::hashtable_debug_internal::
+      HashtableDebugAccess<C>::LowerBoundAllocatedByteSize(num_elements);
+}
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/hashtable_debug_hooks.h b/third_party/abseil_cpp/absl/container/internal/hashtable_debug_hooks.h
new file mode 100644
index 000000000000..3e9ea5954e0a
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/hashtable_debug_hooks.h
@@ -0,0 +1,85 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Provides the internal API for hashtable_debug.h.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
+
+#include <cstddef>
+
+#include <algorithm>
+#include <type_traits>
+#include <vector>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace hashtable_debug_internal {
+
+// If it is a map, call get<0>().
+using std::get;
+template <typename T, typename = typename T::mapped_type>
+auto GetKey(const typename T::value_type& pair, int) -> decltype(get<0>(pair)) {
+  return get<0>(pair);
+}
+
+// If it is not a map, return the value directly.
+template <typename T>
+const typename T::key_type& GetKey(const typename T::key_type& key, char) {
+  return key;
+}
+
+// Containers should specialize this to provide debug information for that
+// container.
+template <class Container, typename Enabler = void>
+struct HashtableDebugAccess {
+  // Returns the number of probes required to find `key` in `c`.  The "number of
+  // probes" is a concept that can vary by container.  Implementations should
+  // return 0 when `key` was found in the minimum number of operations and
+  // should increment the result for each non-trivial operation required to find
+  // `key`.
+  //
+  // The default implementation uses the bucket API from the standard library
+  // and thus works for `std::unordered_*` containers.
+  static size_t GetNumProbes(const Container& c,
+                             const typename Container::key_type& key) {
+    if (!c.bucket_count()) return {};
+    size_t num_probes = 0;
+    size_t bucket = c.bucket(key);
+    for (auto it = c.begin(bucket), e = c.end(bucket);; ++it, ++num_probes) {
+      if (it == e) return num_probes;
+      if (c.key_eq()(key, GetKey<Container>(*it, 0))) return num_probes;
+    }
+  }
+
+  // Returns the number of bytes requested from the allocator by the container
+  // and not freed.
+  //
+  // static size_t AllocatedByteSize(const Container& c);
+
+  // Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type
+  // `Container` and `c.size()` is equal to `num_elements`.
+  //
+  // static size_t LowerBoundAllocatedByteSize(size_t num_elements);
+};
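+
+// A container-specific specialization might look like the following
+// (illustrative sketch for a hypothetical container `MyTable`; real
+// specializations live next to the containers they describe):
+//
+//   template <class... Args>
+//   struct HashtableDebugAccess<MyTable<Args...>> {
+//     static size_t GetNumProbes(const MyTable<Args...>& c,
+//                                const typename MyTable<Args...>::key_type& k);
+//     static size_t AllocatedByteSize(const MyTable<Args...>& c);
+//     static size_t LowerBoundAllocatedByteSize(size_t num_elements);
+//   };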
+
+}  // namespace hashtable_debug_internal
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler.cc b/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler.cc
new file mode 100644
index 000000000000..e4484fbb1b43
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler.cc
@@ -0,0 +1,270 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hashtablez_sampler.h"
+
+#include <atomic>
+#include <cassert>
+#include <cmath>
+#include <functional>
+#include <limits>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/exponential_biased.h"
+#include "absl/container/internal/have_sse.h"
+#include "absl/debugging/stacktrace.h"
+#include "absl/memory/memory.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+constexpr int HashtablezInfo::kMaxStackDepth;
+
+namespace {
+ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{false};
+ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};
+ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_max_samples{1 << 20};
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+ABSL_PER_THREAD_TLS_KEYWORD absl::base_internal::ExponentialBiased
+    g_exponential_biased_generator;
+#endif
+
+}  // namespace
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
+#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+
+HashtablezSampler& HashtablezSampler::Global() {
+  static auto* sampler = new HashtablezSampler();
+  return *sampler;
+}
+
+HashtablezSampler::DisposeCallback HashtablezSampler::SetDisposeCallback(
+    DisposeCallback f) {
+  return dispose_.exchange(f, std::memory_order_relaxed);
+}
+
+HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
+HashtablezInfo::~HashtablezInfo() = default;
+
+void HashtablezInfo::PrepareForSampling() {
+  capacity.store(0, std::memory_order_relaxed);
+  size.store(0, std::memory_order_relaxed);
+  num_erases.store(0, std::memory_order_relaxed);
+  num_rehashes.store(0, std::memory_order_relaxed);
+  max_probe_length.store(0, std::memory_order_relaxed);
+  total_probe_length.store(0, std::memory_order_relaxed);
+  hashes_bitwise_or.store(0, std::memory_order_relaxed);
+  hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed);
+
+  create_time = absl::Now();
+  // The inliner makes a hardcoded skip_count unreliable (especially when
+  // combined with LTO).  We instead rely on the ability to exclude stack
+  // frames by regex when encoding.
+  depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
+                              /* skip_count= */ 0);
+  dead = nullptr;
+}
+
+HashtablezSampler::HashtablezSampler()
+    : dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) {
+  absl::MutexLock l(&graveyard_.init_mu);
+  graveyard_.dead = &graveyard_;
+}
+
+HashtablezSampler::~HashtablezSampler() {
+  HashtablezInfo* s = all_.load(std::memory_order_acquire);
+  while (s != nullptr) {
+    HashtablezInfo* next = s->next;
+    delete s;
+    s = next;
+  }
+}
+
+void HashtablezSampler::PushNew(HashtablezInfo* sample) {
+  sample->next = all_.load(std::memory_order_relaxed);
+  while (!all_.compare_exchange_weak(sample->next, sample,
+                                     std::memory_order_release,
+                                     std::memory_order_relaxed)) {
+  }
+}
+
+void HashtablezSampler::PushDead(HashtablezInfo* sample) {
+  if (auto* dispose = dispose_.load(std::memory_order_relaxed)) {
+    dispose(*sample);
+  }
+
+  absl::MutexLock graveyard_lock(&graveyard_.init_mu);
+  absl::MutexLock sample_lock(&sample->init_mu);
+  sample->dead = graveyard_.dead;
+  graveyard_.dead = sample;
+}
+
+HashtablezInfo* HashtablezSampler::PopDead() {
+  absl::MutexLock graveyard_lock(&graveyard_.init_mu);
+
+  // The list is circular, so eventually it collapses down to
+  //   graveyard_.dead == &graveyard_
+  // when it is empty.
+  HashtablezInfo* sample = graveyard_.dead;
+  if (sample == &graveyard_) return nullptr;
+
+  absl::MutexLock sample_lock(&sample->init_mu);
+  graveyard_.dead = sample->dead;
+  sample->PrepareForSampling();
+  return sample;
+}
+
+HashtablezInfo* HashtablezSampler::Register() {
+  int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
+  if (size > g_hashtablez_max_samples.load(std::memory_order_relaxed)) {
+    size_estimate_.fetch_sub(1, std::memory_order_relaxed);
+    dropped_samples_.fetch_add(1, std::memory_order_relaxed);
+    return nullptr;
+  }
+
+  HashtablezInfo* sample = PopDead();
+  if (sample == nullptr) {
+    // Resurrection failed.  Hire a new warlock.
+    sample = new HashtablezInfo();
+    PushNew(sample);
+  }
+
+  return sample;
+}
+
+void HashtablezSampler::Unregister(HashtablezInfo* sample) {
+  PushDead(sample);
+  size_estimate_.fetch_sub(1, std::memory_order_relaxed);
+}
+
+int64_t HashtablezSampler::Iterate(
+    const std::function<void(const HashtablezInfo& stack)>& f) {
+  HashtablezInfo* s = all_.load(std::memory_order_acquire);
+  while (s != nullptr) {
+    absl::MutexLock l(&s->init_mu);
+    if (s->dead == nullptr) {
+      f(*s);
+    }
+    s = s->next;
+  }
+
+  return dropped_samples_.load(std::memory_order_relaxed);
+}
+
+static bool ShouldForceSampling() {
+  enum ForceState {
+    kDontForce,
+    kForce,
+    kUninitialized
+  };
+  ABSL_CONST_INIT static std::atomic<ForceState> global_state{
+      kUninitialized};
+  ForceState state = global_state.load(std::memory_order_relaxed);
+  if (ABSL_PREDICT_TRUE(state == kDontForce)) return false;
+
+  if (state == kUninitialized) {
+    state = AbslContainerInternalSampleEverything() ? kForce : kDontForce;
+    global_state.store(state, std::memory_order_relaxed);
+  }
+  return state == kForce;
+}
+
+HashtablezInfo* SampleSlow(int64_t* next_sample) {
+  if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
+    *next_sample = 1;
+    return HashtablezSampler::Global().Register();
+  }
+
+#if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+  *next_sample = std::numeric_limits<int64_t>::max();
+  return nullptr;
+#else
+  bool first = *next_sample < 0;
+  *next_sample = g_exponential_biased_generator.GetStride(
+      g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
+  // Small values of interval are equivalent to just sampling next time.
+  ABSL_ASSERT(*next_sample >= 1);
+
+  // g_hashtablez_enabled can be dynamically flipped, so we need to set a
+  // threshold low enough that we will start sampling in a reasonable time; we
+  // therefore just use the default sampling rate.
+  if (!g_hashtablez_enabled.load(std::memory_order_relaxed)) return nullptr;
+
+  // We will only be negative on our first count, so we should just retry in
+  // that case.
+  if (first) {
+    if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr;
+    return SampleSlow(next_sample);
+  }
+
+  return HashtablezSampler::Global().Register();
+#endif
+}
+
+void UnsampleSlow(HashtablezInfo* info) {
+  HashtablezSampler::Global().Unregister(info);
+}
+
+void RecordInsertSlow(HashtablezInfo* info, size_t hash,
+                      size_t distance_from_desired) {
+  // SwissTables probe in groups (16 slots with SSE2, 8 otherwise), so scale
+  // this to count whole-group probes rather than the raw offset from the
+  // desired slot.
+  size_t probe_length = distance_from_desired;
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+  probe_length /= 16;
+#else
+  probe_length /= 8;
+#endif
+
+  info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed);
+  info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed);
+  info->max_probe_length.store(
+      std::max(info->max_probe_length.load(std::memory_order_relaxed),
+               probe_length),
+      std::memory_order_relaxed);
+  info->total_probe_length.fetch_add(probe_length, std::memory_order_relaxed);
+  info->size.fetch_add(1, std::memory_order_relaxed);
+}
+
+void SetHashtablezEnabled(bool enabled) {
+  g_hashtablez_enabled.store(enabled, std::memory_order_release);
+}
+
+void SetHashtablezSampleParameter(int32_t rate) {
+  if (rate > 0) {
+    g_hashtablez_sample_parameter.store(rate, std::memory_order_release);
+  } else {
+    ABSL_RAW_LOG(ERROR, "Invalid hashtablez sample rate: %lld",
+                 static_cast<long long>(rate));  // NOLINT(runtime/int)
+  }
+}
+
+void SetHashtablezMaxSamples(int32_t max) {
+  if (max > 0) {
+    g_hashtablez_max_samples.store(max, std::memory_order_release);
+  } else {
+    ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: %lld",
+                 static_cast<long long>(max));  // NOLINT(runtime/int)
+  }
+}
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler.h b/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler.h
new file mode 100644
index 000000000000..394348da58f5
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler.h
@@ -0,0 +1,321 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: hashtablez_sampler.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the API for a low level library to sample hashtables
+// and collect runtime statistics about them.
+//
+// `HashtablezSampler` controls the lifecycle of `HashtablezInfo` objects, each
+// of which stores information about a single sample.
+//
+// `Record*` methods store information into samples.
+// `Sample()` and the handle it returns make use of a single global sampler
+// with properties controlled by the flags hashtablez_enabled,
+// hashtablez_sample_rate, and hashtablez_max_samples.
+//
+// WARNING
+//
+// Using this sampling API may cause sampled Swiss tables to use the global
+// allocator (operator `new`) in addition to any custom allocator.  If you
+// are using a table in an unusual circumstance where allocation or calling a
+// Linux syscall is unacceptable, this could interfere.
+//
+// This utility is internal-only. Use at your own risk.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
+
+#include <atomic>
+#include <functional>
+#include <memory>
+#include <vector>
+
+#include "absl/base/internal/per_thread_tls.h"
+#include "absl/base/optimization.h"
+#include "absl/container/internal/have_sse.h"
+#include "absl/synchronization/mutex.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// Stores information about a sampled hashtable.  All mutations to this *must*
+// be made through `Record*` functions below.  All reads from this *must* only
+// occur in the callback to `HashtablezSampler::Iterate`.
+struct HashtablezInfo {
+  // Constructs the object but does not fill in any fields.
+  HashtablezInfo();
+  ~HashtablezInfo();
+  HashtablezInfo(const HashtablezInfo&) = delete;
+  HashtablezInfo& operator=(const HashtablezInfo&) = delete;
+
+  // Puts the object into a clean state, fills in the logically `const` members,
+  // blocking for any readers that are currently sampling the object.
+  void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
+
+  // These fields are mutated by the various Record* APIs and need to be
+  // thread-safe.
+  std::atomic<size_t> capacity;
+  std::atomic<size_t> size;
+  std::atomic<size_t> num_erases;
+  std::atomic<size_t> num_rehashes;
+  std::atomic<size_t> max_probe_length;
+  std::atomic<size_t> total_probe_length;
+  std::atomic<size_t> hashes_bitwise_or;
+  std::atomic<size_t> hashes_bitwise_and;
+
+  // `HashtablezSampler` maintains intrusive linked lists for all samples.  See
+  // comments on `HashtablezSampler::all_` for details on these.  `init_mu`
+  // guards the ability to restore the sample to a pristine state.  This
+  // prevents races with sampling and resurrecting an object.
+  absl::Mutex init_mu;
+  HashtablezInfo* next;
+  HashtablezInfo* dead ABSL_GUARDED_BY(init_mu);
+
+  // All of the fields below are set by `PrepareForSampling`; they must not be
+  // mutated in `Record*` functions.  They are logically `const` in that sense.
+  // These are guarded by init_mu, but that is not externalized to clients, who
+  // can only read them during `HashtablezSampler::Iterate`, which holds the
+  // lock.
+  static constexpr int kMaxStackDepth = 64;
+  absl::Time create_time;
+  int32_t depth;
+  void* stack[kMaxStackDepth];
+};
+
+inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+  total_probe_length /= 16;
+#else
+  total_probe_length /= 8;
+#endif
+  info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
+  info->num_erases.store(0, std::memory_order_relaxed);
+  // There is only one concurrent writer, so `load` then `store` is sufficient
+  // instead of using `fetch_add`.
+  info->num_rehashes.store(
+      1 + info->num_rehashes.load(std::memory_order_relaxed),
+      std::memory_order_relaxed);
+}
+
+inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
+                                     size_t capacity) {
+  info->size.store(size, std::memory_order_relaxed);
+  info->capacity.store(capacity, std::memory_order_relaxed);
+  if (size == 0) {
+    // This is a clear, reset the total/num_erases too.
+    info->total_probe_length.store(0, std::memory_order_relaxed);
+    info->num_erases.store(0, std::memory_order_relaxed);
+  }
+}
+
+void RecordInsertSlow(HashtablezInfo* info, size_t hash,
+                      size_t distance_from_desired);
+
+inline void RecordEraseSlow(HashtablezInfo* info) {
+  info->size.fetch_sub(1, std::memory_order_relaxed);
+  // There is only one concurrent writer, so `load` then `store` is sufficient
+  // instead of using `fetch_add`.
+  info->num_erases.store(
+      1 + info->num_erases.load(std::memory_order_relaxed),
+      std::memory_order_relaxed);
+}
+
+HashtablezInfo* SampleSlow(int64_t* next_sample);
+void UnsampleSlow(HashtablezInfo* info);
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
+#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+
+#if (ABSL_PER_THREAD_TLS == 1) && !defined(ABSL_BUILD_DLL) && \
+    !defined(ABSL_CONSUME_DLL)
+#define ABSL_INTERNAL_HASHTABLEZ_SAMPLE
+#endif
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+class HashtablezInfoHandle {
+ public:
+  explicit HashtablezInfoHandle() : info_(nullptr) {}
+  explicit HashtablezInfoHandle(HashtablezInfo* info) : info_(info) {}
+  ~HashtablezInfoHandle() {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    UnsampleSlow(info_);
+  }
+
+  HashtablezInfoHandle(const HashtablezInfoHandle&) = delete;
+  HashtablezInfoHandle& operator=(const HashtablezInfoHandle&) = delete;
+
+  HashtablezInfoHandle(HashtablezInfoHandle&& o) noexcept
+      : info_(absl::exchange(o.info_, nullptr)) {}
+  HashtablezInfoHandle& operator=(HashtablezInfoHandle&& o) noexcept {
+    if (ABSL_PREDICT_FALSE(info_ != nullptr)) {
+      UnsampleSlow(info_);
+    }
+    info_ = absl::exchange(o.info_, nullptr);
+    return *this;
+  }
+
+  inline void RecordStorageChanged(size_t size, size_t capacity) {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    RecordStorageChangedSlow(info_, size, capacity);
+  }
+
+  inline void RecordRehash(size_t total_probe_length) {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    RecordRehashSlow(info_, total_probe_length);
+  }
+
+  inline void RecordInsert(size_t hash, size_t distance_from_desired) {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    RecordInsertSlow(info_, hash, distance_from_desired);
+  }
+
+  inline void RecordErase() {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    RecordEraseSlow(info_);
+  }
+
+  friend inline void swap(HashtablezInfoHandle& lhs,
+                          HashtablezInfoHandle& rhs) {
+    std::swap(lhs.info_, rhs.info_);
+  }
+
+ private:
+  friend class HashtablezInfoHandlePeer;
+  HashtablezInfo* info_;
+};
+#else
+// Ensure that when Hashtablez is turned off at compile time, HashtablezInfo can
+// be removed by the linker, in order to reduce the binary size.
+class HashtablezInfoHandle {
+ public:
+  explicit HashtablezInfoHandle() = default;
+  explicit HashtablezInfoHandle(std::nullptr_t) {}
+
+  inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {}
+  inline void RecordRehash(size_t /*total_probe_length*/) {}
+  inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {}
+  inline void RecordErase() {}
+
+  friend inline void swap(HashtablezInfoHandle& /*lhs*/,
+                          HashtablezInfoHandle& /*rhs*/) {}
+};
+#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
+#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+
+// Returns an RAII sampling handle that manages registration and unregistration
+// with the global sampler.
+inline HashtablezInfoHandle Sample() {
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+  if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) {
+    return HashtablezInfoHandle(nullptr);
+  }
+  return HashtablezInfoHandle(SampleSlow(&global_next_sample));
+#else
+  return HashtablezInfoHandle(nullptr);
+#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+}
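+
+// A sampled container typically holds one handle and forwards events to it
+// (illustrative sketch only):
+//
+//   HashtablezInfoHandle infoz = Sample();
+//   infoz.RecordStorageChanged(size, capacity);
+//   infoz.RecordInsert(hash, distance_from_desired);
+//   infoz.RecordErase();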
+
+// Holds samples and their associated stack traces, with a soft sample-count
+// limit configured via `SetHashtablezMaxSamples()`.
+//
+// Thread safe.
+class HashtablezSampler {
+ public:
+  // Returns a global Sampler.
+  static HashtablezSampler& Global();
+
+  HashtablezSampler();
+  ~HashtablezSampler();
+
+  // Registers for sampling.  Returns an opaque registration info.
+  HashtablezInfo* Register();
+
+  // Unregisters the sample.
+  void Unregister(HashtablezInfo* sample);
+
+  // The dispose callback will be called on each sample the moment it is
+  // unregistered. It only affects samples that are unregistered after the
+  // callback has been set.
+  // Returns the previous callback.
+  using DisposeCallback = void (*)(const HashtablezInfo&);
+  DisposeCallback SetDisposeCallback(DisposeCallback f);
+
+  // Iterates over all the registered `HashtablezInfo`s, returning the number
+  // of samples that have been dropped.
+  int64_t Iterate(const std::function<void(const HashtablezInfo& stack)>& f);
+
+ private:
+  void PushNew(HashtablezInfo* sample);
+  void PushDead(HashtablezInfo* sample);
+  HashtablezInfo* PopDead();
+
+  std::atomic<size_t> dropped_samples_;
+  std::atomic<size_t> size_estimate_;
+
+  // Intrusive lock free linked lists for tracking samples.
+  //
+  // `all_` records all samples (they are never removed from this list) and is
+  // terminated with a `nullptr`.
+  //
+  // `graveyard_.dead` is a circular linked list.  When it is empty,
+  // `graveyard_.dead == &graveyard_`.  The list is circular so that
+  // every item on it (even the last) has a non-null dead pointer.  This allows
+  // `Iterate` to determine if a given sample is live or dead using only
+  // information on the sample itself.
+  //
+  // For example, nodes [A, B, C, D, E] with [A, C, E] alive and [B, D] dead
+  // looks like this (G is the Graveyard):
+  //
+  //           +---+    +---+    +---+    +---+    +---+
+  //    all -->| A |--->| B |--->| C |--->| D |--->| E |
+  //           |   |    |   |    |   |    |   |    |   |
+  //   +---+   |   | +->|   |-+  |   | +->|   |-+  |   |
+  //   | G |   +---+ |  +---+ |  +---+ |  +---+ |  +---+
+  //   |   |         |        |        |        |
+  //   |   | --------+        +--------+        |
+  //   +---+                                    |
+  //     ^                                      |
+  //     +--------------------------------------+
+  //
+  std::atomic<HashtablezInfo*> all_;
+  HashtablezInfo graveyard_;
+
+  std::atomic<DisposeCallback> dispose_;
+};
+
+// Enables or disables sampling for Swiss tables.
+void SetHashtablezEnabled(bool enabled);
+
+// Sets the rate at which Swiss tables will be sampled.
+void SetHashtablezSampleParameter(int32_t rate);
+
+// Sets a soft max for the number of samples that will be kept.
+void SetHashtablezMaxSamples(int32_t max);
+
+// Configuration override.
+// This allows process-wide sampling without depending on order of
+// initialization of static storage duration objects.
+// The default definition of this function is weak, which allows us to inject
+// a different definition at link time.
+extern "C" bool AbslContainerInternalSampleEverything();
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc b/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
new file mode 100644
index 000000000000..78b9d362acf3
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
@@ -0,0 +1,30 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hashtablez_sampler.h"
+
+#include "absl/base/attributes.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// See hashtablez_sampler.h for details.
+extern "C" ABSL_ATTRIBUTE_WEAK bool AbslContainerInternalSampleEverything() {
+  return false;
+}
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler_test.cc b/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler_test.cc
new file mode 100644
index 000000000000..8d10a1e94030
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/hashtablez_sampler_test.cc
@@ -0,0 +1,371 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hashtablez_sampler.h"
+
+#include <atomic>
+#include <limits>
+#include <random>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/container/internal/have_sse.h"
+#include "absl/synchronization/blocking_counter.h"
+#include "absl/synchronization/internal/thread_pool.h"
+#include "absl/synchronization/mutex.h"
+#include "absl/synchronization/notification.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+constexpr int kProbeLength = 16;
+#else
+constexpr int kProbeLength = 8;
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+class HashtablezInfoHandlePeer {
+ public:
+  static bool IsSampled(const HashtablezInfoHandle& h) {
+    return h.info_ != nullptr;
+  }
+
+  static HashtablezInfo* GetInfo(HashtablezInfoHandle* h) { return h->info_; }
+};
+#else
+class HashtablezInfoHandlePeer {
+ public:
+  static bool IsSampled(const HashtablezInfoHandle&) { return false; }
+  static HashtablezInfo* GetInfo(HashtablezInfoHandle*) { return nullptr; }
+};
+#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+
+namespace {
+using ::absl::synchronization_internal::ThreadPool;
+using ::testing::IsEmpty;
+using ::testing::UnorderedElementsAre;
+
+std::vector<size_t> GetSizes(HashtablezSampler* s) {
+  std::vector<size_t> res;
+  s->Iterate([&](const HashtablezInfo& info) {
+    res.push_back(info.size.load(std::memory_order_acquire));
+  });
+  return res;
+}
+
+HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
+  auto* info = s->Register();
+  assert(info != nullptr);
+  info->size.store(size);
+  return info;
+}
+
+TEST(HashtablezInfoTest, PrepareForSampling) {
+  absl::Time test_start = absl::Now();
+  HashtablezInfo info;
+  absl::MutexLock l(&info.init_mu);
+  info.PrepareForSampling();
+
+  EXPECT_EQ(info.capacity.load(), 0);
+  EXPECT_EQ(info.size.load(), 0);
+  EXPECT_EQ(info.num_erases.load(), 0);
+  EXPECT_EQ(info.num_rehashes.load(), 0);
+  EXPECT_EQ(info.max_probe_length.load(), 0);
+  EXPECT_EQ(info.total_probe_length.load(), 0);
+  EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
+  EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
+  EXPECT_GE(info.create_time, test_start);
+
+  info.capacity.store(1, std::memory_order_relaxed);
+  info.size.store(1, std::memory_order_relaxed);
+  info.num_erases.store(1, std::memory_order_relaxed);
+  info.max_probe_length.store(1, std::memory_order_relaxed);
+  info.total_probe_length.store(1, std::memory_order_relaxed);
+  info.hashes_bitwise_or.store(1, std::memory_order_relaxed);
+  info.hashes_bitwise_and.store(1, std::memory_order_relaxed);
+  info.create_time = test_start - absl::Hours(20);
+
+  info.PrepareForSampling();
+  EXPECT_EQ(info.capacity.load(), 0);
+  EXPECT_EQ(info.size.load(), 0);
+  EXPECT_EQ(info.num_erases.load(), 0);
+  EXPECT_EQ(info.num_rehashes.load(), 0);
+  EXPECT_EQ(info.max_probe_length.load(), 0);
+  EXPECT_EQ(info.total_probe_length.load(), 0);
+  EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
+  EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
+  EXPECT_GE(info.create_time, test_start);
+}
+
+TEST(HashtablezInfoTest, RecordStorageChanged) {
+  HashtablezInfo info;
+  absl::MutexLock l(&info.init_mu);
+  info.PrepareForSampling();
+  RecordStorageChangedSlow(&info, 17, 47);
+  EXPECT_EQ(info.size.load(), 17);
+  EXPECT_EQ(info.capacity.load(), 47);
+  RecordStorageChangedSlow(&info, 20, 20);
+  EXPECT_EQ(info.size.load(), 20);
+  EXPECT_EQ(info.capacity.load(), 20);
+}
+
+TEST(HashtablezInfoTest, RecordInsert) {
+  HashtablezInfo info;
+  absl::MutexLock l(&info.init_mu);
+  info.PrepareForSampling();
+  EXPECT_EQ(info.max_probe_length.load(), 0);
+  RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
+  EXPECT_EQ(info.max_probe_length.load(), 6);
+  EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000FF00);
+  EXPECT_EQ(info.hashes_bitwise_or.load(), 0x0000FF00);
+  RecordInsertSlow(&info, 0x000FF000, 4 * kProbeLength);
+  EXPECT_EQ(info.max_probe_length.load(), 6);
+  EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000F000);
+  EXPECT_EQ(info.hashes_bitwise_or.load(), 0x000FFF00);
+  RecordInsertSlow(&info, 0x00FF0000, 12 * kProbeLength);
+  EXPECT_EQ(info.max_probe_length.load(), 12);
+  EXPECT_EQ(info.hashes_bitwise_and.load(), 0x00000000);
+  EXPECT_EQ(info.hashes_bitwise_or.load(), 0x00FFFF00);
+}
+
+TEST(HashtablezInfoTest, RecordErase) {
+  HashtablezInfo info;
+  absl::MutexLock l(&info.init_mu);
+  info.PrepareForSampling();
+  EXPECT_EQ(info.num_erases.load(), 0);
+  EXPECT_EQ(info.size.load(), 0);
+  RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
+  EXPECT_EQ(info.size.load(), 1);
+  RecordEraseSlow(&info);
+  EXPECT_EQ(info.size.load(), 0);
+  EXPECT_EQ(info.num_erases.load(), 1);
+}
+
+TEST(HashtablezInfoTest, RecordRehash) {
+  HashtablezInfo info;
+  absl::MutexLock l(&info.init_mu);
+  info.PrepareForSampling();
+  RecordInsertSlow(&info, 0x1, 0);
+  RecordInsertSlow(&info, 0x2, kProbeLength);
+  RecordInsertSlow(&info, 0x4, kProbeLength);
+  RecordInsertSlow(&info, 0x8, 2 * kProbeLength);
+  EXPECT_EQ(info.size.load(), 4);
+  EXPECT_EQ(info.total_probe_length.load(), 4);
+
+  RecordEraseSlow(&info);
+  RecordEraseSlow(&info);
+  EXPECT_EQ(info.size.load(), 2);
+  EXPECT_EQ(info.total_probe_length.load(), 4);
+  EXPECT_EQ(info.num_erases.load(), 2);
+
+  RecordRehashSlow(&info, 3 * kProbeLength);
+  EXPECT_EQ(info.size.load(), 2);
+  EXPECT_EQ(info.total_probe_length.load(), 3);
+  EXPECT_EQ(info.num_erases.load(), 0);
+  EXPECT_EQ(info.num_rehashes.load(), 1);
+}
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+TEST(HashtablezSamplerTest, SmallSampleParameter) {
+  SetHashtablezEnabled(true);
+  SetHashtablezSampleParameter(100);
+
+  for (int i = 0; i < 1000; ++i) {
+    int64_t next_sample = 0;
+    HashtablezInfo* sample = SampleSlow(&next_sample);
+    EXPECT_GT(next_sample, 0);
+    EXPECT_NE(sample, nullptr);
+    UnsampleSlow(sample);
+  }
+}
+
+TEST(HashtablezSamplerTest, LargeSampleParameter) {
+  SetHashtablezEnabled(true);
+  SetHashtablezSampleParameter(std::numeric_limits<int32_t>::max());
+
+  for (int i = 0; i < 1000; ++i) {
+    int64_t next_sample = 0;
+    HashtablezInfo* sample = SampleSlow(&next_sample);
+    EXPECT_GT(next_sample, 0);
+    EXPECT_NE(sample, nullptr);
+    UnsampleSlow(sample);
+  }
+}
+
+TEST(HashtablezSamplerTest, Sample) {
+  SetHashtablezEnabled(true);
+  SetHashtablezSampleParameter(100);
+  int64_t num_sampled = 0;
+  int64_t total = 0;
+  double sample_rate = 0.0;
+  for (int i = 0; i < 1000000; ++i) {
+    HashtablezInfoHandle h = Sample();
+    ++total;
+    if (HashtablezInfoHandlePeer::IsSampled(h)) {
+      ++num_sampled;
+    }
+    sample_rate = static_cast<double>(num_sampled) / total;
+    if (0.005 < sample_rate && sample_rate < 0.015) break;
+  }
+  EXPECT_NEAR(sample_rate, 0.01, 0.005);
+}
+
+TEST(HashtablezSamplerTest, Handle) {
+  auto& sampler = HashtablezSampler::Global();
+  HashtablezInfoHandle h(sampler.Register());
+  auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
+  info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);
+
+  bool found = false;
+  sampler.Iterate([&](const HashtablezInfo& h) {
+    if (&h == info) {
+      EXPECT_EQ(h.hashes_bitwise_and.load(), 0x12345678);
+      found = true;
+    }
+  });
+  EXPECT_TRUE(found);
+
+  h = HashtablezInfoHandle();
+  found = false;
+  sampler.Iterate([&](const HashtablezInfo& h) {
+    if (&h == info) {
+      // This will only happen if some other thread has resurrected the info
+      // the old handle was using.
+      if (h.hashes_bitwise_and.load() == 0x12345678) {
+        found = true;
+      }
+    }
+  });
+  EXPECT_FALSE(found);
+}
+#endif
+
+TEST(HashtablezSamplerTest, Registration) {
+  HashtablezSampler sampler;
+  auto* info1 = Register(&sampler, 1);
+  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1));
+
+  auto* info2 = Register(&sampler, 2);
+  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1, 2));
+  info1->size.store(3);
+  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(3, 2));
+
+  sampler.Unregister(info1);
+  sampler.Unregister(info2);
+}
+
+TEST(HashtablezSamplerTest, Unregistration) {
+  HashtablezSampler sampler;
+  std::vector<HashtablezInfo*> infos;
+  for (size_t i = 0; i < 3; ++i) {
+    infos.push_back(Register(&sampler, i));
+  }
+  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 1, 2));
+
+  sampler.Unregister(infos[1]);
+  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2));
+
+  infos.push_back(Register(&sampler, 3));
+  infos.push_back(Register(&sampler, 4));
+  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 3, 4));
+  sampler.Unregister(infos[3]);
+  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 4));
+
+  sampler.Unregister(infos[0]);
+  sampler.Unregister(infos[2]);
+  sampler.Unregister(infos[4]);
+  EXPECT_THAT(GetSizes(&sampler), IsEmpty());
+}
+
+TEST(HashtablezSamplerTest, MultiThreaded) {
+  HashtablezSampler sampler;
+  Notification stop;
+  ThreadPool pool(10);
+
+  for (int i = 0; i < 10; ++i) {
+    pool.Schedule([&sampler, &stop]() {
+      std::random_device rd;
+      std::mt19937 gen(rd());
+
+      std::vector<HashtablezInfo*> infoz;
+      while (!stop.HasBeenNotified()) {
+        if (infoz.empty()) {
+          infoz.push_back(sampler.Register());
+        }
+        switch (std::uniform_int_distribution<>(0, 2)(gen)) {
+          case 0: {
+            infoz.push_back(sampler.Register());
+            break;
+          }
+          case 1: {
+            size_t p =
+                std::uniform_int_distribution<>(0, infoz.size() - 1)(gen);
+            HashtablezInfo* info = infoz[p];
+            infoz[p] = infoz.back();
+            infoz.pop_back();
+            sampler.Unregister(info);
+            break;
+          }
+          case 2: {
+            absl::Duration oldest = absl::ZeroDuration();
+            sampler.Iterate([&](const HashtablezInfo& info) {
+              oldest = std::max(oldest, absl::Now() - info.create_time);
+            });
+            ASSERT_GE(oldest, absl::ZeroDuration());
+            break;
+          }
+        }
+      }
+    });
+  }
+  // The threads will hammer away.  Give it a little bit of time for tsan to
+  // spot errors.
+  absl::SleepFor(absl::Seconds(3));
+  stop.Notify();
+}
+
+TEST(HashtablezSamplerTest, Callback) {
+  HashtablezSampler sampler;
+
+  auto* info1 = Register(&sampler, 1);
+  auto* info2 = Register(&sampler, 2);
+
+  static const HashtablezInfo* expected;
+
+  auto callback = [](const HashtablezInfo& info) {
+    // We can't use `info` outside of this callback because the object will be
+    // disposed as soon as we return from here.
+    EXPECT_EQ(&info, expected);
+  };
+
+  // Set the callback.
+  EXPECT_EQ(sampler.SetDisposeCallback(callback), nullptr);
+  expected = info1;
+  sampler.Unregister(info1);
+
+  // Unset the callback.
+  EXPECT_EQ(callback, sampler.SetDisposeCallback(nullptr));
+  expected = nullptr;  // no more calls.
+  sampler.Unregister(info2);
+}
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/container/internal/have_sse.h b/third_party/abseil_cpp/absl/container/internal/have_sse.h
new file mode 100644
index 000000000000..e75e1a16d327
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/have_sse.h
@@ -0,0 +1,50 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Shared config probing for SSE instructions used in Swiss tables.
+#ifndef ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
+#define ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
+
+#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#if defined(__SSE2__) ||  \
+    (defined(_MSC_VER) && \
+     (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2)))
+#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 1
+#else
+#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 0
+#endif
+#endif
+
+#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
+#ifdef __SSSE3__
+#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 1
+#else
+#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 0
+#endif
+#endif
+
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 && \
+    !ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#error "Bad configuration!"
+#endif
+
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#include <emmintrin.h>
+#endif
+
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
+#include <tmmintrin.h>
+#endif
+
+#endif  // ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/inlined_vector.h b/third_party/abseil_cpp/absl/container/internal/inlined_vector.h
new file mode 100644
index 000000000000..c98c25c44221
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/inlined_vector.h
@@ -0,0 +1,895 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
+#define ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
+
+#include <algorithm>
+#include <cstddef>
+#include <cstring>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <utility>
+
+#include "absl/base/macros.h"
+#include "absl/container/internal/compressed_tuple.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/types/span.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace inlined_vector_internal {
+
+template <typename Iterator>
+using IsAtLeastForwardIterator = std::is_convertible<
+    typename std::iterator_traits<Iterator>::iterator_category,
+    std::forward_iterator_tag>;
+
+template <typename AllocatorType,
+          typename ValueType =
+              typename absl::allocator_traits<AllocatorType>::value_type>
+using IsMemcpyOk =
+    absl::conjunction<std::is_same<AllocatorType, std::allocator<ValueType>>,
+                      absl::is_trivially_copy_constructible<ValueType>,
+                      absl::is_trivially_copy_assignable<ValueType>,
+                      absl::is_trivially_destructible<ValueType>>;
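+
+// For illustration (not part of the upstream file): with the default
+// allocator and a trivially copyable value type the trait holds, which lets
+// `Storage::MemcpyFrom()` below copy the raw representation instead of
+// running constructors:
+//
+//   static_assert(IsMemcpyOk<std::allocator<int>>::value, "");
+//   static_assert(!IsMemcpyOk<std::allocator<std::unique_ptr<int>>>::value,
+//                 "");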
+
+template <typename AllocatorType, typename Pointer, typename SizeType>
+void DestroyElements(AllocatorType* alloc_ptr, Pointer destroy_first,
+                     SizeType destroy_size) {
+  using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+
+  if (destroy_first != nullptr) {
+    for (auto i = destroy_size; i != 0;) {
+      --i;
+      AllocatorTraits::destroy(*alloc_ptr, destroy_first + i);
+    }
+
+#if !defined(NDEBUG)
+    {
+      using ValueType = typename AllocatorTraits::value_type;
+
+      // Overwrite unused memory with `0xab` so we can catch uninitialized
+      // usage.
+      //
+      // Cast to `void*` to tell the compiler that we don't care that we might
+      // be scribbling on a vtable pointer.
+      void* memory_ptr = destroy_first;
+      auto memory_size = destroy_size * sizeof(ValueType);
+      std::memset(memory_ptr, 0xab, memory_size);
+    }
+#endif  // !defined(NDEBUG)
+  }
+}
+
+template <typename AllocatorType, typename Pointer, typename ValueAdapter,
+          typename SizeType>
+void ConstructElements(AllocatorType* alloc_ptr, Pointer construct_first,
+                       ValueAdapter* values_ptr, SizeType construct_size) {
+  for (SizeType i = 0; i < construct_size; ++i) {
+    ABSL_INTERNAL_TRY {
+      values_ptr->ConstructNext(alloc_ptr, construct_first + i);
+    }
+    ABSL_INTERNAL_CATCH_ANY {
+      inlined_vector_internal::DestroyElements(alloc_ptr, construct_first, i);
+      ABSL_INTERNAL_RETHROW;
+    }
+  }
+}
+
+template <typename Pointer, typename ValueAdapter, typename SizeType>
+void AssignElements(Pointer assign_first, ValueAdapter* values_ptr,
+                    SizeType assign_size) {
+  for (SizeType i = 0; i < assign_size; ++i) {
+    values_ptr->AssignNext(assign_first + i);
+  }
+}
+
+template <typename AllocatorType>
+struct StorageView {
+  using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+  using Pointer = typename AllocatorTraits::pointer;
+  using SizeType = typename AllocatorTraits::size_type;
+
+  Pointer data;
+  SizeType size;
+  SizeType capacity;
+};
+
+template <typename AllocatorType, typename Iterator>
+class IteratorValueAdapter {
+  using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+  using Pointer = typename AllocatorTraits::pointer;
+
+ public:
+  explicit IteratorValueAdapter(const Iterator& it) : it_(it) {}
+
+  void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) {
+    AllocatorTraits::construct(*alloc_ptr, construct_at, *it_);
+    ++it_;
+  }
+
+  void AssignNext(Pointer assign_at) {
+    *assign_at = *it_;
+    ++it_;
+  }
+
+ private:
+  Iterator it_;
+};
+
+template <typename AllocatorType>
+class CopyValueAdapter {
+  using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+  using ValueType = typename AllocatorTraits::value_type;
+  using Pointer = typename AllocatorTraits::pointer;
+  using ConstPointer = typename AllocatorTraits::const_pointer;
+
+ public:
+  explicit CopyValueAdapter(const ValueType& v) : ptr_(std::addressof(v)) {}
+
+  void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) {
+    AllocatorTraits::construct(*alloc_ptr, construct_at, *ptr_);
+  }
+
+  void AssignNext(Pointer assign_at) { *assign_at = *ptr_; }
+
+ private:
+  ConstPointer ptr_;
+};
+
+template <typename AllocatorType>
+class DefaultValueAdapter {
+  using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+  using ValueType = typename AllocatorTraits::value_type;
+  using Pointer = typename AllocatorTraits::pointer;
+
+ public:
+  explicit DefaultValueAdapter() {}
+
+  void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) {
+    AllocatorTraits::construct(*alloc_ptr, construct_at);
+  }
+
+  void AssignNext(Pointer assign_at) { *assign_at = ValueType(); }
+};
+
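+// `AllocationTransaction` is an RAII rollback guard used below to provide the
+// strong exception guarantee. A typical sequence (compare `Assign()` further
+// down) looks roughly like:
+//
+//   AllocationTransaction allocation_tx(GetAllocPtr());
+//   pointer new_data = allocation_tx.Allocate(new_capacity);
+//   // ... construct elements into `new_data`; a throw here rolls back ...
+//   AcquireAllocatedData(&allocation_tx);  // adopts the buffer and Reset()s
+//
+// If an exception escapes before the adoption, the destructor returns the
+// buffer to the allocator. `ConstructionTransaction` plays the same role for
+// elements that are constructed but not yet committed.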
+template <typename AllocatorType>
+class AllocationTransaction {
+  using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+  using Pointer = typename AllocatorTraits::pointer;
+  using SizeType = typename AllocatorTraits::size_type;
+
+ public:
+  explicit AllocationTransaction(AllocatorType* alloc_ptr)
+      : alloc_data_(*alloc_ptr, nullptr) {}
+
+  ~AllocationTransaction() {
+    if (DidAllocate()) {
+      AllocatorTraits::deallocate(GetAllocator(), GetData(), GetCapacity());
+    }
+  }
+
+  AllocationTransaction(const AllocationTransaction&) = delete;
+  void operator=(const AllocationTransaction&) = delete;
+
+  AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); }
+  Pointer& GetData() { return alloc_data_.template get<1>(); }
+  SizeType& GetCapacity() { return capacity_; }
+
+  bool DidAllocate() { return GetData() != nullptr; }
+  Pointer Allocate(SizeType capacity) {
+    GetData() = AllocatorTraits::allocate(GetAllocator(), capacity);
+    GetCapacity() = capacity;
+    return GetData();
+  }
+
+  void Reset() {
+    GetData() = nullptr;
+    GetCapacity() = 0;
+  }
+
+ private:
+  container_internal::CompressedTuple<AllocatorType, Pointer> alloc_data_;
+  SizeType capacity_ = 0;
+};
+
+template <typename AllocatorType>
+class ConstructionTransaction {
+  using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+  using Pointer = typename AllocatorTraits::pointer;
+  using SizeType = typename AllocatorTraits::size_type;
+
+ public:
+  explicit ConstructionTransaction(AllocatorType* alloc_ptr)
+      : alloc_data_(*alloc_ptr, nullptr) {}
+
+  ~ConstructionTransaction() {
+    if (DidConstruct()) {
+      inlined_vector_internal::DestroyElements(std::addressof(GetAllocator()),
+                                               GetData(), GetSize());
+    }
+  }
+
+  ConstructionTransaction(const ConstructionTransaction&) = delete;
+  void operator=(const ConstructionTransaction&) = delete;
+
+  AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); }
+  Pointer& GetData() { return alloc_data_.template get<1>(); }
+  SizeType& GetSize() { return size_; }
+
+  bool DidConstruct() { return GetData() != nullptr; }
+  template <typename ValueAdapter>
+  void Construct(Pointer data, ValueAdapter* values_ptr, SizeType size) {
+    inlined_vector_internal::ConstructElements(std::addressof(GetAllocator()),
+                                               data, values_ptr, size);
+    GetData() = data;
+    GetSize() = size;
+  }
+  void Commit() {
+    GetData() = nullptr;
+    GetSize() = 0;
+  }
+
+ private:
+  container_internal::CompressedTuple<AllocatorType, Pointer> alloc_data_;
+  SizeType size_ = 0;
+};
+
+template <typename T, size_t N, typename A>
+class Storage {
+ public:
+  using AllocatorTraits = absl::allocator_traits<A>;
+  using allocator_type = typename AllocatorTraits::allocator_type;
+  using value_type = typename AllocatorTraits::value_type;
+  using pointer = typename AllocatorTraits::pointer;
+  using const_pointer = typename AllocatorTraits::const_pointer;
+  using size_type = typename AllocatorTraits::size_type;
+  using difference_type = typename AllocatorTraits::difference_type;
+
+  using reference = value_type&;
+  using const_reference = const value_type&;
+  using RValueReference = value_type&&;
+  using iterator = pointer;
+  using const_iterator = const_pointer;
+  using reverse_iterator = std::reverse_iterator<iterator>;
+  using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+  using MoveIterator = std::move_iterator<iterator>;
+  using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk<allocator_type>;
+
+  using StorageView = inlined_vector_internal::StorageView<allocator_type>;
+
+  template <typename Iterator>
+  using IteratorValueAdapter =
+      inlined_vector_internal::IteratorValueAdapter<allocator_type, Iterator>;
+  using CopyValueAdapter =
+      inlined_vector_internal::CopyValueAdapter<allocator_type>;
+  using DefaultValueAdapter =
+      inlined_vector_internal::DefaultValueAdapter<allocator_type>;
+
+  using AllocationTransaction =
+      inlined_vector_internal::AllocationTransaction<allocator_type>;
+  using ConstructionTransaction =
+      inlined_vector_internal::ConstructionTransaction<allocator_type>;
+
+  static size_type NextCapacity(size_type current_capacity) {
+    return current_capacity * 2;
+  }
+
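+  // Growth doubles the current capacity but never under-serves the request;
+  // e.g. ComputeCapacity(4, 5) == max(8, 5) == 8, while
+  // ComputeCapacity(4, 9) == max(8, 9) == 9.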
+  static size_type ComputeCapacity(size_type current_capacity,
+                                   size_type requested_capacity) {
+    return (std::max)(NextCapacity(current_capacity), requested_capacity);
+  }
+
+  // ---------------------------------------------------------------------------
+  // Storage Constructors and Destructor
+  // ---------------------------------------------------------------------------
+
+  Storage() : metadata_() {}
+
+  explicit Storage(const allocator_type& alloc) : metadata_(alloc, {}) {}
+
+  ~Storage() {
+    pointer data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
+    inlined_vector_internal::DestroyElements(GetAllocPtr(), data, GetSize());
+    DeallocateIfAllocated();
+  }
+
+  // ---------------------------------------------------------------------------
+  // Storage Member Accessors
+  // ---------------------------------------------------------------------------
+
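+  // The second element of `metadata_` packs the size together with the
+  // "is allocated" flag: the low bit is the flag and the remaining bits hold
+  // the size. E.g. a heap-allocated vector of size 3 stores (3 << 1) | 1 == 7;
+  // the accessors below shift and mask accordingly.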
+  size_type& GetSizeAndIsAllocated() { return metadata_.template get<1>(); }
+
+  const size_type& GetSizeAndIsAllocated() const {
+    return metadata_.template get<1>();
+  }
+
+  size_type GetSize() const { return GetSizeAndIsAllocated() >> 1; }
+
+  bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; }
+
+  pointer GetAllocatedData() { return data_.allocated.allocated_data; }
+
+  const_pointer GetAllocatedData() const {
+    return data_.allocated.allocated_data;
+  }
+
+  pointer GetInlinedData() {
+    return reinterpret_cast<pointer>(
+        std::addressof(data_.inlined.inlined_data[0]));
+  }
+
+  const_pointer GetInlinedData() const {
+    return reinterpret_cast<const_pointer>(
+        std::addressof(data_.inlined.inlined_data[0]));
+  }
+
+  size_type GetAllocatedCapacity() const {
+    return data_.allocated.allocated_capacity;
+  }
+
+  size_type GetInlinedCapacity() const { return static_cast<size_type>(N); }
+
+  StorageView MakeStorageView() {
+    return GetIsAllocated()
+               ? StorageView{GetAllocatedData(), GetSize(),
+                             GetAllocatedCapacity()}
+               : StorageView{GetInlinedData(), GetSize(), GetInlinedCapacity()};
+  }
+
+  allocator_type* GetAllocPtr() {
+    return std::addressof(metadata_.template get<0>());
+  }
+
+  const allocator_type* GetAllocPtr() const {
+    return std::addressof(metadata_.template get<0>());
+  }
+
+  // ---------------------------------------------------------------------------
+  // Storage Member Mutators
+  // ---------------------------------------------------------------------------
+
+  template <typename ValueAdapter>
+  void Initialize(ValueAdapter values, size_type new_size);
+
+  template <typename ValueAdapter>
+  void Assign(ValueAdapter values, size_type new_size);
+
+  template <typename ValueAdapter>
+  void Resize(ValueAdapter values, size_type new_size);
+
+  template <typename ValueAdapter>
+  iterator Insert(const_iterator pos, ValueAdapter values,
+                  size_type insert_count);
+
+  template <typename... Args>
+  reference EmplaceBack(Args&&... args);
+
+  iterator Erase(const_iterator from, const_iterator to);
+
+  void Reserve(size_type requested_capacity);
+
+  void ShrinkToFit();
+
+  void Swap(Storage* other_storage_ptr);
+
+  void SetIsAllocated() {
+    GetSizeAndIsAllocated() |= static_cast<size_type>(1);
+  }
+
+  void UnsetIsAllocated() {
+    GetSizeAndIsAllocated() &= ((std::numeric_limits<size_type>::max)() - 1);
+  }
+
+  void SetSize(size_type size) {
+    GetSizeAndIsAllocated() =
+        (size << 1) | static_cast<size_type>(GetIsAllocated());
+  }
+
+  void SetAllocatedSize(size_type size) {
+    GetSizeAndIsAllocated() = (size << 1) | static_cast<size_type>(1);
+  }
+
+  void SetInlinedSize(size_type size) {
+    GetSizeAndIsAllocated() = size << static_cast<size_type>(1);
+  }
+
+  void AddSize(size_type count) {
+    GetSizeAndIsAllocated() += count << static_cast<size_type>(1);
+  }
+
+  void SubtractSize(size_type count) {
+    assert(count <= GetSize());
+
+    GetSizeAndIsAllocated() -= count << static_cast<size_type>(1);
+  }
+
+  void SetAllocatedData(pointer data, size_type capacity) {
+    data_.allocated.allocated_data = data;
+    data_.allocated.allocated_capacity = capacity;
+  }
+
+  void AcquireAllocatedData(AllocationTransaction* allocation_tx_ptr) {
+    SetAllocatedData(allocation_tx_ptr->GetData(),
+                     allocation_tx_ptr->GetCapacity());
+
+    allocation_tx_ptr->Reset();
+  }
+
+  void MemcpyFrom(const Storage& other_storage) {
+    assert(IsMemcpyOk::value || other_storage.GetIsAllocated());
+
+    GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated();
+    data_ = other_storage.data_;
+  }
+
+  void DeallocateIfAllocated() {
+    if (GetIsAllocated()) {
+      AllocatorTraits::deallocate(*GetAllocPtr(), GetAllocatedData(),
+                                  GetAllocatedCapacity());
+    }
+  }
+
+ private:
+  using Metadata =
+      container_internal::CompressedTuple<allocator_type, size_type>;
+
+  struct Allocated {
+    pointer allocated_data;
+    size_type allocated_capacity;
+  };
+
+  struct Inlined {
+    alignas(value_type) char inlined_data[sizeof(value_type[N])];
+  };
+
+  union Data {
+    Allocated allocated;
+    Inlined inlined;
+  };
+
+  template <typename... Args>
+  ABSL_ATTRIBUTE_NOINLINE reference EmplaceBackSlow(Args&&... args);
+
+  Metadata metadata_;
+  Data data_;
+};
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Initialize(ValueAdapter values, size_type new_size)
+    -> void {
+  // Only callable from constructors!
+  assert(!GetIsAllocated());
+  assert(GetSize() == 0);
+
+  pointer construct_data;
+  if (new_size > GetInlinedCapacity()) {
+    // Because this is only called from the `InlinedVector` constructors, it's
+    // safe to take on the allocation while the size is still `0`: if
+    // `ConstructElements(...)` throws, `~Storage()` handles the deallocation.
+    size_type new_capacity = ComputeCapacity(GetInlinedCapacity(), new_size);
+    construct_data = AllocatorTraits::allocate(*GetAllocPtr(), new_capacity);
+    SetAllocatedData(construct_data, new_capacity);
+    SetIsAllocated();
+  } else {
+    construct_data = GetInlinedData();
+  }
+
+  inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data,
+                                             &values, new_size);
+
+  // Since the initial size was guaranteed to be `0` and the allocated bit is
+  // already correct for either case, *adding* `new_size` gives us the correct
+  // result faster than setting it directly.
+  AddSize(new_size);
+}
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Assign(ValueAdapter values, size_type new_size) -> void {
+  StorageView storage_view = MakeStorageView();
+
+  AllocationTransaction allocation_tx(GetAllocPtr());
+
+  absl::Span<value_type> assign_loop;
+  absl::Span<value_type> construct_loop;
+  absl::Span<value_type> destroy_loop;
+
+  if (new_size > storage_view.capacity) {
+    size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
+    construct_loop = {allocation_tx.Allocate(new_capacity), new_size};
+    destroy_loop = {storage_view.data, storage_view.size};
+  } else if (new_size > storage_view.size) {
+    assign_loop = {storage_view.data, storage_view.size};
+    construct_loop = {storage_view.data + storage_view.size,
+                      new_size - storage_view.size};
+  } else {
+    assign_loop = {storage_view.data, new_size};
+    destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
+  }
+
+  inlined_vector_internal::AssignElements(assign_loop.data(), &values,
+                                          assign_loop.size());
+
+  inlined_vector_internal::ConstructElements(
+      GetAllocPtr(), construct_loop.data(), &values, construct_loop.size());
+
+  inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(),
+                                           destroy_loop.size());
+
+  if (allocation_tx.DidAllocate()) {
+    DeallocateIfAllocated();
+    AcquireAllocatedData(&allocation_tx);
+    SetIsAllocated();
+  }
+
+  SetSize(new_size);
+}
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Resize(ValueAdapter values, size_type new_size) -> void {
+  StorageView storage_view = MakeStorageView();
+  auto* const base = storage_view.data;
+  const size_type size = storage_view.size;
+  auto* alloc = GetAllocPtr();
+  if (new_size <= size) {
+    // Destroy extra old elements.
+    inlined_vector_internal::DestroyElements(alloc, base + new_size,
+                                             size - new_size);
+  } else if (new_size <= storage_view.capacity) {
+    // Construct new elements in place.
+    inlined_vector_internal::ConstructElements(alloc, base + size, &values,
+                                               new_size - size);
+  } else {
+    // Steps:
+    //  a. Allocate new backing store.
+    //  b. Construct new elements in new backing store.
+    //  c. Move existing elements from old backing store to the new one.
+    //  d. Destroy all elements in old backing store.
+    // Use transactional wrappers for the first two steps so we can roll
+    // back if necessary due to exceptions.
+    AllocationTransaction allocation_tx(alloc);
+    size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
+    pointer new_data = allocation_tx.Allocate(new_capacity);
+
+    ConstructionTransaction construction_tx(alloc);
+    construction_tx.Construct(new_data + size, &values, new_size - size);
+
+    IteratorValueAdapter<MoveIterator> move_values((MoveIterator(base)));
+    inlined_vector_internal::ConstructElements(alloc, new_data, &move_values,
+                                               size);
+
+    inlined_vector_internal::DestroyElements(alloc, base, size);
+    construction_tx.Commit();
+    DeallocateIfAllocated();
+    AcquireAllocatedData(&allocation_tx);
+    SetIsAllocated();
+  }
+  SetSize(new_size);
+}
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Insert(const_iterator pos, ValueAdapter values,
+                              size_type insert_count) -> iterator {
+  StorageView storage_view = MakeStorageView();
+
+  size_type insert_index =
+      std::distance(const_iterator(storage_view.data), pos);
+  size_type insert_end_index = insert_index + insert_count;
+  size_type new_size = storage_view.size + insert_count;
+
+  if (new_size > storage_view.capacity) {
+    AllocationTransaction allocation_tx(GetAllocPtr());
+    ConstructionTransaction construction_tx(GetAllocPtr());
+    ConstructionTransaction move_construction_tx(GetAllocPtr());
+
+    IteratorValueAdapter<MoveIterator> move_values(
+        MoveIterator(storage_view.data));
+
+    size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
+    pointer new_data = allocation_tx.Allocate(new_capacity);
+
+    construction_tx.Construct(new_data + insert_index, &values, insert_count);
+
+    move_construction_tx.Construct(new_data, &move_values, insert_index);
+
+    inlined_vector_internal::ConstructElements(
+        GetAllocPtr(), new_data + insert_end_index, &move_values,
+        storage_view.size - insert_index);
+
+    inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+                                             storage_view.size);
+
+    construction_tx.Commit();
+    move_construction_tx.Commit();
+    DeallocateIfAllocated();
+    AcquireAllocatedData(&allocation_tx);
+
+    SetAllocatedSize(new_size);
+    return iterator(new_data + insert_index);
+  } else {
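+    // In-place path: open a gap of `insert_count` slots by move-constructing
+    // into the uninitialized tail and move-assigning backwards within the
+    // initialized region; then fill the gap from `values`, assigning over
+    // initialized slots and constructing in uninitialized ones.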
+    size_type move_construction_destination_index =
+        (std::max)(insert_end_index, storage_view.size);
+
+    ConstructionTransaction move_construction_tx(GetAllocPtr());
+
+    IteratorValueAdapter<MoveIterator> move_construction_values(
+        MoveIterator(storage_view.data +
+                     (move_construction_destination_index - insert_count)));
+    absl::Span<value_type> move_construction = {
+        storage_view.data + move_construction_destination_index,
+        new_size - move_construction_destination_index};
+
+    pointer move_assignment_values = storage_view.data + insert_index;
+    absl::Span<value_type> move_assignment = {
+        storage_view.data + insert_end_index,
+        move_construction_destination_index - insert_end_index};
+
+    absl::Span<value_type> insert_assignment = {move_assignment_values,
+                                                move_construction.size()};
+
+    absl::Span<value_type> insert_construction = {
+        insert_assignment.data() + insert_assignment.size(),
+        insert_count - insert_assignment.size()};
+
+    move_construction_tx.Construct(move_construction.data(),
+                                   &move_construction_values,
+                                   move_construction.size());
+
+    for (pointer destination = move_assignment.data() + move_assignment.size(),
+                 last_destination = move_assignment.data(),
+                 source = move_assignment_values + move_assignment.size();
+         ;) {
+      --destination;
+      --source;
+      if (destination < last_destination) break;
+      *destination = std::move(*source);
+    }
+
+    inlined_vector_internal::AssignElements(insert_assignment.data(), &values,
+                                            insert_assignment.size());
+
+    inlined_vector_internal::ConstructElements(
+        GetAllocPtr(), insert_construction.data(), &values,
+        insert_construction.size());
+
+    move_construction_tx.Commit();
+
+    AddSize(insert_count);
+    return iterator(storage_view.data + insert_index);
+  }
+}
+
+template <typename T, size_t N, typename A>
+template <typename... Args>
+auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference {
+  StorageView storage_view = MakeStorageView();
+  const auto n = storage_view.size;
+  if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) {
+    // Fast path; new element fits.
+    pointer last_ptr = storage_view.data + n;
+    AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
+                               std::forward<Args>(args)...);
+    AddSize(1);
+    return *last_ptr;
+  }
+  // TODO(b/173712035): Annotate with musttail attribute to prevent regression.
+  return EmplaceBackSlow(std::forward<Args>(args)...);
+}
+
+template <typename T, size_t N, typename A>
+template <typename... Args>
+auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> reference {
+  StorageView storage_view = MakeStorageView();
+  AllocationTransaction allocation_tx(GetAllocPtr());
+  IteratorValueAdapter<MoveIterator> move_values(
+      MoveIterator(storage_view.data));
+  size_type new_capacity = NextCapacity(storage_view.capacity);
+  pointer construct_data = allocation_tx.Allocate(new_capacity);
+  pointer last_ptr = construct_data + storage_view.size;
+
+  // Construct new element.
+  AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
+                             std::forward<Args>(args)...);
+  // Move elements from old backing store to new backing store.
+  ABSL_INTERNAL_TRY {
+    inlined_vector_internal::ConstructElements(
+        GetAllocPtr(), allocation_tx.GetData(), &move_values,
+        storage_view.size);
+  }
+  ABSL_INTERNAL_CATCH_ANY {
+    AllocatorTraits::destroy(*GetAllocPtr(), last_ptr);
+    ABSL_INTERNAL_RETHROW;
+  }
+  // Destroy elements in old backing store.
+  inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+                                           storage_view.size);
+
+  DeallocateIfAllocated();
+  AcquireAllocatedData(&allocation_tx);
+  SetIsAllocated();
+  AddSize(1);
+  return *last_ptr;
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::Erase(const_iterator from, const_iterator to)
+    -> iterator {
+  StorageView storage_view = MakeStorageView();
+
+  size_type erase_size = std::distance(from, to);
+  size_type erase_index =
+      std::distance(const_iterator(storage_view.data), from);
+  size_type erase_end_index = erase_index + erase_size;
+
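+  // Shift the tail left over the erased range via move assignment, then
+  // destroy the now-unused last `erase_size` slots.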
+  IteratorValueAdapter<MoveIterator> move_values(
+      MoveIterator(storage_view.data + erase_end_index));
+
+  inlined_vector_internal::AssignElements(storage_view.data + erase_index,
+                                          &move_values,
+                                          storage_view.size - erase_end_index);
+
+  inlined_vector_internal::DestroyElements(
+      GetAllocPtr(), storage_view.data + (storage_view.size - erase_size),
+      erase_size);
+
+  SubtractSize(erase_size);
+  return iterator(storage_view.data + erase_index);
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::Reserve(size_type requested_capacity) -> void {
+  StorageView storage_view = MakeStorageView();
+
+  if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) return;
+
+  AllocationTransaction allocation_tx(GetAllocPtr());
+
+  IteratorValueAdapter<MoveIterator> move_values(
+      MoveIterator(storage_view.data));
+
+  size_type new_capacity =
+      ComputeCapacity(storage_view.capacity, requested_capacity);
+  pointer new_data = allocation_tx.Allocate(new_capacity);
+
+  inlined_vector_internal::ConstructElements(GetAllocPtr(), new_data,
+                                             &move_values, storage_view.size);
+
+  inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+                                           storage_view.size);
+
+  DeallocateIfAllocated();
+  AcquireAllocatedData(&allocation_tx);
+  SetIsAllocated();
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::ShrinkToFit() -> void {
+  // May only be called on allocated instances!
+  assert(GetIsAllocated());
+
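+  // If the remaining elements fit in the inline buffer they are moved back
+  // inline; otherwise they are moved into a smaller heap allocation.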
+  StorageView storage_view{GetAllocatedData(), GetSize(),
+                           GetAllocatedCapacity()};
+
+  if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) return;
+
+  AllocationTransaction allocation_tx(GetAllocPtr());
+
+  IteratorValueAdapter<MoveIterator> move_values(
+      MoveIterator(storage_view.data));
+
+  pointer construct_data;
+  if (storage_view.size > GetInlinedCapacity()) {
+    size_type new_capacity = storage_view.size;
+    construct_data = allocation_tx.Allocate(new_capacity);
+  } else {
+    construct_data = GetInlinedData();
+  }
+
+  ABSL_INTERNAL_TRY {
+    inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data,
+                                               &move_values, storage_view.size);
+  }
+  ABSL_INTERNAL_CATCH_ANY {
+    SetAllocatedData(storage_view.data, storage_view.capacity);
+    ABSL_INTERNAL_RETHROW;
+  }
+
+  inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+                                           storage_view.size);
+
+  AllocatorTraits::deallocate(*GetAllocPtr(), storage_view.data,
+                              storage_view.capacity);
+
+  if (allocation_tx.DidAllocate()) {
+    AcquireAllocatedData(&allocation_tx);
+  } else {
+    UnsetIsAllocated();
+  }
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
+  using std::swap;
+  assert(this != other_storage_ptr);
+
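+  // Three cases: both heap-allocated (swap the buffers), both inlined
+  // (element-wise swap plus a move of the longer one's excess), and mixed
+  // (move the inlined elements across, then hand over the heap buffer).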
+  if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) {
+    swap(data_.allocated, other_storage_ptr->data_.allocated);
+  } else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) {
+    Storage* small_ptr = this;
+    Storage* large_ptr = other_storage_ptr;
+    if (small_ptr->GetSize() > large_ptr->GetSize()) swap(small_ptr, large_ptr);
+
+    for (size_type i = 0; i < small_ptr->GetSize(); ++i) {
+      swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]);
+    }
+
+    IteratorValueAdapter<MoveIterator> move_values(
+        MoveIterator(large_ptr->GetInlinedData() + small_ptr->GetSize()));
+
+    inlined_vector_internal::ConstructElements(
+        large_ptr->GetAllocPtr(),
+        small_ptr->GetInlinedData() + small_ptr->GetSize(), &move_values,
+        large_ptr->GetSize() - small_ptr->GetSize());
+
+    inlined_vector_internal::DestroyElements(
+        large_ptr->GetAllocPtr(),
+        large_ptr->GetInlinedData() + small_ptr->GetSize(),
+        large_ptr->GetSize() - small_ptr->GetSize());
+  } else {
+    Storage* allocated_ptr = this;
+    Storage* inlined_ptr = other_storage_ptr;
+    if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr);
+
+    StorageView allocated_storage_view{allocated_ptr->GetAllocatedData(),
+                                       allocated_ptr->GetSize(),
+                                       allocated_ptr->GetAllocatedCapacity()};
+
+    IteratorValueAdapter<MoveIterator> move_values(
+        MoveIterator(inlined_ptr->GetInlinedData()));
+
+    ABSL_INTERNAL_TRY {
+      inlined_vector_internal::ConstructElements(
+          inlined_ptr->GetAllocPtr(), allocated_ptr->GetInlinedData(),
+          &move_values, inlined_ptr->GetSize());
+    }
+    ABSL_INTERNAL_CATCH_ANY {
+      allocated_ptr->SetAllocatedData(allocated_storage_view.data,
+                                      allocated_storage_view.capacity);
+      ABSL_INTERNAL_RETHROW;
+    }
+
+    inlined_vector_internal::DestroyElements(inlined_ptr->GetAllocPtr(),
+                                             inlined_ptr->GetInlinedData(),
+                                             inlined_ptr->GetSize());
+
+    inlined_ptr->SetAllocatedData(allocated_storage_view.data,
+                                  allocated_storage_view.capacity);
+  }
+
+  swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated());
+  swap(*GetAllocPtr(), *other_storage_ptr->GetAllocPtr());
+}
+
+}  // namespace inlined_vector_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/layout.h b/third_party/abseil_cpp/absl/container/internal/layout.h
new file mode 100644
index 000000000000..233678331543
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/layout.h
@@ -0,0 +1,743 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//                           MOTIVATION AND TUTORIAL
+//
+// If you want to put in a single heap allocation N doubles followed by M ints,
+// it's easy if N and M are known at compile time.
+//
+//   struct S {
+//     double a[N];
+//     int b[M];
+//   };
+//
+//   S* p = new S;
+//
+// But what if N and M are known only at run time? Class template Layout to the
+// rescue! It's a portable generalization of the technique known as the struct
+// hack.
+//
+//   // This object will tell us everything we need to know about the memory
+//   // layout of double[N] followed by int[M]. It's structurally identical to
+//   // size_t[2] that stores N and M. It's very cheap to create.
+//   const Layout<double, int> layout(N, M);
+//
+//   // Allocate enough memory for both arrays. `AllocSize()` tells us how much
+//   // memory is needed. We are free to use any allocation function we want as
+//   // long as it returns aligned memory.
+//   std::unique_ptr<unsigned char[]> p(new unsigned char[layout.AllocSize()]);
+//
+//   // Obtain the pointer to the array of doubles.
+//   // Equivalent to `reinterpret_cast<double*>(p.get())`.
+//   //
+//   // We could have written layout.Pointer<0>(p.get()) instead. If all the
+//   // types are unique you can use either form, but if some types are
+//   // repeated you must use the index form.
+//   double* a = layout.Pointer<double>(p.get());
+//
+//   // Obtain the pointer to the array of ints.
+//   // Equivalent to `reinterpret_cast<int*>(p.get() + N * 8)`.
+//   int* b = layout.Pointer<int>(p.get());
+//
+// If we are unable to specify sizes of all fields, we can pass as many sizes as
+// we can to `Partial()`. In return, it'll allow us to access the fields whose
+// locations and sizes can be computed from the provided information.
+// `Partial()` comes in handy when the array sizes are embedded into the
+// allocation.
+//
+//   // size_t[1] containing N, size_t[1] containing M, double[N], int[M].
+//   using L = Layout<size_t, size_t, double, int>;
+//
+//   unsigned char* Allocate(size_t n, size_t m) {
+//     const L layout(1, 1, n, m);
+//     unsigned char* p = new unsigned char[layout.AllocSize()];
+//     *layout.Pointer<0>(p) = n;
+//     *layout.Pointer<1>(p) = m;
+//     return p;
+//   }
+//
+//   void Use(unsigned char* p) {
+//     // First, extract N and M.
+//     // Specify that the first array has only one element. Using `prefix` we
+//     // can access the first two arrays but not more.
+//     constexpr auto prefix = L::Partial(1);
+//     size_t n = *prefix.Pointer<0>(p);
+//     size_t m = *prefix.Pointer<1>(p);
+//
+//     // Now we can get pointers to the payload.
+//     const L layout(1, 1, n, m);
+//     double* a = layout.Pointer<double>(p);
+//     int* b = layout.Pointer<int>(p);
+//   }
+//
+// The layout we used above combines fixed-size with dynamically-sized fields.
+// This is quite common. Layout is optimized for this use case and generates
+// optimal code. All computations that can be performed at compile time are
+// indeed performed at compile time.
+//
+// Efficiency tip: The order of fields matters. In `Layout<T1, ..., TN>` try to
+// ensure that `alignof(T1) >= ... >= alignof(TN)`. This way you'll have no
+// padding in between arrays.
+//
+// You can manually override the alignment of an array by wrapping the type in
+// `Aligned<T, N>`. `Layout<..., Aligned<T, N>, ...>` has exactly the same API
+// and behavior as `Layout<..., T, ...>` except that the first element of the
+// array of `T` is aligned to `N` (the rest of the elements follow without
+// padding). `N` cannot be less than `alignof(T)`.
+//
+// `AllocSize()` and `Pointer()` are the most basic methods for dealing with
+// memory layouts. Check out the reference or code below to discover more.
+//
+//                            EXAMPLE
+//
+//   // Immutable move-only string with sizeof equal to sizeof(void*). The
+//   // string size and the characters are kept in the same heap allocation.
+//   class CompactString {
+//    public:
+//     CompactString(const char* s = "") {
+//       const size_t size = strlen(s);
+//       // size_t[1] followed by char[size + 1].
+//       const L layout(1, size + 1);
+//       p_.reset(new unsigned char[layout.AllocSize()]);
+//       // If running under ASAN, mark the padding bytes, if any, to catch
+//       // memory errors.
+//       layout.PoisonPadding(p_.get());
+//       // Store the size in the allocation.
+//       *layout.Pointer<size_t>(p_.get()) = size;
+//       // Store the characters in the allocation.
+//       memcpy(layout.Pointer<char>(p_.get()), s, size + 1);
+//     }
+//
+//     size_t size() const {
+//       // Equivalent to reinterpret_cast<size_t&>(*p).
+//       return *L::Partial().Pointer<size_t>(p_.get());
+//     }
+//
+//     const char* c_str() const {
+//       // Equivalent to reinterpret_cast<char*>(p.get() + sizeof(size_t)).
+//       // The argument in Partial(1) specifies that we have size_t[1] in front
+//       // of the characters.
+//       return L::Partial(1).Pointer<char>(p_.get());
+//     }
+//
+//    private:
+//     // Our heap allocation contains a size_t followed by an array of chars.
+//     using L = Layout<size_t, char>;
+//     std::unique_ptr<unsigned char[]> p_;
+//   };
+//
+//   int main() {
+//     CompactString s = "hello";
+//     assert(s.size() == 5);
+//     assert(strcmp(s.c_str(), "hello") == 0);
+//   }
+//
+//                               DOCUMENTATION
+//
+// The interface exported by this file consists of:
+// - class `Layout<>` and its public members.
+// - The public members of class `internal_layout::LayoutImpl<>`. That class
+//   isn't intended to be used directly, and its name and template parameter
+//   list are internal implementation details, but the class itself provides
+//   most of the functionality in this file. See comments on its members for
+//   detailed documentation.
+//
+// `Layout<T1, ..., Tn>::Partial(count1, ..., countm)` (where `m` <= `n`)
+// returns a `LayoutImpl<>` object. `Layout<T1, ..., Tn> layout(count1, ...,
+// countn)` creates a `Layout` object, which exposes the same functionality by
+// inheriting from `LayoutImpl<>`.
+
+#ifndef ABSL_CONTAINER_INTERNAL_LAYOUT_H_
+#define ABSL_CONTAINER_INTERNAL_LAYOUT_H_
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <array>
+#include <ostream>
+#include <string>
+#include <tuple>
+#include <type_traits>
+#include <typeinfo>
+#include <utility>
+
+#include "absl/base/config.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/str_cat.h"
+#include "absl/types/span.h"
+#include "absl/utility/utility.h"
+
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif
+
+#if defined(__GXX_RTTI)
+#define ABSL_INTERNAL_HAS_CXA_DEMANGLE
+#endif
+
+#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
+#include <cxxabi.h>
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// A type wrapper that instructs `Layout` to use the specific alignment for the
+// array. `Layout<..., Aligned<T, N>, ...>` has exactly the same API
+// and behavior as `Layout<..., T, ...>` except that the first element of the
+// array of `T` is aligned to `N` (the rest of the elements follow without
+// padding).
+//
+// Requires: `N >= alignof(T)` and `N` is a power of 2.
+template <class T, size_t N>
+struct Aligned;
+
+namespace internal_layout {
+
+template <class T>
+struct NotAligned {};
+
+template <class T, size_t N>
+struct NotAligned<const Aligned<T, N>> {
+  static_assert(sizeof(T) == 0, "Aligned<T, N> cannot be const-qualified");
+};
+
+template <size_t>
+using IntToSize = size_t;
+
+template <class>
+using TypeToSize = size_t;
+
+template <class T>
+struct Type : NotAligned<T> {
+  using type = T;
+};
+
+template <class T, size_t N>
+struct Type<Aligned<T, N>> {
+  using type = T;
+};
+
+template <class T>
+struct SizeOf : NotAligned<T>, std::integral_constant<size_t, sizeof(T)> {};
+
+template <class T, size_t N>
+struct SizeOf<Aligned<T, N>> : std::integral_constant<size_t, sizeof(T)> {};
+
+// Note: workaround for https://gcc.gnu.org/PR88115
+template <class T>
+struct AlignOf : NotAligned<T> {
+  static constexpr size_t value = alignof(T);
+};
+
+template <class T, size_t N>
+struct AlignOf<Aligned<T, N>> {
+  static_assert(N % alignof(T) == 0,
+                "Custom alignment can't be lower than the type's alignment");
+  static constexpr size_t value = N;
+};
+
+// Does `Ts...` contain `T`?
+template <class T, class... Ts>
+using Contains = absl::disjunction<std::is_same<T, Ts>...>;
+
+template <class From, class To>
+using CopyConst =
+    typename std::conditional<std::is_const<From>::value, const To, To>::type;
+
+// Note: We're not qualifying this with absl:: because it doesn't compile under
+// MSVC.
+template <class T>
+using SliceType = Span<T>;
+
+// This namespace contains no types. It prevents functions defined in it from
+// being found by ADL.
+namespace adl_barrier {
+
+template <class Needle, class... Ts>
+constexpr size_t Find(Needle, Needle, Ts...) {
+  static_assert(!Contains<Needle, Ts...>(), "Duplicate element type");
+  return 0;
+}
+
+template <class Needle, class T, class... Ts>
+constexpr size_t Find(Needle, T, Ts...) {
+  return adl_barrier::Find(Needle(), Ts()...) + 1;
+}
+
+constexpr bool IsPow2(size_t n) { return !(n & (n - 1)); }
+
+// Returns `q * m` for the smallest `q` such that `q * m >= n`.
+// Requires: `m` is a power of two. It's enforced by IsLegalElementType below.
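+// For example, Align(13, 8) == 16 and Align(16, 8) == 16.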
+constexpr size_t Align(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); }
+
+constexpr size_t Min(size_t a, size_t b) { return b < a ? b : a; }
+
+constexpr size_t Max(size_t a) { return a; }
+
+template <class... Ts>
+constexpr size_t Max(size_t a, size_t b, Ts... rest) {
+  return adl_barrier::Max(b < a ? a : b, rest...);
+}
+
+template <class T>
+std::string TypeName() {
+  std::string out;
+  int status = 0;
+  char* demangled = nullptr;
+#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
+  demangled = abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status);
+#endif
+  if (status == 0 && demangled != nullptr) {  // Demangling succeeded.
+    absl::StrAppend(&out, "<", demangled, ">");
+    free(demangled);
+  } else {
+#if defined(__GXX_RTTI) || defined(_CPPRTTI)
+    absl::StrAppend(&out, "<", typeid(T).name(), ">");
+#endif
+  }
+  return out;
+}
+
+}  // namespace adl_barrier
+
+template <bool C>
+using EnableIf = typename std::enable_if<C, int>::type;
+
+// Can `T` be a template argument of `Layout`?
+template <class T>
+using IsLegalElementType = std::integral_constant<
+    bool, !std::is_reference<T>::value && !std::is_volatile<T>::value &&
+              !std::is_reference<typename Type<T>::type>::value &&
+              !std::is_volatile<typename Type<T>::type>::value &&
+              adl_barrier::IsPow2(AlignOf<T>::value)>;
+
+template <class Elements, class SizeSeq, class OffsetSeq>
+class LayoutImpl;
+
+// Public base class of `Layout` and the result type of `Layout::Partial()`.
+//
+// `Elements...` contains all template arguments of `Layout` that created this
+// instance.
+//
+// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments
+// passed to `Layout::Partial()` or `Layout::Layout()`.
+//
+// `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is
+// `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we
+// can compute offsets).
+template <class... Elements, size_t... SizeSeq, size_t... OffsetSeq>
+class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
+                 absl::index_sequence<OffsetSeq...>> {
+ private:
+  static_assert(sizeof...(Elements) > 0, "At least one field is required");
+  static_assert(absl::conjunction<IsLegalElementType<Elements>...>::value,
+                "Invalid element type (see IsLegalElementType)");
+
+  enum {
+    NumTypes = sizeof...(Elements),
+    NumSizes = sizeof...(SizeSeq),
+    NumOffsets = sizeof...(OffsetSeq),
+  };
+
+  // These are guaranteed by `Layout`.
+  static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1),
+                "Internal error");
+  static_assert(NumTypes > 0, "Internal error");
+
+  // Returns the index of `T` in `Elements...`. Results in a compilation error
+  // if `Elements...` doesn't contain exactly one instance of `T`.
+  template <class T>
+  static constexpr size_t ElementIndex() {
+    static_assert(Contains<Type<T>, Type<typename Type<Elements>::type>...>(),
+                  "Type not found");
+    return adl_barrier::Find(Type<T>(),
+                             Type<typename Type<Elements>::type>()...);
+  }
+
+  template <size_t N>
+  using ElementAlignment =
+      AlignOf<typename std::tuple_element<N, std::tuple<Elements...>>::type>;
+
+ public:
+  // Element types of all arrays packed in a tuple.
+  using ElementTypes = std::tuple<typename Type<Elements>::type...>;
+
+  // Element type of the Nth array.
+  template <size_t N>
+  using ElementType = typename std::tuple_element<N, ElementTypes>::type;
+
+  constexpr explicit LayoutImpl(IntToSize<SizeSeq>... sizes)
+      : size_{sizes...} {}
+
+  // Alignment of the layout, equal to the strictest alignment of all elements.
+  // All pointers passed to the methods of layout must be aligned to this value.
+  static constexpr size_t Alignment() {
+    return adl_barrier::Max(AlignOf<Elements>::value...);
+  }
+
+  // Offset in bytes of the Nth array.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+//   assert(x.Offset<0>() == 0);   // The ints start from 0.
+//   assert(x.Offset<1>() == 16);  // The doubles start from 16.
+  //
+  // Requires: `N <= NumSizes && N < sizeof...(Ts)`.
+  template <size_t N, EnableIf<N == 0> = 0>
+  constexpr size_t Offset() const {
+    return 0;
+  }
+
+  template <size_t N, EnableIf<N != 0> = 0>
+  constexpr size_t Offset() const {
+    static_assert(N < NumOffsets, "Index out of bounds");
+    return adl_barrier::Align(
+        Offset<N - 1>() + SizeOf<ElementType<N - 1>>() * size_[N - 1],
+        ElementAlignment<N>::value);
+  }
+
+  // Offset in bytes of the array with the specified element type. There must
+  // be exactly one such array and its zero-based index must be at most
+  // `NumSizes`.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+//   assert(x.Offset<int>() == 0);      // The ints start from 0.
+//   assert(x.Offset<double>() == 16);  // The doubles start from 16.
+  template <class T>
+  constexpr size_t Offset() const {
+    return Offset<ElementIndex<T>()>();
+  }
+
+  // Offsets in bytes of all arrays for which the offsets are known.
+  constexpr std::array<size_t, NumOffsets> Offsets() const {
+    return {{Offset<OffsetSeq>()...}};
+  }
+
+  // The number of elements in the Nth array. This is the Nth argument of
+  // `Layout::Partial()` or `Layout::Layout()` (zero-based).
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   assert(x.Size<0>() == 3);
+  //   assert(x.Size<1>() == 4);
+  //
+  // Requires: `N < NumSizes`.
+  template <size_t N>
+  constexpr size_t Size() const {
+    static_assert(N < NumSizes, "Index out of bounds");
+    return size_[N];
+  }
+
+  // The number of elements in the array with the specified element type.
+  // There must be exactly one such array and its zero-based index must be
+  // at most `NumSizes`.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   assert(x.Size<int>() == 3);
+  //   assert(x.Size<double>() == 4);
+  template <class T>
+  constexpr size_t Size() const {
+    return Size<ElementIndex<T>()>();
+  }
+
+  // The number of elements of all arrays for which they are known.
+  constexpr std::array<size_t, NumSizes> Sizes() const {
+    return {{Size<SizeSeq>()...}};
+  }
+
+  // Pointer to the beginning of the Nth array.
+  //
+  // `Char` must be `[const] [signed|unsigned] char`.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   unsigned char* p = new unsigned char[x.AllocSize()];
+  //   int* ints = x.Pointer<0>(p);
+  //   double* doubles = x.Pointer<1>(p);
+  //
+  // Requires: `N <= NumSizes && N < sizeof...(Ts)`.
+  // Requires: `p` is aligned to `Alignment()`.
+  template <size_t N, class Char>
+  CopyConst<Char, ElementType<N>>* Pointer(Char* p) const {
+    using C = typename std::remove_const<Char>::type;
+    static_assert(
+        std::is_same<C, char>() || std::is_same<C, unsigned char>() ||
+            std::is_same<C, signed char>(),
+        "The argument must be a pointer to [const] [signed|unsigned] char");
+    constexpr size_t alignment = Alignment();
+    (void)alignment;
+    assert(reinterpret_cast<uintptr_t>(p) % alignment == 0);
+    return reinterpret_cast<CopyConst<Char, ElementType<N>>*>(p + Offset<N>());
+  }
+
+  // Pointer to the beginning of the array with the specified element type.
+  // There must be exactly one such array and its zero-based index must be at
+  // most `NumSizes`.
+  //
+  // `Char` must be `[const] [signed|unsigned] char`.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   unsigned char* p = new unsigned char[x.AllocSize()];
+  //   int* ints = x.Pointer<int>(p);
+  //   double* doubles = x.Pointer<double>(p);
+  //
+  // Requires: `p` is aligned to `Alignment()`.
+  template <class T, class Char>
+  CopyConst<Char, T>* Pointer(Char* p) const {
+    return Pointer<ElementIndex<T>()>(p);
+  }
+
+  // Pointers to all arrays for which pointers are known.
+  //
+  // `Char` must be `[const] [signed|unsigned] char`.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   unsigned char* p = new unsigned char[x.AllocSize()];
+  //
+  //   int* ints;
+  //   double* doubles;
+  //   std::tie(ints, doubles) = x.Pointers(p);
+  //
+  // Requires: `p` is aligned to `Alignment()`.
+  //
+  // Note: We're not using ElementType alias here because it does not compile
+  // under MSVC.
+  template <class Char>
+  std::tuple<CopyConst<
+      Char, typename std::tuple_element<OffsetSeq, ElementTypes>::type>*...>
+  Pointers(Char* p) const {
+    return std::tuple<CopyConst<Char, ElementType<OffsetSeq>>*...>(
+        Pointer<OffsetSeq>(p)...);
+  }
+
+  // The Nth array.
+  //
+  // `Char` must be `[const] [signed|unsigned] char`.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   unsigned char* p = new unsigned char[x.AllocSize()];
+  //   Span<int> ints = x.Slice<0>(p);
+  //   Span<double> doubles = x.Slice<1>(p);
+  //
+  // Requires: `N < NumSizes`.
+  // Requires: `p` is aligned to `Alignment()`.
+  template <size_t N, class Char>
+  SliceType<CopyConst<Char, ElementType<N>>> Slice(Char* p) const {
+    return SliceType<CopyConst<Char, ElementType<N>>>(Pointer<N>(p), Size<N>());
+  }
+
+  // The array with the specified element type. There must be exactly one
+  // such array and its zero-based index must be less than `NumSizes`.
+  //
+  // `Char` must be `[const] [signed|unsigned] char`.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   unsigned char* p = new unsigned char[x.AllocSize()];
+  //   Span<int> ints = x.Slice<int>(p);
+  //   Span<double> doubles = x.Slice<double>(p);
+  //
+  // Requires: `p` is aligned to `Alignment()`.
+  template <class T, class Char>
+  SliceType<CopyConst<Char, T>> Slice(Char* p) const {
+    return Slice<ElementIndex<T>()>(p);
+  }
+
+  // All arrays with known sizes.
+  //
+  // `Char` must be `[const] [signed|unsigned] char`.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   unsigned char* p = new unsigned char[x.AllocSize()];
+  //
+  //   Span<int> ints;
+  //   Span<double> doubles;
+  //   std::tie(ints, doubles) = x.Slices(p);
+  //
+  // Requires: `p` is aligned to `Alignment()`.
+  //
+  // Note: We're not using ElementType alias here because it does not compile
+  // under MSVC.
+  template <class Char>
+  std::tuple<SliceType<CopyConst<
+      Char, typename std::tuple_element<SizeSeq, ElementTypes>::type>>...>
+  Slices(Char* p) const {
+    // Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63875 (fixed
+    // in 6.1).
+    (void)p;
+    return std::tuple<SliceType<CopyConst<Char, ElementType<SizeSeq>>>...>(
+        Slice<SizeSeq>(p)...);
+  }
+
+  // The size of the allocation that fits all arrays.
+  //
+  //   // int[3], 4 bytes of padding, double[4].
+  //   Layout<int, double> x(3, 4);
+  //   unsigned char* p = new unsigned char[x.AllocSize()];  // 48 bytes
+  //
+  // Requires: `NumSizes == sizeof...(Ts)`.
+  constexpr size_t AllocSize() const {
+    static_assert(NumTypes == NumSizes, "You must specify sizes of all fields");
+    return Offset<NumTypes - 1>() +
+           SizeOf<ElementType<NumTypes - 1>>() * size_[NumTypes - 1];
+  }
+
+  // If built with --config=asan, poisons padding bytes (if any) in the
+  // allocation. The pointer must point to a memory block at least
+  // `AllocSize()` bytes in length.
+  //
+  // `Char` must be `[const] [signed|unsigned] char`.
+  //
+  // Requires: `p` is aligned to `Alignment()`.
+  template <class Char, size_t N = NumOffsets - 1, EnableIf<N == 0> = 0>
+  void PoisonPadding(const Char* p) const {
+    Pointer<0>(p);  // verify the requirements on `Char` and `p`
+  }
+
+  template <class Char, size_t N = NumOffsets - 1, EnableIf<N != 0> = 0>
+  void PoisonPadding(const Char* p) const {
+    static_assert(N < NumOffsets, "Index out of bounds");
+    (void)p;
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+    PoisonPadding<Char, N - 1>(p);
+    // The `if` is an optimization. It doesn't affect the observable behaviour.
+    if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {
+      size_t start =
+          Offset<N - 1>() + SizeOf<ElementType<N - 1>>() * size_[N - 1];
+      ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start);
+    }
+#endif
+  }
+
+  // Human-readable description of the memory layout. Useful for debugging.
+  // Slow.
+  //
+  //   // char[5], 3 bytes of padding, int[3], 4 bytes of padding, followed
+  //   // by an unknown number of doubles.
+  //   auto x = Layout<char, int, double>::Partial(5, 3);
+  //   assert(x.DebugString() ==
+  //          "@0<char>(1)[5]; @8<int>(4)[3]; @24<double>(8)");
+  //
+  // Each field is in the following format: @offset<type>(sizeof)[size] (<type>
+  // may be missing depending on the target platform). For example,
+  // @8<int>(4)[3] means that at offset 8 we have an array of ints, where each
+  // int is 4 bytes, and we have 3 of those ints. The size of the last field may
+  // be missing (as in the example above). Only fields with known offsets are
+  // described. Type names may differ across platforms: one compiler might
+  // produce "unsigned*" where another produces "unsigned int *".
+  std::string DebugString() const {
+    const auto offsets = Offsets();
+    const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>()...};
+    const std::string types[] = {
+        adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
+    std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")");
+    for (size_t i = 0; i != NumOffsets - 1; ++i) {
+      absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1],
+                      "(", sizes[i + 1], ")");
+    }
+    // NumSizes is a constant that may be zero. Some compilers cannot see that
+    // inside the if statement "size_[NumSizes - 1]" must be valid.
+    int last = static_cast<int>(NumSizes) - 1;
+    if (NumTypes == NumSizes && last >= 0) {
+      absl::StrAppend(&res, "[", size_[last], "]");
+    }
+    return res;
+  }
+
+ private:
+  // Arguments of `Layout::Partial()` or `Layout::Layout()`.
+  size_t size_[NumSizes > 0 ? NumSizes : 1];
+};
+
+template <size_t NumSizes, class... Ts>
+using LayoutType = LayoutImpl<
+    std::tuple<Ts...>, absl::make_index_sequence<NumSizes>,
+    absl::make_index_sequence<adl_barrier::Min(sizeof...(Ts), NumSizes + 1)>>;
+
+}  // namespace internal_layout
+
+// Descriptor of arrays of various types and sizes laid out in memory one after
+// another. See the top of the file for documentation.
+//
+// Check out the public API of internal_layout::LayoutImpl above. The type is
+// internal to the library but its methods are public, and they are inherited
+// by `Layout`.
+template <class... Ts>
+class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
+ public:
+  static_assert(sizeof...(Ts) > 0, "At least one field is required");
+  static_assert(
+      absl::conjunction<internal_layout::IsLegalElementType<Ts>...>::value,
+      "Invalid element type (see IsLegalElementType)");
+
+  // The result type of `Partial()` with `NumSizes` arguments.
+  template <size_t NumSizes>
+  using PartialType = internal_layout::LayoutType<NumSizes, Ts...>;
+
+  // `Layout` knows the element types of the arrays we want to lay out in
+  // memory but not the number of elements in each array.
+  // `Partial(size1, ..., sizeN)` allows us to specify the latter. The
+  // resulting immutable object can be used to obtain pointers to the
+  // individual arrays.
+  //
+  // It's allowed to pass fewer array sizes than the number of arrays. E.g.,
+  // if all you need is the offset of the second array, you only need to
+  // pass one argument -- the number of elements in the first array.
+  //
+  //   // int[3] followed by 4 bytes of padding and an unknown number of
+  //   // doubles.
+  //   auto x = Layout<int, double>::Partial(3);
+  //   // doubles start at byte 16.
+  //   assert(x.Offset<1>() == 16);
+  //
+  // If you know the number of elements in all arrays, you can still call
+  // `Partial()` but it's more convenient to use the constructor of `Layout`.
+  //
+  //   Layout<int, double> x(3, 5);
+  //
+  // Note: The sizes of the arrays must be specified in number of elements,
+  // not in bytes.
+  //
+  // Requires: `sizeof...(Sizes) <= sizeof...(Ts)`.
+  // Requires: all arguments are convertible to `size_t`.
+  template <class... Sizes>
+  static constexpr PartialType<sizeof...(Sizes)> Partial(Sizes&&... sizes) {
+    static_assert(sizeof...(Sizes) <= sizeof...(Ts), "");
+    return PartialType<sizeof...(Sizes)>(absl::forward<Sizes>(sizes)...);
+  }
+
+  // Creates a layout with the sizes of all arrays specified. If you know
+  // only the sizes of the first N arrays (where N can be zero), you can use
+  // `Partial()` defined above. The constructor is essentially equivalent to
+  // calling `Partial()` and passing in all array sizes; it is provided as a
+  // convenient abbreviation.
+  //
+  // Note: The sizes of the arrays must be specified in number of elements,
+  // not in bytes.
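+  //
+  // Example (a sketch of typical use together with `AllocSize()` and
+  // `Pointers()`; how the block is allocated and freed is up to the caller):
+  //
+  //   Layout<int, double> x(3, 5);  // int[3] followed by double[5]
+  //   unsigned char* p = new unsigned char[x.AllocSize()];
+  //   int* ints;
+  //   double* doubles;
+  //   std::tie(ints, doubles) = x.Pointers(p);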
+  constexpr explicit Layout(internal_layout::TypeToSize<Ts>... sizes)
+      : internal_layout::LayoutType<sizeof...(Ts), Ts...>(sizes...) {}
+};
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_LAYOUT_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/layout_test.cc b/third_party/abseil_cpp/absl/container/internal/layout_test.cc
new file mode 100644
index 000000000000..1d7158ffc0b9
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/layout_test.cc
@@ -0,0 +1,1635 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/layout.h"
+
+// We need ::max_align_t because some libstdc++ versions don't provide
+// std::max_align_t.
+#include <stddef.h>
+
+#include <cstdint>
+#include <memory>
+#include <sstream>
+#include <type_traits>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/types/span.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using ::absl::Span;
+using ::testing::ElementsAre;
+
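+// Returns the distance in bytes from `from` to `to`, checking that `from`
+// does not point past `to`.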
+size_t Distance(const void* from, const void* to) {
+  ABSL_RAW_CHECK(from <= to, "Distance must be non-negative");
+  return static_cast<const char*>(to) - static_cast<const char*>(from);
+}
+
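+// Returns `val` unchanged while statically asserting that its type is
+// exactly `Expected`. Used below to pin down the return types of Layout's
+// accessors.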
+template <class Expected, class Actual>
+Expected Type(Actual val) {
+  static_assert(std::is_same<Expected, Actual>(), "");
+  return val;
+}
+
+// Helper classes to test different sizes and alignments.
+struct alignas(8) Int128 {
+  uint64_t a, b;
+  friend bool operator==(Int128 lhs, Int128 rhs) {
+    return std::tie(lhs.a, lhs.b) == std::tie(rhs.a, rhs.b);
+  }
+
+  static std::string Name() {
+    return internal_layout::adl_barrier::TypeName<Int128>();
+  }
+};
+
+// int64_t is *not* 8-byte aligned on all platforms, so the tests use this
+// explicitly aligned wrapper instead.
+struct alignas(8) Int64 {
+  int64_t a;
+  friend bool operator==(Int64 lhs, Int64 rhs) {
+    return lhs.a == rhs.a;
+  }
+};
+
+// Properties of types that this test relies on.
+static_assert(sizeof(int8_t) == 1, "");
+static_assert(alignof(int8_t) == 1, "");
+static_assert(sizeof(int16_t) == 2, "");
+static_assert(alignof(int16_t) == 2, "");
+static_assert(sizeof(int32_t) == 4, "");
+static_assert(alignof(int32_t) == 4, "");
+static_assert(sizeof(Int64) == 8, "");
+static_assert(alignof(Int64) == 8, "");
+static_assert(sizeof(Int128) == 16, "");
+static_assert(alignof(Int128) == 8, "");
+
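+// Compile-time assertion that `Expected` and `Actual` are the same type.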
+template <class Expected, class Actual>
+void SameType() {
+  static_assert(std::is_same<Expected, Actual>(), "");
+}
+
+TEST(Layout, ElementType) {
+  {
+    using L = Layout<int32_t>;
+    SameType<int32_t, L::ElementType<0>>();
+    SameType<int32_t, decltype(L::Partial())::ElementType<0>>();
+    SameType<int32_t, decltype(L::Partial(0))::ElementType<0>>();
+  }
+  {
+    using L = Layout<int32_t, int32_t>;
+    SameType<int32_t, L::ElementType<0>>();
+    SameType<int32_t, L::ElementType<1>>();
+    SameType<int32_t, decltype(L::Partial())::ElementType<0>>();
+    SameType<int32_t, decltype(L::Partial())::ElementType<1>>();
+    SameType<int32_t, decltype(L::Partial(0))::ElementType<0>>();
+    SameType<int32_t, decltype(L::Partial(0))::ElementType<1>>();
+  }
+  {
+    using L = Layout<int8_t, int32_t, Int128>;
+    SameType<int8_t, L::ElementType<0>>();
+    SameType<int32_t, L::ElementType<1>>();
+    SameType<Int128, L::ElementType<2>>();
+    SameType<int8_t, decltype(L::Partial())::ElementType<0>>();
+    SameType<int8_t, decltype(L::Partial(0))::ElementType<0>>();
+    SameType<int32_t, decltype(L::Partial(0))::ElementType<1>>();
+    SameType<int8_t, decltype(L::Partial(0, 0))::ElementType<0>>();
+    SameType<int32_t, decltype(L::Partial(0, 0))::ElementType<1>>();
+    SameType<Int128, decltype(L::Partial(0, 0))::ElementType<2>>();
+    SameType<int8_t, decltype(L::Partial(0, 0, 0))::ElementType<0>>();
+    SameType<int32_t, decltype(L::Partial(0, 0, 0))::ElementType<1>>();
+    SameType<Int128, decltype(L::Partial(0, 0, 0))::ElementType<2>>();
+  }
+}
+
+TEST(Layout, ElementTypes) {
+  {
+    using L = Layout<int32_t>;
+    SameType<std::tuple<int32_t>, L::ElementTypes>();
+    SameType<std::tuple<int32_t>, decltype(L::Partial())::ElementTypes>();
+    SameType<std::tuple<int32_t>, decltype(L::Partial(0))::ElementTypes>();
+  }
+  {
+    using L = Layout<int32_t, int32_t>;
+    SameType<std::tuple<int32_t, int32_t>, L::ElementTypes>();
+    SameType<std::tuple<int32_t, int32_t>,
+             decltype(L::Partial())::ElementTypes>();
+    SameType<std::tuple<int32_t, int32_t>,
+             decltype(L::Partial(0))::ElementTypes>();
+  }
+  {
+    using L = Layout<int8_t, int32_t, Int128>;
+    SameType<std::tuple<int8_t, int32_t, Int128>, L::ElementTypes>();
+    SameType<std::tuple<int8_t, int32_t, Int128>,
+             decltype(L::Partial())::ElementTypes>();
+    SameType<std::tuple<int8_t, int32_t, Int128>,
+             decltype(L::Partial(0))::ElementTypes>();
+    SameType<std::tuple<int8_t, int32_t, Int128>,
+             decltype(L::Partial(0, 0))::ElementTypes>();
+    SameType<std::tuple<int8_t, int32_t, Int128>,
+             decltype(L::Partial(0, 0, 0))::ElementTypes>();
+  }
+}
+
+TEST(Layout, OffsetByIndex) {
+  {
+    using L = Layout<int32_t>;
+    EXPECT_EQ(0, L::Partial().Offset<0>());
+    EXPECT_EQ(0, L::Partial(3).Offset<0>());
+    EXPECT_EQ(0, L(3).Offset<0>());
+  }
+  {
+    using L = Layout<int32_t, int32_t>;
+    EXPECT_EQ(0, L::Partial().Offset<0>());
+    EXPECT_EQ(0, L::Partial(3).Offset<0>());
+    EXPECT_EQ(12, L::Partial(3).Offset<1>());
+    EXPECT_EQ(0, L::Partial(3, 5).Offset<0>());
+    EXPECT_EQ(12, L::Partial(3, 5).Offset<1>());
+    EXPECT_EQ(0, L(3, 5).Offset<0>());
+    EXPECT_EQ(12, L(3, 5).Offset<1>());
+  }
+  {
+    using L = Layout<int8_t, int32_t, Int128>;
+    EXPECT_EQ(0, L::Partial().Offset<0>());
+    EXPECT_EQ(0, L::Partial(0).Offset<0>());
+    EXPECT_EQ(0, L::Partial(0).Offset<1>());
+    EXPECT_EQ(0, L::Partial(1).Offset<0>());
+    EXPECT_EQ(4, L::Partial(1).Offset<1>());
+    EXPECT_EQ(0, L::Partial(5).Offset<0>());
+    EXPECT_EQ(8, L::Partial(5).Offset<1>());
+    EXPECT_EQ(0, L::Partial(0, 0).Offset<0>());
+    EXPECT_EQ(0, L::Partial(0, 0).Offset<1>());
+    EXPECT_EQ(0, L::Partial(0, 0).Offset<2>());
+    EXPECT_EQ(0, L::Partial(1, 0).Offset<0>());
+    EXPECT_EQ(4, L::Partial(1, 0).Offset<1>());
+    EXPECT_EQ(8, L::Partial(1, 0).Offset<2>());
+    EXPECT_EQ(0, L::Partial(5, 3).Offset<0>());
+    EXPECT_EQ(8, L::Partial(5, 3).Offset<1>());
+    EXPECT_EQ(24, L::Partial(5, 3).Offset<2>());
+    EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<0>());
+    EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<1>());
+    EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<2>());
+    EXPECT_EQ(0, L::Partial(1, 0, 0).Offset<0>());
+    EXPECT_EQ(4, L::Partial(1, 0, 0).Offset<1>());
+    EXPECT_EQ(8, L::Partial(1, 0, 0).Offset<2>());
+    EXPECT_EQ(0, L::Partial(5, 3, 1).Offset<0>());
+    EXPECT_EQ(24, L::Partial(5, 3, 1).Offset<2>());
+    EXPECT_EQ(8, L::Partial(5, 3, 1).Offset<1>());
+    EXPECT_EQ(0, L(5, 3, 1).Offset<0>());
+    EXPECT_EQ(24, L(5, 3, 1).Offset<2>());
+    EXPECT_EQ(8, L(5, 3, 1).Offset<1>());
+  }
+}
+
+TEST(Layout, OffsetByType) {
+  {
+    using L = Layout<int32_t>;
+    EXPECT_EQ(0, L::Partial().Offset<int32_t>());
+    EXPECT_EQ(0, L::Partial(3).Offset<int32_t>());
+    EXPECT_EQ(0, L(3).Offset<int32_t>());
+  }
+  {
+    using L = Layout<int8_t, int32_t, Int128>;
+    EXPECT_EQ(0, L::Partial().Offset<int8_t>());
+    EXPECT_EQ(0, L::Partial(0).Offset<int8_t>());
+    EXPECT_EQ(0, L::Partial(0).Offset<int32_t>());
+    EXPECT_EQ(0, L::Partial(1).Offset<int8_t>());
+    EXPECT_EQ(4, L::Partial(1).Offset<int32_t>());
+    EXPECT_EQ(0, L::Partial(5).Offset<int8_t>());
+    EXPECT_EQ(8, L::Partial(5).Offset<int32_t>());
+    EXPECT_EQ(0, L::Partial(0, 0).Offset<int8_t>());
+    EXPECT_EQ(0, L::Partial(0, 0).Offset<int32_t>());
+    EXPECT_EQ(0, L::Partial(0, 0).Offset<Int128>());
+    EXPECT_EQ(0, L::Partial(1, 0).Offset<int8_t>());
+    EXPECT_EQ(4, L::Partial(1, 0).Offset<int32_t>());
+    EXPECT_EQ(8, L::Partial(1, 0).Offset<Int128>());
+    EXPECT_EQ(0, L::Partial(5, 3).Offset<int8_t>());
+    EXPECT_EQ(8, L::Partial(5, 3).Offset<int32_t>());
+    EXPECT_EQ(24, L::Partial(5, 3).Offset<Int128>());
+    EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<int8_t>());
+    EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<int32_t>());
+    EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<Int128>());
+    EXPECT_EQ(0, L::Partial(1, 0, 0).Offset<int8_t>());
+    EXPECT_EQ(4, L::Partial(1, 0, 0).Offset<int32_t>());
+    EXPECT_EQ(8, L::Partial(1, 0, 0).Offset<Int128>());
+    EXPECT_EQ(0, L::Partial(5, 3, 1).Offset<int8_t>());
+    EXPECT_EQ(24, L::Partial(5, 3, 1).Offset<Int128>());
+    EXPECT_EQ(8, L::Partial(5, 3, 1).Offset<int32_t>());
+    EXPECT_EQ(0, L(5, 3, 1).Offset<int8_t>());
+    EXPECT_EQ(24, L(5, 3, 1).Offset<Int128>());
+    EXPECT_EQ(8, L(5, 3, 1).Offset<int32_t>());
+  }
+}
+
+TEST(Layout, Offsets) {
+  {
+    using L = Layout<int32_t>;
+    EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0));
+    EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0));
+    EXPECT_THAT(L(3).Offsets(), ElementsAre(0));
+  }
+  {
+    using L = Layout<int32_t, int32_t>;
+    EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0));
+    EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0, 12));
+    EXPECT_THAT(L::Partial(3, 5).Offsets(), ElementsAre(0, 12));
+    EXPECT_THAT(L(3, 5).Offsets(), ElementsAre(0, 12));
+  }
+  {
+    using L = Layout<int8_t, int32_t, Int128>;
+    EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0));
+    EXPECT_THAT(L::Partial(1).Offsets(), ElementsAre(0, 4));
+    EXPECT_THAT(L::Partial(5).Offsets(), ElementsAre(0, 8));
+    EXPECT_THAT(L::Partial(0, 0).Offsets(), ElementsAre(0, 0, 0));
+    EXPECT_THAT(L::Partial(1, 0).Offsets(), ElementsAre(0, 4, 8));
+    EXPECT_THAT(L::Partial(5, 3).Offsets(), ElementsAre(0, 8, 24));
+    EXPECT_THAT(L::Partial(0, 0, 0).Offsets(), ElementsAre(0, 0, 0));
+    EXPECT_THAT(L::Partial(1, 0, 0).Offsets(), ElementsAre(0, 4, 8));
+    EXPECT_THAT(L::Partial(5, 3, 1).Offsets(), ElementsAre(0, 8, 24));
+    EXPECT_THAT(L(5, 3, 1).Offsets(), ElementsAre(0, 8, 24));
+  }
+}
+
+TEST(Layout, AllocSize) {
+  {
+    using L = Layout<int32_t>;
+    EXPECT_EQ(0, L::Partial(0).AllocSize());
+    EXPECT_EQ(12, L::Partial(3).AllocSize());
+    EXPECT_EQ(12, L(3).AllocSize());
+  }
+  {
+    using L = Layout<int32_t, int32_t>;
+    EXPECT_EQ(32, L::Partial(3, 5).AllocSize());
+    EXPECT_EQ(32, L(3, 5).AllocSize());
+  }
+  {
+    using L = Layout<int8_t, int32_t, Int128>;
+    EXPECT_EQ(0, L::Partial(0, 0, 0).AllocSize());
+    EXPECT_EQ(8, L::Partial(1, 0, 0).AllocSize());
+    EXPECT_EQ(8, L::Partial(0, 1, 0).AllocSize());
+    EXPECT_EQ(16, L::Partial(0, 0, 1).AllocSize());
+    EXPECT_EQ(24, L::Partial(1, 1, 1).AllocSize());
+    EXPECT_EQ(136, L::Partial(3, 5, 7).AllocSize());
+    EXPECT_EQ(136, L(3, 5, 7).AllocSize());
+  }
+}
+
+TEST(Layout, SizeByIndex) {
+  {
+    using L = Layout<int32_t>;
+    EXPECT_EQ(0, L::Partial(0).Size<0>());
+    EXPECT_EQ(3, L::Partial(3).Size<0>());
+    EXPECT_EQ(3, L(3).Size<0>());
+  }
+  {
+    using L = Layout<int32_t, int32_t>;
+    EXPECT_EQ(0, L::Partial(0).Size<0>());
+    EXPECT_EQ(3, L::Partial(3).Size<0>());
+    EXPECT_EQ(3, L::Partial(3, 5).Size<0>());
+    EXPECT_EQ(5, L::Partial(3, 5).Size<1>());
+    EXPECT_EQ(3, L(3, 5).Size<0>());
+    EXPECT_EQ(5, L(3, 5).Size<1>());
+  }
+  {
+    using L = Layout<int8_t, int32_t, Int128>;
+    EXPECT_EQ(3, L::Partial(3).Size<0>());
+    EXPECT_EQ(3, L::Partial(3, 5).Size<0>());
+    EXPECT_EQ(5, L::Partial(3, 5).Size<1>());
+    EXPECT_EQ(3, L::Partial(3, 5, 7).Size<0>());
+    EXPECT_EQ(5, L::Partial(3, 5, 7).Size<1>());
+    EXPECT_EQ(7, L::Partial(3, 5, 7).Size<2>());
+    EXPECT_EQ(3, L(3, 5, 7).Size<0>());
+    EXPECT_EQ(5, L(3, 5, 7).Size<1>());
+    EXPECT_EQ(7, L(3, 5, 7).Size<2>());
+  }
+}
+
+TEST(Layout, SizeByType) {
+  {
+    using L = Layout<int32_t>;
+    EXPECT_EQ(0, L::Partial(0).Size<int32_t>());
+    EXPECT_EQ(3, L::Partial(3).Size<int32_t>());
+    EXPECT_EQ(3, L(3).Size<int32_t>());
+  }
+  {
+    using L = Layout<int8_t, int32_t, Int128>;
+    EXPECT_EQ(3, L::Partial(3).Size<int8_t>());
+    EXPECT_EQ(3, L::Partial(3, 5).Size<int8_t>());
+    EXPECT_EQ(5, L::Partial(3, 5).Size<int32_t>());
+    EXPECT_EQ(3, L::Partial(3, 5, 7).Size<int8_t>());
+    EXPECT_EQ(5, L::Partial(3, 5, 7).Size<int32_t>());
+    EXPECT_EQ(7, L::Partial(3, 5, 7).Size<Int128>());
+    EXPECT_EQ(3, L(3, 5, 7).Size<int8_t>());
+    EXPECT_EQ(5, L(3, 5, 7).Size<int32_t>());
+    EXPECT_EQ(7, L(3, 5, 7).Size<Int128>());
+  }
+}
+
+TEST(Layout, Sizes) {
+  {
+    using L = Layout<int32_t>;
+    EXPECT_THAT(L::Partial().Sizes(), ElementsAre());
+    EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3));
+    EXPECT_THAT(L(3).Sizes(), ElementsAre(3));
+  }
+  {
+    using L = Layout<int32_t, int32_t>;
+    EXPECT_THAT(L::Partial().Sizes(), ElementsAre());
+    EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3));
+    EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5));
+    EXPECT_THAT(L(3, 5).Sizes(), ElementsAre(3, 5));
+  }
+  {
+    using L = Layout<int8_t, int32_t, Int128>;
+    EXPECT_THAT(L::Partial().Sizes(), ElementsAre());
+    EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3));
+    EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5));
+    EXPECT_THAT(L::Partial(3, 5, 7).Sizes(), ElementsAre(3, 5, 7));
+    EXPECT_THAT(L(3, 5, 7).Sizes(), ElementsAre(3, 5, 7));
+  }
+}
+
+TEST(Layout, PointerByIndex) {
+  alignas(max_align_t) const unsigned char p[100] = {};
+  {
+    using L = Layout<int32_t>;
+    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<0>(p))));
+  }
+  {
+    using L = Layout<int32_t, int32_t>;
+    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
+    EXPECT_EQ(12,
+              Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<1>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<0>(p))));
+    EXPECT_EQ(
+        12, Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<1>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<0>(p))));
+    EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<1>(p))));
+  }
+  {
+    using L = Layout<int8_t, int32_t, Int128>;
+    EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<0>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<0>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<1>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<0>(p))));
+    EXPECT_EQ(4,
+              Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<1>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<0>(p))));
+    EXPECT_EQ(8,
+              Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<1>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<0>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<1>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<2>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<0>(p))));
+    EXPECT_EQ(
+        4, Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<1>(p))));
+    EXPECT_EQ(8,
+              Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<2>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<0>(p))));
+    EXPECT_EQ(
+        8, Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
+    EXPECT_EQ(24,
+              Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<2>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
+    EXPECT_EQ(
+        4,
+        Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
+    EXPECT_EQ(
+        8, Distance(p, Type<const Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int8_t*>(L::Partial(5, 3, 1).Pointer<0>(p))));
+    EXPECT_EQ(
+        24,
+        Distance(p, Type<const Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
+    EXPECT_EQ(
+        8,
+        Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L(5, 3, 1).Pointer<0>(p))));
+    EXPECT_EQ(24, Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<2>(p))));
+    EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<1>(p))));
+  }
+}
+
+TEST(Layout, PointerByType) {
+  alignas(max_align_t) const unsigned char p[100] = {};
+  {
+    using L = Layout<int32_t>;
+    EXPECT_EQ(
+        0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<int32_t>(p))));
+  }
+  {
+    using L = Layout<int8_t, int32_t, Int128>;
+    EXPECT_EQ(
+        0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        4,
+        Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        8,
+        Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(
+                                 L::Partial(0, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(4, Distance(p, Type<const int32_t*>(
+                                 L::Partial(1, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        8,
+        Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
+    EXPECT_EQ(8, Distance(p, Type<const int32_t*>(
+                                 L::Partial(5, 3).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        24,
+        Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
+                                 L::Partial(0, 0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int32_t*>(
+                                 L::Partial(0, 0, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const Int128*>(
+                                 L::Partial(0, 0, 0).Pointer<Int128>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
+                                 L::Partial(1, 0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(4, Distance(p, Type<const int32_t*>(
+                                 L::Partial(1, 0, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(8, Distance(p, Type<const Int128*>(
+                                 L::Partial(1, 0, 0).Pointer<Int128>(p))));
+    EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
+                                 L::Partial(5, 3, 1).Pointer<int8_t>(p))));
+    EXPECT_EQ(24, Distance(p, Type<const Int128*>(
+                                  L::Partial(5, 3, 1).Pointer<Int128>(p))));
+    EXPECT_EQ(8, Distance(p, Type<const int32_t*>(
+                                 L::Partial(5, 3, 1).Pointer<int32_t>(p))));
+    EXPECT_EQ(24,
+              Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
+    EXPECT_EQ(
+        8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
+  }
+}
+
+TEST(Layout, MutablePointerByIndex) {
+  alignas(max_align_t) unsigned char p[100];
+  {
+    using L = Layout<int32_t>;
+    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<0>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<0>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3).Pointer<0>(p))));
+  }
+  {
+    using L = Layout<int32_t, int32_t>;
+    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<0>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<0>(p))));
+    EXPECT_EQ(12, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<1>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3, 5).Pointer<0>(p))));
+    EXPECT_EQ(12, Distance(p, Type<int32_t*>(L::Partial(3, 5).Pointer<1>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3, 5).Pointer<0>(p))));
+    EXPECT_EQ(12, Distance(p, Type<int32_t*>(L(3, 5).Pointer<1>(p))));
+  }
+  {
+    using L = Layout<int8_t, int32_t, Int128>;
+    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial().Pointer<0>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0).Pointer<0>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0).Pointer<1>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1).Pointer<0>(p))));
+    EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1).Pointer<1>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5).Pointer<0>(p))));
+    EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5).Pointer<1>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<0>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<1>(p))));
+    EXPECT_EQ(0, Distance(p, Type<Int128*>(L::Partial(0, 0).Pointer<2>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<0>(p))));
+    EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<1>(p))));
+    EXPECT_EQ(8, Distance(p, Type<Int128*>(L::Partial(1, 0).Pointer<2>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<0>(p))));
+    EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
+    EXPECT_EQ(24, Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<2>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
+    EXPECT_EQ(0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
+    EXPECT_EQ(4,
+              Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
+    EXPECT_EQ(8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<0>(p))));
+    EXPECT_EQ(24,
+              Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
+    EXPECT_EQ(8,
+              Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<0>(p))));
+    EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<2>(p))));
+    EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<1>(p))));
+  }
+}
+
+TEST(Layout, MutablePointerByType) {
+  alignas(max_align_t) unsigned char p[100];
+  {
+    using L = Layout<int32_t>;
+    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<int32_t>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3).Pointer<int32_t>(p))));
+  }
+  {
+    using L = Layout<int8_t, int32_t, Int128>;
+    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial().Pointer<int8_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
+    EXPECT_EQ(4,
+              Distance(p, Type<int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
+    EXPECT_EQ(8,
+              Distance(p, Type<int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(8,
+              Distance(p, Type<Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
+    EXPECT_EQ(0,
+              Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
+    EXPECT_EQ(24,
+              Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<Int128>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        4,
+        Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
+    EXPECT_EQ(
+        8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<Int128>(p))));
+    EXPECT_EQ(
+        0, Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
+    EXPECT_EQ(
+        24, Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<Int128>(p))));
+    EXPECT_EQ(
+        8,
+        Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
+    EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<int8_t>(p))));
+    EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
+    EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
+  }
+}
+
+TEST(Layout, Pointers) {
+  alignas(max_align_t) const unsigned char p[100] = {};
+  using L = Layout<int8_t, int8_t, Int128>;
+  {
+    const auto x = L::Partial();
+    EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)),
+              Type<std::tuple<const int8_t*>>(x.Pointers(p)));
+  }
+  {
+    const auto x = L::Partial(1);
+    EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)),
+              (Type<std::tuple<const int8_t*, const int8_t*>>(x.Pointers(p))));
+  }
+  {
+    const auto x = L::Partial(1, 2);
+    EXPECT_EQ(
+        std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+        (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
+            x.Pointers(p))));
+  }
+  {
+    const auto x = L::Partial(1, 2, 3);
+    EXPECT_EQ(
+        std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+        (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
+            x.Pointers(p))));
+  }
+  {
+    const L x(1, 2, 3);
+    EXPECT_EQ(
+        std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+        (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
+            x.Pointers(p))));
+  }
+}
+
+TEST(Layout, MutablePointers) {
+  alignas(max_align_t) unsigned char p[100];
+  using L = Layout<int8_t, int8_t, Int128>;
+  {
+    const auto x = L::Partial();
+    EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)),
+              Type<std::tuple<int8_t*>>(x.Pointers(p)));
+  }
+  {
+    const auto x = L::Partial(1);
+    EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)),
+              (Type<std::tuple<int8_t*, int8_t*>>(x.Pointers(p))));
+  }
+  {
+    const auto x = L::Partial(1, 2);
+    EXPECT_EQ(
+        std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+        (Type<std::tuple<int8_t*, int8_t*, Int128*>>(x.Pointers(p))));
+  }
+  {
+    const auto x = L::Partial(1, 2, 3);
+    EXPECT_EQ(
+        std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+        (Type<std::tuple<int8_t*, int8_t*, Int128*>>(x.Pointers(p))));
+  }
+  {
+    const L x(1, 2, 3);
+    EXPECT_EQ(
+        std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+        (Type<std::tuple<int8_t*, int8_t*, Int128*>>(x.Pointers(p))));
+  }
+}
+
+TEST(Layout, SliceByIndexSize) {
+  alignas(max_align_t) const unsigned char p[100] = {};
+  {
+    using L = Layout<int32_t>;
+    EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size());
+    EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
+    EXPECT_EQ(3, L(3).Slice<0>(p).size());
+  }
+  {
+    using L = Layout<int32_t, int32_t>;
+    EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
+    EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size());
+    EXPECT_EQ(5, L(3, 5).Slice<1>(p).size());
+  }
+  {
+    using L = Layout<int8_t, int32_t, Int128>;
+    EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
+    EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size());
+    EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size());
+    EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size());
+    EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size());
+    EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size());
+    EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size());
+    EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size());
+    EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size());
+  }
+}
+
+TEST(Layout, SliceByTypeSize) {
+  alignas(max_align_t) const unsigned char p[100] = {};
+  {
+    using L = Layout<int32_t>;
+    EXPECT_EQ(0, L::Partial(0).Slice<int32_t>(p).size());
+    EXPECT_EQ(3, L::Partial(3).Slice<int32_t>(p).size());
+    EXPECT_EQ(3, L(3).Slice<int32_t>(p).size());
+  }
+  {
+    using L = Layout<int8_t, int32_t, Int128>;
+    EXPECT_EQ(3, L::Partial(3).Slice<int8_t>(p).size());
+    EXPECT_EQ(3, L::Partial(3, 5).Slice<int8_t>(p).size());
+    EXPECT_EQ(5, L::Partial(3, 5).Slice<int32_t>(p).size());
+    EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<int8_t>(p).size());
+    EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<int32_t>(p).size());
+    EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<Int128>(p).size());
+    EXPECT_EQ(3, L(3, 5, 7).Slice<int8_t>(p).size());
+    EXPECT_EQ(5, L(3, 5, 7).Slice<int32_t>(p).size());
+    EXPECT_EQ(7, L(3, 5, 7).Slice<Int128>(p).size());
+  }
+}
+
+TEST(Layout, MutableSliceByIndexSize) {
+  alignas(max_align_t) unsigned char p[100];
+  {
+    using L = Layout<int32_t>;
+    EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size());
+    EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
+    EXPECT_EQ(3, L(3).Slice<0>(p).size());
+  }
+  {
+    using L = Layout<int32_t, int32_t>;
+    EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
+    EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size());
+    EXPECT_EQ(5, L(3, 5).Slice<1>(p).size());
+  }
+  {
+    using L = Layout<int8_t, int32_t, Int128>;
+    EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
+    EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size());
+    EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size());
+    EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size());
+    EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size());
+    EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size());
+    EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size());
+    EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size());
+    EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size());
+  }
+}
+
+TEST(Layout, MutableSliceByTypeSize) {
+  alignas(max_align_t) unsigned char p[100];
+  {
+    using L = Layout<int32_t>;
+    EXPECT_EQ(0, L::Partial(0).Slice<int32_t>(p).size());
+    EXPECT_EQ(3, L::Partial(3).Slice<int32_t>(p).size());
+    EXPECT_EQ(3, L(3).Slice<int32_t>(p).size());
+  }
+  {
+    using L = Layout<int8_t, int32_t, Int128>;
+    EXPECT_EQ(3, L::Partial(3).Slice<int8_t>(p).size());
+    EXPECT_EQ(3, L::Partial(3, 5).Slice<int8_t>(p).size());
+    EXPECT_EQ(5, L::Partial(3, 5).Slice<int32_t>(p).size());
+    EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<int8_t>(p).size());
+    EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<int32_t>(p).size());
+    EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<Int128>(p).size());
+    EXPECT_EQ(3, L(3, 5, 7).Slice<int8_t>(p).size());
+    EXPECT_EQ(5, L(3, 5, 7).Slice<int32_t>(p).size());
+    EXPECT_EQ(7, L(3, 5, 7).Slice<Int128>(p).size());
+  }
+}
+
+TEST(Layout, SliceByIndexData) {
+  alignas(max_align_t) const unsigned char p[100] = {};
+  {
+    using L = Layout<int32_t>;
+    EXPECT_EQ(
+        0, Distance(
+               p, Type<Span<const int32_t>>(L::Partial(0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(
+               p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+    EXPECT_EQ(0,
+              Distance(p, Type<Span<const int32_t>>(L(3).Slice<0>(p)).data()));
+  }
+  {
+    using L = Layout<int32_t, int32_t>;
+    EXPECT_EQ(
+        0, Distance(
+               p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p, Type<Span<const int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        12,
+        Distance(
+            p, Type<Span<const int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        12, Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<1>(p)).data()));
+  }
+  {
+    using L = Layout<int8_t, int32_t, Int128>;
+    EXPECT_EQ(
+        0, Distance(
+               p, Type<Span<const int8_t>>(L::Partial(0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(
+               p, Type<Span<const int8_t>>(L::Partial(1).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(
+               p, Type<Span<const int8_t>>(L::Partial(5).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p, Type<Span<const int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        4,
+        Distance(
+            p, Type<Span<const int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        8,
+        Distance(
+            p, Type<Span<const int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p,
+            Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p,
+            Type<Span<const int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p,
+            Type<Span<const Int128>>(L::Partial(0, 0, 0).Slice<2>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p,
+            Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        4,
+        Distance(
+            p,
+            Type<Span<const int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        8,
+        Distance(
+            p,
+            Type<Span<const Int128>>(L::Partial(1, 0, 0).Slice<2>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p,
+            Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        24,
+        Distance(
+            p,
+            Type<Span<const Int128>>(L::Partial(5, 3, 1).Slice<2>(p)).data()));
+    EXPECT_EQ(
+        8,
+        Distance(
+            p,
+            Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        24,
+        Distance(p, Type<Span<const Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
+    EXPECT_EQ(
+        8,
+        Distance(p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
+  }
+}
+
+TEST(Layout, SliceByTypeData) {
+  alignas(max_align_t) const unsigned char p[100] = {};
+  {
+    using L = Layout<int32_t>;
+    EXPECT_EQ(
+        0,
+        Distance(
+            p,
+            Type<Span<const int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p,
+            Type<Span<const int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<const int32_t>>(L(3).Slice<int32_t>(p)).data()));
+  }
+  {
+    using L = Layout<int8_t, int32_t, Int128>;
+    EXPECT_EQ(
+        0,
+        Distance(
+            p,
+            Type<Span<const int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p,
+            Type<Span<const int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p,
+            Type<Span<const int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<int8_t>(p))
+                        .data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(0, 0).Slice<int32_t>(p))
+                                 .data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<int8_t>(p))
+                        .data()));
+    EXPECT_EQ(4, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(1, 0).Slice<int32_t>(p))
+                                 .data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<int8_t>(p))
+                        .data()));
+    EXPECT_EQ(8, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(5, 3).Slice<int32_t>(p))
+                                 .data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
+                                 L::Partial(0, 0, 0).Slice<int8_t>(p))
+                                 .data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(0, 0, 0).Slice<int32_t>(p))
+                                 .data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const Int128>>(
+                                 L::Partial(0, 0, 0).Slice<Int128>(p))
+                                 .data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
+                                 L::Partial(1, 0, 0).Slice<int8_t>(p))
+                                 .data()));
+    EXPECT_EQ(4, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(1, 0, 0).Slice<int32_t>(p))
+                                 .data()));
+    EXPECT_EQ(8, Distance(p, Type<Span<const Int128>>(
+                                 L::Partial(1, 0, 0).Slice<Int128>(p))
+                                 .data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
+                                 L::Partial(5, 3, 1).Slice<int8_t>(p))
+                                 .data()));
+    EXPECT_EQ(24, Distance(p, Type<Span<const Int128>>(
+                                  L::Partial(5, 3, 1).Slice<Int128>(p))
+                                  .data()));
+    EXPECT_EQ(8, Distance(p, Type<Span<const int32_t>>(
+                                 L::Partial(5, 3, 1).Slice<int32_t>(p))
+                                 .data()));
+    EXPECT_EQ(
+        0,
+        Distance(p,
+                 Type<Span<const int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(
+        24,
+        Distance(p,
+                 Type<Span<const Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
+    EXPECT_EQ(
+        8,
+        Distance(
+            p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
+  }
+}
+
+TEST(Layout, MutableSliceByIndexData) {
+  alignas(max_align_t) unsigned char p[100];
+  {
+    using L = Layout<int32_t>;
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<0>(p)).data()));
+  }
+  {
+    using L = Layout<int32_t, int32_t>;
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        12,
+        Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
+    EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3, 5).Slice<0>(p)).data()));
+    EXPECT_EQ(12, Distance(p, Type<Span<int32_t>>(L(3, 5).Slice<1>(p)).data()));
+  }
+  {
+    using L = Layout<int8_t, int32_t, Int128>;
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        4,
+        Distance(p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        8,
+        Distance(p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(
+               p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(
+               p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(
+               p, Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<2>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(
+               p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        4, Distance(
+               p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data()));
+    EXPECT_EQ(
+        8, Distance(
+               p, Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<2>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(
+               p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
+    EXPECT_EQ(
+        24, Distance(
+                p, Type<Span<Int128>>(L::Partial(5, 3, 1).Slice<2>(p)).data()));
+    EXPECT_EQ(
+        8, Distance(
+               p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
+    EXPECT_EQ(0,
+              Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
+    EXPECT_EQ(24,
+              Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
+    EXPECT_EQ(8,
+              Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
+  }
+}
+
+TEST(Layout, MutableSliceByTypeData) {
+  alignas(max_align_t) unsigned char p[100];
+  {
+    using L = Layout<int32_t>;
+    EXPECT_EQ(
+        0, Distance(
+               p, Type<Span<int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(
+               p, Type<Span<int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
+    EXPECT_EQ(0,
+              Distance(p, Type<Span<int32_t>>(L(3).Slice<int32_t>(p)).data()));
+  }
+  {
+    using L = Layout<int8_t, int32_t, Int128>;
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p,
+                 Type<Span<int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p,
+                 Type<Span<int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(
+        4,
+        Distance(
+            p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(p,
+                 Type<Span<int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(
+        8,
+        Distance(
+            p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p,
+            Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p,
+            Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p,
+            Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<Int128>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p,
+            Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(
+        4,
+        Distance(
+            p,
+            Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p)).data()));
+    EXPECT_EQ(
+        8,
+        Distance(
+            p,
+            Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<Int128>(p)).data()));
+    EXPECT_EQ(
+        0,
+        Distance(
+            p,
+            Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(
+        24,
+        Distance(
+            p,
+            Type<Span<Int128>>(L::Partial(5, 3, 1).Slice<Int128>(p)).data()));
+    EXPECT_EQ(
+        8,
+        Distance(
+            p,
+            Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p)).data()));
+    EXPECT_EQ(
+        0, Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
+    EXPECT_EQ(
+        24,
+        Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
+    EXPECT_EQ(
+        8,
+        Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
+  }
+}
+
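+// Matches a span with the same data() and size() as `slice`.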
+MATCHER_P(IsSameSlice, slice, "") {
+  return arg.size() == slice.size() && arg.data() == slice.data();
+}
+
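+// Matches a std::tuple elementwise against the given matchers. Use via the
+// `Tuple()` factory below.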
+template <typename... M>
+class TupleMatcher {
+ public:
+  explicit TupleMatcher(M... matchers) : matchers_(std::move(matchers)...) {}
+
+  template <typename Tuple>
+  bool MatchAndExplain(const Tuple& p,
+                       testing::MatchResultListener* /* listener */) const {
+    static_assert(std::tuple_size<Tuple>::value == sizeof...(M), "");
+    return MatchAndExplainImpl(
+        p, absl::make_index_sequence<std::tuple_size<Tuple>::value>{});
+  }
+
+  // Required by the matcher concept. Left empty because we don't need the
+  // diagnostics right now.
+  void DescribeTo(::std::ostream* os) const {}
+  void DescribeNegationTo(::std::ostream* os) const {}
+
+ private:
+  template <typename Tuple, size_t... Is>
+  bool MatchAndExplainImpl(const Tuple& p, absl::index_sequence<Is...>) const {
+    // Using std::min as a simple variadic "and".
+    return std::min(
+        {true, testing::SafeMatcherCast<
+                   const typename std::tuple_element<Is, Tuple>::type&>(
+                   std::get<Is>(matchers_))
+                   .Matches(std::get<Is>(p))...});
+  }
+
+  std::tuple<M...> matchers_;
+};
+
+template <typename... M>
+testing::PolymorphicMatcher<TupleMatcher<M...>> Tuple(M... matchers) {
+  return testing::MakePolymorphicMatcher(
+      TupleMatcher<M...>(std::move(matchers)...));
+}
+
+TEST(Layout, Slices) {
+  alignas(max_align_t) const unsigned char p[100] = {};
+  using L = Layout<int8_t, int8_t, Int128>;
+  {
+    const auto x = L::Partial();
+    EXPECT_THAT(Type<std::tuple<>>(x.Slices(p)), Tuple());
+  }
+  {
+    const auto x = L::Partial(1);
+    EXPECT_THAT(Type<std::tuple<Span<const int8_t>>>(x.Slices(p)),
+                Tuple(IsSameSlice(x.Slice<0>(p))));
+  }
+  {
+    const auto x = L::Partial(1, 2);
+    EXPECT_THAT(
+        (Type<std::tuple<Span<const int8_t>, Span<const int8_t>>>(x.Slices(p))),
+        Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p))));
+  }
+  {
+    const auto x = L::Partial(1, 2, 3);
+    EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>,
+                                 Span<const Int128>>>(x.Slices(p))),
+                Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+                      IsSameSlice(x.Slice<2>(p))));
+  }
+  {
+    const L x(1, 2, 3);
+    EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>,
+                                 Span<const Int128>>>(x.Slices(p))),
+                Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+                      IsSameSlice(x.Slice<2>(p))));
+  }
+}
+
+TEST(Layout, MutableSlices) {
+  alignas(max_align_t) unsigned char p[100] = {};
+  using L = Layout<int8_t, int8_t, Int128>;
+  {
+    const auto x = L::Partial();
+    EXPECT_THAT(Type<std::tuple<>>(x.Slices(p)), Tuple());
+  }
+  {
+    const auto x = L::Partial(1);
+    EXPECT_THAT(Type<std::tuple<Span<int8_t>>>(x.Slices(p)),
+                Tuple(IsSameSlice(x.Slice<0>(p))));
+  }
+  {
+    const auto x = L::Partial(1, 2);
+    EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>>>(x.Slices(p))),
+                Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p))));
+  }
+  {
+    const auto x = L::Partial(1, 2, 3);
+    EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
+                    x.Slices(p))),
+                Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+                      IsSameSlice(x.Slice<2>(p))));
+  }
+  {
+    const L x(1, 2, 3);
+    EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
+                    x.Slices(p))),
+                Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+                      IsSameSlice(x.Slice<2>(p))));
+  }
+}
+
+TEST(Layout, UnalignedTypes) {
+  constexpr Layout<unsigned char, unsigned char, unsigned char> x(1, 2, 3);
+  alignas(max_align_t) unsigned char p[x.AllocSize() + 1];
+  EXPECT_THAT(x.Pointers(p + 1), Tuple(p + 1, p + 2, p + 4));
+}
+
+TEST(Layout, CustomAlignment) {
+  constexpr Layout<unsigned char, Aligned<unsigned char, 8>> x(1, 2);
+  alignas(max_align_t) unsigned char p[x.AllocSize()];
+  EXPECT_EQ(10, x.AllocSize());
+  EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 8));
+}
+
+TEST(Layout, OverAligned) {
+  constexpr size_t M = alignof(max_align_t);
+  constexpr Layout<unsigned char, Aligned<unsigned char, 2 * M>> x(1, 3);
+  alignas(2 * M) unsigned char p[x.AllocSize()];
+  EXPECT_EQ(2 * M + 3, x.AllocSize());
+  EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 2 * M));
+}
+
+TEST(Layout, Alignment) {
+  static_assert(Layout<int8_t>::Alignment() == 1, "");
+  static_assert(Layout<int32_t>::Alignment() == 4, "");
+  static_assert(Layout<Int64>::Alignment() == 8, "");
+  static_assert(Layout<Aligned<int8_t, 64>>::Alignment() == 64, "");
+  static_assert(Layout<int8_t, int32_t, Int64>::Alignment() == 8, "");
+  static_assert(Layout<int8_t, Int64, int32_t>::Alignment() == 8, "");
+  static_assert(Layout<int32_t, int8_t, Int64>::Alignment() == 8, "");
+  static_assert(Layout<int32_t, Int64, int8_t>::Alignment() == 8, "");
+  static_assert(Layout<Int64, int8_t, int32_t>::Alignment() == 8, "");
+  static_assert(Layout<Int64, int32_t, int8_t>::Alignment() == 8, "");
+}
+
+TEST(Layout, ConstexprPartial) {
+  constexpr size_t M = alignof(max_align_t);
+  constexpr Layout<unsigned char, Aligned<unsigned char, 2 * M>> x(1, 3);
+  static_assert(x.Partial(1).template Offset<1>() == 2 * M, "");
+}
+
+// A half-open byte range [from, to).
+struct Region {
+  size_t from;
+  size_t to;
+};
+
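+// Checks that all `n` bytes starting at `p` match the expected ASan poison
+// state. A no-op in builds without AddressSanitizer.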
+void ExpectRegionPoisoned(const unsigned char* p, size_t n, bool poisoned) {
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+  for (size_t i = 0; i != n; ++i) {
+    EXPECT_EQ(poisoned, __asan_address_is_poisoned(p + i));
+  }
+#endif
+}
+
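+// Checks that within `buf` exactly the byte regions in `reg` are poisoned
+// and everything between and after them is not.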
+template <size_t N>
+void ExpectPoisoned(const unsigned char (&buf)[N],
+                    std::initializer_list<Region> reg) {
+  size_t prev = 0;
+  for (const Region& r : reg) {
+    ExpectRegionPoisoned(buf + prev, r.from - prev, false);
+    ExpectRegionPoisoned(buf + r.from, r.to - r.from, true);
+    prev = r.to;
+  }
+  ExpectRegionPoisoned(buf + prev, N - prev, false);
+}
+
+TEST(Layout, PoisonPadding) {
+  using L = Layout<int8_t, Int64, int32_t, Int128>;
+
+  constexpr size_t n = L::Partial(1, 2, 3, 4).AllocSize();
+  {
+    constexpr auto x = L::Partial();
+    alignas(max_align_t) const unsigned char c[n] = {};
+    x.PoisonPadding(c);
+    EXPECT_EQ(x.Slices(c), x.Slices(c));
+    ExpectPoisoned(c, {});
+  }
+  {
+    constexpr auto x = L::Partial(1);
+    alignas(max_align_t) const unsigned char c[n] = {};
+    x.PoisonPadding(c);
+    EXPECT_EQ(x.Slices(c), x.Slices(c));
+    ExpectPoisoned(c, {{1, 8}});
+  }
+  {
+    constexpr auto x = L::Partial(1, 2);
+    alignas(max_align_t) const unsigned char c[n] = {};
+    x.PoisonPadding(c);
+    EXPECT_EQ(x.Slices(c), x.Slices(c));
+    ExpectPoisoned(c, {{1, 8}});
+  }
+  {
+    constexpr auto x = L::Partial(1, 2, 3);
+    alignas(max_align_t) const unsigned char c[n] = {};
+    x.PoisonPadding(c);
+    EXPECT_EQ(x.Slices(c), x.Slices(c));
+    ExpectPoisoned(c, {{1, 8}, {36, 40}});
+  }
+  {
+    constexpr auto x = L::Partial(1, 2, 3, 4);
+    alignas(max_align_t) const unsigned char c[n] = {};
+    x.PoisonPadding(c);
+    EXPECT_EQ(x.Slices(c), x.Slices(c));
+    ExpectPoisoned(c, {{1, 8}, {36, 40}});
+  }
+  {
+    constexpr L x(1, 2, 3, 4);
+    alignas(max_align_t) const unsigned char c[n] = {};
+    x.PoisonPadding(c);
+    EXPECT_EQ(x.Slices(c), x.Slices(c));
+    ExpectPoisoned(c, {{1, 8}, {36, 40}});
+  }
+}
+
+TEST(Layout, DebugString) {
+  {
+    constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial();
+    EXPECT_EQ("@0<signed char>(1)", x.DebugString());
+  }
+  {
+    constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1);
+    EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)", x.DebugString());
+  }
+  {
+    constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2);
+    EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)",
+              x.DebugString());
+  }
+  {
+    constexpr auto x =
+        Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3);
+    EXPECT_EQ(
+        "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
+        "@16" +
+            Int128::Name() + "(16)",
+        x.DebugString());
+  }
+  {
+    constexpr auto x =
+        Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3, 4);
+    EXPECT_EQ(
+        "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
+        "@16" +
+            Int128::Name() + "(16)[4]",
+        x.DebugString());
+  }
+  {
+    constexpr Layout<int8_t, int32_t, int8_t, Int128> x(1, 2, 3, 4);
+    EXPECT_EQ(
+        "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
+        "@16" +
+            Int128::Name() + "(16)[4]",
+        x.DebugString());
+  }
+}
+
+TEST(Layout, CharTypes) {
+  constexpr Layout<int32_t> x(1);
+  alignas(max_align_t) char c[x.AllocSize()] = {};
+  alignas(max_align_t) unsigned char uc[x.AllocSize()] = {};
+  alignas(max_align_t) signed char sc[x.AllocSize()] = {};
+  alignas(max_align_t) const char cc[x.AllocSize()] = {};
+  alignas(max_align_t) const unsigned char cuc[x.AllocSize()] = {};
+  alignas(max_align_t) const signed char csc[x.AllocSize()] = {};
+
+  Type<int32_t*>(x.Pointer<0>(c));
+  Type<int32_t*>(x.Pointer<0>(uc));
+  Type<int32_t*>(x.Pointer<0>(sc));
+  Type<const int32_t*>(x.Pointer<0>(cc));
+  Type<const int32_t*>(x.Pointer<0>(cuc));
+  Type<const int32_t*>(x.Pointer<0>(csc));
+
+  Type<int32_t*>(x.Pointer<int32_t>(c));
+  Type<int32_t*>(x.Pointer<int32_t>(uc));
+  Type<int32_t*>(x.Pointer<int32_t>(sc));
+  Type<const int32_t*>(x.Pointer<int32_t>(cc));
+  Type<const int32_t*>(x.Pointer<int32_t>(cuc));
+  Type<const int32_t*>(x.Pointer<int32_t>(csc));
+
+  Type<std::tuple<int32_t*>>(x.Pointers(c));
+  Type<std::tuple<int32_t*>>(x.Pointers(uc));
+  Type<std::tuple<int32_t*>>(x.Pointers(sc));
+  Type<std::tuple<const int32_t*>>(x.Pointers(cc));
+  Type<std::tuple<const int32_t*>>(x.Pointers(cuc));
+  Type<std::tuple<const int32_t*>>(x.Pointers(csc));
+
+  Type<Span<int32_t>>(x.Slice<0>(c));
+  Type<Span<int32_t>>(x.Slice<0>(uc));
+  Type<Span<int32_t>>(x.Slice<0>(sc));
+  Type<Span<const int32_t>>(x.Slice<0>(cc));
+  Type<Span<const int32_t>>(x.Slice<0>(cuc));
+  Type<Span<const int32_t>>(x.Slice<0>(csc));
+
+  Type<std::tuple<Span<int32_t>>>(x.Slices(c));
+  Type<std::tuple<Span<int32_t>>>(x.Slices(uc));
+  Type<std::tuple<Span<int32_t>>>(x.Slices(sc));
+  Type<std::tuple<Span<const int32_t>>>(x.Slices(cc));
+  Type<std::tuple<Span<const int32_t>>>(x.Slices(cuc));
+  Type<std::tuple<Span<const int32_t>>>(x.Slices(csc));
+}
+
+TEST(Layout, ConstElementType) {
+  constexpr Layout<const int32_t> x(1);
+  alignas(int32_t) char c[x.AllocSize()] = {};
+  const char* cc = c;
+  const int32_t* p = reinterpret_cast<const int32_t*>(cc);
+
+  EXPECT_EQ(alignof(int32_t), x.Alignment());
+
+  EXPECT_EQ(0, x.Offset<0>());
+  EXPECT_EQ(0, x.Offset<const int32_t>());
+
+  EXPECT_THAT(x.Offsets(), ElementsAre(0));
+
+  EXPECT_EQ(1, x.Size<0>());
+  EXPECT_EQ(1, x.Size<const int32_t>());
+
+  EXPECT_THAT(x.Sizes(), ElementsAre(1));
+
+  EXPECT_EQ(sizeof(int32_t), x.AllocSize());
+
+  EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<0>(c)));
+  EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<0>(cc)));
+
+  EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<const int32_t>(c)));
+  EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<const int32_t>(cc)));
+
+  EXPECT_THAT(Type<std::tuple<const int32_t*>>(x.Pointers(c)), Tuple(p));
+  EXPECT_THAT(Type<std::tuple<const int32_t*>>(x.Pointers(cc)), Tuple(p));
+
+  EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<0>(c)),
+              IsSameSlice(Span<const int32_t>(p, 1)));
+  EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<0>(cc)),
+              IsSameSlice(Span<const int32_t>(p, 1)));
+
+  EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<const int32_t>(c)),
+              IsSameSlice(Span<const int32_t>(p, 1)));
+  EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<const int32_t>(cc)),
+              IsSameSlice(Span<const int32_t>(p, 1)));
+
+  EXPECT_THAT(Type<std::tuple<Span<const int32_t>>>(x.Slices(c)),
+              Tuple(IsSameSlice(Span<const int32_t>(p, 1))));
+  EXPECT_THAT(Type<std::tuple<Span<const int32_t>>>(x.Slices(cc)),
+              Tuple(IsSameSlice(Span<const int32_t>(p, 1))));
+}
+
+namespace example {
+
+// Immutable move-only string with sizeof equal to sizeof(void*). The string
+// size and the characters are kept in the same heap allocation.
+class CompactString {
+ public:
+  CompactString(const char* s = "") {  // NOLINT
+    const size_t size = strlen(s);
+    // size_t[1], followed by char[size + 1].
+    // This statement doesn't allocate memory.
+    const L layout(1, size + 1);
+    // AllocSize() tells us how much memory we need to allocate for all our
+    // data.
+    p_.reset(new unsigned char[layout.AllocSize()]);
+    // If running under ASAN, mark the padding bytes, if any, to catch memory
+    // errors.
+    layout.PoisonPadding(p_.get());
+    // Store the size in the allocation.
+    // Pointer<size_t>() is a synonym for Pointer<0>().
+    *layout.Pointer<size_t>(p_.get()) = size;
+    // Store the characters in the allocation.
+    memcpy(layout.Pointer<char>(p_.get()), s, size + 1);
+  }
+
+  size_t size() const {
+    // Equivalent to *reinterpret_cast<size_t*>(p_.get()).
+    return *L::Partial().Pointer<size_t>(p_.get());
+  }
+
+  const char* c_str() const {
+    // Equivalent to reinterpret_cast<char*>(p_.get() + sizeof(size_t)).
+    // The argument in Partial(1) specifies that we have size_t[1] in front of
+    // the characters.
+    return L::Partial(1).Pointer<char>(p_.get());
+  }
+
+ private:
+  // Our heap allocation contains a size_t followed by an array of chars.
+  using L = Layout<size_t, char>;
+  std::unique_ptr<unsigned char[]> p_;
+};
+
+TEST(CompactString, Works) {
+  CompactString s = "hello";
+  EXPECT_EQ(5, s.size());
+  EXPECT_STREQ("hello", s.c_str());
+}
+
+}  // namespace example
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/container/internal/node_hash_policy.h b/third_party/abseil_cpp/absl/container/internal/node_hash_policy.h
new file mode 100644
index 000000000000..4617162f0b32
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/node_hash_policy.h
@@ -0,0 +1,92 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Adapts a policy for nodes.
+//
+// The node policy should model:
+//
+// struct Policy {
+//   // Returns a new node allocated and constructed using the allocator, using
+//   // the specified arguments.
+//   template <class Alloc, class... Args>
+//   value_type* new_element(Alloc* alloc, Args&&... args) const;
+//
+//   // Destroys and deallocates node using the allocator.
+//   template <class Alloc>
+//   void delete_element(Alloc* alloc, value_type* node) const;
+// };
+//
+// It may also optionally define `value()` and `apply()`. For documentation on
+// these, see hash_policy_traits.h.
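+//
+// A minimal conforming policy might look like this (an illustrative sketch;
+// node_hash_policy_test.cc exercises essentially the same shape):
+//
+//   struct IntPolicy : node_hash_policy<int&, IntPolicy> {
+//     using key_type = int;
+//     using init_type = int;
+//
+//     template <class Alloc>
+//     static int* new_element(Alloc*, int v) { return new int(v); }
+//
+//     template <class Alloc>
+//     static void delete_element(Alloc*, int* p) { delete p; }
+//   };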
+
+#ifndef ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
+#define ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
+
+#include <cassert>
+#include <cstddef>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class Reference, class Policy>
+struct node_hash_policy {
+  static_assert(std::is_lvalue_reference<Reference>::value, "");
+
+  using slot_type = typename std::remove_cv<
+      typename std::remove_reference<Reference>::type>::type*;
+
+  template <class Alloc, class... Args>
+  static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
+    *slot = Policy::new_element(alloc, std::forward<Args>(args)...);
+  }
+
+  template <class Alloc>
+  static void destroy(Alloc* alloc, slot_type* slot) {
+    Policy::delete_element(alloc, *slot);
+  }
+
+  template <class Alloc>
+  static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) {
+    *new_slot = *old_slot;
+  }
+
+  static size_t space_used(const slot_type* slot) {
+    if (slot == nullptr) return Policy::element_space_used(nullptr);
+    return Policy::element_space_used(*slot);
+  }
+
+  static Reference element(slot_type* slot) { return **slot; }
+
+  template <class T, class P = Policy>
+  static auto value(T* elem) -> decltype(P::value(elem)) {
+    return P::value(elem);
+  }
+
+  template <class... Ts, class P = Policy>
+  static auto apply(Ts&&... ts) -> decltype(P::apply(std::forward<Ts>(ts)...)) {
+    return P::apply(std::forward<Ts>(ts)...);
+  }
+};
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/node_hash_policy_test.cc b/third_party/abseil_cpp/absl/container/internal/node_hash_policy_test.cc
new file mode 100644
index 000000000000..84aabba96830
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/node_hash_policy_test.cc
@@ -0,0 +1,69 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/node_hash_policy.h"
+
+#include <memory>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_policy_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using ::testing::Pointee;
+
+struct Policy : node_hash_policy<int&, Policy> {
+  using key_type = int;
+  using init_type = int;
+
+  template <class Alloc>
+  static int* new_element(Alloc* alloc, int value) {
+    return new int(value);
+  }
+
+  template <class Alloc>
+  static void delete_element(Alloc* alloc, int* elem) {
+    delete elem;
+  }
+};
+
+using NodePolicy = hash_policy_traits<Policy>;
+
+struct NodeTest : ::testing::Test {
+  std::allocator<int> alloc;
+  int n = 53;
+  int* a = &n;
+};
+
+TEST_F(NodeTest, ConstructDestroy) {
+  NodePolicy::construct(&alloc, &a, 42);
+  EXPECT_THAT(a, Pointee(42));
+  NodePolicy::destroy(&alloc, &a);
+}
+
+TEST_F(NodeTest, transfer) {
+  int s = 42;
+  int* b = &s;
+  NodePolicy::transfer(&alloc, &a, &b);
+  EXPECT_EQ(&s, a);
+}
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/container/internal/raw_hash_map.h b/third_party/abseil_cpp/absl/container/internal/raw_hash_map.h
new file mode 100644
index 000000000000..0a02757ddfb4
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/raw_hash_map.h
@@ -0,0 +1,197 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
+#define ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
+
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/internal/throw_delegate.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/raw_hash_set.h"  // IWYU pragma: export
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class Policy, class Hash, class Eq, class Alloc>
+class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
+  // P is Policy. It's passed as a template argument to support maps that have
+  // incomplete types as values, as in unordered_map<K, IncompleteType>.
+  // MappedReference<> may be a non-reference type.
+  template <class P>
+  using MappedReference = decltype(P::value(
+      std::addressof(std::declval<typename raw_hash_map::reference>())));
+
+  // MappedConstReference<> may be a non-reference type.
+  template <class P>
+  using MappedConstReference = decltype(P::value(
+      std::addressof(std::declval<typename raw_hash_map::const_reference>())));
+
+  using KeyArgImpl =
+      KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
+
+ public:
+  using key_type = typename Policy::key_type;
+  using mapped_type = typename Policy::mapped_type;
+  template <class K>
+  using key_arg = typename KeyArgImpl::template type<K, key_type>;
+
+  static_assert(!std::is_reference<key_type>::value, "");
+  // TODO(alkis): remove this assertion and verify that reference mapped_type is
+  // supported.
+  static_assert(!std::is_reference<mapped_type>::value, "");
+
+  using iterator = typename raw_hash_map::raw_hash_set::iterator;
+  using const_iterator = typename raw_hash_map::raw_hash_set::const_iterator;
+
+  raw_hash_map() {}
+  using raw_hash_map::raw_hash_set::raw_hash_set;
+
+  // The last two template parameters ensure that both arguments are rvalues
+  // (lvalue arguments are handled by the overloads below). This is necessary
+  // for supporting bitfield arguments.
+  //
+  //   union { int n : 1; };
+  //   flat_hash_map<int, int> m;
+  //   m.insert_or_assign(n, n);
+  template <class K = key_type, class V = mapped_type, K* = nullptr,
+            V* = nullptr>
+  std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, V&& v) {
+    return insert_or_assign_impl(std::forward<K>(k), std::forward<V>(v));
+  }
+
+  template <class K = key_type, class V = mapped_type, K* = nullptr>
+  std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, const V& v) {
+    return insert_or_assign_impl(std::forward<K>(k), v);
+  }
+
+  template <class K = key_type, class V = mapped_type, V* = nullptr>
+  std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, V&& v) {
+    return insert_or_assign_impl(k, std::forward<V>(v));
+  }
+
+  template <class K = key_type, class V = mapped_type>
+  std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, const V& v) {
+    return insert_or_assign_impl(k, v);
+  }
+
+  template <class K = key_type, class V = mapped_type, K* = nullptr,
+            V* = nullptr>
+  iterator insert_or_assign(const_iterator, key_arg<K>&& k, V&& v) {
+    return insert_or_assign(std::forward<K>(k), std::forward<V>(v)).first;
+  }
+
+  template <class K = key_type, class V = mapped_type, K* = nullptr>
+  iterator insert_or_assign(const_iterator, key_arg<K>&& k, const V& v) {
+    return insert_or_assign(std::forward<K>(k), v).first;
+  }
+
+  template <class K = key_type, class V = mapped_type, V* = nullptr>
+  iterator insert_or_assign(const_iterator, const key_arg<K>& k, V&& v) {
+    return insert_or_assign(k, std::forward<V>(v)).first;
+  }
+
+  template <class K = key_type, class V = mapped_type>
+  iterator insert_or_assign(const_iterator, const key_arg<K>& k, const V& v) {
+    return insert_or_assign(k, v).first;
+  }
+
+  // All `try_emplace()` overloads make the same guarantees regarding rvalue
+  // arguments as `std::unordered_map::try_emplace()`, namely that these
+  // functions will not move from rvalue arguments if insertions do not happen.
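+  //
+  // For example (a hypothetical usage sketch; absl::flat_hash_map is built on
+  // raw_hash_map):
+  //
+  //   absl::flat_hash_map<int, std::string> m;
+  //   std::string v = "x";
+  //   m.try_emplace(1, std::move(v));  // inserts {1, "x"}: v is moved from
+  //   std::string w = "y";
+  //   m.try_emplace(1, std::move(w));  // key 1 exists: w is NOT moved from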
+  template <class K = key_type, class... Args,
+            typename std::enable_if<
+                !std::is_convertible<K, const_iterator>::value, int>::type = 0,
+            K* = nullptr>
+  std::pair<iterator, bool> try_emplace(key_arg<K>&& k, Args&&... args) {
+    return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
+  }
+
+  template <class K = key_type, class... Args,
+            typename std::enable_if<
+                !std::is_convertible<K, const_iterator>::value, int>::type = 0>
+  std::pair<iterator, bool> try_emplace(const key_arg<K>& k, Args&&... args) {
+    return try_emplace_impl(k, std::forward<Args>(args)...);
+  }
+
+  template <class K = key_type, class... Args, K* = nullptr>
+  iterator try_emplace(const_iterator, key_arg<K>&& k, Args&&... args) {
+    return try_emplace(std::forward<K>(k), std::forward<Args>(args)...).first;
+  }
+
+  template <class K = key_type, class... Args>
+  iterator try_emplace(const_iterator, const key_arg<K>& k, Args&&... args) {
+    return try_emplace(k, std::forward<Args>(args)...).first;
+  }
+
+  template <class K = key_type, class P = Policy>
+  MappedReference<P> at(const key_arg<K>& key) {
+    auto it = this->find(key);
+    if (it == this->end()) {
+      base_internal::ThrowStdOutOfRange(
+          "absl::container_internal::raw_hash_map<>::at");
+    }
+    return Policy::value(&*it);
+  }
+
+  template <class K = key_type, class P = Policy>
+  MappedConstReference<P> at(const key_arg<K>& key) const {
+    auto it = this->find(key);
+    if (it == this->end()) {
+      base_internal::ThrowStdOutOfRange(
+          "absl::container_internal::raw_hash_map<>::at");
+    }
+    return Policy::value(&*it);
+  }
+
+  template <class K = key_type, class P = Policy, K* = nullptr>
+  MappedReference<P> operator[](key_arg<K>&& key) {
+    return Policy::value(&*try_emplace(std::forward<K>(key)).first);
+  }
+
+  template <class K = key_type, class P = Policy>
+  MappedReference<P> operator[](const key_arg<K>& key) {
+    return Policy::value(&*try_emplace(key).first);
+  }
+
+ private:
+  template <class K, class V>
+  std::pair<iterator, bool> insert_or_assign_impl(K&& k, V&& v) {
+    auto res = this->find_or_prepare_insert(k);
+    if (res.second)
+      this->emplace_at(res.first, std::forward<K>(k), std::forward<V>(v));
+    else
+      Policy::value(&*this->iterator_at(res.first)) = std::forward<V>(v);
+    return {this->iterator_at(res.first), res.second};
+  }
+
+  template <class K = key_type, class... Args>
+  std::pair<iterator, bool> try_emplace_impl(K&& k, Args&&... args) {
+    auto res = this->find_or_prepare_insert(k);
+    if (res.second)
+      this->emplace_at(res.first, std::piecewise_construct,
+                       std::forward_as_tuple(std::forward<K>(k)),
+                       std::forward_as_tuple(std::forward<Args>(args)...));
+    return {this->iterator_at(res.first), res.second};
+  }
+};
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/raw_hash_set.cc b/third_party/abseil_cpp/absl/container/internal/raw_hash_set.cc
new file mode 100644
index 000000000000..bfef071f29e6
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/raw_hash_set.cc
@@ -0,0 +1,61 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/raw_hash_set.h"
+
+#include <atomic>
+#include <cstddef>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+constexpr size_t Group::kWidth;
+
+// Returns "random" seed.
+inline size_t RandomSeed() {
+#ifdef ABSL_HAVE_THREAD_LOCAL
+  static thread_local size_t counter = 0;
+  size_t value = ++counter;
+#else   // ABSL_HAVE_THREAD_LOCAL
+  static std::atomic<size_t> counter(0);
+  size_t value = counter.fetch_add(1, std::memory_order_relaxed);
+#endif  // ABSL_HAVE_THREAD_LOCAL
+  return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
+}
+
+bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl) {
+  // To avoid problems with weak hashes and single bit tests, we use % 13.
+  // TODO(kfm,sbenza): revisit after we do unconditional mixing
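+  // Of the residues 0..12, only 7..12 satisfy `> 6`, so roughly 6/13 (~46%)
+  // of insertions probe their group from the back.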
+  return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
+}
+
+void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
+  assert(ctrl[capacity] == kSentinel);
+  assert(IsValidCapacity(capacity));
+  for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
+    Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
+  }
+  // Copy the cloned ctrl bytes.
+  std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
+  ctrl[capacity] = kSentinel;
+}
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/container/internal/raw_hash_set.h b/third_party/abseil_cpp/absl/container/internal/raw_hash_set.h
new file mode 100644
index 000000000000..02158c4e0886
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/raw_hash_set.h
@@ -0,0 +1,1903 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// An open-addressing hashtable with quadratic probing.
+//
+// This is a low level hashtable on top of which different interfaces can be
+// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
+//
+// The table interface is similar to that of std::unordered_set. Notable
+// differences are that most member functions support heterogeneous keys when
+// BOTH the hash and eq functions are marked as transparent. They do so by
+// providing a typedef called `is_transparent`.
+//
+// When heterogeneous lookup is enabled, functions that take key_type act as if
+// they have an overload set like:
+//
+//   iterator find(const key_type& key);
+//   template <class K>
+//   iterator find(const K& key);
+//
+//   size_type erase(const key_type& key);
+//   template <class K>
+//   size_type erase(const K& key);
+//
+//   std::pair<iterator, iterator> equal_range(const key_type& key);
+//   template <class K>
+//   std::pair<iterator, iterator> equal_range(const K& key);
+//
+// When heterogeneous lookup is disabled, only the explicit `key_type` overloads
+// exist.
+//
+// find() also supports passing the hash explicitly:
+//
+//   iterator find(const key_type& key, size_t hash);
+//   template <class U>
+//   iterator find(const U& key, size_t hash);
+//
+// In addition the pointer to element and iterator stability guarantees are
+// weaker: all iterators and pointers are invalidated after a new element is
+// inserted.
+//
+// IMPLEMENTATION DETAILS
+//
+// The table stores elements inline in a slot array. In addition to the slot
+// array the table maintains some control state per slot. The extra state is one
+// byte per slot and stores empty or deleted marks, or alternatively 7 bits from
+// the hash of an occupied slot. The table is split into logical groups of
+// slots, like so:
+//
+//      Group 1         Group 2        Group 3
+// +---------------+---------------+---------------+
+// | | | | | | | | | | | | | | | | | | | | | | | | |
+// +---------------+---------------+---------------+
+//
+// On lookup the hash is split into two parts:
+// - H2: 7 bits (those stored in the control bytes)
+// - H1: the rest of the bits
+// The groups are probed using H1. For each group the slots are matched to H2 in
+// parallel. Because H2 is 7 bits (128 states) and the number of slots per
+// group is low (8 or 16), in almost all cases a match in H2 is also a lookup
+// hit.
+//
+// On insert, once the right group is found (as in lookup), its slots are
+// filled in order.
+//
+// On erase a slot is cleared. In case the group did not have any empty slots
+// before the erase, the erased slot is marked as deleted.
+//
+// Groups without empty slots (but maybe with deleted slots) extend the probe
+// sequence. The probing algorithm is quadratic. Given N the number of groups,
+// the probing function for the i'th probe is:
+//
+//   P(0) = H1 % N
+//
+//   P(i) = (P(i - 1) + i) % N
+//
+// This probing function guarantees that after N probes, all the groups of the
+// table will be probed exactly once.
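+//
+// As a worked example (restating the definitions that appear later in this
+// file): a 64-bit hash h is split as
+//
+//   H2(h) = h & 0x7F            // low 7 bits, stored in the control byte
+//   H1(h) = (h >> 7) ^ seed     // selects the starting group
+//
+// so with N groups the probe sequence visits groups
+// H1 % N, (H1 + 1) % N, (H1 + 3) % N, (H1 + 6) % N, ... and, as noted above,
+// touches every group exactly once within N probes.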
+
+#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
+#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <cstring>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/internal/bits.h"
+#include "absl/base/internal/endian.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+#include "absl/container/internal/common.h"
+#include "absl/container/internal/compressed_tuple.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hash_policy_traits.h"
+#include "absl/container/internal/hashtable_debug_hooks.h"
+#include "absl/container/internal/hashtablez_sampler.h"
+#include "absl/container/internal/have_sse.h"
+#include "absl/container/internal/layout.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <typename AllocType>
+void SwapAlloc(AllocType& lhs, AllocType& rhs,
+               std::true_type /* propagate_on_container_swap */) {
+  using std::swap;
+  swap(lhs, rhs);
+}
+template <typename AllocType>
+void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
+               std::false_type /* propagate_on_container_swap */) {}
+
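+// probe_seq produces the quadratic probe sequence described in the file
+// comment above. As an illustrative trace (with Width == 16 and mask == 63):
+// successive next() calls advance offset() by 16, 32, 48, ..., so the visited
+// offsets are h, h + 16, h + 48, h + 96, ... (all mod 64), where h is the
+// initial `hash & mask`.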
+template <size_t Width>
+class probe_seq {
+ public:
+  probe_seq(size_t hash, size_t mask) {
+    assert(((mask + 1) & mask) == 0 && "not a mask");
+    mask_ = mask;
+    offset_ = hash & mask_;
+  }
+  size_t offset() const { return offset_; }
+  size_t offset(size_t i) const { return (offset_ + i) & mask_; }
+
+  void next() {
+    index_ += Width;
+    offset_ += index_;
+    offset_ &= mask_;
+  }
+  // 0-based probe index. The i-th probe in the probe sequence.
+  size_t index() const { return index_; }
+
+ private:
+  size_t mask_;
+  size_t offset_;
+  size_t index_ = 0;
+};
+
+template <class ContainerKey, class Hash, class Eq>
+struct RequireUsableKey {
+  template <class PassedKey, class... Args>
+  std::pair<
+      decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
+      decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
+                                         std::declval<const PassedKey&>()))>*
+  operator()(const PassedKey&, const Args&...) const;
+};
+
+template <class E, class Policy, class Hash, class Eq, class... Ts>
+struct IsDecomposable : std::false_type {};
+
+template <class Policy, class Hash, class Eq, class... Ts>
+struct IsDecomposable<
+    absl::void_t<decltype(
+        Policy::apply(RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
+                      std::declval<Ts>()...))>,
+    Policy, Hash, Eq, Ts...> : std::true_type {};
+
+// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
+template <class T>
+constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
+  using std::swap;
+  return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
+}
+template <class T>
+constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
+  return false;
+}
+
+template <typename T>
+int TrailingZeros(T x) {
+  return sizeof(T) == 8 ? base_internal::CountTrailingZerosNonZero64(
+                              static_cast<uint64_t>(x))
+                        : base_internal::CountTrailingZerosNonZero32(
+                              static_cast<uint32_t>(x));
+}
+
+template <typename T>
+int LeadingZeros(T x) {
+  return sizeof(T) == 8
+             ? base_internal::CountLeadingZeros64(static_cast<uint64_t>(x))
+             : base_internal::CountLeadingZeros32(static_cast<uint32_t>(x));
+}
+
+// An abstraction over a bitmask. It provides an easy way to iterate through the
+// indexes of the set bits of a bitmask.  When Shift=0 (platforms with SSE),
+// this is a true bitmask.  On non-SSE platforms, the arithmetic used to
+// emulate the SSE behavior works in bytes (Shift=3) and leaves each byte as
+// either 0x00 or 0x80.
+//
+// For example:
+//   for (int i : BitMask<uint32_t, 16>(0x5)) -> yields 0, 2
+//   for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
+template <class T, int SignificantBits, int Shift = 0>
+class BitMask {
+  static_assert(std::is_unsigned<T>::value, "");
+  static_assert(Shift == 0 || Shift == 3, "");
+
+ public:
+  // These are useful for unit tests (gunit).
+  using value_type = int;
+  using iterator = BitMask;
+  using const_iterator = BitMask;
+
+  explicit BitMask(T mask) : mask_(mask) {}
+  BitMask& operator++() {
+    mask_ &= (mask_ - 1);
+    return *this;
+  }
+  explicit operator bool() const { return mask_ != 0; }
+  int operator*() const { return LowestBitSet(); }
+  int LowestBitSet() const {
+    return container_internal::TrailingZeros(mask_) >> Shift;
+  }
+  int HighestBitSet() const {
+    return (sizeof(T) * CHAR_BIT - container_internal::LeadingZeros(mask_) -
+            1) >>
+           Shift;
+  }
+
+  BitMask begin() const { return *this; }
+  BitMask end() const { return BitMask(0); }
+
+  int TrailingZeros() const {
+    return container_internal::TrailingZeros(mask_) >> Shift;
+  }
+
+  int LeadingZeros() const {
+    constexpr int total_significant_bits = SignificantBits << Shift;
+    constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
+    return container_internal::LeadingZeros(mask_ << extra_bits) >> Shift;
+  }
+
+ private:
+  friend bool operator==(const BitMask& a, const BitMask& b) {
+    return a.mask_ == b.mask_;
+  }
+  friend bool operator!=(const BitMask& a, const BitMask& b) {
+    return a.mask_ != b.mask_;
+  }
+
+  T mask_;
+};
+
+using ctrl_t = signed char;
+using h2_t = uint8_t;
+
+// The values here are selected for maximum performance. See the static asserts
+// below for details.
+enum Ctrl : ctrl_t {
+  kEmpty = -128,   // 0b10000000
+  kDeleted = -2,   // 0b11111110
+  kSentinel = -1,  // 0b11111111
+};
+static_assert(
+    kEmpty & kDeleted & kSentinel & 0x80,
+    "Special markers need to have the MSB to make checking for them efficient");
+static_assert(kEmpty < kSentinel && kDeleted < kSentinel,
+              "kEmpty and kDeleted must be smaller than kSentinel to make the "
+              "SIMD test of IsEmptyOrDeleted() efficient");
+static_assert(kSentinel == -1,
+              "kSentinel must be -1 to elide loading it from memory into SIMD "
+              "registers (pcmpeqd xmm, xmm)");
+static_assert(kEmpty == -128,
+              "kEmpty must be -128 to make the SIMD check for its "
+              "existence efficient (psignb xmm, xmm)");
+static_assert(~kEmpty & ~kDeleted & kSentinel & 0x7F,
+              "kEmpty and kDeleted must share an unset bit that is not shared "
+              "by kSentinel to make the scalar test for MatchEmptyOrDeleted() "
+              "efficient");
+static_assert(kDeleted == -2,
+              "kDeleted must be -2 to make the implementation of "
+              "ConvertSpecialToEmptyAndFullToDeleted efficient");
+
+// A single block of empty control bytes for tables without any slots allocated.
+// This enables removing a branch in the hot path of find().
+inline ctrl_t* EmptyGroup() {
+  alignas(16) static constexpr ctrl_t empty_group[] = {
+      kSentinel, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty,
+      kEmpty,    kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty};
+  return const_cast<ctrl_t*>(empty_group);
+}
+
+// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
+// randomize insertion order within groups.
+bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl);
+
+// Returns a hash seed.
+//
+// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
+// non-determinism of iteration order in most cases.
+inline size_t HashSeed(const ctrl_t* ctrl) {
+  // The low bits of the pointer have little or no entropy because of
+  // alignment. We shift the pointer to try to use higher entropy bits. A
+  // good number seems to be 12 bits, because that aligns with page size.
+  return reinterpret_cast<uintptr_t>(ctrl) >> 12;
+}
+
+inline size_t H1(size_t hash, const ctrl_t* ctrl) {
+  return (hash >> 7) ^ HashSeed(ctrl);
+}
+inline ctrl_t H2(size_t hash) { return hash & 0x7F; }
+
+inline bool IsEmpty(ctrl_t c) { return c == kEmpty; }
+inline bool IsFull(ctrl_t c) { return c >= 0; }
+inline bool IsDeleted(ctrl_t c) { return c == kDeleted; }
+inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; }
+
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+
+// https://github.com/abseil/abseil-cpp/issues/209
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
+// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
+// Work around this by using the portable implementation of Group
+// when using -funsigned-char under GCC.
+inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
+#if defined(__GNUC__) && !defined(__clang__)
+  if (std::is_unsigned<char>::value) {
+    const __m128i mask = _mm_set1_epi8(0x80);
+    const __m128i diff = _mm_subs_epi8(b, a);
+    return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
+  }
+#endif
+  return _mm_cmpgt_epi8(a, b);
+}
+
+struct GroupSse2Impl {
+  static constexpr size_t kWidth = 16;  // the number of slots per group
+
+  explicit GroupSse2Impl(const ctrl_t* pos) {
+    ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
+  }
+
+  // Returns a bitmask representing the positions of slots that match hash.
+  BitMask<uint32_t, kWidth> Match(h2_t hash) const {
+    auto match = _mm_set1_epi8(hash);
+    return BitMask<uint32_t, kWidth>(
+        _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)));
+  }
+
+  // Returns a bitmask representing the positions of empty slots.
+  BitMask<uint32_t, kWidth> MatchEmpty() const {
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
+    // This only works because kEmpty is -128.
+    return BitMask<uint32_t, kWidth>(
+        _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
+#else
+    return Match(static_cast<h2_t>(kEmpty));
+#endif
+  }
+
+  // Returns a bitmask representing the positions of empty or deleted slots.
+  BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
+    auto special = _mm_set1_epi8(kSentinel);
+    return BitMask<uint32_t, kWidth>(
+        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)));
+  }
+
+  // Returns the number of consecutive empty or deleted elements at the start
+  // of the group (computed from the trailing bits of the movemask).
+  uint32_t CountLeadingEmptyOrDeleted() const {
+    auto special = _mm_set1_epi8(kSentinel);
+    return TrailingZeros(
+        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1);
+  }
+
+  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+    auto msbs = _mm_set1_epi8(static_cast<char>(-128));
+    auto x126 = _mm_set1_epi8(126);
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
+    auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
+#else
+    auto zero = _mm_setzero_si128();
+    auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
+    auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
+#endif
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
+  }
+
+  __m128i ctrl;
+};
+#endif  // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+
+struct GroupPortableImpl {
+  static constexpr size_t kWidth = 8;
+
+  explicit GroupPortableImpl(const ctrl_t* pos)
+      : ctrl(little_endian::Load64(pos)) {}
+
+  BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
+    // For the technique, see:
+    // http://graphics.stanford.edu/~seander/bithacks.html#ValueInWord
+    // (Determine if a word has a byte equal to n).
+    //
+    // Caveat: there are false positives but:
+    // - they only occur if there is a real match
+    // - they never occur on kEmpty, kDeleted, kSentinel
+    // - they will be handled gracefully by subsequent checks in code
+    //
+    // Example:
+    //   v = 0x1716151413121110
+    //   hash = 0x12
+    //   retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
+    constexpr uint64_t msbs = 0x8080808080808080ULL;
+    constexpr uint64_t lsbs = 0x0101010101010101ULL;
+    auto x = ctrl ^ (lsbs * hash);
+    return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
+  }
+
+  BitMask<uint64_t, kWidth, 3> MatchEmpty() const {
+    constexpr uint64_t msbs = 0x8080808080808080ULL;
+    return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) & msbs);
+  }
+
+  BitMask<uint64_t, kWidth, 3> MatchEmptyOrDeleted() const {
+    constexpr uint64_t msbs = 0x8080808080808080ULL;
+    return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) & msbs);
+  }
+
+  uint32_t CountLeadingEmptyOrDeleted() const {
+    constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL;
+    return (TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3;
+  }
+
+  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+    constexpr uint64_t msbs = 0x8080808080808080ULL;
+    constexpr uint64_t lsbs = 0x0101010101010101ULL;
+    auto x = ctrl & msbs;
+    auto res = (~x + (x >> 7)) & ~lsbs;
+    little_endian::Store64(dst, res);
+  }
+
+  uint64_t ctrl;
+};
+
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+using Group = GroupSse2Impl;
+#else
+using Group = GroupPortableImpl;
+#endif
+
+template <class Policy, class Hash, class Eq, class Alloc>
+class raw_hash_set;
+
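+// A valid capacity is one less than a power of two: 1, 3, 7, 15, ...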
+inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
+
+// PRECONDITION:
+//   IsValidCapacity(capacity)
+//   ctrl[capacity] == kSentinel
+//   ctrl[i] != kSentinel for all i < capacity
+// Applies mapping for every byte in ctrl:
+//   DELETED -> EMPTY
+//   EMPTY -> EMPTY
+//   FULL -> DELETED
+void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
+
+// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
+inline size_t NormalizeCapacity(size_t n) {
+  return n ? ~size_t{} >> LeadingZeros(n) : 1;
+}
+
+// We use 7/8th as maximum load factor.
+// For 16-wide groups, that gives an average of two empty slots per group.
+inline size_t CapacityToGrowth(size_t capacity) {
+  assert(IsValidCapacity(capacity));
+  // `capacity*7/8`
+  if (Group::kWidth == 8 && capacity == 7) {
+    // x-x/8 does not work when x==7.
+    return 6;
+  }
+  return capacity - capacity / 8;
+}
+// From desired "growth" to a lowerbound of the necessary capacity.
+// Might not be a valid one and required NormalizeCapacity().
+inline size_t GrowthToLowerboundCapacity(size_t growth) {
+  // `growth*8/7`
+  if (Group::kWidth == 8 && growth == 7) {
+    // x+(x-1)/7 does not work when x==7.
+    return 8;
+  }
+  return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
+}
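+// A worked example of the pair above: CapacityToGrowth(15) == 15 - 15 / 8 ==
+// 14, and GrowthToLowerboundCapacity(14) == 14 + (14 - 1) / 7 == 15, which
+// NormalizeCapacity() leaves unchanged.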
+
+inline void AssertIsFull(ctrl_t* ctrl) {
+  ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) &&
+                        "Invalid operation on iterator. The element might have "
+                        "been erased, or the table might have rehashed.");
+}
+
+inline void AssertIsValid(ctrl_t* ctrl) {
+  ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) &&
+                        "Invalid operation on iterator. The element might have "
+                        "been erased, or the table might have rehashed.");
+}
+
+struct FindInfo {
+  size_t offset;
+  size_t probe_length;
+};
+
+// The representation of the object has two modes:
+//  - small: For capacities < kWidth-1
+//  - large: For the rest.
+//
+// Differences:
+//  - In small mode we are able to use the whole capacity. The extra control
+//  bytes give us at least one "empty" control byte to stop the iteration.
+//  This is important to make 1 a valid capacity.
+//
+//  - In small mode only the first `capacity()` control bytes after the
+//  sentinel are valid. The rest contain dummy kEmpty values that do not
+//  represent a real slot. This is important to take into account in
+//  find_first_non_full(), where we never try ShouldInsertBackwards() for
+//  small tables.
+inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
+
+inline probe_seq<Group::kWidth> probe(ctrl_t* ctrl, size_t hash,
+                                      size_t capacity) {
+  return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
+}
+
+// Probes the raw_hash_set with the probe sequence for hash and returns the
+// pointer to the first empty or deleted slot.
+// NOTE: this function must work with tables having both kEmpty and kDeleted
+// in one group. Such tables appear during drop_deletes_without_resize.
+//
+// This function is especially useful during insertion when:
+// - the input is already a set,
+// - there are enough slots, and
+// - the element with the hash is not in the table.
+inline FindInfo find_first_non_full(ctrl_t* ctrl, size_t hash,
+                                    size_t capacity) {
+  auto seq = probe(ctrl, hash, capacity);
+  while (true) {
+    Group g{ctrl + seq.offset()};
+    auto mask = g.MatchEmptyOrDeleted();
+    if (mask) {
+#if !defined(NDEBUG)
+      // We want to add entropy even when ASLR is not enabled.
+      // In debug build we will randomly insert in either the front or back of
+      // the group.
+      // TODO(kfm,sbenza): revisit after we do unconditional mixing
+      if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) {
+        return {seq.offset(mask.HighestBitSet()), seq.index()};
+      }
+#endif
+      return {seq.offset(mask.LowestBitSet()), seq.index()};
+    }
+    seq.next();
+    assert(seq.index() < capacity && "full table!");
+  }
+}
+
+// Policy: a policy defines how to perform different operations on
+// the slots of the hashtable (see hash_policy_traits.h for the full interface
+// of policy).
+//
+// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
+// functor should accept a key and return size_t as hash. For best performance
+// it is important that the hash function provides high entropy across all bits
+// of the hash.
+//
+// Eq: a (possibly polymorphic) functor that compares two keys for equality. It
+// should accept two (of possibly different type) keys and return a bool: true
+// if they are equal, false if they are not. If two keys compare equal, then
+// their hash values as defined by Hash MUST be equal.
+//
+// Allocator: an Allocator
+// [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
+// the storage of the hashtable will be allocated and the elements will be
+// constructed and destroyed.
+template <class Policy, class Hash, class Eq, class Alloc>
+class raw_hash_set {
+  using PolicyTraits = hash_policy_traits<Policy>;
+  using KeyArgImpl =
+      KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
+
+ public:
+  using init_type = typename PolicyTraits::init_type;
+  using key_type = typename PolicyTraits::key_type;
+  // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
+  // code fixes!
+  using slot_type = typename PolicyTraits::slot_type;
+  using allocator_type = Alloc;
+  using size_type = size_t;
+  using difference_type = ptrdiff_t;
+  using hasher = Hash;
+  using key_equal = Eq;
+  using policy_type = Policy;
+  using value_type = typename PolicyTraits::value_type;
+  using reference = value_type&;
+  using const_reference = const value_type&;
+  using pointer = typename absl::allocator_traits<
+      allocator_type>::template rebind_traits<value_type>::pointer;
+  using const_pointer = typename absl::allocator_traits<
+      allocator_type>::template rebind_traits<value_type>::const_pointer;
+
+  // Alias used for heterogeneous lookup functions.
+  // `key_arg<K>` evaluates to `K` when the functors are transparent and to
+  // `key_type` otherwise. It permits template argument deduction on `K` for the
+  // transparent case.
+  template <class K>
+  using key_arg = typename KeyArgImpl::template type<K, key_type>;
+
+ private:
+  // Give an early error when key_type is not hashable/eq.
+  auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
+  auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
+
+  using Layout = absl::container_internal::Layout<ctrl_t, slot_type>;
+
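+  // The ctrl_t array in this layout holds `capacity` control bytes, one
+  // kSentinel byte, and `Group::kWidth` bytes cloned from the front so that
+  // group loads starting near the end stay in bounds (see the memcpy in
+  // ConvertDeletedToEmptyAndFullToDeleted() in raw_hash_set.cc).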
+  static Layout MakeLayout(size_t capacity) {
+    assert(IsValidCapacity(capacity));
+    return Layout(capacity + Group::kWidth + 1, capacity);
+  }
+
+  using AllocTraits = absl::allocator_traits<allocator_type>;
+  using SlotAlloc = typename absl::allocator_traits<
+      allocator_type>::template rebind_alloc<slot_type>;
+  using SlotAllocTraits = typename absl::allocator_traits<
+      allocator_type>::template rebind_traits<slot_type>;
+
+  static_assert(std::is_lvalue_reference<reference>::value,
+                "Policy::element() must return a reference");
+
+  template <typename T>
+  struct SameAsElementReference
+      : std::is_same<typename std::remove_cv<
+                         typename std::remove_reference<reference>::type>::type,
+                     typename std::remove_cv<
+                         typename std::remove_reference<T>::type>::type> {};
+
+  // An enabler for insert(T&&): T must be convertible to init_type or be the
+  // same as [cv] value_type [ref].
+  // Note: we separate SameAsElementReference into its own type to avoid using
+  // reference unless we need to. MSVC doesn't seem to like it in some
+  // cases.
+  template <class T>
+  using RequiresInsertable = typename std::enable_if<
+      absl::disjunction<std::is_convertible<T, init_type>,
+                        SameAsElementReference<T>>::value,
+      int>::type;
+
+  // RequiresNotInit is a workaround for gcc prior to 7.1.
+  // See https://godbolt.org/g/Y4xsUh.
+  template <class T>
+  using RequiresNotInit =
+      typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;
+
+  template <class... Ts>
+  using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;
+
+ public:
+  static_assert(std::is_same<pointer, value_type*>::value,
+                "Allocators with custom pointer types are not supported");
+  static_assert(std::is_same<const_pointer, const value_type*>::value,
+                "Allocators with custom pointer types are not supported");
+
+  class iterator {
+    friend class raw_hash_set;
+
+   public:
+    using iterator_category = std::forward_iterator_tag;
+    using value_type = typename raw_hash_set::value_type;
+    using reference =
+        absl::conditional_t<PolicyTraits::constant_iterators::value,
+                            const value_type&, value_type&>;
+    using pointer = absl::remove_reference_t<reference>*;
+    using difference_type = typename raw_hash_set::difference_type;
+
+    iterator() {}
+
+    // PRECONDITION: not an end() iterator.
+    reference operator*() const {
+      AssertIsFull(ctrl_);
+      return PolicyTraits::element(slot_);
+    }
+
+    // PRECONDITION: not an end() iterator.
+    pointer operator->() const { return &operator*(); }
+
+    // PRECONDITION: not an end() iterator.
+    iterator& operator++() {
+      AssertIsFull(ctrl_);
+      ++ctrl_;
+      ++slot_;
+      skip_empty_or_deleted();
+      return *this;
+    }
+    // PRECONDITION: not an end() iterator.
+    iterator operator++(int) {
+      auto tmp = *this;
+      ++*this;
+      return tmp;
+    }
+
+    friend bool operator==(const iterator& a, const iterator& b) {
+      AssertIsValid(a.ctrl_);
+      AssertIsValid(b.ctrl_);
+      return a.ctrl_ == b.ctrl_;
+    }
+    friend bool operator!=(const iterator& a, const iterator& b) {
+      return !(a == b);
+    }
+
+   private:
+    iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {
+      // This assumption helps the compiler know that any non-end iterator is
+      // not equal to any end iterator.
+      ABSL_INTERNAL_ASSUME(ctrl != nullptr);
+    }
+
+    void skip_empty_or_deleted() {
+      while (IsEmptyOrDeleted(*ctrl_)) {
+        uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
+        ctrl_ += shift;
+        slot_ += shift;
+      }
+      if (ABSL_PREDICT_FALSE(*ctrl_ == kSentinel)) ctrl_ = nullptr;
+    }
+
+    ctrl_t* ctrl_ = nullptr;
+    // To avoid uninitialized member warnings, put slot_ in an anonymous union.
+    // The member is not initialized on singleton and end iterators.
+    union {
+      slot_type* slot_;
+    };
+  };
+
+  class const_iterator {
+    friend class raw_hash_set;
+
+   public:
+    using iterator_category = typename iterator::iterator_category;
+    using value_type = typename raw_hash_set::value_type;
+    using reference = typename raw_hash_set::const_reference;
+    using pointer = typename raw_hash_set::const_pointer;
+    using difference_type = typename raw_hash_set::difference_type;
+
+    const_iterator() {}
+    // Implicit construction from iterator.
+    const_iterator(iterator i) : inner_(std::move(i)) {}
+
+    reference operator*() const { return *inner_; }
+    pointer operator->() const { return inner_.operator->(); }
+
+    const_iterator& operator++() {
+      ++inner_;
+      return *this;
+    }
+    const_iterator operator++(int) { return inner_++; }
+
+    friend bool operator==(const const_iterator& a, const const_iterator& b) {
+      return a.inner_ == b.inner_;
+    }
+    friend bool operator!=(const const_iterator& a, const const_iterator& b) {
+      return !(a == b);
+    }
+
+   private:
+    const_iterator(const ctrl_t* ctrl, const slot_type* slot)
+        : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot)) {}
+
+    iterator inner_;
+  };
+
+  using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
+  using insert_return_type = InsertReturnType<iterator, node_type>;
+
+  raw_hash_set() noexcept(
+      std::is_nothrow_default_constructible<hasher>::value&&
+          std::is_nothrow_default_constructible<key_equal>::value&&
+              std::is_nothrow_default_constructible<allocator_type>::value) {}
+
+  explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
+                        const key_equal& eq = key_equal(),
+                        const allocator_type& alloc = allocator_type())
+      : ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) {
+    if (bucket_count) {
+      capacity_ = NormalizeCapacity(bucket_count);
+      initialize_slots();
+    }
+  }
+
+  raw_hash_set(size_t bucket_count, const hasher& hash,
+               const allocator_type& alloc)
+      : raw_hash_set(bucket_count, hash, key_equal(), alloc) {}
+
+  raw_hash_set(size_t bucket_count, const allocator_type& alloc)
+      : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}
+
+  explicit raw_hash_set(const allocator_type& alloc)
+      : raw_hash_set(0, hasher(), key_equal(), alloc) {}
+
+  template <class InputIter>
+  raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
+               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
+               const allocator_type& alloc = allocator_type())
+      : raw_hash_set(bucket_count, hash, eq, alloc) {
+    insert(first, last);
+  }
+
+  template <class InputIter>
+  raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
+               const hasher& hash, const allocator_type& alloc)
+      : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}
+
+  template <class InputIter>
+  raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
+               const allocator_type& alloc)
+      : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}
+
+  template <class InputIter>
+  raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
+      : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}
+
+  // Instead of accepting std::initializer_list<value_type> as the first
+  // argument like std::unordered_set<value_type> does, we have two overloads
+  // that accept std::initializer_list<T> and std::initializer_list<init_type>.
+  // This is advantageous for performance.
+  //
+  //   // Turns {"abc", "def"} into std::initializer_list<std::string>, then
+  //   // copies the strings into the set.
+  //   std::unordered_set<std::string> s = {"abc", "def"};
+  //
+  //   // Turns {"abc", "def"} into std::initializer_list<const char*>, then
+  //   // copies the strings into the set.
+  //   absl::flat_hash_set<std::string> s = {"abc", "def"};
+  //
+  // The same trick is used in insert().
+  //
+  // The enabler is necessary to prevent this constructor from triggering where
+  // the copy constructor is meant to be called.
+  //
+  //   absl::flat_hash_set<int> a, b{a};
+  //
+  // RequiresNotInit<T> is a workaround for gcc prior to 7.1.
+  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+  raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
+               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
+               const allocator_type& alloc = allocator_type())
+      : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
+
+  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
+               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
+               const allocator_type& alloc = allocator_type())
+      : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
+
+  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+  raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
+               const hasher& hash, const allocator_type& alloc)
+      : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
+
+  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
+               const hasher& hash, const allocator_type& alloc)
+      : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
+
+  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+  raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
+               const allocator_type& alloc)
+      : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
+
+  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
+               const allocator_type& alloc)
+      : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
+
+  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+  raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
+      : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
+
+  raw_hash_set(std::initializer_list<init_type> init,
+               const allocator_type& alloc)
+      : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
+
+  raw_hash_set(const raw_hash_set& that)
+      : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
+                               that.alloc_ref())) {}
+
+  raw_hash_set(const raw_hash_set& that, const allocator_type& a)
+      : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
+    reserve(that.size());
+    // Because the table is guaranteed to be empty, we can do something faster
+    // than a full `insert`.
+    for (const auto& v : that) {
+      const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
+      auto target = find_first_non_full(ctrl_, hash, capacity_);
+      set_ctrl(target.offset, H2(hash));
+      emplace_at(target.offset, v);
+      infoz_.RecordInsert(hash, target.probe_length);
+    }
+    size_ = that.size();
+    growth_left() -= that.size();
+  }
+
+  raw_hash_set(raw_hash_set&& that) noexcept(
+      std::is_nothrow_copy_constructible<hasher>::value&&
+          std::is_nothrow_copy_constructible<key_equal>::value&&
+              std::is_nothrow_copy_constructible<allocator_type>::value)
+      : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())),
+        slots_(absl::exchange(that.slots_, nullptr)),
+        size_(absl::exchange(that.size_, 0)),
+        capacity_(absl::exchange(that.capacity_, 0)),
+        infoz_(absl::exchange(that.infoz_, HashtablezInfoHandle())),
+        // Hash, equality and allocator are copied instead of moved because
+        // `that` must be left valid. If Hash is, say, a std::function, moving
+        // it would leave `that` with a null functor that cannot be called.
+        settings_(that.settings_) {
+    // growth_left was copied above, reset the one from `that`.
+    that.growth_left() = 0;
+  }
+
+  raw_hash_set(raw_hash_set&& that, const allocator_type& a)
+      : ctrl_(EmptyGroup()),
+        slots_(nullptr),
+        size_(0),
+        capacity_(0),
+        settings_(0, that.hash_ref(), that.eq_ref(), a) {
+    if (a == that.alloc_ref()) {
+      std::swap(ctrl_, that.ctrl_);
+      std::swap(slots_, that.slots_);
+      std::swap(size_, that.size_);
+      std::swap(capacity_, that.capacity_);
+      std::swap(growth_left(), that.growth_left());
+      std::swap(infoz_, that.infoz_);
+    } else {
+      reserve(that.size());
+      // Note: this may copy elements instead of moving them (for example, map
+      // elements, whose keys are const). This can be fixed if it ever becomes
+      // an issue.
+      for (auto& elem : that) insert(std::move(elem));
+    }
+  }
+
+  raw_hash_set& operator=(const raw_hash_set& that) {
+    raw_hash_set tmp(that,
+                     AllocTraits::propagate_on_container_copy_assignment::value
+                         ? that.alloc_ref()
+                         : alloc_ref());
+    swap(tmp);
+    return *this;
+  }
+
+  raw_hash_set& operator=(raw_hash_set&& that) noexcept(
+      absl::allocator_traits<allocator_type>::is_always_equal::value&&
+          std::is_nothrow_move_assignable<hasher>::value&&
+              std::is_nothrow_move_assignable<key_equal>::value) {
+    // TODO(sbenza): We should only use the operations from the noexcept clause
+    // to make sure we actually adhere to that contract.
+    return move_assign(
+        std::move(that),
+        typename AllocTraits::propagate_on_container_move_assignment());
+  }
+
+  ~raw_hash_set() { destroy_slots(); }
+
+  iterator begin() {
+    auto it = iterator_at(0);
+    it.skip_empty_or_deleted();
+    return it;
+  }
+  iterator end() { return {}; }
+
+  const_iterator begin() const {
+    return const_cast<raw_hash_set*>(this)->begin();
+  }
+  const_iterator end() const { return {}; }
+  const_iterator cbegin() const { return begin(); }
+  const_iterator cend() const { return end(); }
+
+  bool empty() const { return !size(); }
+  size_t size() const { return size_; }
+  size_t capacity() const { return capacity_; }
+  size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
+
+  ABSL_ATTRIBUTE_REINITIALIZES void clear() {
+    // Iterating over this container is O(bucket_count()). When bucket_count()
+    // is much greater than size(), iteration becomes prohibitively expensive.
+    // For clear() it is more important to reuse the allocated array when the
+    // container is small, because allocation is expensive compared to
+    // destroying the container's elements. So we pick the largest
+    // bucket_count() threshold for which iteration is still fast, and past
+    // that we simply deallocate the array.
+    if (capacity_ > 127) {
+      destroy_slots();
+    } else if (capacity_) {
+      for (size_t i = 0; i != capacity_; ++i) {
+        if (IsFull(ctrl_[i])) {
+          PolicyTraits::destroy(&alloc_ref(), slots_ + i);
+        }
+      }
+      size_ = 0;
+      reset_ctrl();
+      reset_growth_left();
+    }
+    assert(empty());
+    infoz_.RecordStorageChanged(0, capacity_);
+  }
+
+  // This overload kicks in when the argument is an rvalue of insertable and
+  // decomposable type other than init_type.
+  //
+  //   flat_hash_map<std::string, int> m;
+  //   m.insert(std::make_pair("abc", 42));
+  // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
+  // bug.
+  template <class T, RequiresInsertable<T> = 0,
+            class T2 = T,
+            typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
+            T* = nullptr>
+  std::pair<iterator, bool> insert(T&& value) {
+    return emplace(std::forward<T>(value));
+  }
+
+  // This overload kicks in when the argument is a bitfield or an lvalue of
+  // insertable and decomposable type.
+  //
+  //   union { int n : 1; };
+  //   flat_hash_set<int> s;
+  //   s.insert(n);
+  //
+  //   flat_hash_set<std::string> s;
+  //   const char* p = "hello";
+  //   s.insert(p);
+  //
+  // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
+  // RequiresInsertable<T> with RequiresInsertable<const T&>.
+  // We are hitting this bug: https://godbolt.org/g/1Vht4f.
+  template <
+      class T, RequiresInsertable<T> = 0,
+      typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
+  std::pair<iterator, bool> insert(const T& value) {
+    return emplace(value);
+  }
+
+  // This overload kicks in when the argument is an rvalue of init_type. Its
+  // purpose is to handle brace-init-list arguments.
+  //
+  //   flat_hash_map<std::string, int> s;
+  //   s.insert({"abc", 42});
+  std::pair<iterator, bool> insert(init_type&& value) {
+    return emplace(std::move(value));
+  }
+
+  // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
+  // bug.
+  template <class T, RequiresInsertable<T> = 0, class T2 = T,
+            typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
+            T* = nullptr>
+  iterator insert(const_iterator, T&& value) {
+    return insert(std::forward<T>(value)).first;
+  }
+
+  // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
+  // RequiresInsertable<T> with RequiresInsertable<const T&>.
+  // We are hitting this bug: https://godbolt.org/g/1Vht4f.
+  template <
+      class T, RequiresInsertable<T> = 0,
+      typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
+  iterator insert(const_iterator, const T& value) {
+    return insert(value).first;
+  }
+
+  iterator insert(const_iterator, init_type&& value) {
+    return insert(std::move(value)).first;
+  }
+
+  template <class InputIt>
+  void insert(InputIt first, InputIt last) {
+    for (; first != last; ++first) insert(*first);
+  }
+
+  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
+  void insert(std::initializer_list<T> ilist) {
+    insert(ilist.begin(), ilist.end());
+  }
+
+  void insert(std::initializer_list<init_type> ilist) {
+    insert(ilist.begin(), ilist.end());
+  }
+
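+  // Inserts a node handle previously obtained from extract(). A sketch of
+  // moving an element between sets without copying it:
+  //
+  //   flat_hash_set<std::string> src = {"abc"};
+  //   flat_hash_set<std::string> dst;
+  //   auto res = dst.insert(src.extract("abc"));
+  //   assert(res.inserted && !res.node);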
+  insert_return_type insert(node_type&& node) {
+    if (!node) return {end(), false, node_type()};
+    const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
+    auto res = PolicyTraits::apply(
+        InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
+        elem);
+    if (res.second) {
+      CommonAccess::Reset(&node);
+      return {res.first, true, node_type()};
+    } else {
+      return {res.first, false, std::move(node)};
+    }
+  }
+
+  iterator insert(const_iterator, node_type&& node) {
+    auto res = insert(std::move(node));
+    node = std::move(res.node);
+    return res.position;
+  }
+
+  // This overload kicks in if we can deduce the key from args. This enables us
+  // to avoid constructing value_type if an entry with the same key already
+  // exists.
+  //
+  // For example:
+  //
+  //   flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
+  //   // Creates no std::string copies and makes no heap allocations.
+  //   m.emplace("abc", "xyz");
+  template <class... Args, typename std::enable_if<
+                               IsDecomposable<Args...>::value, int>::type = 0>
+  std::pair<iterator, bool> emplace(Args&&... args) {
+    return PolicyTraits::apply(EmplaceDecomposable{*this},
+                               std::forward<Args>(args)...);
+  }
+
+  // This overload kicks in if we cannot deduce the key from args. It
+  // constructs value_type unconditionally and then either moves it into the
+  // table or destroys it.
+  template <class... Args, typename std::enable_if<
+                               !IsDecomposable<Args...>::value, int>::type = 0>
+  std::pair<iterator, bool> emplace(Args&&... args) {
+    alignas(slot_type) unsigned char raw[sizeof(slot_type)];
+    slot_type* slot = reinterpret_cast<slot_type*>(&raw);
+
+    PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
+    const auto& elem = PolicyTraits::element(slot);
+    return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
+  }
+
+  template <class... Args>
+  iterator emplace_hint(const_iterator, Args&&... args) {
+    return emplace(std::forward<Args>(args)...).first;
+  }
+
+  // Extension API: support for lazy emplace.
+  //
+  // Looks up key in the table. If found, returns the iterator to the element.
+  // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`.
+  //
+  // `f` must abide by several restrictions:
+  //  - it MUST call `raw_hash_set::constructor` with arguments as if a
+  //    `raw_hash_set::value_type` is constructed,
+  //  - it MUST NOT access the container before the call to
+  //    `raw_hash_set::constructor`, and
+  //  - it MUST NOT erase the lazily emplaced element.
+  // Doing any of these is undefined behavior.
+  //
+  // For example:
+  //
+  //   std::unordered_set<ArenaString> s;
+  //   // Makes an ArenaString even if "abc" is already in the set.
+  //   s.insert(ArenaString(&arena, "abc"));
+  //
+  //   flat_hash_set<ArenaString> s;
+  //   // Makes an ArenaString only if "abc" is not yet in the set.
+  //   s.lazy_emplace("abc", [&](const constructor& ctor) {
+  //     ctor(&arena, "abc");
+  //   });
+  //
+  // WARNING: This API is currently experimental. If there is a way to implement
+  // the same thing with the rest of the API, prefer that.
+  class constructor {
+    friend class raw_hash_set;
+
+   public:
+    template <class... Args>
+    void operator()(Args&&... args) const {
+      assert(*slot_);
+      PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
+      *slot_ = nullptr;
+    }
+
+   private:
+    constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
+
+    allocator_type* alloc_;
+    slot_type** slot_;
+  };
+
+  template <class K = key_type, class F>
+  iterator lazy_emplace(const key_arg<K>& key, F&& f) {
+    auto res = find_or_prepare_insert(key);
+    if (res.second) {
+      slot_type* slot = slots_ + res.first;
+      std::forward<F>(f)(constructor(&alloc_ref(), &slot));
+      assert(!slot);
+    }
+    return iterator_at(res.first);
+  }
+
+  // Extension API: support for heterogeneous keys.
+  //
+  //   std::unordered_set<std::string> s;
+  //   // Turns "abc" into std::string.
+  //   s.erase("abc");
+  //
+  //   flat_hash_set<std::string> s;
+  //   // Uses "abc" directly without copying it into std::string.
+  //   s.erase("abc");
+  template <class K = key_type>
+  size_type erase(const key_arg<K>& key) {
+    auto it = find(key);
+    if (it == end()) return 0;
+    erase(it);
+    return 1;
+  }
+
+  // Erases the element pointed to by `it`.  Unlike `std::unordered_set::erase`,
+  // this method returns void to reduce algorithmic complexity to O(1).  The
+  // iterator is invalidated, so any increment should be done before calling
+  // erase.  In order to erase while iterating across a map, use the following
+  // idiom (which also works for standard containers):
+  //
+  // for (auto it = m.begin(), end = m.end(); it != end;) {
+  //   // `erase()` will invalidate `it`, so advance `it` first.
+  //   auto copy_it = it++;
+  //   if (<pred>) {
+  //     m.erase(copy_it);
+  //   }
+  // }
+  void erase(const_iterator cit) { erase(cit.inner_); }
+
+  // This overload is necessary because otherwise erase<K>(const K&) would be
+  // a better match if non-const iterator is passed as an argument.
+  void erase(iterator it) {
+    AssertIsFull(it.ctrl_);
+    PolicyTraits::destroy(&alloc_ref(), it.slot_);
+    erase_meta_only(it);
+  }
+
+  iterator erase(const_iterator first, const_iterator last) {
+    while (first != last) {
+      erase(first++);
+    }
+    return last.inner_;
+  }
+
+  // Moves elements from `src` into `this`.
+  // If the element already exists in `this`, it is left unmodified in `src`.
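+  //
+  // For example (a sketch):
+  //
+  //   flat_hash_set<std::string> a = {"x", "y"};
+  //   flat_hash_set<std::string> b = {"y", "z"};
+  //   a.merge(b);
+  //   // a == {"x", "y", "z"}; b == {"y"} since "y" already existed in `a`.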
+  template <typename H, typename E>
+  void merge(raw_hash_set<Policy, H, E, Alloc>& src) {  // NOLINT
+    assert(this != &src);
+    for (auto it = src.begin(), e = src.end(); it != e;) {
+      auto next = std::next(it);
+      if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
+                              PolicyTraits::element(it.slot_))
+              .second) {
+        src.erase_meta_only(it);
+      }
+      it = next;
+    }
+  }
+
+  template <typename H, typename E>
+  void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
+    merge(src);
+  }
+
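+  // Extracts the element at `position` into a node handle, removing it from
+  // the table without destroying it. A sketch:
+  //
+  //   flat_hash_set<std::string> s = {"abc"};
+  //   auto node = s.extract(s.begin());
+  //   assert(s.empty() && node.value() == "abc");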
+  node_type extract(const_iterator position) {
+    AssertIsFull(position.inner_.ctrl_);
+    auto node =
+        CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
+    erase_meta_only(position);
+    return node;
+  }
+
+  template <
+      class K = key_type,
+      typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
+  node_type extract(const key_arg<K>& key) {
+    auto it = find(key);
+    return it == end() ? node_type() : extract(const_iterator{it});
+  }
+
+  void swap(raw_hash_set& that) noexcept(
+      IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
+      IsNoThrowSwappable<allocator_type>(
+          typename AllocTraits::propagate_on_container_swap{})) {
+    using std::swap;
+    swap(ctrl_, that.ctrl_);
+    swap(slots_, that.slots_);
+    swap(size_, that.size_);
+    swap(capacity_, that.capacity_);
+    swap(growth_left(), that.growth_left());
+    swap(hash_ref(), that.hash_ref());
+    swap(eq_ref(), that.eq_ref());
+    swap(infoz_, that.infoz_);
+    SwapAlloc(alloc_ref(), that.alloc_ref(),
+              typename AllocTraits::propagate_on_container_swap{});
+  }
+
+  void rehash(size_t n) {
+    if (n == 0 && capacity_ == 0) return;
+    if (n == 0 && size_ == 0) {
+      destroy_slots();
+      infoz_.RecordStorageChanged(0, 0);
+      return;
+    }
+    // bitor is a faster way of doing `max` here. We will round up to the next
+    // power-of-2-minus-1, so bitor is good enough.
+    auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
+    // n == 0 unconditionally rehashes as per the standard.
+    if (n == 0 || m > capacity_) {
+      resize(m);
+    }
+  }
+
+  void reserve(size_t n) {
+    size_t m = GrowthToLowerboundCapacity(n);
+    if (m > capacity_) {
+      resize(NormalizeCapacity(m));
+    }
+  }
+
+  // Extension API: support for heterogeneous keys.
+  //
+  //   std::unordered_set<std::string> s;
+  //   // Turns "abc" into std::string.
+  //   s.count("abc");
+  //
+  //   ch_set<std::string> s;
+  //   // Uses "abc" directly without copying it into std::string.
+  //   s.count("abc");
+  template <class K = key_type>
+  size_t count(const key_arg<K>& key) const {
+    return find(key) == end() ? 0 : 1;
+  }
+
+  // Issues CPU prefetch instructions for the memory needed to find or insert
+  // a key.  Like all lookup functions, this supports heterogeneous keys.
+  //
+  // NOTE: This is a very low level operation and should not be used without
+  // specific benchmarks indicating its importance.
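+  //
+  // A sketch of hiding lookup latency behind unrelated work (DoOtherWork is a
+  // stand-in for computation that does not touch the table):
+  //
+  //   s.prefetch(key);
+  //   DoOtherWork();
+  //   auto it = s.find(key);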
+  template <class K = key_type>
+  void prefetch(const key_arg<K>& key) const {
+    (void)key;
+#if defined(__GNUC__)
+    auto seq = probe(ctrl_, hash_ref()(key), capacity_);
+    __builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
+    __builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
+#endif  // __GNUC__
+  }
+
+  // The API of find() has two extensions.
+  //
+  // 1. The hash can be passed by the user. It must be equal to the hash of the
+  // key.
+  //
+  // 2. The type of the key argument doesn't have to be key_type. This is the
+  // so-called heterogeneous key support.
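+  //
+  // For example, to avoid recomputing a hash that is already known (a
+  // sketch):
+  //
+  //   const size_t hash = s.hash_function()(key);
+  //   auto it = s.find(key, hash);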
+  template <class K = key_type>
+  iterator find(const key_arg<K>& key, size_t hash) {
+    auto seq = probe(ctrl_, hash, capacity_);
+    while (true) {
+      Group g{ctrl_ + seq.offset()};
+      for (int i : g.Match(H2(hash))) {
+        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
+                EqualElement<K>{key, eq_ref()},
+                PolicyTraits::element(slots_ + seq.offset(i)))))
+          return iterator_at(seq.offset(i));
+      }
+      if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
+      seq.next();
+      assert(seq.index() < capacity_ && "full table!");
+    }
+  }
+  template <class K = key_type>
+  iterator find(const key_arg<K>& key) {
+    return find(key, hash_ref()(key));
+  }
+
+  template <class K = key_type>
+  const_iterator find(const key_arg<K>& key, size_t hash) const {
+    return const_cast<raw_hash_set*>(this)->find(key, hash);
+  }
+  template <class K = key_type>
+  const_iterator find(const key_arg<K>& key) const {
+    return find(key, hash_ref()(key));
+  }
+
+  template <class K = key_type>
+  bool contains(const key_arg<K>& key) const {
+    return find(key) != end();
+  }
+
+  template <class K = key_type>
+  std::pair<iterator, iterator> equal_range(const key_arg<K>& key) {
+    auto it = find(key);
+    if (it != end()) return {it, std::next(it)};
+    return {it, it};
+  }
+  template <class K = key_type>
+  std::pair<const_iterator, const_iterator> equal_range(
+      const key_arg<K>& key) const {
+    auto it = find(key);
+    if (it != end()) return {it, std::next(it)};
+    return {it, it};
+  }
+
+  size_t bucket_count() const { return capacity_; }
+  float load_factor() const {
+    return capacity_ ? static_cast<float>(size()) / capacity_ : 0.0f;
+  }
+  float max_load_factor() const { return 1.0f; }
+  void max_load_factor(float) {
+    // Does nothing.
+  }
+
+  hasher hash_function() const { return hash_ref(); }
+  key_equal key_eq() const { return eq_ref(); }
+  allocator_type get_allocator() const { return alloc_ref(); }
+
+  friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
+    if (a.size() != b.size()) return false;
+    const raw_hash_set* outer = &a;
+    const raw_hash_set* inner = &b;
+    if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
+    for (const value_type& elem : *outer)
+      if (!inner->has_element(elem)) return false;
+    return true;
+  }
+
+  friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
+    return !(a == b);
+  }
+
+  friend void swap(raw_hash_set& a,
+                   raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
+    a.swap(b);
+  }
+
+ private:
+  template <class Container, typename Enabler>
+  friend struct absl::container_internal::hashtable_debug_internal::
+      HashtableDebugAccess;
+
+  struct FindElement {
+    template <class K, class... Args>
+    const_iterator operator()(const K& key, Args&&...) const {
+      return s.find(key);
+    }
+    const raw_hash_set& s;
+  };
+
+  struct HashElement {
+    template <class K, class... Args>
+    size_t operator()(const K& key, Args&&...) const {
+      return h(key);
+    }
+    const hasher& h;
+  };
+
+  template <class K1>
+  struct EqualElement {
+    template <class K2, class... Args>
+    bool operator()(const K2& lhs, Args&&...) const {
+      return eq(lhs, rhs);
+    }
+    const K1& rhs;
+    const key_equal& eq;
+  };
+
+  struct EmplaceDecomposable {
+    template <class K, class... Args>
+    std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
+      auto res = s.find_or_prepare_insert(key);
+      if (res.second) {
+        s.emplace_at(res.first, std::forward<Args>(args)...);
+      }
+      return {s.iterator_at(res.first), res.second};
+    }
+    raw_hash_set& s;
+  };
+
+  template <bool do_destroy>
+  struct InsertSlot {
+    template <class K, class... Args>
+    std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
+      auto res = s.find_or_prepare_insert(key);
+      if (res.second) {
+        PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot);
+      } else if (do_destroy) {
+        PolicyTraits::destroy(&s.alloc_ref(), &slot);
+      }
+      return {s.iterator_at(res.first), res.second};
+    }
+    raw_hash_set& s;
+    // Constructed slot. Either moved into place or destroyed.
+    slot_type&& slot;
+  };
+
+  // "erases" the object from the container, except that it doesn't actually
+  // destroy the object. It only updates all the metadata of the class.
+  // This can be used in conjunction with Policy::transfer to move the object to
+  // another place.
+  void erase_meta_only(const_iterator it) {
+    assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator");
+    --size_;
+    const size_t index = it.inner_.ctrl_ - ctrl_;
+    const size_t index_before = (index - Group::kWidth) & capacity_;
+    const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty();
+    const auto empty_before = Group(ctrl_ + index_before).MatchEmpty();
+
+    // We count how many consecutive non-empty slots we have to the right and
+    // to the left of `it`. If the sum is >= kWidth then there is at least one
+    // probe window that might have seen a full group.
+    bool was_never_full =
+        empty_before && empty_after &&
+        static_cast<size_t>(empty_after.TrailingZeros() +
+                            empty_before.LeadingZeros()) < Group::kWidth;
+
+    set_ctrl(index, was_never_full ? kEmpty : kDeleted);
+    growth_left() += was_never_full;
+    infoz_.RecordErase();
+  }
+
+  void initialize_slots() {
+    assert(capacity_);
+    // Folks with custom allocators often make unwarranted assumptions about
+    // the behavior of their classes vis-a-vis trivial destructibility and what
+    // calls they will or won't make.  Avoid sampling for people with custom
+    // allocators to get us out of this mess.  This is not a hard guarantee but
+    // a workaround while we plan the exact guarantee we want to provide.
+    //
+    // People are often sloppy with the exact type of their allocator (sometimes
+    // it has an extra const or is missing the pair, but rebinds made it work
+    // anyway).  To avoid the ambiguity, we work off SlotAlloc which we have
+    // bound more carefully.
+    if (std::is_same<SlotAlloc, std::allocator<slot_type>>::value &&
+        slots_ == nullptr) {
+      infoz_ = Sample();
+    }
+
+    auto layout = MakeLayout(capacity_);
+    char* mem = static_cast<char*>(
+        Allocate<Layout::Alignment()>(&alloc_ref(), layout.AllocSize()));
+    ctrl_ = reinterpret_cast<ctrl_t*>(layout.template Pointer<0>(mem));
+    slots_ = layout.template Pointer<1>(mem);
+    reset_ctrl();
+    reset_growth_left();
+    infoz_.RecordStorageChanged(size_, capacity_);
+  }
+
+  void destroy_slots() {
+    if (!capacity_) return;
+    for (size_t i = 0; i != capacity_; ++i) {
+      if (IsFull(ctrl_[i])) {
+        PolicyTraits::destroy(&alloc_ref(), slots_ + i);
+      }
+    }
+    auto layout = MakeLayout(capacity_);
+    // Unpoison before returning the memory to the allocator.
+    SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
+    Deallocate<Layout::Alignment()>(&alloc_ref(), ctrl_, layout.AllocSize());
+    ctrl_ = EmptyGroup();
+    slots_ = nullptr;
+    size_ = 0;
+    capacity_ = 0;
+    growth_left() = 0;
+  }
+
+  void resize(size_t new_capacity) {
+    assert(IsValidCapacity(new_capacity));
+    auto* old_ctrl = ctrl_;
+    auto* old_slots = slots_;
+    const size_t old_capacity = capacity_;
+    capacity_ = new_capacity;
+    initialize_slots();
+
+    size_t total_probe_length = 0;
+    for (size_t i = 0; i != old_capacity; ++i) {
+      if (IsFull(old_ctrl[i])) {
+        size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
+                                          PolicyTraits::element(old_slots + i));
+        auto target = find_first_non_full(ctrl_, hash, capacity_);
+        size_t new_i = target.offset;
+        total_probe_length += target.probe_length;
+        set_ctrl(new_i, H2(hash));
+        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
+      }
+    }
+    if (old_capacity) {
+      SanitizerUnpoisonMemoryRegion(old_slots,
+                                    sizeof(slot_type) * old_capacity);
+      auto layout = MakeLayout(old_capacity);
+      Deallocate<Layout::Alignment()>(&alloc_ref(), old_ctrl,
+                                      layout.AllocSize());
+    }
+    infoz_.RecordRehash(total_probe_length);
+  }
+
+  void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
+    assert(IsValidCapacity(capacity_));
+    assert(!is_small(capacity_));
+    // Algorithm:
+    // - mark all DELETED slots as EMPTY
+    // - mark all FULL slots as DELETED
+    // - for each slot marked as DELETED
+    //     hash = Hash(element)
+    //     target = find_first_non_full(hash)
+    //     if target is in the same group
+    //       mark slot as FULL
+    //     else if target is EMPTY
+    //       transfer element to target
+    //       mark slot as EMPTY
+    //       mark target as FULL
+    //     else if target is DELETED
+    //       swap current element with target element
+    //       mark target as FULL
+    //       repeat procedure for current slot with moved from element (target)
+    ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
+    alignas(slot_type) unsigned char raw[sizeof(slot_type)];
+    size_t total_probe_length = 0;
+    slot_type* slot = reinterpret_cast<slot_type*>(&raw);
+    for (size_t i = 0; i != capacity_; ++i) {
+      if (!IsDeleted(ctrl_[i])) continue;
+      size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
+                                        PolicyTraits::element(slots_ + i));
+      auto target = find_first_non_full(ctrl_, hash, capacity_);
+      size_t new_i = target.offset;
+      total_probe_length += target.probe_length;
+
+      // Check whether the old and new positions fall within the same group
+      // with respect to the hash. If they do, we don't need to move the
+      // object, since it already sits in the best probe group it can reach.
+      const auto probe_index = [&](size_t pos) {
+        return ((pos - probe(ctrl_, hash, capacity_).offset()) & capacity_) /
+               Group::kWidth;
+      };
+
+      // Element doesn't move.
+      if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
+        set_ctrl(i, H2(hash));
+        continue;
+      }
+      if (IsEmpty(ctrl_[new_i])) {
+        // Transfer element to the empty spot.
+        // set_ctrl poisons/unpoisons the slots so we have to call it at the
+        // right time.
+        set_ctrl(new_i, H2(hash));
+        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i);
+        set_ctrl(i, kEmpty);
+      } else {
+        assert(IsDeleted(ctrl_[new_i]));
+        set_ctrl(new_i, H2(hash));
+        // Until we are done rehashing, DELETED marks previously FULL slots.
+        // Swap i and new_i elements.
+        PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i);
+        PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i);
+        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot);
+        --i;  // repeat
+      }
+    }
+    reset_growth_left();
+    infoz_.RecordRehash(total_probe_length);
+  }
+
+  void rehash_and_grow_if_necessary() {
+    if (capacity_ == 0) {
+      resize(1);
+    } else if (size() <= CapacityToGrowth(capacity()) / 2) {
+      // Squash DELETED without growing if there is enough capacity.
+      drop_deletes_without_resize();
+    } else {
+      // Otherwise grow the container.
+      resize(capacity_ * 2 + 1);
+    }
+  }
+
+  bool has_element(const value_type& elem) const {
+    size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
+    auto seq = probe(ctrl_, hash, capacity_);
+    while (true) {
+      Group g{ctrl_ + seq.offset()};
+      for (int i : g.Match(H2(hash))) {
+        if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) ==
+                              elem))
+          return true;
+      }
+      if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
+      seq.next();
+      assert(seq.index() < capacity_ && "full table!");
+    }
+    return false;
+  }
+
+  // TODO(alkis): Optimize this assuming *this and that don't overlap.
+  raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
+    raw_hash_set tmp(std::move(that));
+    swap(tmp);
+    return *this;
+  }
+  raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
+    raw_hash_set tmp(std::move(that), alloc_ref());
+    swap(tmp);
+    return *this;
+  }
+
+ protected:
+  template <class K>
+  std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
+    auto hash = hash_ref()(key);
+    auto seq = probe(ctrl_, hash, capacity_);
+    while (true) {
+      Group g{ctrl_ + seq.offset()};
+      for (int i : g.Match(H2(hash))) {
+        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
+                EqualElement<K>{key, eq_ref()},
+                PolicyTraits::element(slots_ + seq.offset(i)))))
+          return {seq.offset(i), false};
+      }
+      if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
+      seq.next();
+      assert(seq.index() < capacity_ && "full table!");
+    }
+    return {prepare_insert(hash), true};
+  }
+
+  size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
+    auto target = find_first_non_full(ctrl_, hash, capacity_);
+    if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
+                           !IsDeleted(ctrl_[target.offset]))) {
+      rehash_and_grow_if_necessary();
+      target = find_first_non_full(ctrl_, hash, capacity_);
+    }
+    ++size_;
+    growth_left() -= IsEmpty(ctrl_[target.offset]);
+    set_ctrl(target.offset, H2(hash));
+    infoz_.RecordInsert(hash, target.probe_length);
+    return target.offset;
+  }
+
+  // Constructs the value in the space pointed by the iterator. This only works
+  // after an unsuccessful find_or_prepare_insert() and before any other
+  // modifications happen in the raw_hash_set.
+  //
+  // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
+  // k is the key decomposed from `forward<Args>(args)...`, and the bool
+  // returned by find_or_prepare_insert(k) was true.
+  // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
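+  //
+  // A sketch of the intended call sequence, mirroring EmplaceDecomposable
+  // above:
+  //
+  //   auto res = find_or_prepare_insert(key);
+  //   if (res.second) emplace_at(res.first, std::forward<Args>(args)...);
+  //   return {iterator_at(res.first), res.second};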
+  template <class... Args>
+  void emplace_at(size_t i, Args&&... args) {
+    PolicyTraits::construct(&alloc_ref(), slots_ + i,
+                            std::forward<Args>(args)...);
+
+    assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
+               iterator_at(i) &&
+           "constructed value does not match the lookup key");
+  }
+
+  iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; }
+  const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; }
+
+ private:
+  friend struct RawHashSetTestOnlyAccess;
+
+  // Resets all ctrl bytes back to kEmpty, except the sentinel.
+  void reset_ctrl() {
+    std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth);
+    ctrl_[capacity_] = kSentinel;
+    SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
+  }
+
+  void reset_growth_left() {
+    growth_left() = CapacityToGrowth(capacity()) - size_;
+  }
+
+  // Sets the control byte, and if `i < Group::kWidth`, sets the cloned byte
+  // at the end too.
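+  //
+  // For example, with capacity_ == 15 and Group::kWidth == 16 (a sketch of
+  // the index math): set_ctrl(0, h) writes ctrl_[0] and its clone ctrl_[16],
+  // so a group load that wraps around the end of the array still sees h.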
+  void set_ctrl(size_t i, ctrl_t h) {
+    assert(i < capacity_);
+
+    if (IsFull(h)) {
+      SanitizerUnpoisonObject(slots_ + i);
+    } else {
+      SanitizerPoisonObject(slots_ + i);
+    }
+
+    ctrl_[i] = h;
+    ctrl_[((i - Group::kWidth) & capacity_) + 1 +
+          ((Group::kWidth - 1) & capacity_)] = h;
+  }
+
+  size_t& growth_left() { return settings_.template get<0>(); }
+
+  hasher& hash_ref() { return settings_.template get<1>(); }
+  const hasher& hash_ref() const { return settings_.template get<1>(); }
+  key_equal& eq_ref() { return settings_.template get<2>(); }
+  const key_equal& eq_ref() const { return settings_.template get<2>(); }
+  allocator_type& alloc_ref() { return settings_.template get<3>(); }
+  const allocator_type& alloc_ref() const {
+    return settings_.template get<3>();
+  }
+
+  // TODO(alkis): Investigate removing some of these fields:
+  // - ctrl/slots can be derived from each other
+  // - size can be moved into the slot array
+  ctrl_t* ctrl_ = EmptyGroup();    // [(capacity + 1) * ctrl_t]
+  slot_type* slots_ = nullptr;     // [capacity * slot_type]
+  size_t size_ = 0;                // number of full slots
+  size_t capacity_ = 0;            // total number of slots
+  HashtablezInfoHandle infoz_;
+  absl::container_internal::CompressedTuple<size_t /* growth_left */, hasher,
+                                            key_equal, allocator_type>
+      settings_{0, hasher{}, key_equal{}, allocator_type{}};
+};
+
+// Erases all elements that satisfy the predicate `pred` from the container `c`.
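+//
+// For example, to drop all odd values from a hypothetical table `t` of ints
+// (a sketch):
+//
+//   EraseIf([](int v) { return v % 2 != 0; }, &t);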
+template <typename P, typename H, typename E, typename A, typename Predicate>
+void EraseIf(Predicate pred, raw_hash_set<P, H, E, A>* c) {
+  for (auto it = c->begin(), last = c->end(); it != last;) {
+    auto copy_it = it++;
+    if (pred(*copy_it)) {
+      c->erase(copy_it);
+    }
+  }
+}
+
+namespace hashtable_debug_internal {
+template <typename Set>
+struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
+  using Traits = typename Set::PolicyTraits;
+  using Slot = typename Traits::slot_type;
+
+  static size_t GetNumProbes(const Set& set,
+                             const typename Set::key_type& key) {
+    size_t num_probes = 0;
+    size_t hash = set.hash_ref()(key);
+    auto seq = probe(set.ctrl_, hash, set.capacity_);
+    while (true) {
+      container_internal::Group g{set.ctrl_ + seq.offset()};
+      for (int i : g.Match(container_internal::H2(hash))) {
+        if (Traits::apply(
+                typename Set::template EqualElement<typename Set::key_type>{
+                    key, set.eq_ref()},
+                Traits::element(set.slots_ + seq.offset(i))))
+          return num_probes;
+        ++num_probes;
+      }
+      if (g.MatchEmpty()) return num_probes;
+      seq.next();
+      ++num_probes;
+    }
+  }
+
+  static size_t AllocatedByteSize(const Set& c) {
+    size_t capacity = c.capacity_;
+    if (capacity == 0) return 0;
+    auto layout = Set::MakeLayout(capacity);
+    size_t m = layout.AllocSize();
+
+    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
+    if (per_slot != ~size_t{}) {
+      m += per_slot * c.size();
+    } else {
+      for (size_t i = 0; i != capacity; ++i) {
+        if (container_internal::IsFull(c.ctrl_[i])) {
+          m += Traits::space_used(c.slots_ + i);
+        }
+      }
+    }
+    return m;
+  }
+
+  static size_t LowerBoundAllocatedByteSize(size_t size) {
+    size_t capacity = GrowthToLowerboundCapacity(size);
+    if (capacity == 0) return 0;
+    auto layout = Set::MakeLayout(NormalizeCapacity(capacity));
+    size_t m = layout.AllocSize();
+    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
+    if (per_slot != ~size_t{}) {
+      m += per_slot * size;
+    }
+    return m;
+  }
+};
+
+}  // namespace hashtable_debug_internal
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/raw_hash_set_allocator_test.cc b/third_party/abseil_cpp/absl/container/internal/raw_hash_set_allocator_test.cc
new file mode 100644
index 000000000000..e73f53fd637b
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/raw_hash_set_allocator_test.cc
@@ -0,0 +1,505 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <limits>
+#include <scoped_allocator>
+
+#include "gtest/gtest.h"
+#include "absl/container/internal/raw_hash_set.h"
+#include "absl/container/internal/tracked.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+enum AllocSpec {
+  kPropagateOnCopy = 1,
+  kPropagateOnMove = 2,
+  kPropagateOnSwap = 4,
+};
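+
+// For example, CheckedAlloc<T, kPropagateOnCopy | kPropagateOnSwap> propagates
+// its state on copy assignment and on swap, but not on move assignment.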
+
+struct AllocState {
+  size_t num_allocs = 0;
+  std::set<void*> owned;
+};
+
+template <class T,
+          int Spec = kPropagateOnCopy | kPropagateOnMove | kPropagateOnSwap>
+class CheckedAlloc {
+ public:
+  template <class, int>
+  friend class CheckedAlloc;
+
+  using value_type = T;
+
+  CheckedAlloc() {}
+  explicit CheckedAlloc(size_t id) : id_(id) {}
+  CheckedAlloc(const CheckedAlloc&) = default;
+  CheckedAlloc& operator=(const CheckedAlloc&) = default;
+
+  template <class U>
+  CheckedAlloc(const CheckedAlloc<U, Spec>& that)
+      : id_(that.id_), state_(that.state_) {}
+
+  template <class U>
+  struct rebind {
+    using other = CheckedAlloc<U, Spec>;
+  };
+
+  using propagate_on_container_copy_assignment =
+      std::integral_constant<bool, (Spec & kPropagateOnCopy) != 0>;
+
+  using propagate_on_container_move_assignment =
+      std::integral_constant<bool, (Spec & kPropagateOnMove) != 0>;
+
+  using propagate_on_container_swap =
+      std::integral_constant<bool, (Spec & kPropagateOnSwap) != 0>;
+
+  CheckedAlloc select_on_container_copy_construction() const {
+    if (Spec & kPropagateOnCopy) return *this;
+    return {};
+  }
+
+  T* allocate(size_t n) {
+    T* ptr = std::allocator<T>().allocate(n);
+    track_alloc(ptr);
+    return ptr;
+  }
+  void deallocate(T* ptr, size_t n) {
+    std::memset(ptr, 0, n * sizeof(T));  // The freed memory must be unpoisoned.
+    track_dealloc(ptr);
+    return std::allocator<T>().deallocate(ptr, n);
+  }
+
+  friend bool operator==(const CheckedAlloc& a, const CheckedAlloc& b) {
+    return a.id_ == b.id_;
+  }
+  friend bool operator!=(const CheckedAlloc& a, const CheckedAlloc& b) {
+    return !(a == b);
+  }
+
+  size_t num_allocs() const { return state_->num_allocs; }
+
+  void swap(CheckedAlloc& that) {
+    using std::swap;
+    swap(id_, that.id_);
+    swap(state_, that.state_);
+  }
+
+  friend void swap(CheckedAlloc& a, CheckedAlloc& b) { a.swap(b); }
+
+  friend std::ostream& operator<<(std::ostream& o, const CheckedAlloc& a) {
+    return o << "alloc(" << a.id_ << ")";
+  }
+
+ private:
+  void track_alloc(void* ptr) {
+    AllocState* state = state_.get();
+    ++state->num_allocs;
+    if (!state->owned.insert(ptr).second)
+      ADD_FAILURE() << *this << " got previously allocated memory: " << ptr;
+  }
+  void track_dealloc(void* ptr) {
+    if (state_->owned.erase(ptr) != 1)
+      ADD_FAILURE() << *this
+                    << " deleting memory owned by another allocator: " << ptr;
+  }
+
+  size_t id_ = std::numeric_limits<size_t>::max();
+
+  std::shared_ptr<AllocState> state_ = std::make_shared<AllocState>();
+};
+
+struct Identity {
+  int32_t operator()(int32_t v) const { return v; }
+};
+
+struct Policy {
+  using slot_type = Tracked<int32_t>;
+  using init_type = Tracked<int32_t>;
+  using key_type = int32_t;
+
+  template <class allocator_type, class... Args>
+  static void construct(allocator_type* alloc, slot_type* slot,
+                        Args&&... args) {
+    std::allocator_traits<allocator_type>::construct(
+        *alloc, slot, std::forward<Args>(args)...);
+  }
+
+  template <class allocator_type>
+  static void destroy(allocator_type* alloc, slot_type* slot) {
+    std::allocator_traits<allocator_type>::destroy(*alloc, slot);
+  }
+
+  template <class allocator_type>
+  static void transfer(allocator_type* alloc, slot_type* new_slot,
+                       slot_type* old_slot) {
+    construct(alloc, new_slot, std::move(*old_slot));
+    destroy(alloc, old_slot);
+  }
+
+  template <class F>
+  static auto apply(F&& f, int32_t v) -> decltype(std::forward<F>(f)(v, v)) {
+    return std::forward<F>(f)(v, v);
+  }
+
+  template <class F>
+  static auto apply(F&& f, const slot_type& v)
+      -> decltype(std::forward<F>(f)(v.val(), v)) {
+    return std::forward<F>(f)(v.val(), v);
+  }
+
+  template <class F>
+  static auto apply(F&& f, slot_type&& v)
+      -> decltype(std::forward<F>(f)(v.val(), std::move(v))) {
+    return std::forward<F>(f)(v.val(), std::move(v));
+  }
+
+  static slot_type& element(slot_type* slot) { return *slot; }
+};
+
+template <int Spec>
+struct PropagateTest : public ::testing::Test {
+  using Alloc = CheckedAlloc<Tracked<int32_t>, Spec>;
+
+  using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, Alloc>;
+
+  PropagateTest() {
+    EXPECT_EQ(a1, t1.get_allocator());
+    EXPECT_NE(a2, t1.get_allocator());
+  }
+
+  Alloc a1 = Alloc(1);
+  Table t1 = Table(0, a1);
+  Alloc a2 = Alloc(2);
+};
+
+using PropagateOnAll =
+    PropagateTest<kPropagateOnCopy | kPropagateOnMove | kPropagateOnSwap>;
+using NoPropagateOnCopy = PropagateTest<kPropagateOnMove | kPropagateOnSwap>;
+using NoPropagateOnMove = PropagateTest<kPropagateOnCopy | kPropagateOnSwap>;
+
+TEST_F(PropagateOnAll, Empty) { EXPECT_EQ(0, a1.num_allocs()); }
+
+TEST_F(PropagateOnAll, InsertAllocates) {
+  auto it = t1.insert(0).first;
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, InsertDecomposes) {
+  auto it = t1.insert(0).first;
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+
+  EXPECT_FALSE(t1.insert(0).second);
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, RehashMoves) {
+  auto it = t1.insert(0).first;
+  EXPECT_EQ(0, it->num_moves());
+  t1.rehash(2 * t1.capacity());
+  EXPECT_EQ(2, a1.num_allocs());
+  it = t1.find(0);
+  EXPECT_EQ(1, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyConstructor) {
+  auto it = t1.insert(0).first;
+  Table u(t1);
+  EXPECT_EQ(2, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyConstructor) {
+  auto it = t1.insert(0).first;
+  Table u(t1);
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(1, u.get_allocator().num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyConstructorWithSameAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(t1, a1);
+  EXPECT_EQ(2, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyConstructorWithSameAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(t1, a1);
+  EXPECT_EQ(2, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyConstructorWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(t1, a2);
+  EXPECT_EQ(a2, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(1, a2.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyConstructorWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(t1, a2);
+  EXPECT_EQ(a2, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(1, a2.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveConstructor) {
+  auto it = t1.insert(0).first;
+  Table u(std::move(t1));
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveConstructor) {
+  auto it = t1.insert(0).first;
+  Table u(std::move(t1));
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveConstructorWithSameAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(std::move(t1), a1);
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveConstructorWithSameAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(std::move(t1), a1);
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveConstructorWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(std::move(t1), a2);
+  it = u.find(0);
+  EXPECT_EQ(a2, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(1, a2.num_allocs());
+  EXPECT_EQ(1, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveConstructorWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(std::move(t1), a2);
+  it = u.find(0);
+  EXPECT_EQ(a2, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(1, a2.num_allocs());
+  EXPECT_EQ(1, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyAssignmentWithSameAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(0, a1);
+  u = t1;
+  EXPECT_EQ(2, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyAssignmentWithSameAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(0, a1);
+  u = t1;
+  EXPECT_EQ(2, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyAssignmentWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(0, a2);
+  u = t1;
+  EXPECT_EQ(a1, u.get_allocator());
+  EXPECT_EQ(2, a1.num_allocs());
+  EXPECT_EQ(0, a2.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyAssignmentWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(0, a2);
+  u = t1;
+  EXPECT_EQ(a2, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(1, a2.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveAssignmentWithSameAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(0, a1);
+  u = std::move(t1);
+  EXPECT_EQ(a1, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveAssignmentWithSameAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(0, a1);
+  u = std::move(t1);
+  EXPECT_EQ(a1, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveAssignmentWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(0, a2);
+  u = std::move(t1);
+  EXPECT_EQ(a1, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, a2.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveAssignmentWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(0, a2);
+  u = std::move(t1);
+  it = u.find(0);
+  EXPECT_EQ(a2, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(1, a2.num_allocs());
+  EXPECT_EQ(1, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, Swap) {
+  auto it = t1.insert(0).first;
+  Table u(0, a2);
+  u.swap(t1);
+  EXPECT_EQ(a1, u.get_allocator());
+  EXPECT_EQ(a2, t1.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, a2.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+// This allocator is similar to std::pmr::polymorphic_allocator.
+// Note the disabled assignment.
+template <class T>
+class PAlloc {
+  template <class>
+  friend class PAlloc;
+
+ public:
+  // types
+  using value_type = T;
+
+  // traits
+  using propagate_on_container_swap = std::false_type;
+
+  PAlloc() noexcept = default;
+  explicit PAlloc(size_t id) noexcept : id_(id) {}
+  PAlloc(const PAlloc&) noexcept = default;
+  PAlloc& operator=(const PAlloc&) noexcept = delete;
+
+  template <class U>
+  PAlloc(const PAlloc<U>& that) noexcept : id_(that.id_) {}  // NOLINT
+
+  template <class U>
+  struct rebind {
+    using other = PAlloc<U>;
+  };
+
+  constexpr PAlloc select_on_container_copy_construction() const { return {}; }
+
+  // public member functions
+  T* allocate(size_t) { return new T; }
+  void deallocate(T* p, size_t) { delete p; }
+
+  friend bool operator==(const PAlloc& a, const PAlloc& b) {
+    return a.id_ == b.id_;
+  }
+  friend bool operator!=(const PAlloc& a, const PAlloc& b) { return !(a == b); }
+
+ private:
+  size_t id_ = std::numeric_limits<size_t>::max();
+};
+
+// This doesn't compile with GCC 5.4 and 5.5 due to a bug in noexcept handling.
+#if !defined(__GNUC__) || __GNUC__ != 5 || (__GNUC_MINOR__ != 4 && \
+    __GNUC_MINOR__ != 5)
+TEST(NoPropagateOn, Swap) {
+  using PA = PAlloc<char>;
+  using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
+
+  Table t1(PA{1}), t2(PA{2});
+  swap(t1, t2);
+  EXPECT_EQ(t1.get_allocator(), PA(1));
+  EXPECT_EQ(t2.get_allocator(), PA(2));
+}
+#endif
+
+TEST(NoPropagateOn, CopyConstruct) {
+  using PA = PAlloc<char>;
+  using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
+
+  Table t1(PA{1}), t2(t1);
+  EXPECT_EQ(t1.get_allocator(), PA(1));
+  EXPECT_EQ(t2.get_allocator(), PA());
+}
+
+TEST(NoPropagateOn, Assignment) {
+  using PA = PAlloc<char>;
+  using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
+
+  Table t1(PA{1}), t2(PA{2});
+  t1 = t2;
+  EXPECT_EQ(t1.get_allocator(), PA(1));
+  EXPECT_EQ(t2.get_allocator(), PA(2));
+}
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/container/internal/raw_hash_set_test.cc b/third_party/abseil_cpp/absl/container/internal/raw_hash_set_test.cc
new file mode 100644
index 000000000000..33d2773de302
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/raw_hash_set_test.cc
@@ -0,0 +1,1893 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/raw_hash_set.h"
+
+#include <cmath>
+#include <cstdint>
+#include <deque>
+#include <functional>
+#include <memory>
+#include <numeric>
+#include <random>
+#include <string>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/cycleclock.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hash_function_defaults.h"
+#include "absl/container/internal/hash_policy_testing.h"
+#include "absl/container/internal/hashtable_debug.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+struct RawHashSetTestOnlyAccess {
+  template <typename C>
+  static auto GetSlots(const C& c) -> decltype(c.slots_) {
+    return c.slots_;
+  }
+};
+
+namespace {
+
+using ::testing::DoubleNear;
+using ::testing::ElementsAre;
+using ::testing::Ge;
+using ::testing::Lt;
+using ::testing::Optional;
+using ::testing::Pair;
+using ::testing::UnorderedElementsAre;
+
+TEST(Util, NormalizeCapacity) {
+  EXPECT_EQ(1, NormalizeCapacity(0));
+  EXPECT_EQ(1, NormalizeCapacity(1));
+  EXPECT_EQ(3, NormalizeCapacity(2));
+  EXPECT_EQ(3, NormalizeCapacity(3));
+  EXPECT_EQ(7, NormalizeCapacity(4));
+  EXPECT_EQ(7, NormalizeCapacity(7));
+  EXPECT_EQ(15, NormalizeCapacity(8));
+  EXPECT_EQ(15, NormalizeCapacity(15));
+  EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 1));
+  EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 2));
+}
+
+TEST(Util, GrowthAndCapacity) {
+  // Verify that GrowthToLowerboundCapacity gives the minimum capacity that
+  // has enough growth.
+  for (size_t growth = 0; growth < 10000; ++growth) {
+    SCOPED_TRACE(growth);
+    size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth));
+    // The capacity is large enough for `growth`
+    EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth));
+    if (growth != 0 && capacity > 1) {
+      // There is no smaller capacity that works.
+      EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth));
+    }
+  }
+
+  for (size_t capacity = Group::kWidth - 1; capacity < 10000;
+       capacity = 2 * capacity + 1) {
+    SCOPED_TRACE(capacity);
+    size_t growth = CapacityToGrowth(capacity);
+    EXPECT_THAT(growth, Lt(capacity));
+    EXPECT_LE(GrowthToLowerboundCapacity(growth), capacity);
+    EXPECT_EQ(NormalizeCapacity(GrowthToLowerboundCapacity(growth)), capacity);
+  }
+}
+
+TEST(Util, probe_seq) {
+  probe_seq<16> seq(0, 127);
+  auto gen = [&]() {
+    size_t res = seq.offset();
+    seq.next();
+    return res;
+  };
+  std::vector<size_t> offsets(8);
+  std::generate_n(offsets.begin(), 8, gen);
+  EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
+  seq = probe_seq<16>(128, 127);
+  std::generate_n(offsets.begin(), 8, gen);
+  EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
+}
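+
+// A worked check of the sequence above: probe_seq<16> visits
+// offset(i) = (hash + 16 * (0 + 1 + ... + i)) & mask. With hash == 0 and
+// mask == 127, the triangular numbers 0, 1, 3, 6, 10, 15, 21, 28 scaled by
+// 16 and reduced mod 128 give 0, 16, 48, 96, 32, 112, 80, 64 -- exactly the
+// expected offsets. A hash of 128 is congruent to 0 after masking, which is
+// why the second sequence is identical.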
+
+TEST(BitMask, Smoke) {
+  EXPECT_FALSE((BitMask<uint8_t, 8>(0)));
+  EXPECT_TRUE((BitMask<uint8_t, 8>(5)));
+
+  EXPECT_THAT((BitMask<uint8_t, 8>(0)), ElementsAre());
+  EXPECT_THAT((BitMask<uint8_t, 8>(0x1)), ElementsAre(0));
+  EXPECT_THAT((BitMask<uint8_t, 8>(0x2)), ElementsAre(1));
+  EXPECT_THAT((BitMask<uint8_t, 8>(0x3)), ElementsAre(0, 1));
+  EXPECT_THAT((BitMask<uint8_t, 8>(0x4)), ElementsAre(2));
+  EXPECT_THAT((BitMask<uint8_t, 8>(0x5)), ElementsAre(0, 2));
+  EXPECT_THAT((BitMask<uint8_t, 8>(0x55)), ElementsAre(0, 2, 4, 6));
+  EXPECT_THAT((BitMask<uint8_t, 8>(0xAA)), ElementsAre(1, 3, 5, 7));
+}
+
+TEST(BitMask, WithShift) {
+  // See the non-SSE version of Group for details on what this math is for.
+  uint64_t ctrl = 0x1716151413121110;
+  uint64_t hash = 0x12;
+  constexpr uint64_t msbs = 0x8080808080808080ULL;
+  constexpr uint64_t lsbs = 0x0101010101010101ULL;
+  auto x = ctrl ^ (lsbs * hash);
+  uint64_t mask = (x - lsbs) & ~x & msbs;
+  EXPECT_EQ(0x0000000080800000, mask);
+
+  BitMask<uint64_t, 8, 3> b(mask);
+  EXPECT_EQ(*b, 2);
+}
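+
+// A worked example of the bit trick above: lsbs * hash replicates the byte
+// 0x12 across the word, so x = ctrl ^ (lsbs * hash) has a zero byte exactly
+// where ctrl holds 0x12 (byte 2). (x - lsbs) & ~x & msbs then sets the high
+// bit of every zero byte. The borrow out of byte 2 also sets the high bit of
+// byte 3 -- a false positive caused by borrow propagation, which the table
+// tolerates because match candidates are confirmed with a full key
+// comparison. Hence the mask is 0x0000000080800000 rather than just
+// 0x0000000000800000, and dereferencing the BitMask returns the lowest set
+// index: 23 >> 3 == 2.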
+
+TEST(BitMask, LeadingTrailing) {
+  EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).LeadingZeros()), 3);
+  EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).TrailingZeros()), 6);
+
+  EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).LeadingZeros()), 15);
+  EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).TrailingZeros()), 0);
+
+  EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).LeadingZeros()), 0);
+  EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).TrailingZeros()), 15);
+
+  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).LeadingZeros()), 3);
+  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).TrailingZeros()), 1);
+
+  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).LeadingZeros()), 7);
+  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).TrailingZeros()), 0);
+
+  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).LeadingZeros()), 0);
+  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).TrailingZeros()), 7);
+}
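+
+// In the <uint64_t, 8, 3> instantiations above, each slot occupies one byte
+// of the mask and only the high bit of a byte can be set, so bit positions
+// are divided by 8 (the Shift of 3) to recover slot indices; e.g.
+// 0x0000000000000080 has its set bit at position 7, which maps to index 0.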
+
+TEST(Group, EmptyGroup) {
+  for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h));
+}
+
+TEST(Group, Match) {
+  if (Group::kWidth == 16) {
+    ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
+                      7,      5, 3,        1, 1,      1, 1,         1};
+    EXPECT_THAT(Group{group}.Match(0), ElementsAre());
+    EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15));
+    EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10));
+    EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9));
+    EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8));
+  } else if (Group::kWidth == 8) {
+    ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
+    EXPECT_THAT(Group{group}.Match(0), ElementsAre());
+    EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7));
+    EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4));
+  } else {
+    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
+  }
+}
+
+TEST(Group, MatchEmpty) {
+  if (Group::kWidth == 16) {
+    ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
+                      7,      5, 3,        1, 1,      1, 1,         1};
+    EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0, 4));
+  } else if (Group::kWidth == 8) {
+    ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
+    EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0));
+  } else {
+    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
+  }
+}
+
+TEST(Group, MatchEmptyOrDeleted) {
+  if (Group::kWidth == 16) {
+    ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
+                      7,      5, 3,        1, 1,      1, 1,         1};
+    EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 2, 4));
+  } else if (Group::kWidth == 8) {
+    ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
+    EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 3));
+  } else {
+    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
+  }
+}
+
+TEST(Batch, DropDeletes) {
+  constexpr size_t kCapacity = 63;
+  constexpr size_t kGroupWidth = container_internal::Group::kWidth;
+  std::vector<ctrl_t> ctrl(kCapacity + 1 + kGroupWidth);
+  ctrl[kCapacity] = kSentinel;
+  std::vector<ctrl_t> pattern = {kEmpty, 2, kDeleted, 2, kEmpty, 1, kDeleted};
+  for (size_t i = 0; i != kCapacity; ++i) {
+    ctrl[i] = pattern[i % pattern.size()];
+    if (i < kGroupWidth - 1)
+      ctrl[i + kCapacity + 1] = pattern[i % pattern.size()];
+  }
+  ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity);
+  ASSERT_EQ(ctrl[kCapacity], kSentinel);
+  for (size_t i = 0; i < kCapacity + 1 + kGroupWidth; ++i) {
+    ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()];
+    if (i == kCapacity) expected = kSentinel;
+    if (expected == kDeleted) expected = kEmpty;
+    if (IsFull(expected)) expected = kDeleted;
+    EXPECT_EQ(ctrl[i], expected)
+        << i << " " << int{pattern[i % pattern.size()]};
+  }
+}
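+
+// The loop above checks the postcondition of
+// ConvertDeletedToEmptyAndFullToDeleted element-wise: kDeleted slots become
+// kEmpty, full slots become kDeleted, kEmpty slots stay kEmpty, and the
+// sentinel is preserved -- the control-byte half of an in-place rehash.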
+
+TEST(Group, CountLeadingEmptyOrDeleted) {
+  const std::vector<ctrl_t> empty_examples = {kEmpty, kDeleted};
+  const std::vector<ctrl_t> full_examples = {0, 1, 2, 3, 5, 9, 127, kSentinel};
+
+  for (ctrl_t empty : empty_examples) {
+    std::vector<ctrl_t> e(Group::kWidth, empty);
+    EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted());
+    for (ctrl_t full : full_examples) {
+      for (size_t i = 0; i != Group::kWidth; ++i) {
+        std::vector<ctrl_t> f(Group::kWidth, empty);
+        f[i] = full;
+        EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted());
+      }
+      std::vector<ctrl_t> f(Group::kWidth, empty);
+      f[Group::kWidth * 2 / 3] = full;
+      f[Group::kWidth / 2] = full;
+      EXPECT_EQ(
+          Group::kWidth / 2, Group{f.data()}.CountLeadingEmptyOrDeleted());
+    }
+  }
+}
+
+struct IntPolicy {
+  using slot_type = int64_t;
+  using key_type = int64_t;
+  using init_type = int64_t;
+
+  static void construct(void*, int64_t* slot, int64_t v) { *slot = v; }
+  static void destroy(void*, int64_t*) {}
+  static void transfer(void*, int64_t* new_slot, int64_t* old_slot) {
+    *new_slot = *old_slot;
+  }
+
+  static int64_t& element(slot_type* slot) { return *slot; }
+
+  template <class F>
+  static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) {
+    return std::forward<F>(f)(x, x);
+  }
+};
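+
+// IntPolicy is about the smallest useful policy: raw_hash_set asks a policy
+// for the slot layout (slot_type), element lifetime management (construct,
+// destroy, transfer), access to the stored value (element), and a way to
+// apply a callback to the key/value decomposition of its arguments (apply).
+// For a set of int64_t all of these are trivial.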
+
+class StringPolicy {
+  template <class F, class K, class V,
+            class = typename std::enable_if<
+                std::is_convertible<const K&, absl::string_view>::value>::type>
+  decltype(std::declval<F>()(
+      std::declval<const absl::string_view&>(), std::piecewise_construct,
+      std::declval<std::tuple<K>>(),
+      std::declval<V>())) static apply_impl(F&& f,
+                                            std::pair<std::tuple<K>, V> p) {
+    const absl::string_view& key = std::get<0>(p.first);
+    return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
+                              std::move(p.second));
+  }
+
+ public:
+  struct slot_type {
+    struct ctor {};
+
+    template <class... Ts>
+    slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {}
+
+    std::pair<std::string, std::string> pair;
+  };
+
+  using key_type = std::string;
+  using init_type = std::pair<std::string, std::string>;
+
+  template <class allocator_type, class... Args>
+  static void construct(allocator_type* alloc, slot_type* slot, Args... args) {
+    std::allocator_traits<allocator_type>::construct(
+        *alloc, slot, typename slot_type::ctor(), std::forward<Args>(args)...);
+  }
+
+  template <class allocator_type>
+  static void destroy(allocator_type* alloc, slot_type* slot) {
+    std::allocator_traits<allocator_type>::destroy(*alloc, slot);
+  }
+
+  template <class allocator_type>
+  static void transfer(allocator_type* alloc, slot_type* new_slot,
+                       slot_type* old_slot) {
+    construct(alloc, new_slot, std::move(old_slot->pair));
+    destroy(alloc, old_slot);
+  }
+
+  static std::pair<std::string, std::string>& element(slot_type* slot) {
+    return slot->pair;
+  }
+
+  template <class F, class... Args>
+  static auto apply(F&& f, Args&&... args)
+      -> decltype(apply_impl(std::forward<F>(f),
+                             PairArgs(std::forward<Args>(args)...))) {
+    return apply_impl(std::forward<F>(f),
+                      PairArgs(std::forward<Args>(args)...));
+  }
+};
+
+struct StringHash : absl::Hash<absl::string_view> {
+  using is_transparent = void;
+};
+struct StringEq : std::equal_to<absl::string_view> {
+  using is_transparent = void;
+};
+
+struct StringTable
+    : raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> {
+  using Base = typename StringTable::raw_hash_set;
+  StringTable() {}
+  using Base::Base;
+};
+
+struct IntTable
+    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+                   std::equal_to<int64_t>, std::allocator<int64_t>> {
+  using Base = typename IntTable::raw_hash_set;
+  using Base::Base;
+};
+
+template <typename T>
+struct CustomAlloc : std::allocator<T> {
+  CustomAlloc() {}
+
+  template <typename U>
+  CustomAlloc(const CustomAlloc<U>& other) {}
+
+  template<class U> struct rebind {
+    using other = CustomAlloc<U>;
+  };
+};
+
+struct CustomAllocIntTable
+    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+                   std::equal_to<int64_t>, CustomAlloc<int64_t>> {
+  using Base = typename CustomAllocIntTable::raw_hash_set;
+  using Base::Base;
+};
+
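+// A deliberately terrible hash: every element hashes to 0, so all elements
+// land on the same probe sequence. Tests below use it to exercise the
+// collision and deletion paths.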
+struct BadFastHash {
+  template <class T>
+  size_t operator()(const T&) const {
+    return 0;
+  }
+};
+
+struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>,
+                               std::allocator<int>> {
+  using Base = typename BadTable::raw_hash_set;
+  BadTable() {}
+  using Base::Base;
+};
+
+TEST(Table, EmptyFunctorOptimization) {
+  static_assert(std::is_empty<std::equal_to<absl::string_view>>::value, "");
+  static_assert(std::is_empty<std::allocator<int>>::value, "");
+
+  struct MockTable {
+    void* ctrl;
+    void* slots;
+    size_t size;
+    size_t capacity;
+    size_t growth_left;
+    void* infoz;
+  };
+  struct StatelessHash {
+    size_t operator()(absl::string_view) const { return 0; }
+  };
+  struct StatefulHash : StatelessHash {
+    size_t dummy;
+  };
+
+  EXPECT_EQ(
+      sizeof(MockTable),
+      sizeof(
+          raw_hash_set<StringPolicy, StatelessHash,
+                       std::equal_to<absl::string_view>, std::allocator<int>>));
+
+  EXPECT_EQ(
+      sizeof(MockTable) + sizeof(StatefulHash),
+      sizeof(
+          raw_hash_set<StringPolicy, StatefulHash,
+                       std::equal_to<absl::string_view>, std::allocator<int>>));
+}
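+
+// The layout check above relies on the empty base optimization: the
+// stateless hash, equality, and allocator functors contribute no storage
+// (they are elided via compressed-tuple-style storage), so only the
+// stateful hash member adds to the footprint.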
+
+TEST(Table, Empty) {
+  IntTable t;
+  EXPECT_EQ(0, t.size());
+  EXPECT_TRUE(t.empty());
+}
+
+TEST(Table, LookupEmpty) {
+  IntTable t;
+  auto it = t.find(0);
+  EXPECT_TRUE(it == t.end());
+}
+
+TEST(Table, Insert1) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_THAT(*res.first, 0);
+  EXPECT_EQ(1, t.size());
+  EXPECT_THAT(*t.find(0), 0);
+}
+
+TEST(Table, Insert2) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_THAT(*res.first, 0);
+  EXPECT_EQ(1, t.size());
+  EXPECT_TRUE(t.find(1) == t.end());
+  res = t.emplace(1);
+  EXPECT_TRUE(res.second);
+  EXPECT_THAT(*res.first, 1);
+  EXPECT_EQ(2, t.size());
+  EXPECT_THAT(*t.find(0), 0);
+  EXPECT_THAT(*t.find(1), 1);
+}
+
+TEST(Table, InsertCollision) {
+  BadTable t;
+  EXPECT_TRUE(t.find(1) == t.end());
+  auto res = t.emplace(1);
+  EXPECT_TRUE(res.second);
+  EXPECT_THAT(*res.first, 1);
+  EXPECT_EQ(1, t.size());
+
+  EXPECT_TRUE(t.find(2) == t.end());
+  res = t.emplace(2);
+  EXPECT_THAT(*res.first, 2);
+  EXPECT_TRUE(res.second);
+  EXPECT_EQ(2, t.size());
+
+  EXPECT_THAT(*t.find(1), 1);
+  EXPECT_THAT(*t.find(2), 2);
+}
+
+// Test that we do not re-add an existing element when the search has to pass
+// through many groups with deleted elements.
+TEST(Table, InsertCollisionAndFindAfterDelete) {
+  BadTable t;  // all elements go to the same group.
+  // Have at least 2 groups with Group::kWidth collisions
+  // plus some extra collisions in the last group.
+  constexpr size_t kNumInserts = Group::kWidth * 2 + 5;
+  for (size_t i = 0; i < kNumInserts; ++i) {
+    auto res = t.emplace(i);
+    EXPECT_TRUE(res.second);
+    EXPECT_THAT(*res.first, i);
+    EXPECT_EQ(i + 1, t.size());
+  }
+
+  // Remove elements one by one and check
+  // that we can still find all remaining elements.
+  for (size_t i = 0; i < kNumInserts; ++i) {
+    EXPECT_EQ(1, t.erase(i)) << i;
+    for (size_t j = i + 1; j < kNumInserts; ++j) {
+      EXPECT_THAT(*t.find(j), j);
+      auto res = t.emplace(j);
+      EXPECT_FALSE(res.second) << i << " " << j;
+      EXPECT_THAT(*res.first, j);
+      EXPECT_EQ(kNumInserts - i - 1, t.size());
+    }
+  }
+  EXPECT_TRUE(t.empty());
+}
+
+TEST(Table, LazyEmplace) {
+  StringTable t;
+  bool called = false;
+  auto it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
+    called = true;
+    f("abc", "ABC");
+  });
+  EXPECT_TRUE(called);
+  EXPECT_THAT(*it, Pair("abc", "ABC"));
+  called = false;
+  it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
+    called = true;
+    f("abc", "DEF");
+  });
+  EXPECT_FALSE(called);
+  EXPECT_THAT(*it, Pair("abc", "ABC"));
+}
+
+TEST(Table, ContainsEmpty) {
+  IntTable t;
+
+  EXPECT_FALSE(t.contains(0));
+}
+
+TEST(Table, Contains1) {
+  IntTable t;
+
+  EXPECT_TRUE(t.insert(0).second);
+  EXPECT_TRUE(t.contains(0));
+  EXPECT_FALSE(t.contains(1));
+
+  EXPECT_EQ(1, t.erase(0));
+  EXPECT_FALSE(t.contains(0));
+}
+
+TEST(Table, Contains2) {
+  IntTable t;
+
+  EXPECT_TRUE(t.insert(0).second);
+  EXPECT_TRUE(t.contains(0));
+  EXPECT_FALSE(t.contains(1));
+
+  t.clear();
+  EXPECT_FALSE(t.contains(0));
+}
+
+int decompose_constructed;
+struct DecomposeType {
+  DecomposeType(int i) : i(i) {  // NOLINT
+    ++decompose_constructed;
+  }
+
+  explicit DecomposeType(const char* d) : DecomposeType(*d) {}
+
+  int i;
+};
+
+struct DecomposeHash {
+  using is_transparent = void;
+  size_t operator()(DecomposeType a) const { return a.i; }
+  size_t operator()(int a) const { return a; }
+  size_t operator()(const char* a) const { return *a; }
+};
+
+struct DecomposeEq {
+  using is_transparent = void;
+  bool operator()(DecomposeType a, DecomposeType b) const { return a.i == b.i; }
+  bool operator()(DecomposeType a, int b) const { return a.i == b; }
+  bool operator()(DecomposeType a, const char* b) const { return a.i == *b; }
+};
+
+struct DecomposePolicy {
+  using slot_type = DecomposeType;
+  using key_type = DecomposeType;
+  using init_type = DecomposeType;
+
+  template <typename T>
+  static void construct(void*, DecomposeType* slot, T&& v) {
+    *slot = DecomposeType(std::forward<T>(v));
+  }
+  static void destroy(void*, DecomposeType*) {}
+  static DecomposeType& element(slot_type* slot) { return *slot; }
+
+  template <class F, class T>
+  static auto apply(F&& f, const T& x) -> decltype(std::forward<F>(f)(x, x)) {
+    return std::forward<F>(f)(x, x);
+  }
+};
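+
+// TestDecompose verifies that heterogeneous lookup lets insert/emplace avoid
+// materializing a DecomposeType when an equivalent key is already present:
+// decompose_constructed counts the conversions that do happen, and
+// construct_three indicates whether emplacing a const char* key must first
+// convert it (because the hasher or equality lacks a const char* overload).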
+
+template <typename Hash, typename Eq>
+void TestDecompose(bool construct_three) {
+  DecomposeType elem{0};
+  const int one = 1;
+  const char* three_p = "3";
+  const auto& three = three_p;
+
+  raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>> set1;
+
+  decompose_constructed = 0;
+  int expected_constructed = 0;
+  EXPECT_EQ(expected_constructed, decompose_constructed);
+  set1.insert(elem);
+  EXPECT_EQ(expected_constructed, decompose_constructed);
+  set1.insert(1);
+  EXPECT_EQ(++expected_constructed, decompose_constructed);
+  set1.emplace("3");
+  EXPECT_EQ(++expected_constructed, decompose_constructed);
+  EXPECT_EQ(expected_constructed, decompose_constructed);
+
+  {  // insert(T&&)
+    set1.insert(1);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+  }
+
+  {  // insert(const T&)
+    set1.insert(one);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+  }
+
+  {  // insert(hint, T&&)
+    set1.insert(set1.begin(), 1);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+  }
+
+  {  // insert(hint, const T&)
+    set1.insert(set1.begin(), one);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+  }
+
+  {  // emplace(...)
+    set1.emplace(1);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace("3");
+    expected_constructed += construct_three;
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace(one);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace(three);
+    expected_constructed += construct_three;
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+  }
+
+  {  // emplace_hint(...)
+    set1.emplace_hint(set1.begin(), 1);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace_hint(set1.begin(), "3");
+    expected_constructed += construct_three;
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace_hint(set1.begin(), one);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace_hint(set1.begin(), three);
+    expected_constructed += construct_three;
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+  }
+}
+
+TEST(Table, Decompose) {
+  TestDecompose<DecomposeHash, DecomposeEq>(false);
+
+  struct TransparentHashIntOverload {
+    size_t operator()(DecomposeType a) const { return a.i; }
+    size_t operator()(int a) const { return a; }
+  };
+  struct TransparentEqIntOverload {
+    bool operator()(DecomposeType a, DecomposeType b) const {
+      return a.i == b.i;
+    }
+    bool operator()(DecomposeType a, int b) const { return a.i == b; }
+  };
+  TestDecompose<TransparentHashIntOverload, DecomposeEq>(true);
+  TestDecompose<TransparentHashIntOverload, TransparentEqIntOverload>(true);
+  TestDecompose<DecomposeHash, TransparentEqIntOverload>(true);
+}
+
+// Returns the largest m such that a table with m elements has the same number
+// of buckets as a table with n elements.
+size_t MaxDensitySize(size_t n) {
+  IntTable t;
+  t.reserve(n);
+  for (size_t i = 0; i != n; ++i) t.emplace(i);
+  const size_t c = t.bucket_count();
+  while (c == t.bucket_count()) t.emplace(n++);
+  return t.size() - 1;
+}
+
+struct Modulo1000Hash {
+  size_t operator()(int x) const { return x % 1000; }
+};
+
+struct Modulo1000HashTable
+    : public raw_hash_set<IntPolicy, Modulo1000Hash, std::equal_to<int>,
+                          std::allocator<int>> {};
+
+// Test that a rehash with no resize happens when there are many deleted slots.
+TEST(Table, RehashWithNoResize) {
+  Modulo1000HashTable t;
+  // Add keys that all collide under Modulo1000Hash so that we get at least
+  // kMinFullGroups groups with Group::kWidth collisions each, then fill the
+  // table up to MaxDensitySize.
+  const size_t kMinFullGroups = 7;
+  std::vector<int> keys;
+  for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) {
+    int k = i * 1000;
+    t.emplace(k);
+    keys.push_back(k);
+  }
+  const size_t capacity = t.capacity();
+
+  // Remove elements from all groups except the first and the last one.
+  // All elements removed from full groups will be marked as kDeleted.
+  const size_t erase_begin = Group::kWidth / 2;
+  const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth;
+  for (size_t i = erase_begin; i < erase_end; ++i) {
+    EXPECT_EQ(1, t.erase(keys[i])) << i;
+  }
+  keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end);
+
+  auto last_key = keys.back();
+  size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key);
+
+  // Make sure that we have to make a lot of probes for the last key.
+  ASSERT_GT(last_key_num_probes, kMinFullGroups);
+
+  int x = 1;
+  // Insert and erase one element at a time until the in-place rehash happens.
+  while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) {
+    t.emplace(x);
+    ASSERT_EQ(capacity, t.capacity());
+    // All elements should be there.
+    ASSERT_TRUE(t.find(x) != t.end()) << x;
+    for (const auto& k : keys) {
+      ASSERT_TRUE(t.find(k) != t.end()) << k;
+    }
+    t.erase(x);
+    ++x;
+  }
+}
+
+TEST(Table, InsertEraseStressTest) {
+  IntTable t;
+  const size_t kMinElementCount = 250;
+  std::deque<int> keys;
+  size_t i = 0;
+  for (; i < MaxDensitySize(kMinElementCount); ++i) {
+    t.emplace(i);
+    keys.push_back(i);
+  }
+  const size_t kNumIterations = 1000000;
+  for (; i < kNumIterations; ++i) {
+    ASSERT_EQ(1, t.erase(keys.front()));
+    keys.pop_front();
+    t.emplace(i);
+    keys.push_back(i);
+  }
+}
+
+TEST(Table, InsertOverloads) {
+  StringTable t;
+  // These should all trigger the insert(init_type) overload.
+  t.insert({{}, {}});
+  t.insert({"ABC", {}});
+  t.insert({"DEF", "!!!"});
+
+  EXPECT_THAT(t, UnorderedElementsAre(Pair("", ""), Pair("ABC", ""),
+                                      Pair("DEF", "!!!")));
+}
+
+TEST(Table, LargeTable) {
+  IntTable t;
+  for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40);
+  for (int64_t i = 0; i != 100000; ++i) ASSERT_EQ(i << 40, *t.find(i << 40));
+}
+
+// Timeout if copy is quadratic as it was in Rust.
+TEST(Table, EnsureNonQuadraticAsInRust) {
+  static const size_t kLargeSize = 1 << 15;
+
+  IntTable t;
+  for (size_t i = 0; i != kLargeSize; ++i) {
+    t.insert(i);
+  }
+
+  // If this is quadratic, the test will timeout.
+  IntTable t2;
+  for (const auto& entry : t) t2.insert(entry);
+}
+
+TEST(Table, ClearBug) {
+  IntTable t;
+  constexpr size_t capacity = container_internal::Group::kWidth - 1;
+  constexpr size_t max_size = capacity / 2 + 1;
+  for (size_t i = 0; i < max_size; ++i) {
+    t.insert(i);
+  }
+  ASSERT_EQ(capacity, t.capacity());
+  intptr_t original = reinterpret_cast<intptr_t>(&*t.find(2));
+  t.clear();
+  ASSERT_EQ(capacity, t.capacity());
+  for (size_t i = 0; i < max_size; ++i) {
+    t.insert(i);
+  }
+  ASSERT_EQ(capacity, t.capacity());
+  intptr_t second = reinterpret_cast<intptr_t>(&*t.find(2));
+  // We are checking that original and second are close enough to each other
+  // that they are probably still in the same group.  This is not strictly
+  // guaranteed.
+  EXPECT_LT(std::abs(original - second),
+            capacity * sizeof(IntTable::value_type));
+}
+
+TEST(Table, Erase) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_EQ(1, t.size());
+  t.erase(res.first);
+  EXPECT_EQ(0, t.size());
+  EXPECT_TRUE(t.find(0) == t.end());
+}
+
+TEST(Table, EraseMaintainsValidIterator) {
+  IntTable t;
+  const int kNumElements = 100;
+  for (int i = 0; i < kNumElements; i++) {
+    EXPECT_TRUE(t.emplace(i).second);
+  }
+  EXPECT_EQ(t.size(), kNumElements);
+
+  int num_erase_calls = 0;
+  auto it = t.begin();
+  while (it != t.end()) {
+    t.erase(it++);
+    num_erase_calls++;
+  }
+
+  EXPECT_TRUE(t.empty());
+  EXPECT_EQ(num_erase_calls, kNumElements);
+}
+
+// Collect N bad keys by the following algorithm:
+// 1. Create an empty table and reserve it to 2 * N.
+// 2. Insert N elements.
+// 3. Take the first Group::kWidth - 1 elements into the bad_keys array.
+// 4. Clear the table without resizing.
+// 5. Repeat from step 2 until N keys have been collected.
+std::vector<int64_t> CollectBadMergeKeys(size_t N) {
+  static constexpr int kGroupSize = Group::kWidth - 1;
+
+  auto topk_range = [](size_t b, size_t e,
+                       IntTable* t) -> std::vector<int64_t> {
+    for (size_t i = b; i != e; ++i) {
+      t->emplace(i);
+    }
+    std::vector<int64_t> res;
+    res.reserve(kGroupSize);
+    auto it = t->begin();
+    for (size_t i = b; i != e && i != b + kGroupSize; ++i, ++it) {
+      res.push_back(*it);
+    }
+    return res;
+  };
+
+  std::vector<int64_t> bad_keys;
+  bad_keys.reserve(N);
+  IntTable t;
+  t.reserve(N * 2);
+
+  for (size_t b = 0; bad_keys.size() < N; b += N) {
+    auto keys = topk_range(b, b + N, &t);
+    bad_keys.insert(bad_keys.end(), keys.begin(), keys.end());
+    t.erase(t.begin(), t.end());
+    EXPECT_TRUE(t.empty());
+  }
+  return bad_keys;
+}
+
+struct ProbeStats {
+  // Number of elements with specific probe length over all tested tables.
+  std::vector<size_t> all_probes_histogram;
+  // Ratios total_probe_length/size for every tested table.
+  std::vector<double> single_table_ratios;
+
+  friend ProbeStats operator+(const ProbeStats& a, const ProbeStats& b) {
+    ProbeStats res = a;
+    res.all_probes_histogram.resize(std::max(res.all_probes_histogram.size(),
+                                             b.all_probes_histogram.size()));
+    std::transform(b.all_probes_histogram.begin(), b.all_probes_histogram.end(),
+                   res.all_probes_histogram.begin(),
+                   res.all_probes_histogram.begin(), std::plus<size_t>());
+    res.single_table_ratios.insert(res.single_table_ratios.end(),
+                                   b.single_table_ratios.begin(),
+                                   b.single_table_ratios.end());
+    return res;
+  }
+
+  // Average ratio total_probe_length/size over tables.
+  double AvgRatio() const {
+    return std::accumulate(single_table_ratios.begin(),
+                           single_table_ratios.end(), 0.0) /
+           single_table_ratios.size();
+  }
+
+  // Maximum ratio total_probe_length/size over tables.
+  double MaxRatio() const {
+    return *std::max_element(single_table_ratios.begin(),
+                             single_table_ratios.end());
+  }
+
+  // Percentile ratio total_probe_length/size over tables.
+  double PercentileRatio(double Percentile = 0.95) const {
+    auto r = single_table_ratios;
+    auto mid = r.begin() + static_cast<size_t>(r.size() * Percentile);
+    if (mid != r.end()) {
+      std::nth_element(r.begin(), mid, r.end());
+      return *mid;
+    } else {
+      return MaxRatio();
+    }
+  }
+
+  // Maximum probe length over all elements and all tables.
+  size_t MaxProbe() const { return all_probes_histogram.size(); }
+
+  // Fraction of elements with specified probe length.
+  std::vector<double> ProbeNormalizedHistogram() const {
+    double total_elements = std::accumulate(all_probes_histogram.begin(),
+                                            all_probes_histogram.end(), 0ull);
+    std::vector<double> res;
+    for (size_t p : all_probes_histogram) {
+      res.push_back(p / total_elements);
+    }
+    return res;
+  }
+
+  size_t PercentileProbe(double Percentile = 0.99) const {
+    size_t idx = 0;
+    for (double p : ProbeNormalizedHistogram()) {
+      if (Percentile > p) {
+        Percentile -= p;
+        ++idx;
+      } else {
+        return idx;
+      }
+    }
+    return idx;
+  }
+
+  friend std::ostream& operator<<(std::ostream& out, const ProbeStats& s) {
+    out << "{AvgRatio:" << s.AvgRatio() << ", MaxRatio:" << s.MaxRatio()
+        << ", PercentileRatio:" << s.PercentileRatio()
+        << ", MaxProbe:" << s.MaxProbe() << ", Probes=[";
+    for (double p : s.ProbeNormalizedHistogram()) {
+      out << p << ",";
+    }
+    out << "]}";
+
+    return out;
+  }
+};
+
+struct ExpectedStats {
+  double avg_ratio;
+  double max_ratio;
+  std::vector<std::pair<double, double>> percentile_ratios;
+  std::vector<std::pair<double, double>> percentile_probes;
+
+  friend std::ostream& operator<<(std::ostream& out, const ExpectedStats& s) {
+    out << "{AvgRatio:" << s.avg_ratio << ", MaxRatio:" << s.max_ratio
+        << ", PercentileRatios: [";
+    for (auto el : s.percentile_ratios) {
+      out << el.first << ":" << el.second << ", ";
+    }
+    out << "], PercentileProbes: [";
+    for (auto el : s.percentile_probes) {
+      out << el.first << ":" << el.second << ", ";
+    }
+    out << "]}";
+
+    return out;
+  }
+};
+
+void VerifyStats(size_t size, const ExpectedStats& exp,
+                 const ProbeStats& stats) {
+  EXPECT_LT(stats.AvgRatio(), exp.avg_ratio) << size << " " << stats;
+  EXPECT_LT(stats.MaxRatio(), exp.max_ratio) << size << " " << stats;
+  for (auto pr : exp.percentile_ratios) {
+    EXPECT_LE(stats.PercentileRatio(pr.first), pr.second)
+        << size << " " << pr.first << " " << stats;
+  }
+
+  for (auto pr : exp.percentile_probes) {
+    EXPECT_LE(stats.PercentileProbe(pr.first), pr.second)
+        << size << " " << pr.first << " " << stats;
+  }
+}
+
+using ProbeStatsPerSize = std::map<size_t, ProbeStats>;
+
+// Collect total ProbeStats over num_iters iterations of the following
+// algorithm:
+// 1. Create a new table and reserve it to keys.size() * 2.
+// 2. Insert all keys XORed with the seed.
+// 3. Collect ProbeStats from the final table.
+ProbeStats CollectProbeStatsOnKeysXoredWithSeed(
+    const std::vector<int64_t>& keys, size_t num_iters) {
+  const size_t reserve_size = keys.size() * 2;
+
+  ProbeStats stats;
+
+  int64_t seed = 0x71b1a19b907d6e33;
+  while (num_iters--) {
+    seed = static_cast<int64_t>(static_cast<uint64_t>(seed) * 17 + 13);
+    IntTable t1;
+    t1.reserve(reserve_size);
+    for (const auto& key : keys) {
+      t1.emplace(key ^ seed);
+    }
+
+    auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
+    stats.all_probes_histogram.resize(
+        std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
+    std::transform(probe_histogram.begin(), probe_histogram.end(),
+                   stats.all_probes_histogram.begin(),
+                   stats.all_probes_histogram.begin(), std::plus<size_t>());
+
+    size_t total_probe_seq_length = 0;
+    for (size_t i = 0; i < probe_histogram.size(); ++i) {
+      total_probe_seq_length += i * probe_histogram[i];
+    }
+    stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
+                                        keys.size());
+    t1.erase(t1.begin(), t1.end());
+  }
+  return stats;
+}
+
+ExpectedStats XorSeedExpectedStats() {
+  constexpr bool kRandomizesInserts =
+#ifdef NDEBUG
+      false;
+#else   // NDEBUG
+      true;
+#endif  // NDEBUG
+
+  // The effective load factor is larger in non-opt mode because we insert
+  // elements out of order.
+  switch (container_internal::Group::kWidth) {
+    case 8:
+      if (kRandomizesInserts) {
+        return {0.05,
+                1.0,
+                {{0.95, 0.5}},
+                {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
+      } else {
+        return {0.05,
+                2.0,
+                {{0.95, 0.1}},
+                {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
+      }
+    case 16:
+      if (kRandomizesInserts) {
+        return {0.1,
+                1.0,
+                {{0.95, 0.1}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+      } else {
+        return {0.05,
+                1.0,
+                {{0.95, 0.05}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}};
+      }
+  }
+  ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
+  return {};
+}
+
+TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
+  ProbeStatsPerSize stats;
+  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
+  for (size_t size : sizes) {
+    stats[size] =
+        CollectProbeStatsOnKeysXoredWithSeed(CollectBadMergeKeys(size), 200);
+  }
+  auto expected = XorSeedExpectedStats();
+  for (size_t size : sizes) {
+    auto& stat = stats[size];
+    VerifyStats(size, expected, stat);
+  }
+}
+
+// Collect total ProbeStats over num_iters iterations of the following
+// algorithm:
+// 1. Create a new table.
+// 2. Select 10% of the keys and, for each selected key, insert the 10
+//    elements key * 17 + j * 13 for j in [0, 10).
+// 3. Collect ProbeStats from the final table.
+ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
+    const std::vector<int64_t>& keys, size_t num_iters) {
+  ProbeStats stats;
+
+  std::random_device rd;
+  std::mt19937 rng(rd());
+  auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; };
+  std::uniform_int_distribution<size_t> dist(0, keys.size() - 1);
+  while (num_iters--) {
+    IntTable t1;
+    size_t num_keys = keys.size() / 10;
+    size_t start = dist(rng);
+    for (size_t i = 0; i != num_keys; ++i) {
+      for (size_t j = 0; j != 10; ++j) {
+        t1.emplace(linear_transform(keys[(i + start) % keys.size()], j));
+      }
+    }
+
+    auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
+    stats.all_probes_histogram.resize(
+        std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
+    std::transform(probe_histogram.begin(), probe_histogram.end(),
+                   stats.all_probes_histogram.begin(),
+                   stats.all_probes_histogram.begin(), std::plus<size_t>());
+
+    size_t total_probe_seq_length = 0;
+    for (size_t i = 0; i < probe_histogram.size(); ++i) {
+      total_probe_seq_length += i * probe_histogram[i];
+    }
+    stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
+                                        t1.size());
+    t1.erase(t1.begin(), t1.end());
+  }
+  return stats;
+}
+
+ExpectedStats LinearTransformExpectedStats() {
+  constexpr bool kRandomizesInserts =
+#ifdef NDEBUG
+      false;
+#else   // NDEBUG
+      true;
+#endif  // NDEBUG
+
+  // The effective load factor is larger in non-opt mode because we insert
+  // elements out of order.
+  switch (container_internal::Group::kWidth) {
+    case 8:
+      if (kRandomizesInserts) {
+        return {0.1,
+                0.5,
+                {{0.95, 0.3}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+      } else {
+        return {0.15,
+                0.5,
+                {{0.95, 0.3}},
+                {{0.95, 0}, {0.99, 3}, {0.999, 15}, {0.9999, 25}}};
+      }
+    case 16:
+      if (kRandomizesInserts) {
+        return {0.1,
+                0.4,
+                {{0.95, 0.3}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+      } else {
+        return {0.05,
+                0.2,
+                {{0.95, 0.1}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
+      }
+  }
+  ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
+  return {};
+}
+
+TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
+  ProbeStatsPerSize stats;
+  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
+  for (size_t size : sizes) {
+    stats[size] = CollectProbeStatsOnLinearlyTransformedKeys(
+        CollectBadMergeKeys(size), 300);
+  }
+  auto expected = LinearTransformExpectedStats();
+  for (size_t size : sizes) {
+    auto& stat = stats[size];
+    VerifyStats(size, expected, stat);
+  }
+}
+
+TEST(Table, EraseCollision) {
+  BadTable t;
+
+  // 1 2 3
+  t.emplace(1);
+  t.emplace(2);
+  t.emplace(3);
+  EXPECT_THAT(*t.find(1), 1);
+  EXPECT_THAT(*t.find(2), 2);
+  EXPECT_THAT(*t.find(3), 3);
+  EXPECT_EQ(3, t.size());
+
+  // 1 DELETED 3
+  t.erase(t.find(2));
+  EXPECT_THAT(*t.find(1), 1);
+  EXPECT_TRUE(t.find(2) == t.end());
+  EXPECT_THAT(*t.find(3), 3);
+  EXPECT_EQ(2, t.size());
+
+  // DELETED DELETED 3
+  t.erase(t.find(1));
+  EXPECT_TRUE(t.find(1) == t.end());
+  EXPECT_TRUE(t.find(2) == t.end());
+  EXPECT_THAT(*t.find(3), 3);
+  EXPECT_EQ(1, t.size());
+
+  // DELETED DELETED DELETED
+  t.erase(t.find(3));
+  EXPECT_TRUE(t.find(1) == t.end());
+  EXPECT_TRUE(t.find(2) == t.end());
+  EXPECT_TRUE(t.find(3) == t.end());
+  EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, EraseInsertProbing) {
+  BadTable t(100);
+
+  // 1 2 3 4
+  t.emplace(1);
+  t.emplace(2);
+  t.emplace(3);
+  t.emplace(4);
+
+  // 1 DELETED 3 DELETED
+  t.erase(t.find(2));
+  t.erase(t.find(4));
+
+  // 1 10 3 11 12
+  t.emplace(10);
+  t.emplace(11);
+  t.emplace(12);
+
+  EXPECT_EQ(5, t.size());
+  EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
+}
+
+TEST(Table, Clear) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  t.clear();
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_EQ(1, t.size());
+  t.clear();
+  EXPECT_EQ(0, t.size());
+  EXPECT_TRUE(t.find(0) == t.end());
+}
+
+TEST(Table, Swap) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_EQ(1, t.size());
+  IntTable u;
+  t.swap(u);
+  EXPECT_EQ(0, t.size());
+  EXPECT_EQ(1, u.size());
+  EXPECT_TRUE(t.find(0) == t.end());
+  EXPECT_THAT(*u.find(0), 0);
+}
+
+TEST(Table, Rehash) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  t.emplace(0);
+  t.emplace(1);
+  EXPECT_EQ(2, t.size());
+  t.rehash(128);
+  EXPECT_EQ(2, t.size());
+  EXPECT_THAT(*t.find(0), 0);
+  EXPECT_THAT(*t.find(1), 1);
+}
+
+TEST(Table, RehashDoesNotRehashWhenNotNecessary) {
+  IntTable t;
+  t.emplace(0);
+  t.emplace(1);
+  auto* p = &*t.find(0);
+  t.rehash(1);
+  EXPECT_EQ(p, &*t.find(0));
+}
+
+TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
+  IntTable t;
+  t.rehash(0);
+  EXPECT_EQ(0, t.bucket_count());
+}
+
+TEST(Table, RehashZeroDeallocatesEmptyTable) {
+  IntTable t;
+  t.emplace(0);
+  t.clear();
+  EXPECT_NE(0, t.bucket_count());
+  t.rehash(0);
+  EXPECT_EQ(0, t.bucket_count());
+}
+
+TEST(Table, RehashZeroForcesRehash) {
+  IntTable t;
+  t.emplace(0);
+  t.emplace(1);
+  auto* p = &*t.find(0);
+  t.rehash(0);
+  EXPECT_NE(p, &*t.find(0));
+}
+
+TEST(Table, ConstructFromInitList) {
+  using P = std::pair<std::string, std::string>;
+  struct Q {
+    operator P() const { return {}; }
+  };
+  StringTable t = {P(), Q(), {}, {{}, {}}};
+}
+
+TEST(Table, CopyConstruct) {
+  IntTable t;
+  t.emplace(0);
+  EXPECT_EQ(1, t.size());
+  {
+    IntTable u(t);
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find(0), 0);
+  }
+  {
+    IntTable u{t};
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find(0), 0);
+  }
+  {
+    IntTable u = t;
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find(0), 0);
+  }
+}
+
+TEST(Table, CopyConstructWithAlloc) {
+  StringTable t;
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u(t, Alloc<std::pair<std::string, std::string>>());
+  EXPECT_EQ(1, u.size());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+struct ExplicitAllocIntTable
+    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+                   std::equal_to<int64_t>, Alloc<int64_t>> {
+  ExplicitAllocIntTable() {}
+};
+
+TEST(Table, AllocWithExplicitCtor) {
+  ExplicitAllocIntTable t;
+  EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, MoveConstruct) {
+  {
+    StringTable t;
+    t.emplace("a", "b");
+    EXPECT_EQ(1, t.size());
+
+    StringTable u(std::move(t));
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+  }
+  {
+    StringTable t;
+    t.emplace("a", "b");
+    EXPECT_EQ(1, t.size());
+
+    StringTable u{std::move(t)};
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+  }
+  {
+    StringTable t;
+    t.emplace("a", "b");
+    EXPECT_EQ(1, t.size());
+
+    StringTable u = std::move(t);
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+  }
+}
+
+TEST(Table, MoveConstructWithAlloc) {
+  StringTable t;
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
+  EXPECT_EQ(1, u.size());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, CopyAssign) {
+  StringTable t;
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u;
+  u = t;
+  EXPECT_EQ(1, u.size());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, CopySelfAssign) {
+  StringTable t;
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  t = *&t;
+  EXPECT_EQ(1, t.size());
+  EXPECT_THAT(*t.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, MoveAssign) {
+  StringTable t;
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u;
+  u = std::move(t);
+  EXPECT_EQ(1, u.size());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, Equality) {
+  StringTable t;
+  std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
+                                                        {"aa", "bb"}};
+  t.insert(std::begin(v), std::end(v));
+  StringTable u = t;
+  EXPECT_EQ(u, t);
+}
+
+TEST(Table, Equality2) {
+  StringTable t;
+  std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"},
+                                                         {"aa", "bb"}};
+  t.insert(std::begin(v1), std::end(v1));
+  StringTable u;
+  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
+                                                         {"aa", "aa"}};
+  u.insert(std::begin(v2), std::end(v2));
+  EXPECT_NE(u, t);
+}
+
+TEST(Table, Equality3) {
+  StringTable t;
+  std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"},
+                                                         {"bb", "bb"}};
+  t.insert(std::begin(v1), std::end(v1));
+  StringTable u;
+  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
+                                                         {"aa", "aa"}};
+  u.insert(std::begin(v2), std::end(v2));
+  EXPECT_NE(u, t);
+}
+
+TEST(Table, NumDeletedRegression) {
+  IntTable t;
+  t.emplace(0);
+  t.erase(t.find(0));
+  // Construct over a deleted slot.
+  t.emplace(0);
+  t.clear();
+}
+
+TEST(Table, FindFullDeletedRegression) {
+  IntTable t;
+  for (int i = 0; i < 1000; ++i) {
+    t.emplace(i);
+    t.erase(t.find(i));
+  }
+  EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, ReplacingDeletedSlotDoesNotRehash) {
+  size_t n;
+  {
+    // Compute n such that n is the maximum number of elements before rehash.
+    IntTable t;
+    t.emplace(0);
+    size_t c = t.bucket_count();
+    for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
+    --n;
+  }
+  IntTable t;
+  t.rehash(n);
+  const size_t c = t.bucket_count();
+  for (size_t i = 0; i != n; ++i) t.emplace(i);
+  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
+  t.erase(0);
+  t.emplace(0);
+  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
+}
+
+TEST(Table, NoThrowMoveConstruct) {
+  ASSERT_TRUE(
+      std::is_nothrow_copy_constructible<absl::Hash<absl::string_view>>::value);
+  ASSERT_TRUE(std::is_nothrow_copy_constructible<
+              std::equal_to<absl::string_view>>::value);
+  ASSERT_TRUE(std::is_nothrow_copy_constructible<std::allocator<int>>::value);
+  EXPECT_TRUE(std::is_nothrow_move_constructible<StringTable>::value);
+}
+
+TEST(Table, NoThrowMoveAssign) {
+  ASSERT_TRUE(
+      std::is_nothrow_move_assignable<absl::Hash<absl::string_view>>::value);
+  ASSERT_TRUE(
+      std::is_nothrow_move_assignable<std::equal_to<absl::string_view>>::value);
+  ASSERT_TRUE(std::is_nothrow_move_assignable<std::allocator<int>>::value);
+  ASSERT_TRUE(
+      absl::allocator_traits<std::allocator<int>>::is_always_equal::value);
+  EXPECT_TRUE(std::is_nothrow_move_assignable<StringTable>::value);
+}
+
+TEST(Table, NoThrowSwappable) {
+  ASSERT_TRUE(
+      container_internal::IsNoThrowSwappable<absl::Hash<absl::string_view>>());
+  ASSERT_TRUE(container_internal::IsNoThrowSwappable<
+              std::equal_to<absl::string_view>>());
+  ASSERT_TRUE(container_internal::IsNoThrowSwappable<std::allocator<int>>());
+  EXPECT_TRUE(container_internal::IsNoThrowSwappable<StringTable>());
+}
+
+TEST(Table, HeterogeneousLookup) {
+  struct Hash {
+    size_t operator()(int64_t i) const { return i; }
+    size_t operator()(double i) const {
+      ADD_FAILURE();
+      return i;
+    }
+  };
+  struct Eq {
+    bool operator()(int64_t a, int64_t b) const { return a == b; }
+    bool operator()(double a, int64_t b) const {
+      ADD_FAILURE();
+      return a == b;
+    }
+    bool operator()(int64_t a, double b) const {
+      ADD_FAILURE();
+      return a == b;
+    }
+    bool operator()(double a, double b) const {
+      ADD_FAILURE();
+      return a == b;
+    }
+  };
+
+  struct THash {
+    using is_transparent = void;
+    size_t operator()(int64_t i) const { return i; }
+    size_t operator()(double i) const { return i; }
+  };
+  struct TEq {
+    using is_transparent = void;
+    bool operator()(int64_t a, int64_t b) const { return a == b; }
+    bool operator()(double a, int64_t b) const { return a == b; }
+    bool operator()(int64_t a, double b) const { return a == b; }
+    bool operator()(double a, double b) const { return a == b; }
+  };
+
+  raw_hash_set<IntPolicy, Hash, Eq, Alloc<int64_t>> s{0, 1, 2};
+  // It will convert to int64_t before the query.
+  EXPECT_EQ(1, *s.find(double{1.1}));
+
+  raw_hash_set<IntPolicy, THash, TEq, Alloc<int64_t>> ts{0, 1, 2};
+  // It will try to use the double, and fail to find the object.
+  EXPECT_TRUE(ts.find(1.1) == ts.end());
+}
+
+template <class Table>
+using CallFind = decltype(std::declval<Table&>().find(17));
+
+template <class Table>
+using CallErase = decltype(std::declval<Table&>().erase(17));
+
+template <class Table>
+using CallExtract = decltype(std::declval<Table&>().extract(17));
+
+template <class Table>
+using CallPrefetch = decltype(std::declval<Table&>().prefetch(17));
+
+template <class Table>
+using CallCount = decltype(std::declval<Table&>().count(17));
+
+template <template <typename> class C, class Table, class = void>
+struct VerifyResultOf : std::false_type {};
+
+template <template <typename> class C, class Table>
+struct VerifyResultOf<C, Table, absl::void_t<C<Table>>> : std::true_type {};
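+
+// VerifyResultOf is the standard void_t detection idiom: the partial
+// specialization above is viable only if C<Table> is a well-formed type, so
+// the trait reports whether a call expression such as table.find(17)
+// compiles for the given table type.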
+
+TEST(Table, HeterogeneousLookupOverloads) {
+  using NonTransparentTable =
+      raw_hash_set<StringPolicy, absl::Hash<absl::string_view>,
+                   std::equal_to<absl::string_view>, std::allocator<int>>;
+
+  EXPECT_FALSE((VerifyResultOf<CallFind, NonTransparentTable>()));
+  EXPECT_FALSE((VerifyResultOf<CallErase, NonTransparentTable>()));
+  EXPECT_FALSE((VerifyResultOf<CallExtract, NonTransparentTable>()));
+  EXPECT_FALSE((VerifyResultOf<CallPrefetch, NonTransparentTable>()));
+  EXPECT_FALSE((VerifyResultOf<CallCount, NonTransparentTable>()));
+
+  using TransparentTable = raw_hash_set<
+      StringPolicy,
+      absl::container_internal::hash_default_hash<absl::string_view>,
+      absl::container_internal::hash_default_eq<absl::string_view>,
+      std::allocator<int>>;
+
+  EXPECT_TRUE((VerifyResultOf<CallFind, TransparentTable>()));
+  EXPECT_TRUE((VerifyResultOf<CallErase, TransparentTable>()));
+  EXPECT_TRUE((VerifyResultOf<CallExtract, TransparentTable>()));
+  EXPECT_TRUE((VerifyResultOf<CallPrefetch, TransparentTable>()));
+  EXPECT_TRUE((VerifyResultOf<CallCount, TransparentTable>()));
+}
+
+// TODO(alkis): Expand iterator tests.
+TEST(Iterator, IsDefaultConstructible) {
+  StringTable::iterator i;
+  EXPECT_TRUE(i == StringTable::iterator());
+}
+
+TEST(ConstIterator, IsDefaultConstructible) {
+  StringTable::const_iterator i;
+  EXPECT_TRUE(i == StringTable::const_iterator());
+}
+
+TEST(Iterator, ConvertsToConstIterator) {
+  StringTable::iterator i;
+  EXPECT_TRUE(i == StringTable::const_iterator());
+}
+
+TEST(Iterator, Iterates) {
+  IntTable t;
+  for (size_t i = 3; i != 6; ++i) EXPECT_TRUE(t.emplace(i).second);
+  EXPECT_THAT(t, UnorderedElementsAre(3, 4, 5));
+}
+
+TEST(Table, Merge) {
+  StringTable t1, t2;
+  t1.emplace("0", "-0");
+  t1.emplace("1", "-1");
+  t2.emplace("0", "~0");
+  t2.emplace("2", "~2");
+
+  EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1")));
+  EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0"), Pair("2", "~2")));
+
+  t1.merge(t2);
+  EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1"),
+                                       Pair("2", "~2")));
+  EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0")));
+}
+
+TEST(Nodes, EmptyNodeType) {
+  using node_type = StringTable::node_type;
+  node_type n;
+  EXPECT_FALSE(n);
+  EXPECT_TRUE(n.empty());
+
+  EXPECT_TRUE((std::is_same<node_type::allocator_type,
+                            StringTable::allocator_type>::value));
+}
+
+TEST(Nodes, ExtractInsert) {
+  constexpr char k0[] = "Very long string zero.";
+  constexpr char k1[] = "Very long string one.";
+  constexpr char k2[] = "Very long string two.";
+  StringTable t = {{k0, ""}, {k1, ""}, {k2, ""}};
+  EXPECT_THAT(t,
+              UnorderedElementsAre(Pair(k0, ""), Pair(k1, ""), Pair(k2, "")));
+
+  auto node = t.extract(k0);
+  EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
+  EXPECT_TRUE(node);
+  EXPECT_FALSE(node.empty());
+
+  StringTable t2;
+  StringTable::insert_return_type res = t2.insert(std::move(node));
+  EXPECT_TRUE(res.inserted);
+  EXPECT_THAT(*res.position, Pair(k0, ""));
+  EXPECT_FALSE(res.node);
+  EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));
+
+  // Not there.
+  EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
+  node = t.extract("Not there!");
+  EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
+  EXPECT_FALSE(node);
+
+  // Inserting nothing.
+  res = t2.insert(std::move(node));
+  EXPECT_FALSE(res.inserted);
+  EXPECT_EQ(res.position, t2.end());
+  EXPECT_FALSE(res.node);
+  EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));
+
+  t.emplace(k0, "1");
+  node = t.extract(k0);
+
+  // Insert duplicate.
+  res = t2.insert(std::move(node));
+  EXPECT_FALSE(res.inserted);
+  EXPECT_THAT(*res.position, Pair(k0, ""));
+  EXPECT_TRUE(res.node);
+  EXPECT_FALSE(node);
+}
+
+TEST(Nodes, HintInsert) {
+  IntTable t = {1, 2, 3};
+  auto node = t.extract(1);
+  EXPECT_THAT(t, UnorderedElementsAre(2, 3));
+  auto it = t.insert(t.begin(), std::move(node));
+  EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
+  EXPECT_EQ(*it, 1);
+  EXPECT_FALSE(node);
+
+  node = t.extract(2);
+  EXPECT_THAT(t, UnorderedElementsAre(1, 3));
+  // Reinsert 2 to make the next insert fail.
+  t.insert(2);
+  EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
+  it = t.insert(t.begin(), std::move(node));
+  EXPECT_EQ(*it, 2);
+  // The node was not emptied by the insert call.
+  EXPECT_TRUE(node);
+}
+
+IntTable MakeSimpleTable(size_t size) {
+  IntTable t;
+  while (t.size() < size) t.insert(t.size());
+  return t;
+}
+
+std::vector<int> OrderOfIteration(const IntTable& t) {
+  return {t.begin(), t.end()};
+}
+
+// These IterationOrderChanges tests depend on non-deterministic behavior.
+// We are injecting non-determinism from the pointer of the table, but do so in
+// a way that only the page matters. We have to retry enough times to make sure
+// we are touching different memory pages to cause the ordering to change.
+// We also need to keep the old tables around to avoid getting the same memory
+// blocks over and over.
+TEST(Table, IterationOrderChangesByInstance) {
+  for (size_t size : {2, 6, 12, 20}) {
+    const auto reference_table = MakeSimpleTable(size);
+    const auto reference = OrderOfIteration(reference_table);
+
+    std::vector<IntTable> tables;
+    bool found_difference = false;
+    for (int i = 0; !found_difference && i < 5000; ++i) {
+      tables.push_back(MakeSimpleTable(size));
+      found_difference = OrderOfIteration(tables.back()) != reference;
+    }
+    if (!found_difference) {
+      FAIL()
+          << "Iteration order remained the same across many attempts with size "
+          << size;
+    }
+  }
+}
+
+TEST(Table, IterationOrderChangesOnRehash) {
+  std::vector<IntTable> garbage;
+  for (int i = 0; i < 5000; ++i) {
+    auto t = MakeSimpleTable(20);
+    const auto reference = OrderOfIteration(t);
+    // Force rehash to the same size.
+    t.rehash(0);
+    auto trial = OrderOfIteration(t);
+    if (trial != reference) {
+      // We are done.
+      return;
+    }
+    garbage.push_back(std::move(t));
+  }
+  FAIL() << "Iteration order remained the same across many attempts.";
+}
+
+// Verify that pointers are invalidated as soon as a second element is inserted.
+// This prevents dependency on pointer stability on small tables.
+TEST(Table, UnstablePointers) {
+  IntTable table;
+
+  const auto addr = [&](int i) {
+    return reinterpret_cast<uintptr_t>(&*table.find(i));
+  };
+
+  table.insert(0);
+  const uintptr_t old_ptr = addr(0);
+
+  // This causes a rehash.
+  table.insert(1);
+
+  EXPECT_NE(old_ptr, addr(0));
+}
+
+// Confirm that we assert if we try to erase() end().
+TEST(TableDeathTest, EraseOfEndAsserts) {
+  // Use an assert with side-effects to figure out if they are actually enabled.
+  bool assert_enabled = false;
+  assert([&]() {
+    assert_enabled = true;
+    return true;
+  }());
+  if (!assert_enabled) return;
+
+  IntTable t;
+  // Extra simple "regexp" as regexp support is highly varied across platforms.
+  constexpr char kDeathMsg[] = "Invalid operation on iterator";
+  EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
+}
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+TEST(RawHashSamplerTest, Sample) {
+  // Enable the feature even if the prod default is off.
+  SetHashtablezEnabled(true);
+  SetHashtablezSampleParameter(100);
+
+  auto& sampler = HashtablezSampler::Global();
+  size_t start_size = 0;
+  start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
+
+  std::vector<IntTable> tables;
+  for (int i = 0; i < 1000000; ++i) {
+    tables.emplace_back();
+    tables.back().insert(1);
+  }
+  size_t end_size = 0;
+  end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });
+
+  EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
+              0.01, 0.005);
+}
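+
+// With a sample parameter of 100, roughly 1 in 100 tables should be
+// registered with the sampler, hence the expected ratio of 0.01 above.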
+#endif  // ABSL_INTERNAL_HASHTABLEZ_SAMPLE
+
+TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
+  // Enable the feature even if the prod default is off.
+  SetHashtablezEnabled(true);
+  SetHashtablezSampleParameter(100);
+
+  auto& sampler = HashtablezSampler::Global();
+  size_t start_size = 0;
+  start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
+
+  std::vector<CustomAllocIntTable> tables;
+  for (int i = 0; i < 1000000; ++i) {
+    tables.emplace_back();
+    tables.back().insert(1);
+  }
+  size_t end_size = 0;
+  end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });
+
+  EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
+              0.00, 0.001);
+}
+
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+TEST(Sanitizer, PoisoningUnused) {
+  IntTable t;
+  t.reserve(5);
+  // Insert something to force an allocation.
+  int64_t& v1 = *t.insert(0).first;
+
+  // Make sure there is something to test.
+  ASSERT_GT(t.capacity(), 1);
+
+  int64_t* slots = RawHashSetTestOnlyAccess::GetSlots(t);
+  for (size_t i = 0; i < t.capacity(); ++i) {
+    EXPECT_EQ(slots + i != &v1, __asan_address_is_poisoned(slots + i));
+  }
+}
+
+TEST(Sanitizer, PoisoningOnErase) {
+  IntTable t;
+  int64_t& v = *t.insert(0).first;
+
+  EXPECT_FALSE(__asan_address_is_poisoned(&v));
+  t.erase(0);
+  EXPECT_TRUE(__asan_address_is_poisoned(&v));
+}
+#endif  // ABSL_HAVE_ADDRESS_SANITIZER
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/container/internal/test_instance_tracker.cc b/third_party/abseil_cpp/absl/container/internal/test_instance_tracker.cc
new file mode 100644
index 000000000000..f9947f0475d2
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/test_instance_tracker.cc
@@ -0,0 +1,29 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/test_instance_tracker.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace test_internal {
+int BaseCountedInstance::num_instances_ = 0;
+int BaseCountedInstance::num_live_instances_ = 0;
+int BaseCountedInstance::num_moves_ = 0;
+int BaseCountedInstance::num_copies_ = 0;
+int BaseCountedInstance::num_swaps_ = 0;
+int BaseCountedInstance::num_comparisons_ = 0;
+
+}  // namespace test_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/container/internal/test_instance_tracker.h b/third_party/abseil_cpp/absl/container/internal/test_instance_tracker.h
new file mode 100644
index 000000000000..5ff6fd714e2b
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/test_instance_tracker.h
@@ -0,0 +1,274 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
+#define ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
+
+#include <cstdlib>
+#include <ostream>
+
+#include "absl/types/compare.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace test_internal {
+
+// A type that counts number of occurrences of the type, the live occurrences of
+// the type, as well as the number of copies, moves, swaps, and comparisons that
+// have occurred on the type. This is used as a base class for the copyable,
+// copyable+movable, and movable types below that are used in actual tests. Use
+// InstanceTracker in tests to track the number of instances.
+class BaseCountedInstance {
+ public:
+  explicit BaseCountedInstance(int x) : value_(x) {
+    ++num_instances_;
+    ++num_live_instances_;
+  }
+  BaseCountedInstance(const BaseCountedInstance& x)
+      : value_(x.value_), is_live_(x.is_live_) {
+    ++num_instances_;
+    if (is_live_) ++num_live_instances_;
+    ++num_copies_;
+  }
+  BaseCountedInstance(BaseCountedInstance&& x)
+      : value_(x.value_), is_live_(x.is_live_) {
+    x.is_live_ = false;
+    ++num_instances_;
+    ++num_moves_;
+  }
+  ~BaseCountedInstance() {
+    --num_instances_;
+    if (is_live_) --num_live_instances_;
+  }
+
+  BaseCountedInstance& operator=(const BaseCountedInstance& x) {
+    value_ = x.value_;
+    if (is_live_) --num_live_instances_;
+    is_live_ = x.is_live_;
+    if (is_live_) ++num_live_instances_;
+    ++num_copies_;
+    return *this;
+  }
+  BaseCountedInstance& operator=(BaseCountedInstance&& x) {
+    value_ = x.value_;
+    if (is_live_) --num_live_instances_;
+    is_live_ = x.is_live_;
+    x.is_live_ = false;
+    ++num_moves_;
+    return *this;
+  }
+
+  bool operator==(const BaseCountedInstance& x) const {
+    ++num_comparisons_;
+    return value_ == x.value_;
+  }
+
+  bool operator!=(const BaseCountedInstance& x) const {
+    ++num_comparisons_;
+    return value_ != x.value_;
+  }
+
+  bool operator<(const BaseCountedInstance& x) const {
+    ++num_comparisons_;
+    return value_ < x.value_;
+  }
+
+  bool operator>(const BaseCountedInstance& x) const {
+    ++num_comparisons_;
+    return value_ > x.value_;
+  }
+
+  bool operator<=(const BaseCountedInstance& x) const {
+    ++num_comparisons_;
+    return value_ <= x.value_;
+  }
+
+  bool operator>=(const BaseCountedInstance& x) const {
+    ++num_comparisons_;
+    return value_ >= x.value_;
+  }
+
+  absl::weak_ordering compare(const BaseCountedInstance& x) const {
+    ++num_comparisons_;
+    return value_ < x.value_
+               ? absl::weak_ordering::less
+               : value_ == x.value_ ? absl::weak_ordering::equivalent
+                                    : absl::weak_ordering::greater;
+  }
+
+  int value() const {
+    if (!is_live_) std::abort();
+    return value_;
+  }
+
+  friend std::ostream& operator<<(std::ostream& o,
+                                  const BaseCountedInstance& v) {
+    return o << "[value:" << v.value() << "]";
+  }
+
+  // Implementation of efficient swap() that counts swaps.
+  static void SwapImpl(
+      BaseCountedInstance& lhs,    // NOLINT(runtime/references)
+      BaseCountedInstance& rhs) {  // NOLINT(runtime/references)
+    using std::swap;
+    swap(lhs.value_, rhs.value_);
+    swap(lhs.is_live_, rhs.is_live_);
+    ++BaseCountedInstance::num_swaps_;
+  }
+
+ private:
+  friend class InstanceTracker;
+
+  int value_;
+
+  // Indicates if the value is live, i.e., it hasn't been moved away from.
+  bool is_live_ = true;
+
+  // Number of instances.
+  static int num_instances_;
+
+  // Number of live instances (those that have not been moved away from).
+  static int num_live_instances_;
+
+  // Number of times that BaseCountedInstance objects were moved.
+  static int num_moves_;
+
+  // Number of times that BaseCountedInstance objects were copied.
+  static int num_copies_;
+
+  // Number of times that BaseCountedInstance objects were swapped.
+  static int num_swaps_;
+
+  // Number of times that BaseCountedInstance objects were compared.
+  static int num_comparisons_;
+};
+
+// Helper to track the BaseCountedInstance instance counters. Expects the
+// number of instances and of live instances at destruction to match their
+// values at construction.
+class InstanceTracker {
+ public:
+  InstanceTracker()
+      : start_instances_(BaseCountedInstance::num_instances_),
+        start_live_instances_(BaseCountedInstance::num_live_instances_) {
+    ResetCopiesMovesSwaps();
+  }
+  ~InstanceTracker() {
+    if (instances() != 0) std::abort();
+    if (live_instances() != 0) std::abort();
+  }
+
+  // Returns the number of BaseCountedInstance instances, counting both those
+  // holding valid values and those moved away from, relative to when the
+  // InstanceTracker was constructed.
+  int instances() const {
+    return BaseCountedInstance::num_instances_ - start_instances_;
+  }
+
+  // Returns the number of live BaseCountedInstance instances relative to when
+  // the InstanceTracker was constructed.
+  int live_instances() const {
+    return BaseCountedInstance::num_live_instances_ - start_live_instances_;
+  }
+
+  // Returns the number of moves on BaseCountedInstance objects since
+  // construction or since the last call to ResetCopiesMovesSwaps().
+  int moves() const { return BaseCountedInstance::num_moves_ - start_moves_; }
+
+  // Returns the number of copies on BaseCountedInstance objects since
+  // construction or the last call to ResetCopiesMovesSwaps().
+  int copies() const {
+    return BaseCountedInstance::num_copies_ - start_copies_;
+  }
+
+  // Returns the number of swaps on BaseCountedInstance objects since
+  // construction or the last call to ResetCopiesMovesSwaps().
+  int swaps() const { return BaseCountedInstance::num_swaps_ - start_swaps_; }
+
+  // Returns the number of comparisons on BaseCountedInstance objects since
+  // construction or the last call to ResetCopiesMovesSwaps().
+  int comparisons() const {
+    return BaseCountedInstance::num_comparisons_ - start_comparisons_;
+  }
+
+  // Resets the base values for moves, copies, comparisons, and swaps to the
+  // current values, so that subsequent calls to moves(), copies(), swaps(),
+  // and comparisons() report deltas relative to the point of this call.
+  // Note that instances() and live_instances() are not reset.
+  void ResetCopiesMovesSwaps() {
+    start_moves_ = BaseCountedInstance::num_moves_;
+    start_copies_ = BaseCountedInstance::num_copies_;
+    start_swaps_ = BaseCountedInstance::num_swaps_;
+    start_comparisons_ = BaseCountedInstance::num_comparisons_;
+  }
+
+ private:
+  int start_instances_;
+  int start_live_instances_;
+  int start_moves_;
+  int start_copies_;
+  int start_swaps_;
+  int start_comparisons_;
+};
+
+// Copyable, not movable.
+class CopyableOnlyInstance : public BaseCountedInstance {
+ public:
+  explicit CopyableOnlyInstance(int x) : BaseCountedInstance(x) {}
+  CopyableOnlyInstance(const CopyableOnlyInstance& rhs) = default;
+  CopyableOnlyInstance& operator=(const CopyableOnlyInstance& rhs) = default;
+
+  friend void swap(CopyableOnlyInstance& lhs, CopyableOnlyInstance& rhs) {
+    BaseCountedInstance::SwapImpl(lhs, rhs);
+  }
+
+  static bool supports_move() { return false; }
+};
+
+// Copyable and movable.
+class CopyableMovableInstance : public BaseCountedInstance {
+ public:
+  explicit CopyableMovableInstance(int x) : BaseCountedInstance(x) {}
+  CopyableMovableInstance(const CopyableMovableInstance& rhs) = default;
+  CopyableMovableInstance(CopyableMovableInstance&& rhs) = default;
+  CopyableMovableInstance& operator=(const CopyableMovableInstance& rhs) =
+      default;
+  CopyableMovableInstance& operator=(CopyableMovableInstance&& rhs) = default;
+
+  friend void swap(CopyableMovableInstance& lhs, CopyableMovableInstance& rhs) {
+    BaseCountedInstance::SwapImpl(lhs, rhs);
+  }
+
+  static bool supports_move() { return true; }
+};
+
+// Only movable, not default-constructible.
+class MovableOnlyInstance : public BaseCountedInstance {
+ public:
+  explicit MovableOnlyInstance(int x) : BaseCountedInstance(x) {}
+  MovableOnlyInstance(MovableOnlyInstance&& other) = default;
+  MovableOnlyInstance& operator=(MovableOnlyInstance&& other) = default;
+
+  friend void swap(MovableOnlyInstance& lhs, MovableOnlyInstance& rhs) {
+    BaseCountedInstance::SwapImpl(lhs, rhs);
+  }
+
+  static bool supports_move() { return true; }
+};
+
+}  // namespace test_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
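
As a quick orientation for the header above, here is a minimal usage sketch (not part of the patch; it assumes linking against test_instance_tracker.cc for the static counter definitions):

    #include <utility>

    #include "absl/container/internal/test_instance_tracker.h"

    void Demo() {
      using absl::test_internal::InstanceTracker;
      using absl::test_internal::MovableOnlyInstance;

      InstanceTracker tracker;              // snapshots the global counters
      MovableOnlyInstance a(42);
      MovableOnlyInstance b(std::move(a));  // one move; `a` is no longer live
      // At this point: tracker.instances() == 2, tracker.live_instances() == 1,
      // tracker.moves() == 1, tracker.copies() == 0.
    }  // a and b are destroyed before tracker, so its destructor checks pass
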
diff --git a/third_party/abseil_cpp/absl/container/internal/test_instance_tracker_test.cc b/third_party/abseil_cpp/absl/container/internal/test_instance_tracker_test.cc
new file mode 100644
index 000000000000..1c6a4fa7150d
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/test_instance_tracker_test.cc
@@ -0,0 +1,184 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/test_instance_tracker.h"
+
+#include "gtest/gtest.h"
+
+namespace {
+
+using absl::test_internal::CopyableMovableInstance;
+using absl::test_internal::CopyableOnlyInstance;
+using absl::test_internal::InstanceTracker;
+using absl::test_internal::MovableOnlyInstance;
+
+TEST(TestInstanceTracker, CopyableMovable) {
+  InstanceTracker tracker;
+  CopyableMovableInstance src(1);
+  EXPECT_EQ(1, src.value()) << src;
+  CopyableMovableInstance copy(src);
+  CopyableMovableInstance move(std::move(src));
+  EXPECT_EQ(1, tracker.copies());
+  EXPECT_EQ(1, tracker.moves());
+  EXPECT_EQ(0, tracker.swaps());
+  EXPECT_EQ(3, tracker.instances());
+  EXPECT_EQ(2, tracker.live_instances());
+  tracker.ResetCopiesMovesSwaps();
+
+  CopyableMovableInstance copy_assign(1);
+  copy_assign = copy;
+  CopyableMovableInstance move_assign(1);
+  move_assign = std::move(move);
+  EXPECT_EQ(1, tracker.copies());
+  EXPECT_EQ(1, tracker.moves());
+  EXPECT_EQ(0, tracker.swaps());
+  EXPECT_EQ(5, tracker.instances());
+  EXPECT_EQ(3, tracker.live_instances());
+  tracker.ResetCopiesMovesSwaps();
+
+  {
+    using std::swap;
+    swap(move_assign, copy);
+    swap(copy, move_assign);
+    EXPECT_EQ(2, tracker.swaps());
+    EXPECT_EQ(0, tracker.copies());
+    EXPECT_EQ(0, tracker.moves());
+    EXPECT_EQ(5, tracker.instances());
+    EXPECT_EQ(3, tracker.live_instances());
+  }
+}
+
+TEST(TestInstanceTracker, CopyableOnly) {
+  InstanceTracker tracker;
+  CopyableOnlyInstance src(1);
+  EXPECT_EQ(1, src.value()) << src;
+  CopyableOnlyInstance copy(src);
+  CopyableOnlyInstance copy2(std::move(src));  // NOLINT
+  EXPECT_EQ(2, tracker.copies());
+  EXPECT_EQ(0, tracker.moves());
+  EXPECT_EQ(3, tracker.instances());
+  EXPECT_EQ(3, tracker.live_instances());
+  tracker.ResetCopiesMovesSwaps();
+
+  CopyableOnlyInstance copy_assign(1);
+  copy_assign = copy;
+  CopyableOnlyInstance copy_assign2(1);
+  copy_assign2 = std::move(copy2);  // NOLINT
+  EXPECT_EQ(2, tracker.copies());
+  EXPECT_EQ(0, tracker.moves());
+  EXPECT_EQ(5, tracker.instances());
+  EXPECT_EQ(5, tracker.live_instances());
+  tracker.ResetCopiesMovesSwaps();
+
+  {
+    using std::swap;
+    swap(src, copy);
+    swap(copy, src);
+    EXPECT_EQ(2, tracker.swaps());
+    EXPECT_EQ(0, tracker.copies());
+    EXPECT_EQ(0, tracker.moves());
+    EXPECT_EQ(5, tracker.instances());
+    EXPECT_EQ(5, tracker.live_instances());
+  }
+}
+
+TEST(TestInstanceTracker, MovableOnly) {
+  InstanceTracker tracker;
+  MovableOnlyInstance src(1);
+  EXPECT_EQ(1, src.value()) << src;
+  MovableOnlyInstance move(std::move(src));
+  MovableOnlyInstance move_assign(2);
+  move_assign = std::move(move);
+  EXPECT_EQ(3, tracker.instances());
+  EXPECT_EQ(1, tracker.live_instances());
+  EXPECT_EQ(2, tracker.moves());
+  EXPECT_EQ(0, tracker.copies());
+  tracker.ResetCopiesMovesSwaps();
+
+  {
+    using std::swap;
+    MovableOnlyInstance other(2);
+    swap(move_assign, other);
+    swap(other, move_assign);
+    EXPECT_EQ(2, tracker.swaps());
+    EXPECT_EQ(0, tracker.copies());
+    EXPECT_EQ(0, tracker.moves());
+    EXPECT_EQ(4, tracker.instances());
+    EXPECT_EQ(2, tracker.live_instances());
+  }
+}
+
+TEST(TestInstanceTracker, ExistingInstances) {
+  CopyableMovableInstance uncounted_instance(1);
+  CopyableMovableInstance uncounted_live_instance(
+      std::move(uncounted_instance));
+  InstanceTracker tracker;
+  EXPECT_EQ(0, tracker.instances());
+  EXPECT_EQ(0, tracker.live_instances());
+  EXPECT_EQ(0, tracker.copies());
+  {
+    CopyableMovableInstance instance1(1);
+    EXPECT_EQ(1, tracker.instances());
+    EXPECT_EQ(1, tracker.live_instances());
+    EXPECT_EQ(0, tracker.copies());
+    EXPECT_EQ(0, tracker.moves());
+    {
+      InstanceTracker tracker2;
+      CopyableMovableInstance instance2(instance1);
+      CopyableMovableInstance instance3(std::move(instance2));
+      EXPECT_EQ(3, tracker.instances());
+      EXPECT_EQ(2, tracker.live_instances());
+      EXPECT_EQ(1, tracker.copies());
+      EXPECT_EQ(1, tracker.moves());
+      EXPECT_EQ(2, tracker2.instances());
+      EXPECT_EQ(1, tracker2.live_instances());
+      EXPECT_EQ(1, tracker2.copies());
+      EXPECT_EQ(1, tracker2.moves());
+    }
+    EXPECT_EQ(1, tracker.instances());
+    EXPECT_EQ(1, tracker.live_instances());
+    EXPECT_EQ(1, tracker.copies());
+    EXPECT_EQ(1, tracker.moves());
+  }
+  EXPECT_EQ(0, tracker.instances());
+  EXPECT_EQ(0, tracker.live_instances());
+  EXPECT_EQ(1, tracker.copies());
+  EXPECT_EQ(1, tracker.moves());
+}
+
+TEST(TestInstanceTracker, Comparisons) {
+  InstanceTracker tracker;
+  MovableOnlyInstance one(1), two(2);
+
+  EXPECT_EQ(0, tracker.comparisons());
+  EXPECT_FALSE(one == two);
+  EXPECT_EQ(1, tracker.comparisons());
+  EXPECT_TRUE(one != two);
+  EXPECT_EQ(2, tracker.comparisons());
+  EXPECT_TRUE(one < two);
+  EXPECT_EQ(3, tracker.comparisons());
+  EXPECT_FALSE(one > two);
+  EXPECT_EQ(4, tracker.comparisons());
+  EXPECT_TRUE(one <= two);
+  EXPECT_EQ(5, tracker.comparisons());
+  EXPECT_FALSE(one >= two);
+  EXPECT_EQ(6, tracker.comparisons());
+  EXPECT_TRUE(one.compare(two) < 0);  // NOLINT
+  EXPECT_EQ(7, tracker.comparisons());
+
+  tracker.ResetCopiesMovesSwaps();
+  EXPECT_EQ(0, tracker.comparisons());
+}
+
+}  // namespace
diff --git a/third_party/abseil_cpp/absl/container/internal/tracked.h b/third_party/abseil_cpp/absl/container/internal/tracked.h
new file mode 100644
index 000000000000..29f5829f7199
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/tracked.h
@@ -0,0 +1,83 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_TRACKED_H_
+#define ABSL_CONTAINER_INTERNAL_TRACKED_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <utility>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// A class that tracks its copies and moves so that it can be queried in tests.
+template <class T>
+class Tracked {
+ public:
+  Tracked() {}
+  // NOLINTNEXTLINE(runtime/explicit)
+  Tracked(const T& val) : val_(val) {}
+  Tracked(const Tracked& that)
+      : val_(that.val_),
+        num_moves_(that.num_moves_),
+        num_copies_(that.num_copies_) {
+    ++(*num_copies_);
+  }
+  Tracked(Tracked&& that)
+      : val_(std::move(that.val_)),
+        num_moves_(std::move(that.num_moves_)),
+        num_copies_(std::move(that.num_copies_)) {
+    ++(*num_moves_);
+  }
+  Tracked& operator=(const Tracked& that) {
+    val_ = that.val_;
+    num_moves_ = that.num_moves_;
+    num_copies_ = that.num_copies_;
+    ++(*num_copies_);
+    return *this;
+  }
+  Tracked& operator=(Tracked&& that) {
+    val_ = std::move(that.val_);
+    num_moves_ = std::move(that.num_moves_);
+    num_copies_ = std::move(that.num_copies_);
+    ++(*num_moves_);
+    return *this;
+  }
+
+  const T& val() const { return val_; }
+
+  friend bool operator==(const Tracked& a, const Tracked& b) {
+    return a.val_ == b.val_;
+  }
+  friend bool operator!=(const Tracked& a, const Tracked& b) {
+    return !(a == b);
+  }
+
+  size_t num_copies() { return *num_copies_; }
+  size_t num_moves() { return *num_moves_; }
+
+ private:
+  T val_;
+  std::shared_ptr<size_t> num_moves_ = std::make_shared<size_t>(0);
+  std::shared_ptr<size_t> num_copies_ = std::make_shared<size_t>(0);
+};
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_TRACKED_H_
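
A small sketch of how Tracked<T> behaves (not part of the patch). The counters live behind std::shared_ptr, so every copy or move of a given object shares one set of tallies:

    #include <cassert>
    #include <utility>

    #include "absl/container/internal/tracked.h"

    void Demo() {
      using absl::container_internal::Tracked;

      Tracked<int> a(5);
      Tracked<int> b = a;             // copy: the shared copy count becomes 1
      Tracked<int> c = std::move(b);  // move: the shared move count becomes 1
      assert(a.num_copies() == 1);    // visible through any object in the family
      assert(c.num_moves() == 1);
    }
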
diff --git a/third_party/abseil_cpp/absl/container/internal/unordered_map_constructor_test.h b/third_party/abseil_cpp/absl/container/internal/unordered_map_constructor_test.h
new file mode 100644
index 000000000000..76ee95e6abc5
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/unordered_map_constructor_test.h
@@ -0,0 +1,489 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
+
+#include <algorithm>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordMap>
+class ConstructorTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(ConstructorTest);
+
+TYPED_TEST_P(ConstructorTest, NoArgs) {
+  TypeParam m;
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCount) {
+  TypeParam m(123);
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHash) {
+  using H = typename TypeParam::hasher;
+  H hasher;
+  TypeParam m(123, hasher);
+  EXPECT_EQ(m.hash_function(), hasher);
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) {
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  H hasher;
+  E equal;
+  TypeParam m(123, hasher, equal);
+  EXPECT_EQ(m.hash_function(), hasher);
+  EXPECT_EQ(m.key_eq(), equal);
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) {
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  E equal;
+  A alloc(0);
+  TypeParam m(123, hasher, equal, alloc);
+  EXPECT_EQ(m.hash_function(), hasher);
+  EXPECT_EQ(m.key_eq(), equal);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+template <typename T>
+struct is_std_unordered_map : std::false_type {};
+
+template <typename... T>
+struct is_std_unordered_map<std::unordered_map<T...>> : std::true_type {};
+
+#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+using has_cxx14_std_apis = std::true_type;
+#else
+using has_cxx14_std_apis = std::false_type;
+#endif
+
+template <typename T>
+using expect_cxx14_apis =
+    absl::disjunction<absl::negation<is_std_unordered_map<T>>,
+                      has_cxx14_std_apis>;
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::true_type) {
+  using A = typename TypeParam::allocator_type;
+  A alloc(0);
+  TypeParam m(123, alloc);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
+  BucketCountAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::true_type) {
+  using H = typename TypeParam::hasher;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  A alloc(0);
+  TypeParam m(123, hasher, alloc);
+  EXPECT_EQ(m.hash_function(), hasher);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
+  BucketCountHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+using has_alloc_std_constructors = std::true_type;
+#else
+using has_alloc_std_constructors = std::false_type;
+#endif
+
+template <typename T>
+using expect_alloc_constructors =
+    absl::disjunction<absl::negation<is_std_unordered_map<T>>,
+                      has_alloc_std_constructors>;
+
+template <typename TypeParam>
+void AllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void AllocTest(std::true_type) {
+  using A = typename TypeParam::allocator_type;
+  A alloc(0);
+  TypeParam m(alloc);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+}
+
+TYPED_TEST_P(ConstructorTest, Alloc) {
+  AllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  E equal;
+  A alloc(0);
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
+  EXPECT_EQ(m.hash_function(), hasher);
+  EXPECT_EQ(m.key_eq(), equal);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::true_type) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using A = typename TypeParam::allocator_type;
+  A alloc(0);
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m(values.begin(), values.end(), 123, alloc);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
+  InputIteratorBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::true_type) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using H = typename TypeParam::hasher;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  A alloc(0);
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
+  EXPECT_EQ(m.hash_function(), hasher);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
+  InputIteratorBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructor) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  E equal;
+  A alloc(0);
+  TypeParam m(123, hasher, equal, alloc);
+  for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+  TypeParam n(m);
+  EXPECT_EQ(m.hash_function(), n.hash_function());
+  EXPECT_EQ(m.key_eq(), n.key_eq());
+  EXPECT_EQ(m.get_allocator(), n.get_allocator());
+  EXPECT_EQ(m, n);
+}
+
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::true_type) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  E equal;
+  A alloc(0);
+  TypeParam m(123, hasher, equal, alloc);
+  for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+  TypeParam n(m, A(11));
+  EXPECT_EQ(m.hash_function(), n.hash_function());
+  EXPECT_EQ(m.key_eq(), n.key_eq());
+  EXPECT_NE(m.get_allocator(), n.get_allocator());
+  EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
+  CopyConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+// TODO(alkis): Test non-propagating allocators on copy constructors.
+
+TYPED_TEST_P(ConstructorTest, MoveConstructor) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  E equal;
+  A alloc(0);
+  TypeParam m(123, hasher, equal, alloc);
+  for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+  TypeParam t(m);
+  TypeParam n(std::move(t));
+  EXPECT_EQ(m.hash_function(), n.hash_function());
+  EXPECT_EQ(m.key_eq(), n.key_eq());
+  EXPECT_EQ(m.get_allocator(), n.get_allocator());
+  EXPECT_EQ(m, n);
+}
+
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::true_type) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  E equal;
+  A alloc(0);
+  TypeParam m(123, hasher, equal, alloc);
+  for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+  TypeParam t(m);
+  TypeParam n(std::move(t), A(1));
+  EXPECT_EQ(m.hash_function(), n.hash_function());
+  EXPECT_EQ(m.key_eq(), n.key_eq());
+  EXPECT_NE(m.get_allocator(), n.get_allocator());
+  EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
+  MoveConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+// TODO(alkis): Test non-propagating allocators on move constructors.
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  E equal;
+  A alloc(0);
+  TypeParam m(values, 123, hasher, equal, alloc);
+  EXPECT_EQ(m.hash_function(), hasher);
+  EXPECT_EQ(m.key_eq(), equal);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::true_type) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using A = typename TypeParam::allocator_type;
+  hash_internal::Generator<T> gen;
+  std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+  A alloc(0);
+  TypeParam m(values, 123, alloc);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
+  InitializerListBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::true_type) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using H = typename TypeParam::hasher;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  A alloc(0);
+  hash_internal::Generator<T> gen;
+  std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+  TypeParam m(values, 123, hasher, alloc);
+  EXPECT_EQ(m.hash_function(), hasher);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
+  InitializerListBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, Assignment) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  E equal;
+  A alloc(0);
+  hash_internal::Generator<T> gen;
+  TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+  TypeParam n;
+  n = m;
+  EXPECT_EQ(m.hash_function(), n.hash_function());
+  EXPECT_EQ(m.key_eq(), n.key_eq());
+  EXPECT_EQ(m, n);
+}
+
+// TODO(alkis): Test [non-]propagating allocators on move/copy assignments
+// (it depends on traits).
+
+TYPED_TEST_P(ConstructorTest, MoveAssignment) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  E equal;
+  A alloc(0);
+  hash_internal::Generator<T> gen;
+  TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+  TypeParam t(m);
+  TypeParam n;
+  n = std::move(t);
+  EXPECT_EQ(m.hash_function(), n.hash_function());
+  EXPECT_EQ(m.key_eq(), n.key_eq());
+  EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+  TypeParam m;
+  m = values;
+  EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  TypeParam m({gen(), gen(), gen()});
+  TypeParam n({gen()});
+  n = m;
+  EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  TypeParam m({gen(), gen(), gen()});
+  TypeParam t(m);
+  TypeParam n({gen()});
+  n = std::move(t);
+  EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+  TypeParam m;
+  m = values;
+  EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+  TypeParam m(values);
+  m = *&m;  // Avoid -Wself-assign
+  EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+// We cannot test self-move, as the standard states that it leaves standard
+// containers in an unspecified state (and in practice it causes a memory leak,
+// according to the heap checker!).
+
+REGISTER_TYPED_TEST_CASE_P(
+    ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
+    BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
+    InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
+    InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
+    MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
+    InitializerListBucketAlloc, InitializerListBucketHashAlloc, Assignment,
+    MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting,
+    MoveAssignmentOverwritesExisting,
+    AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
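
The *AllocTest pairs above rely on tag dispatch: the std::true_type overload holds the real test body, the std::false_type overload compiles to nothing, and the expect_* trait selects between them at compile time. A reduced sketch of the idiom, with hypothetical names:

    #include <iostream>
    #include <type_traits>

    // Selected when the guarded API is unavailable: deliberately a no-op.
    template <typename T>
    void MaybeRun(std::false_type) {}

    // Selected when the guarded API is available: runs the real body.
    template <typename T>
    void MaybeRun(std::true_type) {
      std::cout << "running the guarded body\n";
    }

    // Stand-in for traits such as expect_cxx14_apis<T>, which likewise
    // derive from std::true_type or std::false_type.
    template <typename T>
    using has_feature = std::is_integral<T>;

    int main() {
      MaybeRun<int>(has_feature<int>());        // prints
      MaybeRun<double>(has_feature<double>());  // no-op
    }
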
diff --git a/third_party/abseil_cpp/absl/container/internal/unordered_map_lookup_test.h b/third_party/abseil_cpp/absl/container/internal/unordered_map_lookup_test.h
new file mode 100644
index 000000000000..e76421e508fe
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/unordered_map_lookup_test.h
@@ -0,0 +1,117 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordMap>
+class LookupTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(LookupTest);
+
+TYPED_TEST_P(LookupTest, At) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m(values.begin(), values.end());
+  for (const auto& p : values) {
+    const auto& val = m.at(p.first);
+    EXPECT_EQ(p.second, val) << ::testing::PrintToString(p.first);
+  }
+}
+
+TYPED_TEST_P(LookupTest, OperatorBracket) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using V = typename TypeParam::mapped_type;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m;
+  for (const auto& p : values) {
+    auto& val = m[p.first];
+    EXPECT_EQ(V(), val) << ::testing::PrintToString(p.first);
+    val = p.second;
+  }
+  for (const auto& p : values)
+    EXPECT_EQ(p.second, m[p.first]) << ::testing::PrintToString(p.first);
+}
+
+TYPED_TEST_P(LookupTest, Count) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m;
+  for (const auto& p : values)
+    EXPECT_EQ(0, m.count(p.first)) << ::testing::PrintToString(p.first);
+  m.insert(values.begin(), values.end());
+  for (const auto& p : values)
+    EXPECT_EQ(1, m.count(p.first)) << ::testing::PrintToString(p.first);
+}
+
+TYPED_TEST_P(LookupTest, Find) {
+  using std::get;
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m;
+  for (const auto& p : values)
+    EXPECT_TRUE(m.end() == m.find(p.first))
+        << ::testing::PrintToString(p.first);
+  m.insert(values.begin(), values.end());
+  for (const auto& p : values) {
+    auto it = m.find(p.first);
+    EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(p.first);
+    EXPECT_EQ(p.second, get<1>(*it)) << ::testing::PrintToString(p.first);
+  }
+}
+
+TYPED_TEST_P(LookupTest, EqualRange) {
+  using std::get;
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m;
+  for (const auto& p : values) {
+    auto r = m.equal_range(p.first);
+    ASSERT_EQ(0, std::distance(r.first, r.second));
+  }
+  m.insert(values.begin(), values.end());
+  for (const auto& p : values) {
+    auto r = m.equal_range(p.first);
+    ASSERT_EQ(1, std::distance(r.first, r.second));
+    EXPECT_EQ(p.second, get<1>(*r.first)) << ::testing::PrintToString(p.first);
+  }
+}
+
+REGISTER_TYPED_TEST_CASE_P(LookupTest, At, OperatorBracket, Count, Find,
+                           EqualRange);
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/unordered_map_members_test.h b/third_party/abseil_cpp/absl/container/internal/unordered_map_members_test.h
new file mode 100644
index 000000000000..7d48cdb890bb
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/unordered_map_members_test.h
@@ -0,0 +1,87 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
+
+#include <type_traits>
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordMap>
+class MembersTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(MembersTest);
+
+template <typename T>
+void UseType() {}
+
+TYPED_TEST_P(MembersTest, Typedefs) {
+  EXPECT_TRUE((std::is_same<std::pair<const typename TypeParam::key_type,
+                                      typename TypeParam::mapped_type>,
+                            typename TypeParam::value_type>()));
+  EXPECT_TRUE((absl::conjunction<
+               absl::negation<std::is_signed<typename TypeParam::size_type>>,
+               std::is_integral<typename TypeParam::size_type>>()));
+  EXPECT_TRUE((absl::conjunction<
+               std::is_signed<typename TypeParam::difference_type>,
+               std::is_integral<typename TypeParam::difference_type>>()));
+  EXPECT_TRUE((std::is_convertible<
+               decltype(std::declval<const typename TypeParam::hasher&>()(
+                   std::declval<const typename TypeParam::key_type&>())),
+               size_t>()));
+  EXPECT_TRUE((std::is_convertible<
+               decltype(std::declval<const typename TypeParam::key_equal&>()(
+                   std::declval<const typename TypeParam::key_type&>(),
+                   std::declval<const typename TypeParam::key_type&>())),
+               bool>()));
+  EXPECT_TRUE((std::is_same<typename TypeParam::allocator_type::value_type,
+                            typename TypeParam::value_type>()));
+  EXPECT_TRUE((std::is_same<typename TypeParam::value_type&,
+                            typename TypeParam::reference>()));
+  EXPECT_TRUE((std::is_same<const typename TypeParam::value_type&,
+                            typename TypeParam::const_reference>()));
+  EXPECT_TRUE((std::is_same<typename std::allocator_traits<
+                                typename TypeParam::allocator_type>::pointer,
+                            typename TypeParam::pointer>()));
+  EXPECT_TRUE(
+      (std::is_same<typename std::allocator_traits<
+                        typename TypeParam::allocator_type>::const_pointer,
+                    typename TypeParam::const_pointer>()));
+}
+
+TYPED_TEST_P(MembersTest, SimpleFunctions) {
+  EXPECT_GT(TypeParam().max_size(), 0);
+}
+
+TYPED_TEST_P(MembersTest, BeginEnd) {
+  TypeParam t = {typename TypeParam::value_type{}};
+  EXPECT_EQ(t.begin(), t.cbegin());
+  EXPECT_EQ(t.end(), t.cend());
+  EXPECT_NE(t.begin(), t.end());
+  EXPECT_NE(t.cbegin(), t.cend());
+}
+
+REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd);
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/unordered_map_modifiers_test.h b/third_party/abseil_cpp/absl/container/internal/unordered_map_modifiers_test.h
new file mode 100644
index 000000000000..8c9ca779a424
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/unordered_map_modifiers_test.h
@@ -0,0 +1,318 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
+
+#include <memory>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordMap>
+class ModifiersTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(ModifiersTest);
+
+TYPED_TEST_P(ModifiersTest, Clear) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m(values.begin(), values.end());
+  ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+  m.clear();
+  EXPECT_THAT(items(m), ::testing::UnorderedElementsAre());
+  EXPECT_TRUE(m.empty());
+}
+
+TYPED_TEST_P(ModifiersTest, Insert) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using V = typename TypeParam::mapped_type;
+  T val = hash_internal::Generator<T>()();
+  TypeParam m;
+  auto p = m.insert(val);
+  EXPECT_TRUE(p.second);
+  EXPECT_EQ(val, *p.first);
+  T val2 = {val.first, hash_internal::Generator<V>()()};
+  p = m.insert(val2);
+  EXPECT_FALSE(p.second);
+  EXPECT_EQ(val, *p.first);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertHint) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using V = typename TypeParam::mapped_type;
+  T val = hash_internal::Generator<T>()();
+  TypeParam m;
+  auto it = m.insert(m.end(), val);
+  EXPECT_TRUE(it != m.end());
+  EXPECT_EQ(val, *it);
+  T val2 = {val.first, hash_internal::Generator<V>()()};
+  it = m.insert(it, val2);
+  EXPECT_TRUE(it != m.end());
+  EXPECT_EQ(val, *it);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRange) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m;
+  m.insert(values.begin(), values.end());
+  ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ModifiersTest, InsertOrAssign) {
+#ifdef UNORDERED_MAP_CXX17
+  using std::get;
+  using K = typename TypeParam::key_type;
+  using V = typename TypeParam::mapped_type;
+  K k = hash_internal::Generator<K>()();
+  V val = hash_internal::Generator<V>()();
+  TypeParam m;
+  auto p = m.insert_or_assign(k, val);
+  EXPECT_TRUE(p.second);
+  EXPECT_EQ(k, get<0>(*p.first));
+  EXPECT_EQ(val, get<1>(*p.first));
+  V val2 = hash_internal::Generator<V>()();
+  p = m.insert_or_assign(k, val2);
+  EXPECT_FALSE(p.second);
+  EXPECT_EQ(k, get<0>(*p.first));
+  EXPECT_EQ(val2, get<1>(*p.first));
+#endif
+}
+
+TYPED_TEST_P(ModifiersTest, InsertOrAssignHint) {
+#ifdef UNORDERED_MAP_CXX17
+  using std::get;
+  using K = typename TypeParam::key_type;
+  using V = typename TypeParam::mapped_type;
+  K k = hash_internal::Generator<K>()();
+  V val = hash_internal::Generator<V>()();
+  TypeParam m;
+  auto it = m.insert_or_assign(m.end(), k, val);
+  EXPECT_TRUE(it != m.end());
+  EXPECT_EQ(k, get<0>(*it));
+  EXPECT_EQ(val, get<1>(*it));
+  V val2 = hash_internal::Generator<V>()();
+  it = m.insert_or_assign(it, k, val2);
+  EXPECT_EQ(k, get<0>(*it));
+  EXPECT_EQ(val2, get<1>(*it));
+#endif
+}
+
+TYPED_TEST_P(ModifiersTest, Emplace) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using V = typename TypeParam::mapped_type;
+  T val = hash_internal::Generator<T>()();
+  TypeParam m;
+  // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+  // with test traits/policy.
+  auto p = m.emplace(val);
+  EXPECT_TRUE(p.second);
+  EXPECT_EQ(val, *p.first);
+  T val2 = {val.first, hash_internal::Generator<V>()()};
+  p = m.emplace(val2);
+  EXPECT_FALSE(p.second);
+  EXPECT_EQ(val, *p.first);
+}
+
+TYPED_TEST_P(ModifiersTest, EmplaceHint) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using V = typename TypeParam::mapped_type;
+  T val = hash_internal::Generator<T>()();
+  TypeParam m;
+  // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+  // with test traits/policy.
+  auto it = m.emplace_hint(m.end(), val);
+  EXPECT_EQ(val, *it);
+  T val2 = {val.first, hash_internal::Generator<V>()()};
+  it = m.emplace_hint(it, val2);
+  EXPECT_EQ(val, *it);
+}
+
+TYPED_TEST_P(ModifiersTest, TryEmplace) {
+#ifdef UNORDERED_MAP_CXX17
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using V = typename TypeParam::mapped_type;
+  T val = hash_internal::Generator<T>()();
+  TypeParam m;
+  // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+  // with test traits/policy.
+  auto p = m.try_emplace(val.first, val.second);
+  EXPECT_TRUE(p.second);
+  EXPECT_EQ(val, *p.first);
+  T val2 = {val.first, hash_internal::Generator<V>()()};
+  p = m.try_emplace(val2.first, val2.second);
+  EXPECT_FALSE(p.second);
+  EXPECT_EQ(val, *p.first);
+#endif
+}
+
+TYPED_TEST_P(ModifiersTest, TryEmplaceHint) {
+#ifdef UNORDERED_MAP_CXX17
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using V = typename TypeParam::mapped_type;
+  T val = hash_internal::Generator<T>()();
+  TypeParam m;
+  // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+  // with test traits/policy.
+  auto it = m.try_emplace(m.end(), val.first, val.second);
+  EXPECT_EQ(val, *it);
+  T val2 = {val.first, hash_internal::Generator<V>()()};
+  it = m.try_emplace(it, val2.first, val2.second);
+  EXPECT_EQ(val, *it);
+#endif
+}
+
+template <class V>
+using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type;
+
+// In openmap we chose not to return an iterator from erase() because doing so
+// is more expensive. As such, we adapt erase here to return an iterator.
+struct EraseFirst {
+  template <class Map>
+  auto operator()(Map* m, int) const
+      -> IfNotVoid<decltype(m->erase(m->begin()))> {
+    return m->erase(m->begin());
+  }
+  template <class Map>
+  typename Map::iterator operator()(Map* m, ...) const {
+    auto it = m->begin();
+    m->erase(it++);
+    return it;
+  }
+};
+
+TYPED_TEST_P(ModifiersTest, Erase) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using std::get;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m(values.begin(), values.end());
+  ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+  auto& first = *m.begin();
+  std::vector<T> values2;
+  for (const auto& val : values)
+    if (get<0>(val) != get<0>(first)) values2.push_back(val);
+  auto it = EraseFirst()(&m, 0);
+  ASSERT_TRUE(it != m.end());
+  EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it));
+  EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values2.begin(),
+                                                             values2.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, EraseRange) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m(values.begin(), values.end());
+  ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+  auto it = m.erase(m.begin(), m.end());
+  EXPECT_THAT(items(m), ::testing::UnorderedElementsAre());
+  EXPECT_TRUE(it == m.end());
+}
+
+TYPED_TEST_P(ModifiersTest, EraseKey) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m(values.begin(), values.end());
+  ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+  EXPECT_EQ(1, m.erase(values[0].first));
+  EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0]));
+  EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values.begin() + 1,
+                                                             values.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, Swap) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> v1;
+  std::vector<T> v2;
+  std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator<T>());
+  std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator<T>());
+  TypeParam m1(v1.begin(), v1.end());
+  TypeParam m2(v2.begin(), v2.end());
+  EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v1));
+  EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v2));
+  m1.swap(m2);
+  EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v2));
+  EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v1));
+}
+
+// TODO(alkis): Write tests for extract.
+// TODO(alkis): Write tests for merge.
+
+REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
+                           InsertRange, InsertOrAssign, InsertOrAssignHint,
+                           Emplace, EmplaceHint, TryEmplace, TryEmplaceHint,
+                           Erase, EraseRange, EraseKey, Swap);
+
+template <typename Type>
+struct is_unique_ptr : std::false_type {};
+
+template <typename Type>
+struct is_unique_ptr<std::unique_ptr<Type>> : std::true_type {};
+
+template <class UnordMap>
+class UniquePtrModifiersTest : public ::testing::Test {
+ protected:
+  UniquePtrModifiersTest() {
+    static_assert(is_unique_ptr<typename UnordMap::mapped_type>::value,
+                  "UniquePtrModifiersTyest may only be called with a "
+                  "std::unique_ptr value type.");
+  }
+};
+
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(UniquePtrModifiersTest);
+
+TYPED_TEST_SUITE_P(UniquePtrModifiersTest);
+
+// Test that we do not move from rvalue arguments if an insertion does not
+// happen.
+TYPED_TEST_P(UniquePtrModifiersTest, TryEmplace) {
+#ifdef UNORDERED_MAP_CXX17
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using V = typename TypeParam::mapped_type;
+  T val = hash_internal::Generator<T>()();
+  TypeParam m;
+  auto p = m.try_emplace(val.first, std::move(val.second));
+  EXPECT_TRUE(p.second);
+  // A moved-from std::unique_ptr is guaranteed to be nullptr.
+  EXPECT_EQ(val.second, nullptr);
+  T val2 = {val.first, hash_internal::Generator<V>()()};
+  p = m.try_emplace(val2.first, std::move(val2.second));
+  EXPECT_FALSE(p.second);
+  EXPECT_NE(val2.second, nullptr);
+#endif
+}
+
+REGISTER_TYPED_TEST_SUITE_P(UniquePtrModifiersTest, TryEmplace);
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
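
EraseFirst above is an expression-SFINAE adapter: the first overload participates in overload resolution only when m->erase(m->begin()) yields a usable iterator, and the varargs overload is the lower-ranked fallback. The same shape, reduced to a standalone sketch with hypothetical names:

    #include <cstddef>
    #include <iostream>
    #include <list>

    // Preferred overload: viable only when c.size() is well-formed. The int
    // parameter outranks `...` during overload resolution.
    template <class C>
    auto SizeOf(const C& c, int) -> decltype(c.size()) {
      return c.size();
    }

    // Fallback for containers without size(): count through the iterators.
    template <class C>
    std::size_t SizeOf(const C& c, ...) {
      std::size_t n = 0;
      for (auto it = c.begin(); it != c.end(); ++it) ++n;
      return n;
    }

    int main() {
      std::list<int> xs = {1, 2, 3};
      std::cout << SizeOf(xs, 0) << "\n";  // picks the decltype overload: 3
    }
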
diff --git a/third_party/abseil_cpp/absl/container/internal/unordered_map_test.cc b/third_party/abseil_cpp/absl/container/internal/unordered_map_test.cc
new file mode 100644
index 000000000000..9cbf512f32b2
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/unordered_map_test.cc
@@ -0,0 +1,50 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <memory>
+#include <unordered_map>
+
+#include "absl/container/internal/unordered_map_constructor_test.h"
+#include "absl/container/internal/unordered_map_lookup_test.h"
+#include "absl/container/internal/unordered_map_members_test.h"
+#include "absl/container/internal/unordered_map_modifiers_test.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
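+// StatefulTestingHash and StatefulTestingEqual (from hash_policy_testing.h)
+// carry per-instance state, so the typed suites can verify that hashers,
+// equality predicates, and allocators are propagated into the container
+// rather than default-constructed.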
+using MapTypes = ::testing::Types<
+    std::unordered_map<int, int, StatefulTestingHash, StatefulTestingEqual,
+                       Alloc<std::pair<const int, int>>>,
+    std::unordered_map<std::string, std::string, StatefulTestingHash,
+                       StatefulTestingEqual,
+                       Alloc<std::pair<const std::string, std::string>>>>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, ConstructorTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, LookupTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, MembersTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, ModifiersTest, MapTypes);
+
+using UniquePtrMapTypes = ::testing::Types<std::unordered_map<
+    int, std::unique_ptr<int>, StatefulTestingHash, StatefulTestingEqual,
+    Alloc<std::pair<const int, std::unique_ptr<int>>>>>;
+
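+// Only the modifiers suite is instantiated for the unique_ptr map,
+// presumably because the other suites copy generated values, which a
+// move-only mapped type cannot support.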
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, UniquePtrModifiersTest,
+                               UniquePtrMapTypes);
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/abseil_cpp/absl/container/internal/unordered_set_constructor_test.h b/third_party/abseil_cpp/absl/container/internal/unordered_set_constructor_test.h
new file mode 100644
index 000000000000..41165b05e97b
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/unordered_set_constructor_test.h
@@ -0,0 +1,496 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
+
+#include <algorithm>
+#include <unordered_set>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordSet>
+class ConstructorTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(ConstructorTest);
+
+TYPED_TEST_P(ConstructorTest, NoArgs) {
+  TypeParam m;
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCount) {
+  TypeParam m(123);
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHash) {
+  using H = typename TypeParam::hasher;
+  H hasher;
+  TypeParam m(123, hasher);
+  EXPECT_EQ(m.hash_function(), hasher);
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) {
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  H hasher;
+  E equal;
+  TypeParam m(123, hasher, equal);
+  EXPECT_EQ(m.hash_function(), hasher);
+  EXPECT_EQ(m.key_eq(), equal);
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) {
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  E equal;
+  A alloc(0);
+  TypeParam m(123, hasher, equal, alloc);
+  EXPECT_EQ(m.hash_function(), hasher);
+  EXPECT_EQ(m.key_eq(), equal);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+  EXPECT_GE(m.bucket_count(), 123);
+
+  const auto& cm = m;
+  EXPECT_EQ(cm.hash_function(), hasher);
+  EXPECT_EQ(cm.key_eq(), equal);
+  EXPECT_EQ(cm.get_allocator(), alloc);
+  EXPECT_TRUE(cm.empty());
+  EXPECT_THAT(keys(cm), ::testing::UnorderedElementsAre());
+  EXPECT_GE(cm.bucket_count(), 123);
+}
+
+template <typename T>
+struct is_std_unordered_set : std::false_type {};
+
+template <typename... T>
+struct is_std_unordered_set<std::unordered_set<T...>> : std::true_type {};
+
+#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+using has_cxx14_std_apis = std::true_type;
+#else
+using has_cxx14_std_apis = std::false_type;
+#endif
+
+template <typename T>
+using expect_cxx14_apis =
+    absl::disjunction<absl::negation<is_std_unordered_set<T>>,
+                      has_cxx14_std_apis>;
+
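+// The tag-dispatch pattern used throughout this header, sketched with a
+// hypothetical ExampleTest (editorial note, not upstream code): each
+// conditionally supported API gets a pair of overloads selected by the trait
+// above, so std::unordered_set on a pre-C++14 standard library resolves to
+// the empty std::false_type overload, while absl containers always run the
+// real body.
+//
+//   template <typename TypeParam>
+//   void ExampleTest(std::false_type) {}  // API unavailable: no-op.
+//   template <typename TypeParam>
+//   void ExampleTest(std::true_type) { /* exercise the C++14 API */ }
+//
+//   TYPED_TEST_P(ConstructorTest, Example) {
+//     ExampleTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+//   }
+//
+// The same pattern is reused below with expect_alloc_constructors for the
+// allocator-extended constructors.
+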
+template <typename TypeParam>
+void BucketCountAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::true_type) {
+  using A = typename TypeParam::allocator_type;
+  A alloc(0);
+  TypeParam m(123, alloc);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
+  BucketCountAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::true_type) {
+  using H = typename TypeParam::hasher;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  A alloc(0);
+  TypeParam m(123, hasher, alloc);
+  EXPECT_EQ(m.hash_function(), hasher);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
+  BucketCountHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+using has_alloc_std_constructors = std::true_type;
+#else
+using has_alloc_std_constructors = std::false_type;
+#endif
+
+template <typename T>
+using expect_alloc_constructors =
+    absl::disjunction<absl::negation<is_std_unordered_set<T>>,
+                      has_alloc_std_constructors>;
+
+template <typename TypeParam>
+void AllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void AllocTest(std::true_type) {
+  using A = typename TypeParam::allocator_type;
+  A alloc(0);
+  TypeParam m(alloc);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_TRUE(m.empty());
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+}
+
+TYPED_TEST_P(ConstructorTest, Alloc) {
+  AllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  E equal;
+  A alloc(0);
+  std::vector<T> values;
+  for (size_t i = 0; i != 10; ++i)
+    values.push_back(hash_internal::Generator<T>()());
+  TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
+  EXPECT_EQ(m.hash_function(), hasher);
+  EXPECT_EQ(m.key_eq(), equal);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::true_type) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using A = typename TypeParam::allocator_type;
+  A alloc(0);
+  std::vector<T> values;
+  for (size_t i = 0; i != 10; ++i)
+    values.push_back(hash_internal::Generator<T>()());
+  TypeParam m(values.begin(), values.end(), 123, alloc);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
+  InputIteratorBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::true_type) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using H = typename TypeParam::hasher;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  A alloc(0);
+  std::vector<T> values;
+  for (size_t i = 0; i != 10; ++i)
+    values.push_back(hash_internal::Generator<T>()());
+  TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
+  EXPECT_EQ(m.hash_function(), hasher);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
+  InputIteratorBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructor) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  E equal;
+  A alloc(0);
+  TypeParam m(123, hasher, equal, alloc);
+  for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+  TypeParam n(m);
+  EXPECT_EQ(m.hash_function(), n.hash_function());
+  EXPECT_EQ(m.key_eq(), n.key_eq());
+  EXPECT_EQ(m.get_allocator(), n.get_allocator());
+  EXPECT_EQ(m, n);
+  EXPECT_NE(TypeParam(0, hasher, equal, alloc), n);
+}
+
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::true_type) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  E equal;
+  A alloc(0);
+  TypeParam m(123, hasher, equal, alloc);
+  for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+  TypeParam n(m, A(11));
+  EXPECT_EQ(m.hash_function(), n.hash_function());
+  EXPECT_EQ(m.key_eq(), n.key_eq());
+  EXPECT_NE(m.get_allocator(), n.get_allocator());
+  EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
+  CopyConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+// TODO(alkis): Test non-propagating allocators on copy constructors.
+
+TYPED_TEST_P(ConstructorTest, MoveConstructor) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  E equal;
+  A alloc(0);
+  TypeParam m(123, hasher, equal, alloc);
+  for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+  TypeParam t(m);
+  TypeParam n(std::move(t));
+  EXPECT_EQ(m.hash_function(), n.hash_function());
+  EXPECT_EQ(m.key_eq(), n.key_eq());
+  EXPECT_EQ(m.get_allocator(), n.get_allocator());
+  EXPECT_EQ(m, n);
+}
+
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::true_type) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  E equal;
+  A alloc(0);
+  TypeParam m(123, hasher, equal, alloc);
+  for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+  TypeParam t(m);
+  TypeParam n(std::move(t), A(1));
+  EXPECT_EQ(m.hash_function(), n.hash_function());
+  EXPECT_EQ(m.key_eq(), n.key_eq());
+  EXPECT_NE(m.get_allocator(), n.get_allocator());
+  EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
+  MoveConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+// TODO(alkis): Test non-propagating allocators on move constructors.
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  E equal;
+  A alloc(0);
+  TypeParam m(values, 123, hasher, equal, alloc);
+  EXPECT_EQ(m.hash_function(), hasher);
+  EXPECT_EQ(m.key_eq(), equal);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::true_type) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using A = typename TypeParam::allocator_type;
+  hash_internal::Generator<T> gen;
+  std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+  A alloc(0);
+  TypeParam m(values, 123, alloc);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
+  InitializerListBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::true_type) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using H = typename TypeParam::hasher;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  A alloc(0);
+  hash_internal::Generator<T> gen;
+  std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+  TypeParam m(values, 123, hasher, alloc);
+  EXPECT_EQ(m.hash_function(), hasher);
+  EXPECT_EQ(m.get_allocator(), alloc);
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+  EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
+  InitializerListBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, CopyAssignment) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  E equal;
+  A alloc(0);
+  hash_internal::Generator<T> gen;
+  TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+  TypeParam n;
+  n = m;
+  EXPECT_EQ(m.hash_function(), n.hash_function());
+  EXPECT_EQ(m.key_eq(), n.key_eq());
+  EXPECT_EQ(m, n);
+}
+
+// TODO(alkis): Test [non-]propagating allocators on move/copy assignments
+// (it depends on traits).
+
+TYPED_TEST_P(ConstructorTest, MoveAssignment) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using H = typename TypeParam::hasher;
+  using E = typename TypeParam::key_equal;
+  using A = typename TypeParam::allocator_type;
+  H hasher;
+  E equal;
+  A alloc(0);
+  hash_internal::Generator<T> gen;
+  TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+  TypeParam t(m);
+  TypeParam n;
+  n = std::move(t);
+  EXPECT_EQ(m.hash_function(), n.hash_function());
+  EXPECT_EQ(m.key_eq(), n.key_eq());
+  EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+  TypeParam m;
+  m = values;
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  TypeParam m({gen(), gen(), gen()});
+  TypeParam n({gen()});
+  n = m;
+  EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  TypeParam m({gen(), gen(), gen()});
+  TypeParam t(m);
+  TypeParam n({gen()});
+  n = std::move(t);
+  EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+  TypeParam m;
+  m = values;
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  hash_internal::Generator<T> gen;
+  std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+  TypeParam m(values);
+  m = *&m;  // Avoid -Wself-assign.
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+REGISTER_TYPED_TEST_CASE_P(
+    ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
+    BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
+    InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
+    InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
+    MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
+    InitializerListBucketAlloc, InitializerListBucketHashAlloc, CopyAssignment,
+    MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting,
+    MoveAssignmentOverwritesExisting,
+    AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/unordered_set_lookup_test.h b/third_party/abseil_cpp/absl/container/internal/unordered_set_lookup_test.h
new file mode 100644
index 000000000000..8f2f4b207ef0
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/unordered_set_lookup_test.h
@@ -0,0 +1,91 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordSet>
+class LookupTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(LookupTest);
+
+TYPED_TEST_P(LookupTest, Count) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m;
+  for (const auto& v : values)
+    EXPECT_EQ(0, m.count(v)) << ::testing::PrintToString(v);
+  m.insert(values.begin(), values.end());
+  for (const auto& v : values)
+    EXPECT_EQ(1, m.count(v)) << ::testing::PrintToString(v);
+}
+
+TYPED_TEST_P(LookupTest, Find) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m;
+  for (const auto& v : values)
+    EXPECT_TRUE(m.end() == m.find(v)) << ::testing::PrintToString(v);
+  m.insert(values.begin(), values.end());
+  for (const auto& v : values) {
+    typename TypeParam::iterator it = m.find(v);
+    static_assert(std::is_same<const typename TypeParam::value_type&,
+                               decltype(*it)>::value,
+                  "");
+    static_assert(std::is_same<const typename TypeParam::value_type*,
+                               decltype(it.operator->())>::value,
+                  "");
+    EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(v);
+    EXPECT_EQ(v, *it) << ::testing::PrintToString(v);
+  }
+}
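+
+// Editorial note: the static_asserts in Find pin down that set iterators are
+// effectively const iterators -- elements of an unordered set are keys, and
+// mutating one in place would invalidate its bucket placement.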
+
+TYPED_TEST_P(LookupTest, EqualRange) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m;
+  for (const auto& v : values) {
+    auto r = m.equal_range(v);
+    ASSERT_EQ(0, std::distance(r.first, r.second));
+  }
+  m.insert(values.begin(), values.end());
+  for (const auto& v : values) {
+    auto r = m.equal_range(v);
+    ASSERT_EQ(1, std::distance(r.first, r.second));
+    EXPECT_EQ(v, *r.first);
+  }
+}
+
+REGISTER_TYPED_TEST_CASE_P(LookupTest, Count, Find, EqualRange);
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/unordered_set_members_test.h b/third_party/abseil_cpp/absl/container/internal/unordered_set_members_test.h
new file mode 100644
index 000000000000..4c5e104af292
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/unordered_set_members_test.h
@@ -0,0 +1,86 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
+
+#include <type_traits>
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordSet>
+class MembersTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(MembersTest);
+
+template <typename T>
+void UseType() {}
+
+TYPED_TEST_P(MembersTest, Typedefs) {
+  EXPECT_TRUE((std::is_same<typename TypeParam::key_type,
+                            typename TypeParam::value_type>()));
+  EXPECT_TRUE((absl::conjunction<
+               absl::negation<std::is_signed<typename TypeParam::size_type>>,
+               std::is_integral<typename TypeParam::size_type>>()));
+  EXPECT_TRUE((absl::conjunction<
+               std::is_signed<typename TypeParam::difference_type>,
+               std::is_integral<typename TypeParam::difference_type>>()));
+  EXPECT_TRUE((std::is_convertible<
+               decltype(std::declval<const typename TypeParam::hasher&>()(
+                   std::declval<const typename TypeParam::key_type&>())),
+               size_t>()));
+  EXPECT_TRUE((std::is_convertible<
+               decltype(std::declval<const typename TypeParam::key_equal&>()(
+                   std::declval<const typename TypeParam::key_type&>(),
+                   std::declval<const typename TypeParam::key_type&>())),
+               bool>()));
+  EXPECT_TRUE((std::is_same<typename TypeParam::allocator_type::value_type,
+                            typename TypeParam::value_type>()));
+  EXPECT_TRUE((std::is_same<typename TypeParam::value_type&,
+                            typename TypeParam::reference>()));
+  EXPECT_TRUE((std::is_same<const typename TypeParam::value_type&,
+                            typename TypeParam::const_reference>()));
+  EXPECT_TRUE((std::is_same<typename std::allocator_traits<
+                                typename TypeParam::allocator_type>::pointer,
+                            typename TypeParam::pointer>()));
+  EXPECT_TRUE(
+      (std::is_same<typename std::allocator_traits<
+                        typename TypeParam::allocator_type>::const_pointer,
+                    typename TypeParam::const_pointer>()));
+}
+
+TYPED_TEST_P(MembersTest, SimpleFunctions) {
+  EXPECT_GT(TypeParam().max_size(), 0);
+}
+
+TYPED_TEST_P(MembersTest, BeginEnd) {
+  TypeParam t = {typename TypeParam::value_type{}};
+  EXPECT_EQ(t.begin(), t.cbegin());
+  EXPECT_EQ(t.end(), t.cend());
+  EXPECT_NE(t.begin(), t.end());
+  EXPECT_NE(t.cbegin(), t.cend());
+}
+
+REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd);
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/unordered_set_modifiers_test.h b/third_party/abseil_cpp/absl/container/internal/unordered_set_modifiers_test.h
new file mode 100644
index 000000000000..26be58d99f21
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/unordered_set_modifiers_test.h
@@ -0,0 +1,190 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordSet>
+class ModifiersTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(ModifiersTest);
+
+TYPED_TEST_P(ModifiersTest, Clear) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m(values.begin(), values.end());
+  ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+  m.clear();
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+  EXPECT_TRUE(m.empty());
+}
+
+TYPED_TEST_P(ModifiersTest, Insert) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  T val = hash_internal::Generator<T>()();
+  TypeParam m;
+  auto p = m.insert(val);
+  EXPECT_TRUE(p.second);
+  EXPECT_EQ(val, *p.first);
+  p = m.insert(val);
+  EXPECT_FALSE(p.second);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertHint) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  T val = hash_internal::Generator<T>()();
+  TypeParam m;
+  auto it = m.insert(m.end(), val);
+  EXPECT_TRUE(it != m.end());
+  EXPECT_EQ(val, *it);
+  it = m.insert(it, val);
+  EXPECT_TRUE(it != m.end());
+  EXPECT_EQ(val, *it);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRange) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m;
+  m.insert(values.begin(), values.end());
+  ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ModifiersTest, Emplace) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  T val = hash_internal::Generator<T>()();
+  TypeParam m;
+  // TODO(alkis): Find a more meaningful way to exercise emplace, perhaps
+  // via test traits/policy.
+  auto p = m.emplace(val);
+  EXPECT_TRUE(p.second);
+  EXPECT_EQ(val, *p.first);
+  p = m.emplace(val);
+  EXPECT_FALSE(p.second);
+  EXPECT_EQ(val, *p.first);
+}
+
+TYPED_TEST_P(ModifiersTest, EmplaceHint) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  T val = hash_internal::Generator<T>()();
+  TypeParam m;
+  // TODO(alkis): Find a more meaningful way to exercise emplace, perhaps
+  // via test traits/policy.
+  auto it = m.emplace_hint(m.end(), val);
+  EXPECT_EQ(val, *it);
+  it = m.emplace_hint(it, val);
+  EXPECT_EQ(val, *it);
+}
+
+template <class V>
+using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type;
+
+// In openmap we chose not to return the iterator from erase because that's
+// more expensive. As such, we adapt erase to return an iterator here.
+struct EraseFirst {
+  template <class Map>
+  auto operator()(Map* m, int) const
+      -> IfNotVoid<decltype(m->erase(m->begin()))> {
+    return m->erase(m->begin());
+  }
+  template <class Map>
+  typename Map::iterator operator()(Map* m, ...) const {
+    auto it = m->begin();
+    m->erase(it++);
+    return it;
+  }
+};
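+
+// Editorial sketch of the dispatch above: when decltype(m->erase(m->begin()))
+// is a valid non-void type, the `int` overload is viable and wins overload
+// resolution over the `...` fallback; otherwise SFINAE removes it and the
+// fallback erases a post-incremented copy of begin(). For example:
+//
+//   std::unordered_set<int> s = {1, 2, 3};
+//   auto next = EraseFirst()(&s, 0);  // erase here returns the next iterator.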
+
+TYPED_TEST_P(ModifiersTest, Erase) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m(values.begin(), values.end());
+  ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+  std::vector<T> values2;
+  for (const auto& val : values)
+    if (val != *m.begin()) values2.push_back(val);
+  auto it = EraseFirst()(&m, 0);
+  ASSERT_TRUE(it != m.end());
+  EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it));
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values2.begin(),
+                                                            values2.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, EraseRange) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m(values.begin(), values.end());
+  ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+  auto it = m.erase(m.begin(), m.end());
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+  EXPECT_TRUE(it == m.end());
+}
+
+TYPED_TEST_P(ModifiersTest, EraseKey) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator<T>());
+  TypeParam m(values.begin(), values.end());
+  ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+  EXPECT_EQ(1, m.erase(values[0]));
+  EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0]));
+  EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values.begin() + 1,
+                                                            values.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, Swap) {
+  using T = hash_internal::GeneratedType<TypeParam>;
+  std::vector<T> v1;
+  std::vector<T> v2;
+  std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator<T>());
+  std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator<T>());
+  TypeParam m1(v1.begin(), v1.end());
+  TypeParam m2(v2.begin(), v2.end());
+  EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v1));
+  EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v2));
+  m1.swap(m2);
+  EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v2));
+  EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v1));
+}
+
+// TODO(alkis): Write tests for extract.
+// TODO(alkis): Write tests for merge.
+
+REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
+                           InsertRange, Emplace, EmplaceHint, Erase, EraseRange,
+                           EraseKey, Swap);
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
diff --git a/third_party/abseil_cpp/absl/container/internal/unordered_set_test.cc b/third_party/abseil_cpp/absl/container/internal/unordered_set_test.cc
new file mode 100644
index 000000000000..a134b53984bc
--- /dev/null
+++ b/third_party/abseil_cpp/absl/container/internal/unordered_set_test.cc
@@ -0,0 +1,41 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <unordered_set>
+
+#include "absl/container/internal/unordered_set_constructor_test.h"
+#include "absl/container/internal/unordered_set_lookup_test.h"
+#include "absl/container/internal/unordered_set_members_test.h"
+#include "absl/container/internal/unordered_set_modifiers_test.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using SetTypes = ::testing::Types<
+    std::unordered_set<int, StatefulTestingHash, StatefulTestingEqual,
+                       Alloc<int>>,
+    std::unordered_set<std::string, StatefulTestingHash, StatefulTestingEqual,
+                       Alloc<std::string>>>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedSet, ConstructorTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedSet, LookupTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedSet, MembersTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedSet, ModifiersTest, SetTypes);
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl