Diffstat (limited to 'absl/container')
-rw-r--r--  absl/container/flat_hash_map.h                       9
-rw-r--r--  absl/container/internal/container_memory.h          36
-rw-r--r--  absl/container/internal/hashtablez_sampler.h        20
-rw-r--r--  absl/container/internal/hashtablez_sampler_test.cc  23
-rw-r--r--  absl/container/internal/raw_hash_set.h              16
5 files changed, 85 insertions, 19 deletions
diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h
index cc3d8b69b7a3..f6d28472ad2b 100644
--- a/absl/container/flat_hash_map.h
+++ b/absl/container/flat_hash_map.h
@@ -527,25 +527,26 @@ namespace container_internal {
 
 template <class K, class V>
 struct FlatHashMapPolicy {
-  using slot_type = container_internal::slot_type<K, V>;
+  using slot_policy = container_internal::map_slot_policy<K, V>;
+  using slot_type = typename slot_policy::slot_type;
   using key_type = K;
   using mapped_type = V;
   using init_type = std::pair</*non const*/ key_type, mapped_type>;
 
   template <class Allocator, class... Args>
   static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
-    slot_type::construct(alloc, slot, std::forward<Args>(args)...);
+    slot_policy::construct(alloc, slot, std::forward<Args>(args)...);
   }
 
   template <class Allocator>
   static void destroy(Allocator* alloc, slot_type* slot) {
-    slot_type::destroy(alloc, slot);
+    slot_policy::destroy(alloc, slot);
   }
 
   template <class Allocator>
   static void transfer(Allocator* alloc, slot_type* new_slot,
                        slot_type* old_slot) {
-    slot_type::transfer(alloc, new_slot, old_slot);
+    slot_policy::transfer(alloc, new_slot, old_slot);
   }
 
   template <class F, class... Args>
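The change above is mechanical once the intent is clear: slot storage and slot lifecycle management are being split apart, so `FlatHashMapPolicy` now forwards `construct`/`destroy`/`transfer` to a dedicated `slot_policy` type instead of calling static members on the slot type itself. A minimal sketch of that delegation pattern, with hypothetical `My*` names standing in for the Abseil types and the slot simplified to a plain pair:

```cpp
#include <memory>
#include <utility>

// Hypothetical stand-in for map_slot_policy: owns the lifecycle operations
// for a slot type, which is simplified here to a plain std::pair.
template <class K, class V>
struct MySlotPolicy {
  using slot_type = std::pair<K, V>;

  template <class Allocator, class... Args>
  static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
    std::allocator_traits<Allocator>::construct(*alloc, slot,
                                                std::forward<Args>(args)...);
  }

  template <class Allocator>
  static void destroy(Allocator* alloc, slot_type* slot) {
    std::allocator_traits<Allocator>::destroy(*alloc, slot);
  }

  template <class Allocator>
  static void transfer(Allocator* alloc, slot_type* new_slot,
                       slot_type* old_slot) {
    construct(alloc, new_slot, std::move(*old_slot));
    destroy(alloc, old_slot);
  }
};

// Mirrors the shape of FlatHashMapPolicy after this commit: every slot
// operation is a one-line forward to the slot policy.
template <class K, class V>
struct MyMapPolicy {
  using slot_policy = MySlotPolicy<K, V>;
  using slot_type = typename slot_policy::slot_type;

  template <class Allocator, class... Args>
  static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
    slot_policy::construct(alloc, slot, std::forward<Args>(args)...);
  }
};
```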
diff --git a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h
index 35b691ce5af3..3a3f97032cdf 100644
--- a/absl/container/internal/container_memory.h
+++ b/absl/container/internal/container_memory.h
@@ -311,7 +311,23 @@ struct IsLayoutCompatible {
 // kMutableKeys. For C++11, the relevant section of the standard is
 // https://timsong-cpp.github.io/cppwp/n3337/class.mem#19 (9.2.19)
 template <class K, class V>
-union slot_type {
+union map_slot_type {
+  map_slot_type() {}
+  ~map_slot_type() = delete;
+  using value_type = std::pair<const K, V>;
+  using mutable_value_type = std::pair<K, V>;
+
+  value_type value;
+  mutable_value_type mutable_value;
+  K key;
+};
+
+template <class K, class V>
+struct map_slot_policy {
+  using slot_type = map_slot_type<K, V>;
+  using value_type = std::pair<const K, V>;
+  using mutable_value_type = std::pair<K, V>;
+
  private:
   static void emplace(slot_type* slot) {
     // The construction of union doesn't do anything at runtime but it allows us
@@ -321,19 +337,17 @@ union slot_type {
   // If pair<const K, V> and pair<K, V> are layout-compatible, we can accept one
   // or the other via slot_type. We are also free to access the key via
   // slot_type::key in this case.
-  using kMutableKeys =
-      std::integral_constant<bool,
-                             memory_internal::IsLayoutCompatible<K, V>::value>;
+  using kMutableKeys = memory_internal::IsLayoutCompatible<K, V>;
 
  public:
-  slot_type() {}
-  ~slot_type() = delete;
-  using value_type = std::pair<const K, V>;
-  using mutable_value_type = std::pair<K, V>;
+  static value_type& element(slot_type* slot) { return slot->value; }
+  static const value_type& element(const slot_type* slot) {
+    return slot->value;
+  }
 
-  value_type value;
-  mutable_value_type mutable_value;
-  K key;
+  static const K& key(const slot_type* slot) {
+    return kMutableKeys::value ? slot->key : slot->value.first;
+  }
 
   template <class Allocator, class... Args>
   static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
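The refactoring above moves the union members out of the old `slot_type` into a bare `map_slot_type`, and the static helpers into `map_slot_policy`. The point of the union is the `kMutableKeys` trick: when `std::pair<const K, V>` and `std::pair<K, V>` are layout-compatible, the common-initial-sequence rule lets `key(slot)` read `slot->key` regardless of which pair member is currently active. A stripped-down sketch, using `std::is_standard_layout` as a loose stand-in for the real `IsLayoutCompatible` trait:

```cpp
#include <type_traits>
#include <utility>

// Simplified analogue of map_slot_type: members are constructed and
// destroyed manually by the owning policy, hence the empty special members.
template <class K, class V>
union MySlot {
  MySlot() {}
  ~MySlot() {}

  std::pair<const K, V> value;    // what lookup hands back to users
  std::pair<K, V> mutable_value;  // what insertion and transfer write through
  K key;                          // readable only if the layouts match
};

// Loose stand-in for memory_internal::IsLayoutCompatible<K, V>; the real
// trait verifies that the two pair specializations share a layout.
template <class K, class V>
using MyMutableKeys = std::is_standard_layout<std::pair<K, V>>;

template <class K, class V>
const K& slot_key(const MySlot<K, V>* slot) {
  // Mirrors map_slot_policy::key above: go through the union's key member
  // when the layouts are compatible, otherwise through the const pair.
  return MyMutableKeys<K, V>::value ? slot->key : slot->value.first;
}
```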
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index 8b81653a57f6..e2d8f3643b86 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -33,6 +33,7 @@
 
 #include "absl/base/internal/per_thread_tls.h"
 #include "absl/base/optimization.h"
+#include "absl/container/internal/have_sse.h"
 #include "absl/synchronization/mutex.h"
 #include "absl/utility/utility.h"
 
@@ -82,10 +83,24 @@ struct HashtablezInfo {
   void* stack[kMaxStackDepth];
 };
 
+inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
+#if SWISSTABLE_HAVE_SSE2
+  total_probe_length /= 16;
+#else
+  total_probe_length /= 8;
+#endif
+  info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
+  info->num_erases.store(0, std::memory_order_relaxed);
+}
+
 inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
                                      size_t capacity) {
   info->size.store(size, std::memory_order_relaxed);
   info->capacity.store(capacity, std::memory_order_relaxed);
+  if (size == 0) {
+    // This is a clear, reset the total/num_erases too.
+    RecordRehashSlow(info, 0);
+  }
 }
 
 void RecordInsertSlow(HashtablezInfo* info, size_t hash,
@@ -126,6 +141,11 @@ class HashtablezInfoHandle {
     RecordStorageChangedSlow(info_, size, capacity);
   }
 
+  inline void RecordRehash(size_t total_probe_length) {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    RecordRehashSlow(info_, total_probe_length);
+  }
+
   inline void RecordInsert(size_t hash, size_t distance_from_desired) {
     if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
     RecordInsertSlow(info_, hash, distance_from_desired);
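Two details of the sampler changes above are worth spelling out. First, `RecordRehashSlow` stores the probe length in units of probe groups rather than slots: a group is 16 slots wide with SSE2 and 8 otherwise, which is what the `#if SWISSTABLE_HAVE_SSE2` division encodes. Second, the new `RecordRehash` follows the handle's usual pattern of a trivially inlinable null check in front of the out-of-line slow path. A sketch of the unit conversion, assuming the SSE2 group width:

```cpp
#include <cstddef>
#include <cstdio>

// Assuming SWISSTABLE_HAVE_SSE2, so one probe group covers 16 slots.
constexpr std::size_t kGroupWidth = 16;

// Converts a probe length measured in slots into group-sized steps, as
// RecordRehashSlow does before storing it.
std::size_t NormalizedProbeLength(std::size_t probe_length_in_slots) {
  return probe_length_in_slots / kGroupWidth;
}

int main() {
  // Probing across three full groups (48 slots) records as 3 steps.
  std::printf("%zu\n", NormalizedProbeLength(3 * kGroupWidth));  // prints 3
}
```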
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
index f9ee941a015c..a6e4ac815749 100644
--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -145,6 +145,29 @@ TEST(HashtablezInfoTest, RecordErase) {
   EXPECT_EQ(info.num_erases.load(), 1);
 }
 
+TEST(HashtablezInfoTest, RecordRehash) {
+  HashtablezInfo info;
+  absl::MutexLock l(&info.init_mu);
+  info.PrepareForSampling();
+  RecordInsertSlow(&info, 0x1, 0);
+  RecordInsertSlow(&info, 0x2, kProbeLength);
+  RecordInsertSlow(&info, 0x4, kProbeLength);
+  RecordInsertSlow(&info, 0x8, 2 * kProbeLength);
+  EXPECT_EQ(info.size.load(), 4);
+  EXPECT_EQ(info.total_probe_length.load(), 4);
+
+  RecordEraseSlow(&info);
+  RecordEraseSlow(&info);
+  EXPECT_EQ(info.size.load(), 2);
+  EXPECT_EQ(info.total_probe_length.load(), 4);
+  EXPECT_EQ(info.num_erases.load(), 2);
+
+  RecordRehashSlow(&info, 3 * kProbeLength);
+  EXPECT_EQ(info.size.load(), 2);
+  EXPECT_EQ(info.total_probe_length.load(), 3);
+  EXPECT_EQ(info.num_erases.load(), 0);
+}
+
 TEST(HashtablezSamplerTest, SmallSampleParameter) {
   SetHashtablezEnabled(true);
   SetHashtablezSampleParameter(100);
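The expectations in the new test follow from that group-width scaling, assuming SSE2 so that `kProbeLength == 16`: the four inserts contribute 0/16 + 16/16 + 16/16 + (2*16)/16 = 4 to `total_probe_length`, the two erases leave it untouched while bumping `num_erases` to 2, and the rehash records (3*16)/16 = 3 while resetting `num_erases` back to zero.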
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index a5d0cae00bfb..33f8f8fac527 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -936,7 +936,7 @@ class raw_hash_set {
       reset_growth_left();
     }
     assert(empty());
-    infoz_.RecordStorageChanged(size_, capacity_);
+    infoz_.RecordStorageChanged(0, capacity_);
   }
 
   // This overload kicks in when the argument is an rvalue of insertable and
@@ -1226,7 +1226,7 @@ class raw_hash_set {
     if (n == 0 && capacity_ == 0) return;
     if (n == 0 && size_ == 0) {
       destroy_slots();
-      infoz_.RecordStorageChanged(size_, capacity_);
+      infoz_.RecordStorageChanged(0, 0);
       return;
     }
     // bitor is a faster way of doing `max` here. We will round up to the next
@@ -1483,11 +1483,14 @@ class raw_hash_set {
     capacity_ = new_capacity;
     initialize_slots();
 
+    size_t total_probe_length = 0;
     for (size_t i = 0; i != old_capacity; ++i) {
       if (IsFull(old_ctrl[i])) {
         size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
                                           PolicyTraits::element(old_slots + i));
-        size_t new_i = find_first_non_full(hash).offset;
+        auto target = find_first_non_full(hash);
+        size_t new_i = target.offset;
+        total_probe_length += target.probe_length;
         set_ctrl(new_i, H2(hash));
         PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
       }
@@ -1499,6 +1502,7 @@ class raw_hash_set {
       Deallocate<Layout::Alignment()>(&alloc_ref(), old_ctrl,
                                       layout.AllocSize());
     }
+    infoz_.RecordRehash(total_probe_length);
   }
 
   void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
@@ -1522,12 +1526,15 @@ class raw_hash_set {
     ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
     typename std::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type
         raw;
+    size_t total_probe_length = 0;
     slot_type* slot = reinterpret_cast<slot_type*>(&raw);
     for (size_t i = 0; i != capacity_; ++i) {
       if (!IsDeleted(ctrl_[i])) continue;
       size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
                                         PolicyTraits::element(slots_ + i));
-      size_t new_i = find_first_non_full(hash).offset;
+      auto target = find_first_non_full(hash);
+      size_t new_i = target.offset;
+      total_probe_length += target.probe_length;
 
       // Verify if the old and new i fall within the same group wrt the hash.
       // If they do, we don't need to move the object as it falls already in the
@@ -1560,6 +1567,7 @@ class raw_hash_set {
       }
     }
     reset_growth_left();
+    infoz_.RecordRehash(total_probe_length);
   }
 
   void rehash_and_grow_if_necessary() {
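Both `resize()` and `drop_deletes_without_resize()` now follow the same pattern: capture the full `{offset, probe_length}` result of `find_first_non_full` instead of just the offset, accumulate the probe lengths across all relocated elements, and report the sum through `infoz_.RecordRehash` once the rehash completes. A self-contained sketch of that accumulation pattern, with hypothetical names and a trivial probe stand-in:

```cpp
#include <cstddef>
#include <vector>

// Mirrors the shape of find_first_non_full's result in the diff above.
struct FindInfo {
  std::size_t offset;
  std::size_t probe_length;
};

// Trivial stand-in for find_first_non_full: the real probe sequence is
// elided, and every element pretends to land after one 16-slot group.
FindInfo FindFirstNonFull(std::size_t hash, std::size_t capacity) {
  return {hash % capacity, 16};
}

// Relocation loop in the style of resize(): sum per-element probe lengths
// and hand the total to the sampler exactly once.
std::size_t RehashAll(const std::vector<std::size_t>& hashes,
                      std::size_t capacity) {
  std::size_t total_probe_length = 0;
  for (std::size_t hash : hashes) {
    FindInfo target = FindFirstNonFull(hash, capacity);
    total_probe_length += target.probe_length;
    // ... transfer the element into slot target.offset ...
  }
  return total_probe_length;  // passed to infoz_.RecordRehash(...)
}
```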