author     Abseil Team <absl-team@google.com>        2019-02-19T22:29-0800
committer  Jon Cohen <cohenjon@google.com>           2019-02-20T23:02-0500
commit     93d155bc4414f6c121bb1f19dba9fdb27c8943bc (patch)
tree       47facabc0db6f4204fe5b7cdba5420276b00a1f0 /absl/container
parent     426eaa4aa44e4580418bee46c1bd13911151bfb1 (diff)
Export of internal Abseil changes.
--
3d20ce6cd6541579abecaba169d4b8716d511272 by Jon Cohen <cohenjon@google.com>:

Only use LSAN for clang version >= 3.5. This should fix
https://github.com/abseil/abseil-cpp/issues/244

PiperOrigin-RevId: 234675129

--
e15bd4ec7a81aa95cc3d09fa1e0e81d58ae478fb by Conrad Parker <conradparker@google.com>:

Fix errors in apply() sample code

The following changes are made:
 * Make the example method public.
 * Give the two user functions different names to avoid confusion about
   whether apply() can select the correct overload of a function based on
   its tuple argument (it can't).
 * Pass tuple2 to the second example apply() invocation, instead of passing
   its contents individually.
 * Fix a s/tuple/tuple3/ typo in the third example apply() invocation.

PiperOrigin-RevId: 234223407

--
de0ed71e21bc76ddf9fe715fdbaef74cd0df95c7 by Abseil Team <absl-team@google.com>:

First test whether a macro is defined to avoid -Wundef.

Abseil clients may need to compile their code with the -Wundef warning flag,
so it is helpful if Abseil header files compile cleanly under -Wundef.

How to avoid the -Wundef warning: if a macro may be undefined, first test
whether the macro is defined before testing its value. We can't rely on the
C preprocessor rule that an undefined macro evaluates to 0.

PiperOrigin-RevId: 234201123

--
fa484ad7dae0cac21140a96662809ecb0ec8eb5d by Abseil Team <absl-team@google.com>:

Internal change.

PiperOrigin-RevId: 234185697

--
d69b1baef681e27954b065375ecf9c2320463b2b by Samuel Benzaquen <sbenza@google.com>:

Mix pointers more thoroughly.
Some pointer alignments interact badly with the mixing constant. By mixing
twice we reduce this problem.

PiperOrigin-RevId: 234178401

--
1041d0e474610f3a8fea0db90958857327d6da1c by Samuel Benzaquen <sbenza@google.com>:

Record rehashes in the hashtablez struct.
Only recording the probe length on insertion causes a large overestimation
of the total probe length at any given time. With natural growth, elements
are inserted when the load factor is between (max load / 2, max load).
However, after a rehash the majority of elements are actually inserted when
the load factor is less than max / 2 and have a much lower average probe
length.

Also reset some values when the table is cleared.

PiperOrigin-RevId: 234013580

--
299205caf3c89c47339f7409bc831746602cea84 by Mark Barolak <mbar@google.com>:

Fix a sample code snippet that assumes `absl::string_view::const_iterator`
is `const char*`. This is generally true; however, in C++17 builds
absl::string_view is an alias for std::string_view, and on MSVC
std::string_view::const_iterator is an object rather than just a pointer.

PiperOrigin-RevId: 233844595

--
af6c6370cf51a1e6c1469c79dfb2a486a4009136 by Abseil Team <absl-team@google.com>:

Internal change.

PiperOrigin-RevId: 233773470

--
6e59e4b8e2bb6101b448f0f32b0bea81fe399ccf by Abseil Team <absl-team@google.com>:

Fix typo in {Starts|Ends}WithIgnoreCase comment in match.h.

PiperOrigin-RevId: 233662951

GitOrigin-RevId: 3d20ce6cd6541579abecaba169d4b8716d511272
Change-Id: Ib9a29b1c38c6aedf5d9f3f7f00596e8d30e864dd
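Editor's note: the apply() fix above is described but not shown. The sketch below illustrates the corrected usage pattern; the function and tuple names (UserFunction1, UserFunction2, tuple1, tuple2) are illustrative, not the names in the Abseil documentation. The point is that absl::apply() forwards one whole tuple to one specific callable, so it cannot pick an overload based on the tuple's contents, and each callable needs its own name.

#include <string>
#include <tuple>

#include "absl/utility/utility.h"

// Hypothetical user functions with distinct names; absl::apply() cannot
// select an overload based on the tuple argument.
void UserFunction1(int, std::string) {}
void UserFunction2(int) {}

void ApplyExample() {
  std::tuple<int, std::string> tuple1(42, "bar");
  absl::apply(&UserFunction1, tuple1);  // calls UserFunction1(42, "bar")

  std::tuple<int> tuple2(3);
  // Pass the tuple object itself, not its unpacked contents.
  absl::apply(&UserFunction2, tuple2);  // calls UserFunction2(3)
}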
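Editor's note: the -Wundef entry states the rule without an example. A minimal sketch of the pattern, using a hypothetical macro name (SOME_FEATURE_MACRO is not an Abseil macro): test defined() first so the value test is never evaluated for an undefined macro.

// Warns under -Wundef when SOME_FEATURE_MACRO is not defined:
//   #if SOME_FEATURE_MACRO
// Safe form: check defined() first, then the value.
#if defined(SOME_FEATURE_MACRO) && SOME_FEATURE_MACRO
inline int FeatureEnabled() { return 1; }
#else
inline int FeatureEnabled() { return 0; }
#endif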
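Editor's note: the pointer-mixing entry summarizes the idea without code. The sketch below is a generic illustration of double mixing under the assumption of a multiplicative mix; the constant and function names are illustrative and are not Abseil's hashing internals. Aligned pointers have low bits that are always zero, and a single multiply can leave that structure visible in the output; feeding the result through the mix a second time spreads it further.

#include <cstddef>
#include <cstdint>

// Illustrative constant and routine only; not Abseil's implementation.
inline uint64_t MixOnce(uint64_t v) {
  const uint64_t kMul = 0x9ddfea08eb382d69ULL;
  v *= kMul;
  return v ^ (v >> 47);
}

inline size_t MixPointerTwice(const void* p) {
  // Mixing twice reduces the chance that one unlucky interaction between
  // pointer alignment and the multiplier dominates the result.
  return static_cast<size_t>(MixOnce(MixOnce(reinterpret_cast<uintptr_t>(p))));
}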
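Editor's note: the string_view entry explains the portability issue, but the fixed snippet itself is not part of this diff. A hedged example of the safe pattern, with a hypothetical helper name: take a const char* from data() rather than from begin(), since on MSVC C++17 builds const_iterator is a class type that does not convert to a pointer.

#include <iostream>

#include "absl/strings/string_view.h"

// Hypothetical helper, not the snippet fixed by this change.
void PrintFirstChar(absl::string_view s) {
  if (s.empty()) return;
  const char* p = s.data();      // data() is always const char*
  // const char* p = s.begin();  // may fail to compile where const_iterator
  //                             // is an object rather than a pointer
  std::cout << *p << "\n";
}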
Diffstat (limited to 'absl/container')
-rw-r--r--   absl/container/flat_hash_map.h                        9
-rw-r--r--   absl/container/internal/container_memory.h            36
-rw-r--r--   absl/container/internal/hashtablez_sampler.h          20
-rw-r--r--   absl/container/internal/hashtablez_sampler_test.cc    23
-rw-r--r--   absl/container/internal/raw_hash_set.h                16
5 files changed, 85 insertions, 19 deletions
diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h
index cc3d8b69b7a3..f6d28472ad2b 100644
--- a/absl/container/flat_hash_map.h
+++ b/absl/container/flat_hash_map.h
@@ -527,25 +527,26 @@ namespace container_internal {
 
 template <class K, class V>
 struct FlatHashMapPolicy {
-  using slot_type = container_internal::slot_type<K, V>;
+  using slot_policy = container_internal::map_slot_policy<K, V>;
+  using slot_type = typename slot_policy::slot_type;
   using key_type = K;
   using mapped_type = V;
   using init_type = std::pair</*non const*/ key_type, mapped_type>;
 
   template <class Allocator, class... Args>
   static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
-    slot_type::construct(alloc, slot, std::forward<Args>(args)...);
+    slot_policy::construct(alloc, slot, std::forward<Args>(args)...);
   }
 
   template <class Allocator>
   static void destroy(Allocator* alloc, slot_type* slot) {
-    slot_type::destroy(alloc, slot);
+    slot_policy::destroy(alloc, slot);
   }
 
   template <class Allocator>
   static void transfer(Allocator* alloc, slot_type* new_slot,
                        slot_type* old_slot) {
-    slot_type::transfer(alloc, new_slot, old_slot);
+    slot_policy::transfer(alloc, new_slot, old_slot);
   }
 
   template <class F, class... Args>
diff --git a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h
index 35b691ce5af3..3a3f97032cdf 100644
--- a/absl/container/internal/container_memory.h
+++ b/absl/container/internal/container_memory.h
@@ -311,7 +311,23 @@ struct IsLayoutCompatible {
 // kMutableKeys. For C++11, the relevant section of the standard is
 // https://timsong-cpp.github.io/cppwp/n3337/class.mem#19 (9.2.19)
 template <class K, class V>
-union slot_type {
+union map_slot_type {
+  map_slot_type() {}
+  ~map_slot_type() = delete;
+  using value_type = std::pair<const K, V>;
+  using mutable_value_type = std::pair<K, V>;
+
+  value_type value;
+  mutable_value_type mutable_value;
+  K key;
+};
+
+template <class K, class V>
+struct map_slot_policy {
+  using slot_type = map_slot_type<K, V>;
+  using value_type = std::pair<const K, V>;
+  using mutable_value_type = std::pair<K, V>;
+
  private:
   static void emplace(slot_type* slot) {
     // The construction of union doesn't do anything at runtime but it allows us
@@ -321,19 +337,17 @@ union slot_type {
   // If pair<const K, V> and pair<K, V> are layout-compatible, we can accept one
   // or the other via slot_type. We are also free to access the key via
   // slot_type::key in this case.
-  using kMutableKeys =
-      std::integral_constant<bool,
-                             memory_internal::IsLayoutCompatible<K, V>::value>;
+  using kMutableKeys = memory_internal::IsLayoutCompatible<K, V>;
 
  public:
-  slot_type() {}
-  ~slot_type() = delete;
-  using value_type = std::pair<const K, V>;
-  using mutable_value_type = std::pair<K, V>;
+  static value_type& element(slot_type* slot) { return slot->value; }
+  static const value_type& element(const slot_type* slot) {
+    return slot->value;
+  }
 
-  value_type value;
-  mutable_value_type mutable_value;
-  K key;
+  static const K& key(const slot_type* slot) {
+    return kMutableKeys::value ? slot->key : slot->value.first;
+  }
 
   template <class Allocator, class... Args>
   static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index 8b81653a57f6..e2d8f3643b86 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -33,6 +33,7 @@
 
 #include "absl/base/internal/per_thread_tls.h"
 #include "absl/base/optimization.h"
+#include "absl/container/internal/have_sse.h"
 #include "absl/synchronization/mutex.h"
 #include "absl/utility/utility.h"
 
@@ -82,10 +83,24 @@ struct HashtablezInfo {
   void* stack[kMaxStackDepth];
 };
 
+inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
+#if SWISSTABLE_HAVE_SSE2
+  total_probe_length /= 16;
+#else
+  total_probe_length /= 8;
+#endif
+  info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
+  info->num_erases.store(0, std::memory_order_relaxed);
+}
+
 inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
                                      size_t capacity) {
   info->size.store(size, std::memory_order_relaxed);
   info->capacity.store(capacity, std::memory_order_relaxed);
+  if (size == 0) {
+    // This is a clear, reset the total/num_erases too.
+    RecordRehashSlow(info, 0);
+  }
 }
 
 void RecordInsertSlow(HashtablezInfo* info, size_t hash,
@@ -126,6 +141,11 @@ class HashtablezInfoHandle {
     RecordStorageChangedSlow(info_, size, capacity);
   }
 
+  inline void RecordRehash(size_t total_probe_length) {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    RecordRehashSlow(info_, total_probe_length);
+  }
+
   inline void RecordInsert(size_t hash, size_t distance_from_desired) {
     if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
     RecordInsertSlow(info_, hash, distance_from_desired);
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
index f9ee941a015c..a6e4ac815749 100644
--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -145,6 +145,29 @@ TEST(HashtablezInfoTest, RecordErase) {
   EXPECT_EQ(info.num_erases.load(), 1);
 }
 
+TEST(HashtablezInfoTest, RecordRehash) {
+  HashtablezInfo info;
+  absl::MutexLock l(&info.init_mu);
+  info.PrepareForSampling();
+  RecordInsertSlow(&info, 0x1, 0);
+  RecordInsertSlow(&info, 0x2, kProbeLength);
+  RecordInsertSlow(&info, 0x4, kProbeLength);
+  RecordInsertSlow(&info, 0x8, 2 * kProbeLength);
+  EXPECT_EQ(info.size.load(), 4);
+  EXPECT_EQ(info.total_probe_length.load(), 4);
+
+  RecordEraseSlow(&info);
+  RecordEraseSlow(&info);
+  EXPECT_EQ(info.size.load(), 2);
+  EXPECT_EQ(info.total_probe_length.load(), 4);
+  EXPECT_EQ(info.num_erases.load(), 2);
+
+  RecordRehashSlow(&info, 3 * kProbeLength);
+  EXPECT_EQ(info.size.load(), 2);
+  EXPECT_EQ(info.total_probe_length.load(), 3);
+  EXPECT_EQ(info.num_erases.load(), 0);
+}
+
 TEST(HashtablezSamplerTest, SmallSampleParameter) {
   SetHashtablezEnabled(true);
   SetHashtablezSampleParameter(100);
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index a5d0cae00bfb..33f8f8fac527 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -936,7 +936,7 @@ class raw_hash_set {
       reset_growth_left();
     }
     assert(empty());
-    infoz_.RecordStorageChanged(size_, capacity_);
+    infoz_.RecordStorageChanged(0, capacity_);
   }
 
   // This overload kicks in when the argument is an rvalue of insertable and
@@ -1226,7 +1226,7 @@
     if (n == 0 && capacity_ == 0) return;
     if (n == 0 && size_ == 0) {
       destroy_slots();
-      infoz_.RecordStorageChanged(size_, capacity_);
+      infoz_.RecordStorageChanged(0, 0);
       return;
     }
     // bitor is a faster way of doing `max` here. We will round up to the next
@@ -1483,11 +1483,14 @@ class raw_hash_set {
     capacity_ = new_capacity;
     initialize_slots();
 
+    size_t total_probe_length = 0;
     for (size_t i = 0; i != old_capacity; ++i) {
       if (IsFull(old_ctrl[i])) {
         size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
                                           PolicyTraits::element(old_slots + i));
-        size_t new_i = find_first_non_full(hash).offset;
+        auto target = find_first_non_full(hash);
+        size_t new_i = target.offset;
+        total_probe_length += target.probe_length;
         set_ctrl(new_i, H2(hash));
         PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
       }
@@ -1499,6 +1502,7 @@
       Deallocate<Layout::Alignment()>(&alloc_ref(), old_ctrl,
                                       layout.AllocSize());
     }
+    infoz_.RecordRehash(total_probe_length);
   }
 
   void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
@@ -1522,12 +1526,15 @@
     ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
     typename std::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type
         raw;
+    size_t total_probe_length = 0;
     slot_type* slot = reinterpret_cast<slot_type*>(&raw);
    for (size_t i = 0; i != capacity_; ++i) {
      if (!IsDeleted(ctrl_[i])) continue;
      size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
                                        PolicyTraits::element(slots_ + i));
-      size_t new_i = find_first_non_full(hash).offset;
+      auto target = find_first_non_full(hash);
+      size_t new_i = target.offset;
+      total_probe_length += target.probe_length;
 
       // Verify if the old and new i fall within the same group wrt the hash.
       // If they do, we don't need to move the object as it falls already in the
@@ -1560,6 +1567,7 @@
       }
     }
     reset_growth_left();
+    infoz_.RecordRehash(total_probe_length);
   }
 
   void rehash_and_grow_if_necessary() {
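Editor's note on the probe-length bookkeeping above: RecordRehashSlow divides the accumulated probe length by 16 on SSE2 builds and by 8 otherwise, i.e. it converts slot distances into whole probe groups, which is presumably why the test works in multiples of kProbeLength (the four inserts at distances 0, kProbeLength, kProbeLength and 2 * kProbeLength yield total_probe_length == 4, and RecordRehashSlow(&info, 3 * kProbeLength) yields 3). A minimal sketch of that conversion, assuming the SSE2 group width; kGroupWidth and ProbeLengthInGroups are illustrative names, not part of the patch.

#include <cstddef>

// Assumption: SSE2 build, so one probe group spans 16 slots (8 otherwise).
constexpr std::size_t kGroupWidth = 16;

constexpr std::size_t ProbeLengthInGroups(std::size_t probe_length_in_slots) {
  return probe_length_in_slots / kGroupWidth;
}

// Mirrors the RecordRehash expectation in the test: walking 3 * kProbeLength
// slots during a rehash is stored as a total probe length of 3.
static_assert(ProbeLengthInGroups(3 * 16) == 3, "rehash probe length in groups");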