| author | Abseil Team <absl-team@google.com> | 2019-01-18T15:54-0800 |
|---|---|---|
| committer | Alex Strelnikov <strel@google.com> | 2019-01-18T19:05-0500 |
| commit | 0b1e6d417b414aad9282e32e8c49c719edeb63c1 (patch) | |
| tree | 249747e061ac3bc4af717a367da8c4a50486dbbe /absl | |
| parent | efccc502606bed768e50a6cd5806d8eb13e4e935 (diff) | |
Export of internal Abseil changes.
--
461c1b6eb19490429db3bc6dd10ee32df9429cd7 by Samuel Benzaquen <sbenza@google.com>:

Group all the capacity/growth calculation in one place.
This helps remove the unnecessary floating point operations.

PiperOrigin-RevId: 229928140
GitOrigin-RevId: 461c1b6eb19490429db3bc6dd10ee32df9429cd7
Change-Id: Ib00f85a6033fcd06a1d38a5987670b1524a80f93
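To make the integer arithmetic concrete, here is a minimal standalone sketch of the two helpers this change introduces. The `kWidth` constant and the `main` driver are illustrative stand-ins for `Group::kWidth` and the real call sites in raw_hash_set.h, not part of the commit:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Stand-in for Group::kWidth; 8 is the width where the rounding edge case bites.
constexpr size_t kWidth = 8;

// capacity * 7/8 without floating point (the 7/8 maximum load factor).
size_t CapacityToGrowth(size_t capacity) {
  if (kWidth == 8 && capacity == 7) return 6;  // x - x/8 misrounds at x == 7
  return capacity - capacity / 8;
}

// Lower bound on the capacity needed to hold `growth` elements (growth * 8/7),
// again without floating point; callers still pass the result through
// NormalizeCapacity() to get a valid power-of-two-minus-one capacity.
size_t GrowthToLowerboundCapacity(size_t growth) {
  if (kWidth == 8 && growth == 7) return 8;  // x + (x-1)/7 misrounds at x == 7
  return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
}

int main() {
  // The helpers round-trip: the lower-bound capacity always admits the
  // requested growth.
  for (size_t growth = 0; growth < 1000; ++growth) {
    assert(CapacityToGrowth(GrowthToLowerboundCapacity(growth)) >= growth);
  }
  std::printf("capacity 15 -> growth %zu\n", CapacityToGrowth(15));              // 14
  std::printf("growth 100 -> capacity >= %zu\n", GrowthToLowerboundCapacity(100));  // 114
  return 0;
}
```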
Diffstat (limited to 'absl')
-rw-r--r-- | absl/container/internal/raw_hash_set.h | 62 |
-rw-r--r-- | absl/container/internal/raw_hash_set_test.cc | 29 |
2 files changed, 66 insertions, 25 deletions
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 8f2350a75a81..8f6469fff7f3 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -480,6 +480,28 @@ inline size_t NormalizeCapacity(size_t n) {
              : (std::numeric_limits<size_t>::max)() >> LeadingZeros(n);
 }
 
+// We use 7/8th as maximum load factor.
+// For 16-wide groups, that gives an average of two empty slots per group.
+inline size_t CapacityToGrowth(size_t capacity) {
+  assert(IsValidCapacity(capacity));
+  // `capacity*7/8`
+  if (Group::kWidth == 8 && capacity == 7) {
+    // x-x/8 does not work when x==7.
+    return 6;
+  }
+  return capacity - capacity / 8;
+}
+// From desired "growth" to a lowerbound of the necessary capacity.
+// Might not be a valid one and required NormalizeCapacity().
+inline size_t GrowthToLowerboundCapacity(size_t growth) {
+  // `growth*8/7`
+  if (Group::kWidth == 8 && growth == 7) {
+    // x+(x-1)/7 does not work when x==7.
+    return 8;
+  }
+  return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
+}
+
 // The node_handle concept from C++17.
 // We specialize node_handle for sets and maps. node_handle_base holds the
 // common API of both.
@@ -819,7 +841,7 @@ class raw_hash_set {
       : ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) {
     if (bucket_count) {
       capacity_ = NormalizeCapacity(bucket_count);
-      growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor);
+      reset_growth_left();
       initialize_slots();
     }
   }
@@ -1031,7 +1053,7 @@ class raw_hash_set {
       }
       size_ = 0;
       reset_ctrl();
-      growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor);
+      reset_growth_left();
     }
     assert(empty());
     infoz_.RecordStorageChanged(size_, capacity_);
@@ -1325,16 +1347,16 @@ class raw_hash_set {
       infoz_.RecordStorageChanged(size_, capacity_);
       return;
     }
-    auto m = NormalizeCapacity((std::max)(n, NumSlotsFast(size())));
+    // bitor is a faster way of doing `max` here. We will round up to the next
+    // power-of-2-minus-1, so bitor is good enough.
+    auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
     // n == 0 unconditionally rehashes as per the standard.
     if (n == 0 || m > capacity_) {
       resize(m);
     }
   }
 
-  void reserve(size_t n) {
-    rehash(NumSlotsFast(n));
-  }
+  void reserve(size_t n) { rehash(GrowthToLowerboundCapacity(n)); }
 
   // Extension API: support for heterogeneous keys.
   //
@@ -1512,13 +1534,6 @@ class raw_hash_set {
     slot_type&& slot;
   };
 
-  // Computes std::ceil(n / kMaxLoadFactor). Faster than calling std::ceil.
-  static inline size_t NumSlotsFast(size_t n) {
-    return static_cast<size_t>(
-        (n * kMaxLoadFactorDenominator + (kMaxLoadFactorNumerator - 1)) /
-        kMaxLoadFactorNumerator);
-  }
-
   // "erases" the object from the container, except that it doesn't actually
   // destroy the object. It only updates all the metadata of the class.
   // This can be used in conjunction with Policy::transfer to move the object to
@@ -1556,7 +1571,7 @@ class raw_hash_set {
     ctrl_ = reinterpret_cast<ctrl_t*>(layout.template Pointer<0>(mem));
     slots_ = layout.template Pointer<1>(mem);
     reset_ctrl();
-    growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor) - size_;
+    reset_growth_left();
     infoz_.RecordStorageChanged(size_, capacity_);
   }
 
@@ -1662,13 +1677,13 @@ class raw_hash_set {
         --i;  // repeat
       }
     }
-    growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor) - size_;
+    reset_growth_left();
   }
 
   void rehash_and_grow_if_necessary() {
     if (capacity_ == 0) {
       resize(Group::kWidth - 1);
-    } else if (size() <= kMaxLoadFactor / 2 * capacity_) {
+    } else if (size() <= CapacityToGrowth(capacity()) / 2) {
       // Squash DELETED without growing if there is enough capacity.
       drop_deletes_without_resize();
     } else {
@@ -1811,6 +1826,10 @@ class raw_hash_set {
     SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
   }
 
+  void reset_growth_left() {
+    growth_left() = CapacityToGrowth(capacity()) - size_;
+  }
+
   // Sets the control byte, and if `i < Group::kWidth`, set the cloned byte at
   // the end too.
   void set_ctrl(size_t i, ctrl_t h) {
@@ -1837,12 +1856,6 @@ class raw_hash_set {
     return settings_.template get<3>();
   }
 
-  // On average each group has 2 empty slot (for the vectorized case).
-  static constexpr int64_t kMaxLoadFactorNumerator = 14;
-  static constexpr int64_t kMaxLoadFactorDenominator = 16;
-  static constexpr float kMaxLoadFactor =
-      1.0 * kMaxLoadFactorNumerator / kMaxLoadFactorDenominator;
-
   // TODO(alkis): Investigate removing some of these fields:
   // - ctrl/slots can be derived from each other
   // - size can be moved into the slot array
@@ -1903,10 +1916,9 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
   }
 
   static size_t LowerBoundAllocatedByteSize(size_t size) {
-    size_t capacity = container_internal::NormalizeCapacity(
-        std::ceil(size / Set::kMaxLoadFactor));
+    size_t capacity = GrowthToLowerboundCapacity(size);
     if (capacity == 0) return 0;
-    auto layout = Set::MakeLayout(capacity);
+    auto layout = Set::MakeLayout(NormalizeCapacity(capacity));
     size_t m = layout.AllocSize();
     size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
     if (per_slot != ~size_t{}) {
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 78b627556d74..d9f1826a09ca 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -48,6 +48,8 @@ namespace {
 
 using ::testing::DoubleNear;
 using ::testing::ElementsAre;
+using ::testing::Ge;
+using ::testing::Lt;
 using ::testing::Optional;
 using ::testing::Pair;
 using ::testing::UnorderedElementsAre;
@@ -62,6 +64,33 @@ TEST(Util, NormalizeCapacity) {
   EXPECT_EQ(kMinCapacity * 2 + 1, NormalizeCapacity(kMinCapacity + 2));
 }
 
+TEST(Util, GrowthAndCapacity) {
+  // Verify that GrowthToCapacity gives the minimum capacity that has enough
+  // growth.
+  for (size_t growth = 0; growth < 10000; ++growth) {
+    SCOPED_TRACE(growth);
+    size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth));
+    // The capacity is large enough for `growth`
+    EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth));
+    if (growth < Group::kWidth - 1) {
+      // Fits in one group, that is the minimum capacity.
+      EXPECT_EQ(capacity, Group::kWidth - 1);
+    } else {
+      // There is no smaller capacity that works.
+      EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth));
+    }
+  }
+
+  for (size_t capacity = Group::kWidth - 1; capacity < 10000;
+       capacity = 2 * capacity + 1) {
+    SCOPED_TRACE(capacity);
+    size_t growth = CapacityToGrowth(capacity);
+    EXPECT_THAT(growth, Lt(capacity));
+    EXPECT_LE(GrowthToLowerboundCapacity(growth), capacity);
+    EXPECT_EQ(NormalizeCapacity(GrowthToLowerboundCapacity(growth)), capacity);
+  }
+}
+
 TEST(Util, probe_seq) {
   probe_seq<16> seq(0, 127);
   auto gen = [&]() {
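Taken together, the new sizing path is pure integer arithmetic. A worked example (assuming the 16-wide SSE2 group, i.e. Group::kWidth == 16): reserve(100) now calls rehash(GrowthToLowerboundCapacity(100)), and GrowthToLowerboundCapacity(100) = 100 + 99/7 = 114; NormalizeCapacity(114) rounds that up to the next power-of-two-minus-one, 127; and CapacityToGrowth(127) = 127 - 127/8 = 112 >= 100, so the reserved table absorbs the requested 100 insertions before growing, with no kMaxLoadFactor float anywhere in the computation.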