Diffstat (limited to 'absl/container')
-rw-r--r--  absl/container/flat_hash_map.h                |   8
-rw-r--r--  absl/container/flat_hash_set.h                |   8
-rw-r--r--  absl/container/inlined_vector.h               |  28
-rw-r--r--  absl/container/inlined_vector_benchmark.cc    | 118
-rw-r--r--  absl/container/internal/inlined_vector.h      |  16
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc  |  19
6 files changed, 107 insertions, 90 deletions
diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h
index dfc497b564b4..00cc4dcff9fc 100644
--- a/absl/container/flat_hash_map.h
+++ b/absl/container/flat_hash_map.h
@@ -219,8 +219,12 @@ class flat_hash_map : public absl::container_internal::raw_hash_map<
   //   Erases the element at `position` of the `flat_hash_map`, returning
   //   `void`.
   //
-  //   NOTE: this return behavior is different than that of STL containers in
-  //   general and `std::unordered_map` in particular.
+  //   NOTE: returning `void` in this case is different from the behavior of
+  //   STL containers in general and `std::unordered_map` in particular (which
+  //   return an iterator to the element following the erased element). If that
+  //   iterator is needed, simply post-increment the iterator:
+  //
+  //     map.erase(it++);
   //
   // iterator erase(const_iterator first, const_iterator last):
   //
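
For illustration (not part of the diff), the `erase(it++)` pattern recommended
above lets a loop remove entries while iterating: the post-increment advances
the iterator before the erase invalidates the old position. A minimal sketch:

    #include <string>
    #include "absl/container/flat_hash_map.h"

    // Illustrative only: erase all entries with odd keys.
    void EraseOddKeys(absl::flat_hash_map<int, std::string>& map) {
      for (auto it = map.begin(); it != map.end();) {
        if (it->first % 2 != 0) {
          map.erase(it++);  // Advance first; `erase` returns `void` here.
        } else {
          ++it;
        }
      }
    }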
diff --git a/absl/container/flat_hash_set.h b/absl/container/flat_hash_set.h
index f27f174c1745..6bf51833fca6 100644
--- a/absl/container/flat_hash_set.h
+++ b/absl/container/flat_hash_set.h
@@ -212,8 +212,12 @@ class flat_hash_set
   //   Erases the element at `position` of the `flat_hash_set`, returning
   //   `void`.
   //
-  //   NOTE: this return behavior is different than that of STL containers in
-  //   general and `std::unordered_map` in particular.
+  //   NOTE: returning `void` in this case is different from the behavior of
+  //   STL containers in general and `std::unordered_set` in particular (which
+  //   return an iterator to the element following the erased element). If that
+  //   iterator is needed, simply post-increment the iterator:
+  //
+  //     set.erase(it++);
   //
   // iterator erase(const_iterator first, const_iterator last):
   //
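
The same idiom applies to `flat_hash_set`. As a hedged sketch, a small
`EraseIf`-style helper built on it (the helper is hypothetical, not an Abseil
API in this change):

    #include "absl/container/flat_hash_set.h"

    // Illustrative only: remove every element matching `pred`.
    template <typename Pred>
    void EraseIf(absl::flat_hash_set<int>& set, Pred pred) {
      for (auto it = set.begin(); it != set.end();) {
        if (pred(*it)) {
          set.erase(it++);  // `erase` returns `void`, so advance first.
        } else {
          ++it;
        }
      }
    }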
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index 683087502295..77988058e913 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -66,7 +66,10 @@ namespace absl {
 // designed to cover the same API footprint as covered by `std::vector`.
 template <typename T, size_t N, typename A = std::allocator<T>>
 class InlinedVector {
-  using Storage = inlined_vector_internal::InlinedVectorStorage<T, N, A>;
+  static_assert(
+      N > 0, "InlinedVector cannot be instantiated with `0` inlined elements.");
+
+  using Storage = inlined_vector_internal::Storage<InlinedVector>;
   using Tag = typename Storage::Tag;
   using AllocatorAndTag = typename Storage::AllocatorAndTag;
   using Allocation = typename Storage::Allocation;
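
As a usage note (not part of the diff), the relocated `static_assert` turns a
zero-capacity instantiation into a readable compile-time error:

    // Illustrative only: fails to compile with the message above.
    absl::InlinedVector<int, 0> v;
    // error: InlinedVector cannot be instantiated with `0` inlined elements.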
@@ -283,8 +286,7 @@ class InlinedVector {
   // will no longer be inlined and `capacity()` will equal its capacity on the
   // allocated heap.
   size_type capacity() const noexcept {
-    return allocated() ? allocation().capacity()
-                       : Storage::GetInlinedCapacity();
+    return allocated() ? allocation().capacity() : static_cast<size_type>(N);
   }
 
   // `InlinedVector::data()`
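
To illustrate the documented contract (a sketch, not from the diff):
`capacity()` reports the inline capacity `N` until the vector spills to the
heap, after which it reports the heap capacity.

    #include <cassert>
    #include "absl/container/inlined_vector.h"

    void CapacityDemo() {
      absl::InlinedVector<int, 4> v;
      assert(v.capacity() == 4);                   // Inlined: capacity() == N.
      for (int i = 0; i < 5; ++i) v.push_back(i);  // Sixth element spills.
      assert(v.capacity() > 4);                    // Heap capacity now.
    }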
@@ -802,19 +804,19 @@ class InlinedVector {
   // `InlinedVector::shrink_to_fit()`
   //
   // Reduces memory usage by freeing unused memory. After this call, calls to
-  // `capacity()` will be equal to `max(Storage::GetInlinedCapacity(), size())`.
+  // `capacity()` will be equal to `max(N, size())`.
   //
-  // If `size() <= Storage::GetInlinedCapacity()` and the elements are currently
-  // stored on the heap, they will be moved to the inlined storage and the heap
-  // memory will be deallocated.
+  // If `size() <= N` and the elements are currently stored on the heap, they
+  // will be moved to the inlined storage and the heap memory will be
+  // deallocated.
   //
-  // If `size() > Storage::GetInlinedCapacity()` and `size() < capacity()` the
-  // elements will be moved to a smaller heap allocation.
+  // If `size() > N` and `size() < capacity()` the elements will be moved to a
+  // smaller heap allocation.
   void shrink_to_fit() {
     const auto s = size();
     if (ABSL_PREDICT_FALSE(!allocated() || s == capacity())) return;
 
-    if (s <= Storage::GetInlinedCapacity()) {
+    if (s <= N) {
       // Move the elements to the inlined storage.
       // We have to do this using a temporary, because `inlined_storage` and
       // `allocation_storage` are in a union field.
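
A short usage sketch of the `shrink_to_fit()` contract described above
(illustrative, assuming an inline capacity of 4):

    absl::InlinedVector<int, 4> v(10, 0);  // Heap-allocated: size() > N.
    v.resize(3);                           // size() <= N, still on the heap.
    v.shrink_to_fit();                     // Elements move back inline.
    assert(v.capacity() == 4);             // capacity() == max(N, size()).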
@@ -943,7 +945,7 @@ class InlinedVector {
     const size_type s = size();
     assert(s <= capacity());
 
-    size_type target = (std::max)(Storage::GetInlinedCapacity(), s + delta);
+    size_type target = (std::max)(N, s + delta);
 
     // Compute new capacity by repeatedly doubling current capacity
     // TODO(psrc): Check and avoid overflow?
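
For context, a standalone sketch of the growth policy the comment above
describes (a hypothetical helper, not the actual implementation): the target
is clamped below by `N`, then reached by repeated doubling.

    #include <algorithm>
    #include <cstddef>

    // Assumes `current >= 1`, which holds since capacity() >= N > 0.
    std::size_t GrownCapacity(std::size_t current, std::size_t n,
                              std::size_t size, std::size_t delta) {
      std::size_t target = (std::max)(n, size + delta);
      std::size_t new_capacity = current;
      while (new_capacity < target) new_capacity *= 2;  // Overflow: see TODO.
      return new_capacity;
    }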
@@ -1046,7 +1048,7 @@ class InlinedVector {
   }
 
   void InitAssign(size_type n) {
-    if (n > Storage::GetInlinedCapacity()) {
+    if (n > N) {
       Allocation new_allocation(allocator(), n);
       init_allocation(new_allocation);
       UninitializedFill(allocated_space(), allocated_space() + n);
@@ -1058,7 +1060,7 @@ class InlinedVector {
   }
 
   void InitAssign(size_type n, const_reference v) {
-    if (n > Storage::GetInlinedCapacity()) {
+    if (n > N) {
       Allocation new_allocation(allocator(), n);
       init_allocation(new_allocation);
       UninitializedFill(allocated_space(), allocated_space() + n, v);
diff --git a/absl/container/inlined_vector_benchmark.cc b/absl/container/inlined_vector_benchmark.cc
index fc928afe5d65..867a29ea32c1 100644
--- a/absl/container/inlined_vector_benchmark.cc
+++ b/absl/container/inlined_vector_benchmark.cc
@@ -23,17 +23,13 @@
 
 namespace {
 
-using IntVec = absl::InlinedVector<int, 8>;
-
 void BM_InlinedVectorFill(benchmark::State& state) {
-  const int len = state.range(0);
+  absl::InlinedVector<int, 8> v;
+  int val = 10;
   for (auto _ : state) {
-    IntVec v;
-    for (int i = 0; i < len; i++) {
-      v.push_back(i);
-    }
+    benchmark::DoNotOptimize(v);
+    v.push_back(val);
   }
-  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
 }
 BENCHMARK(BM_InlinedVectorFill)->Range(0, 1024);
 
@@ -43,23 +39,25 @@ void BM_InlinedVectorFillRange(benchmark::State& state) {
   for (int i = 0; i < len; i++) {
     ia[i] = i;
   }
+  auto* from = ia.get();
+  auto* to = from + len;
   for (auto _ : state) {
-    IntVec v(ia.get(), ia.get() + len);
+    benchmark::DoNotOptimize(from);
+    benchmark::DoNotOptimize(to);
+    absl::InlinedVector<int, 8> v(from, to);
     benchmark::DoNotOptimize(v);
   }
-  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
 }
 BENCHMARK(BM_InlinedVectorFillRange)->Range(0, 1024);
 
 void BM_StdVectorFill(benchmark::State& state) {
-  const int len = state.range(0);
+  std::vector<int> v;
+  int val = 10;
   for (auto _ : state) {
-    std::vector<int> v;
-    for (int i = 0; i < len; i++) {
-      v.push_back(i);
-    }
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(val);
+    v.push_back(val);
   }
-  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
 }
 BENCHMARK(BM_StdVectorFill)->Range(0, 1024);
 
@@ -124,7 +122,7 @@ struct Buffer {  // some arbitrary structure for benchmarking.
   void* user_data;
 };
 
-void BM_InlinedVectorTenAssignments(benchmark::State& state) {
+void BM_InlinedVectorAssignments(benchmark::State& state) {
   const int len = state.range(0);
   using BufferVec = absl::InlinedVector<Buffer, 2>;
 
@@ -133,18 +131,25 @@ void BM_InlinedVectorTenAssignments(benchmark::State& state) {
 
   BufferVec dst;
   for (auto _ : state) {
-    for (int i = 0; i < 10; ++i) {
-      dst = src;
-    }
+    benchmark::DoNotOptimize(dst);
+    benchmark::DoNotOptimize(src);
+    dst = src;
   }
 }
-BENCHMARK(BM_InlinedVectorTenAssignments)
-    ->Arg(0)->Arg(1)->Arg(2)->Arg(3)->Arg(4)->Arg(20);
+BENCHMARK(BM_InlinedVectorAssignments)
+    ->Arg(0)
+    ->Arg(1)
+    ->Arg(2)
+    ->Arg(3)
+    ->Arg(4)
+    ->Arg(20);
 
 void BM_CreateFromContainer(benchmark::State& state) {
   for (auto _ : state) {
-    absl::InlinedVector<int, 4> x(absl::InlinedVector<int, 4>{1, 2, 3});
-    benchmark::DoNotOptimize(x);
+    absl::InlinedVector<int, 4> src{1, 2, 3};
+    benchmark::DoNotOptimize(src);
+    absl::InlinedVector<int, 4> dst(std::move(src));
+    benchmark::DoNotOptimize(dst);
   }
 }
 BENCHMARK(BM_CreateFromContainer);
@@ -214,6 +219,8 @@ void BM_SwapElements(benchmark::State& state) {
   Vec b;
   for (auto _ : state) {
     using std::swap;
+    benchmark::DoNotOptimize(a);
+    benchmark::DoNotOptimize(b);
     swap(a, b);
   }
 }
@@ -259,60 +266,44 @@ BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 8>);
 void BM_InlinedVectorIndexInlined(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
   for (auto _ : state) {
-    for (int i = 0; i < 1000; ++i) {
-      benchmark::DoNotOptimize(v);
-      benchmark::DoNotOptimize(v[4]);
-    }
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v[4]);
   }
-  state.SetItemsProcessed(1000 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorIndexInlined);
 
 void BM_InlinedVectorIndexExternal(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    for (int i = 0; i < 1000; ++i) {
-      benchmark::DoNotOptimize(v);
-      benchmark::DoNotOptimize(v[4]);
-    }
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v[4]);
   }
-  state.SetItemsProcessed(1000 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorIndexExternal);
 
 void BM_StdVectorIndex(benchmark::State& state) {
   std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    for (int i = 0; i < 1000; ++i) {
-      benchmark::DoNotOptimize(v);
-      benchmark::DoNotOptimize(v[4]);
-    }
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v[4]);
   }
-  state.SetItemsProcessed(1000 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_StdVectorIndex);
 
-#define UNROLL_2(x)            \
-  benchmark::DoNotOptimize(x); \
-  benchmark::DoNotOptimize(x);
-
-#define UNROLL_4(x) UNROLL_2(x) UNROLL_2(x)
-#define UNROLL_8(x) UNROLL_4(x) UNROLL_4(x)
-#define UNROLL_16(x) UNROLL_8(x) UNROLL_8(x);
-
 void BM_InlinedVectorDataInlined(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
   for (auto _ : state) {
-    UNROLL_16(v.data());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.data());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorDataInlined);
 
 void BM_InlinedVectorDataExternal(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.data());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.data());
   }
   state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
@@ -321,7 +312,8 @@ BENCHMARK(BM_InlinedVectorDataExternal);
 void BM_StdVectorData(benchmark::State& state) {
   std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.data());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.data());
   }
   state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
@@ -330,54 +322,54 @@ BENCHMARK(BM_StdVectorData);
 void BM_InlinedVectorSizeInlined(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
   for (auto _ : state) {
-    UNROLL_16(v.size());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.size());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorSizeInlined);
 
 void BM_InlinedVectorSizeExternal(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.size());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.size());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorSizeExternal);
 
 void BM_StdVectorSize(benchmark::State& state) {
   std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.size());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.size());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_StdVectorSize);
 
 void BM_InlinedVectorEmptyInlined(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
   for (auto _ : state) {
-    UNROLL_16(v.empty());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.empty());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorEmptyInlined);
 
 void BM_InlinedVectorEmptyExternal(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.empty());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.empty());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorEmptyExternal);
 
 void BM_StdVectorEmpty(benchmark::State& state) {
   std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.empty());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.empty());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_StdVectorEmpty);
 
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
index 308506093300..24059d94c876 100644
--- a/absl/container/internal/inlined_vector.h
+++ b/absl/container/internal/inlined_vector.h
@@ -24,11 +24,12 @@
 namespace absl {
 namespace inlined_vector_internal {
 
-template <typename T, size_t N, typename A>
-class InlinedVectorStorage {
-  static_assert(
-      N > 0, "InlinedVector cannot be instantiated with `0` inline elements.");
+template <typename InlinedVector>
+class Storage;
 
+template <template <typename, size_t, typename> class InlinedVector, typename T,
+          size_t N, typename A>
+class Storage<InlinedVector<T, N, A>> {
  public:
   using allocator_type = A;
   using value_type = typename allocator_type::value_type;
@@ -44,12 +45,7 @@ class InlinedVectorStorage {
   using reverse_iterator = std::reverse_iterator<iterator>;
   using const_reverse_iterator = std::reverse_iterator<const_iterator>;
 
-  constexpr static size_type GetInlinedCapacity() {
-    return static_cast<size_type>(N);
-  }
-
-  explicit InlinedVectorStorage(const allocator_type& a)
-      : allocator_and_tag_(a) {}
+  explicit Storage(const allocator_type& a) : allocator_and_tag_(a) {}
 
   // TODO(johnsoncj): Make the below types and members private after migration
 
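
The new `Storage<InlinedVector<T, N, A>>` form above is a partial
specialization over a template-template parameter, letting `Storage` recover
`T`, `N`, and `A` from the container type instead of taking three separate
parameters. A reduced sketch of the technique (names illustrative):

    #include <cstddef>

    template <typename Container>
    class Storage;  // Primary template, intentionally left undefined.

    // Matches any `Container<T, N, A>` and re-exposes its parameters.
    template <template <typename, std::size_t, typename> class Container,
              typename T, std::size_t N, typename A>
    class Storage<Container<T, N, A>> {
     public:
      using value_type = T;
      using allocator_type = A;
      static constexpr std::size_t inlined_capacity = N;
    };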
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 87511148d04a..02fd0bfa0d93 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -845,6 +845,25 @@ TEST(Table, Erase) {
   EXPECT_TRUE(t.find(0) == t.end());
 }
 
+TEST(Table, EraseMaintainsValidIterator) {
+  IntTable t;
+  const int kNumElements = 100;
+  for (int i = 0; i < kNumElements; i++) {
+    EXPECT_TRUE(t.emplace(i).second);
+  }
+  EXPECT_EQ(t.size(), kNumElements);
+
+  int num_erase_calls = 0;
+  auto it = t.begin();
+  while (it != t.end()) {
+    t.erase(it++);
+    num_erase_calls++;
+  }
+
+  EXPECT_TRUE(t.empty());
+  EXPECT_EQ(num_erase_calls, kNumElements);
+}
+
// Collect N bad keys by the following algorithm:
 // 1. Create an empty table and reserve it to 2 * N.
 // 2. Insert N random elements.