author    Abseil Team <absl-team@google.com>        2019-03-22 19:20 -0700
committer Derek Mauro <dmauro@google.com>           2019-03-22 19:30 -0400
commit    eab2078b53c9e3d9d240135c09d27e3393acb50a (patch)
tree      4d89c6a5877c78cd0e816d6824ca6ba6baf0684c /absl/container
parent    253eb7416421661873afbaa33828a850db978541 (diff)
Export of internal Abseil changes.
--
8b7c3bc2fb69608e9b2389b1be0b0de840a4c59d by Derek Mauro <dmauro@google.com>:

Set correct flags for clang-cl.
https://github.com/abseil/abseil-cpp/pull/278

clang-cl produces binaries with the MSVC ABI and aims to be as
flag-compatible with pure MSVC as possible, which leads to all sorts
of weird cases.

clang-cl aliases /Wall to clang's -Weverything, which is far too
verbose, so it needs /W3 like pure MSVC.

clang-cl only understands GCC-style warning flags (-W[no]blah) and
silently drops MSVC-style warning flags (/wd[num]).

clang-cl needs the MSVC define flags since it consumes the same
header files as pure MSVC.

CMake sets CMAKE_CXX_COMPILER_ID to Clang when clang-cl is detected,
so an extra if (MSVC) check is needed to differentiate it.
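
For illustration, a minimal standalone sketch (not part of this patch)
of why the extra check is needed: clang-cl defines both __clang__ and
_MSC_VER, so it identifies as Clang while consuming MSVC headers.

  // Sketch: clang-cl sets both macros, which is why
  // CMAKE_CXX_COMPILER_ID (Clang) alone cannot distinguish it from a
  // GNU-driver clang; the extra if (MSVC) supplies the distinction.
  #include <cstdio>

  int main() {
  #if defined(__clang__) && defined(_MSC_VER)
    std::printf("clang-cl: clang frontend, MSVC ABI and headers\n");
  #elif defined(_MSC_VER)
    std::printf("pure MSVC\n");
  #elif defined(__clang__)
    std::printf("clang with the GNU-style driver\n");
  #endif
    return 0;
  }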

We are not doing clang-cl specialization in Bazel, as there is
currently no reliable way to detect clang-cl in Bazel.

Other changes:
Add ABSL_ prefix to variable names to avoid name collision in CMake.

PiperOrigin-RevId: 239841297

--
add96c3fc067d5c7b6f016d2ba74725a443a185e by CJ Johnson <johnsoncj@google.com>:

Eventually Storage will need to refer to the type `absl::InlinedVector<...>*`. This can be done via a forward declaration. However, doing so would move the defaulted allocator template parameter onto the forward declaration and thus into an internal file. Instead, this change gives Storage access to the template and its parameters so that the complete type can be formed without including it.
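
A minimal, self-contained sketch of the pattern (names simplified; the
real change is in the diff below):

  // Specialize the internal Storage on the complete vector type so that
  // T, N, and A are recovered by pattern matching, and the defaulted
  // allocator parameter stays on the public template alone.
  #include <cstddef>
  #include <memory>

  namespace internal_sketch {
  template <typename Vector>
  class Storage;  // Primary template: declared but never defined.

  template <template <typename, std::size_t, typename> class Vector,
            typename T, std::size_t N, typename A>
  class Storage<Vector<T, N, A>> {  // Matches any Vector<T, N, A>.
   public:
    using allocator_type = A;
    static constexpr std::size_t inlined_capacity() { return N; }
  };
  }  // namespace internal_sketch

  template <typename T, std::size_t N, typename A = std::allocator<T>>
  class MyInlinedVector {
    using Storage = internal_sketch::Storage<MyInlinedVector>;
  };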

PiperOrigin-RevId: 239811298

--
b5f5279f1b13b09cae5c745597d64ea1efab146b by CJ Johnson <johnsoncj@google.com>:

Simplify and clean up the benchmark tests for InlinedVector.

PiperOrigin-RevId: 239805767

--
f5991e51b43b13a0ae95025474071f5039a33d27 by Matt Calabrese <calabrese@google.com>:

Update the internal-only IsSwappable traits to be nested inside namespace absl so that the script that adds inline namespaces for LTS releases works with the implementation.
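
For context, a minimal sketch (with a hypothetical namespace name) of
what the LTS script produces; only declarations nested in namespace
absl pick up the wrapper:

  // The release script wraps the body of namespace absl in an inline
  // namespace; a trait declared outside absl would not be wrapped and
  // would not resolve against the versioned symbols.
  #include <type_traits>

  namespace absl {
  inline namespace lts_example {  // Hypothetical; added by the script.
  namespace swap_internal {
  // Simplified stand-in for the internal-only trait.
  template <typename T>
  struct IsSwappable : std::is_move_constructible<T> {};
  }  // namespace swap_internal
  }  // inline namespace lts_example
  }  // namespace absl

  // Callers keep spelling the unversioned name via the inline namespace.
  static_assert(absl::swap_internal::IsSwappable<int>::value, "visible");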

PiperOrigin-RevId: 239622024

--
d1cb234dc5706f033ad56f4eb16d94ac5da80d52 by Abseil Team <absl-team@google.com>:

Mutex: fix tsan annotations

This fixes two bugs:
1. We call cond directly in Mutex::AwaitCommon without using EvalConditionAnnotated. As a result, we call into user code while ignoring synchronization, miss that synchronization, and report false positives later. Use EvalConditionAnnotated to call cond, as we should.

2. We call the Mutex invariant while ignoring synchronization. The result is the same: we miss synchronization and report false-positive races later. Reuse EvalConditionAnnotated to call the mutex invariant as well.
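
For context, a usage-level sketch (not from this patch) of the kind of
user-supplied condition the fix affects:

  // The predicate handed to Mutex::Await() is user code; the fix routes
  // its evaluation through EvalConditionAnnotated so TSan observes the
  // synchronization that actually happens.
  #include "absl/synchronization/mutex.h"

  absl::Mutex mu;
  bool ready = false;  // Guarded by mu.

  void Waiter() {
    absl::MutexLock lock(&mu);
    mu.Await(absl::Condition(&ready));  // Blocks until ready is true.
  }

  void Signaler() {
    absl::MutexLock lock(&mu);
    ready = true;  // Await() re-evaluates the condition as mu is released.
  }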

PiperOrigin-RevId: 239583878

--
52295e4922a9b408fa0dd03d27bc91ccc6645cd7 by Abseil Team <absl-team@google.com>:

Clarify how to obtain the same behavior as std::unordered_map::erase if need be.
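
The documented idiom, as a short sketch (hypothetical helper function):

  // flat_hash_map::erase(iterator) returns void, so post-increment the
  // iterator to keep a valid position, matching the erase-and-advance
  // pattern std::unordered_map users rely on.
  #include "absl/container/flat_hash_map.h"

  void ErasePositive(absl::flat_hash_map<int, int>& m) {
    for (auto it = m.begin(); it != m.end();) {
      if (it->second > 0) {
        m.erase(it++);  // Advance before the element is removed.
      } else {
        ++it;
      }
    }
  }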

PiperOrigin-RevId: 239549513

--
6e76e68ed084fd1247981dbb92677ce8e563b0ec by Jon Cohen <cohenjon@google.com>:

Avoid the -S -B form of `cmake`, since it is only supported starting with CMake 3.13

PiperOrigin-RevId: 239473143
GitOrigin-RevId: 8b7c3bc2fb69608e9b2389b1be0b0de840a4c59d
Change-Id: Ib6d356fa1a7435260273df991e65df4149bd5861
Diffstat (limited to 'absl/container')
-rw-r--r--  absl/container/flat_hash_map.h                |   8
-rw-r--r--  absl/container/flat_hash_set.h                |   8
-rw-r--r--  absl/container/inlined_vector.h               |  28
-rw-r--r--  absl/container/inlined_vector_benchmark.cc    | 118
-rw-r--r--  absl/container/internal/inlined_vector.h      |  16
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc  |  19
6 files changed, 107 insertions, 90 deletions
diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h
index dfc497b564b4..00cc4dcff9fc 100644
--- a/absl/container/flat_hash_map.h
+++ b/absl/container/flat_hash_map.h
@@ -219,8 +219,12 @@ class flat_hash_map : public absl::container_internal::raw_hash_map<
   //   Erases the element at `position` of the `flat_hash_map`, returning
   //   `void`.
   //
-  //   NOTE: this return behavior is different than that of STL containers in
-  //   general and `std::unordered_map` in particular.
+  //   NOTE: returning `void` in this case is different than that of STL
+  //   containers in general and `std::unordered_map` in particular (which
+  //   return an iterator to the element following the erased element). If that
+  //   iterator is needed, simply post increment the iterator:
+  //
+  //     map.erase(it++);
   //
   // iterator erase(const_iterator first, const_iterator last):
   //
diff --git a/absl/container/flat_hash_set.h b/absl/container/flat_hash_set.h
index f27f174c1745..6bf51833fca6 100644
--- a/absl/container/flat_hash_set.h
+++ b/absl/container/flat_hash_set.h
@@ -212,8 +212,12 @@ class flat_hash_set
   //   Erases the element at `position` of the `flat_hash_set`, returning
   //   `void`.
   //
-  //   NOTE: this return behavior is different than that of STL containers in
-  //   general and `std::unordered_map` in particular.
+  //   NOTE: returning `void` in this case is different than that of STL
+  //   containers in general and `std::unordered_set` in particular (which
+  //   return an iterator to the element following the erased element). If that
+  //   iterator is needed, simply post increment the iterator:
+  //
+  //     set.erase(it++);
   //
   // iterator erase(const_iterator first, const_iterator last):
   //
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index 683087502295..77988058e913 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -66,7 +66,10 @@ namespace absl {
 // designed to cover the same API footprint as covered by `std::vector`.
 template <typename T, size_t N, typename A = std::allocator<T>>
 class InlinedVector {
-  using Storage = inlined_vector_internal::InlinedVectorStorage<T, N, A>;
+  static_assert(
+      N > 0, "InlinedVector cannot be instantiated with `0` inlined elements.");
+
+  using Storage = inlined_vector_internal::Storage<InlinedVector>;
   using Tag = typename Storage::Tag;
   using AllocatorAndTag = typename Storage::AllocatorAndTag;
   using Allocation = typename Storage::Allocation;
@@ -283,8 +286,7 @@ class InlinedVector {
   // will no longer be inlined and `capacity()` will equal its capacity on the
   // allocated heap.
   size_type capacity() const noexcept {
-    return allocated() ? allocation().capacity()
-                       : Storage::GetInlinedCapacity();
+    return allocated() ? allocation().capacity() : static_cast<size_type>(N);
   }
 
   // `InlinedVector::data()`
@@ -802,19 +804,19 @@ class InlinedVector {
   // `InlinedVector::shrink_to_fit()`
   //
   // Reduces memory usage by freeing unused memory. After this call, calls to
-  // `capacity()` will be equal to `max(Storage::GetInlinedCapacity(), size())`.
+  // `capacity()` will be equal to `max(N, size())`.
   //
-  // If `size() <= Storage::GetInlinedCapacity()` and the elements are currently
-  // stored on the heap, they will be moved to the inlined storage and the heap
-  // memory will be deallocated.
+  // If `size() <= N` and the elements are currently stored on the heap, they
+  // will be moved to the inlined storage and the heap memory will be
+  // deallocated.
   //
-  // If `size() > Storage::GetInlinedCapacity()` and `size() < capacity()` the
-  // elements will be moved to a smaller heap allocation.
+  // If `size() > N` and `size() < capacity()` the elements will be moved to a
+  // smaller heap allocation.
   void shrink_to_fit() {
     const auto s = size();
     if (ABSL_PREDICT_FALSE(!allocated() || s == capacity())) return;
 
-    if (s <= Storage::GetInlinedCapacity()) {
+    if (s <= N) {
       // Move the elements to the inlined storage.
       // We have to do this using a temporary, because `inlined_storage` and
       // `allocation_storage` are in a union field.
@@ -943,7 +945,7 @@ class InlinedVector {
     const size_type s = size();
     assert(s <= capacity());
 
-    size_type target = (std::max)(Storage::GetInlinedCapacity(), s + delta);
+    size_type target = (std::max)(N, s + delta);
 
     // Compute new capacity by repeatedly doubling current capacity
     // TODO(psrc): Check and avoid overflow?
@@ -1046,7 +1048,7 @@ class InlinedVector {
   }
 
   void InitAssign(size_type n) {
-    if (n > Storage::GetInlinedCapacity()) {
+    if (n > N) {
       Allocation new_allocation(allocator(), n);
       init_allocation(new_allocation);
       UninitializedFill(allocated_space(), allocated_space() + n);
@@ -1058,7 +1060,7 @@ class InlinedVector {
   }
 
   void InitAssign(size_type n, const_reference v) {
-    if (n > Storage::GetInlinedCapacity()) {
+    if (n > N) {
       Allocation new_allocation(allocator(), n);
       init_allocation(new_allocation);
       UninitializedFill(allocated_space(), allocated_space() + n, v);
diff --git a/absl/container/inlined_vector_benchmark.cc b/absl/container/inlined_vector_benchmark.cc
index fc928afe5d65..867a29ea32c1 100644
--- a/absl/container/inlined_vector_benchmark.cc
+++ b/absl/container/inlined_vector_benchmark.cc
@@ -23,17 +23,13 @@
 
 namespace {
 
-using IntVec = absl::InlinedVector<int, 8>;
-
 void BM_InlinedVectorFill(benchmark::State& state) {
-  const int len = state.range(0);
+  absl::InlinedVector<int, 8> v;
+  int val = 10;
   for (auto _ : state) {
-    IntVec v;
-    for (int i = 0; i < len; i++) {
-      v.push_back(i);
-    }
+    benchmark::DoNotOptimize(v);
+    v.push_back(val);
   }
-  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
 }
 BENCHMARK(BM_InlinedVectorFill)->Range(0, 1024);
 
@@ -43,23 +39,25 @@ void BM_InlinedVectorFillRange(benchmark::State& state) {
   for (int i = 0; i < len; i++) {
     ia[i] = i;
   }
+  auto* from = ia.get();
+  auto* to = from + len;
   for (auto _ : state) {
-    IntVec v(ia.get(), ia.get() + len);
+    benchmark::DoNotOptimize(from);
+    benchmark::DoNotOptimize(to);
+    absl::InlinedVector<int, 8> v(from, to);
     benchmark::DoNotOptimize(v);
   }
-  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
 }
 BENCHMARK(BM_InlinedVectorFillRange)->Range(0, 1024);
 
 void BM_StdVectorFill(benchmark::State& state) {
-  const int len = state.range(0);
+  std::vector<int> v;
+  int val = 10;
   for (auto _ : state) {
-    std::vector<int> v;
-    for (int i = 0; i < len; i++) {
-      v.push_back(i);
-    }
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(val);
+    v.push_back(val);
   }
-  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
 }
 BENCHMARK(BM_StdVectorFill)->Range(0, 1024);
 
@@ -124,7 +122,7 @@ struct Buffer {  // some arbitrary structure for benchmarking.
   void* user_data;
 };
 
-void BM_InlinedVectorTenAssignments(benchmark::State& state) {
+void BM_InlinedVectorAssignments(benchmark::State& state) {
   const int len = state.range(0);
   using BufferVec = absl::InlinedVector<Buffer, 2>;
 
@@ -133,18 +131,25 @@ void BM_InlinedVectorTenAssignments(benchmark::State& state) {
 
   BufferVec dst;
   for (auto _ : state) {
-    for (int i = 0; i < 10; ++i) {
-      dst = src;
-    }
+    benchmark::DoNotOptimize(dst);
+    benchmark::DoNotOptimize(src);
+    dst = src;
   }
 }
-BENCHMARK(BM_InlinedVectorTenAssignments)
-    ->Arg(0)->Arg(1)->Arg(2)->Arg(3)->Arg(4)->Arg(20);
+BENCHMARK(BM_InlinedVectorAssignments)
+    ->Arg(0)
+    ->Arg(1)
+    ->Arg(2)
+    ->Arg(3)
+    ->Arg(4)
+    ->Arg(20);
 
 void BM_CreateFromContainer(benchmark::State& state) {
   for (auto _ : state) {
-    absl::InlinedVector<int, 4> x(absl::InlinedVector<int, 4>{1, 2, 3});
-    benchmark::DoNotOptimize(x);
+    absl::InlinedVector<int, 4> src{1, 2, 3};
+    benchmark::DoNotOptimize(src);
+    absl::InlinedVector<int, 4> dst(std::move(src));
+    benchmark::DoNotOptimize(dst);
   }
 }
 BENCHMARK(BM_CreateFromContainer);
@@ -214,6 +219,8 @@ void BM_SwapElements(benchmark::State& state) {
   Vec b;
   for (auto _ : state) {
     using std::swap;
+    benchmark::DoNotOptimize(a);
+    benchmark::DoNotOptimize(b);
     swap(a, b);
   }
 }
@@ -259,60 +266,44 @@ BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 8>);
 void BM_InlinedVectorIndexInlined(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
   for (auto _ : state) {
-    for (int i = 0; i < 1000; ++i) {
-      benchmark::DoNotOptimize(v);
-      benchmark::DoNotOptimize(v[4]);
-    }
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v[4]);
   }
-  state.SetItemsProcessed(1000 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorIndexInlined);
 
 void BM_InlinedVectorIndexExternal(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    for (int i = 0; i < 1000; ++i) {
-      benchmark::DoNotOptimize(v);
-      benchmark::DoNotOptimize(v[4]);
-    }
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v[4]);
   }
-  state.SetItemsProcessed(1000 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorIndexExternal);
 
 void BM_StdVectorIndex(benchmark::State& state) {
   std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    for (int i = 0; i < 1000; ++i) {
-      benchmark::DoNotOptimize(v);
-      benchmark::DoNotOptimize(v[4]);
-    }
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v[4]);
   }
-  state.SetItemsProcessed(1000 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_StdVectorIndex);
 
-#define UNROLL_2(x)            \
-  benchmark::DoNotOptimize(x); \
-  benchmark::DoNotOptimize(x);
-
-#define UNROLL_4(x) UNROLL_2(x) UNROLL_2(x)
-#define UNROLL_8(x) UNROLL_4(x) UNROLL_4(x)
-#define UNROLL_16(x) UNROLL_8(x) UNROLL_8(x);
-
 void BM_InlinedVectorDataInlined(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
   for (auto _ : state) {
-    UNROLL_16(v.data());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.data());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorDataInlined);
 
 void BM_InlinedVectorDataExternal(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.data());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.data());
   }
   state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
@@ -321,7 +312,8 @@ BENCHMARK(BM_InlinedVectorDataExternal);
 void BM_StdVectorData(benchmark::State& state) {
   std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.data());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.data());
   }
   state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
@@ -330,54 +322,54 @@ BENCHMARK(BM_StdVectorData);
 void BM_InlinedVectorSizeInlined(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
   for (auto _ : state) {
-    UNROLL_16(v.size());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.size());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorSizeInlined);
 
 void BM_InlinedVectorSizeExternal(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.size());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.size());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorSizeExternal);
 
 void BM_StdVectorSize(benchmark::State& state) {
   std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.size());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.size());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_StdVectorSize);
 
 void BM_InlinedVectorEmptyInlined(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
   for (auto _ : state) {
-    UNROLL_16(v.empty());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.empty());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorEmptyInlined);
 
 void BM_InlinedVectorEmptyExternal(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.empty());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.empty());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorEmptyExternal);
 
 void BM_StdVectorEmpty(benchmark::State& state) {
   std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.empty());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.empty());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_StdVectorEmpty);
 
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
index 308506093300..24059d94c876 100644
--- a/absl/container/internal/inlined_vector.h
+++ b/absl/container/internal/inlined_vector.h
@@ -24,11 +24,12 @@
 namespace absl {
 namespace inlined_vector_internal {
 
-template <typename T, size_t N, typename A>
-class InlinedVectorStorage {
-  static_assert(
-      N > 0, "InlinedVector cannot be instantiated with `0` inline elements.");
+template <typename InlinedVector>
+class Storage;
 
+template <template <typename, size_t, typename> class InlinedVector, typename T,
+          size_t N, typename A>
+class Storage<InlinedVector<T, N, A>> {
  public:
   using allocator_type = A;
   using value_type = typename allocator_type::value_type;
@@ -44,12 +45,7 @@ class InlinedVectorStorage {
   using reverse_iterator = std::reverse_iterator<iterator>;
   using const_reverse_iterator = std::reverse_iterator<const_iterator>;
 
-  constexpr static size_type GetInlinedCapacity() {
-    return static_cast<size_type>(N);
-  }
-
-  explicit InlinedVectorStorage(const allocator_type& a)
-      : allocator_and_tag_(a) {}
+  explicit Storage(const allocator_type& a) : allocator_and_tag_(a) {}
 
   // TODO(johnsoncj): Make the below types and members private after migration
 
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 87511148d04a..02fd0bfa0d93 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -845,6 +845,25 @@ TEST(Table, Erase) {
   EXPECT_TRUE(t.find(0) == t.end());
 }
 
+TEST(Table, EraseMaintainsValidIterator) {
+  IntTable t;
+  const int kNumElements = 100;
+  for (int i = 0; i < kNumElements; i ++) {
+    EXPECT_TRUE(t.emplace(i).second);
+  }
+  EXPECT_EQ(t.size(), kNumElements);
+
+  int num_erase_calls = 0;
+  auto it = t.begin();
+  while (it != t.end()) {
+    t.erase(it++);
+    num_erase_calls++;
+  }
+
+  EXPECT_TRUE(t.empty());
+  EXPECT_EQ(num_erase_calls, kNumElements);
+}
+
 // Collect N bad keys by following algorithm:
 // 1. Create an empty table and reserve it to 2 * N.
 // 2. Insert N random elements.