author    Abseil Team <absl-team@google.com>  2019-07-02 19:50 -0700
committer Mark Barolak <mbar@google.com>      2019-07-02 21:23 -0400
commit    e6b050212c859fbaf67abac76105da10ec348274 (patch)
tree      820bfd57d6117171e84089e6ff23d6333bb79c43 /absl/container
parent    c964fcffac27bd4a9ff67fe393410dd1146ef8b8 (diff)
Export of internal Abseil changes.
--
c2e2e2b21c3fe59b63279e7418c93c7289ee3e27 by Mark Barolak <mbar@google.com>:

Import of CCTZ from GitHub.

PiperOrigin-RevId: 256220326

--
3996b01f0c3eb60f72825b154dce8019b6215f1d by Derek Mauro <dmauro@google.com>:

Add GCC 4.9 test script. This will become our new minimum version, and
GCC 4.8 will be removed soon.

PiperOrigin-RevId: 256160891

--
2c13aa44bc8e497ebae9abe8b8adf73c2152622d by Abseil Team <absl-team@google.com>:

[Trivial] tweak flat_hash_map.h doc comment

The comment is probably right both ways, but the lack of an "if" here seemed jarring to me.

PiperOrigin-RevId: 256089069

--
16bc03b9b30fbf08d7dc61025fa8ec4b57077be8 by Abseil Team <absl-team@google.com>:

Fix symbolization for ELF files whose SYMTAB section is stripped but that have a DYNSYM section.

Previously, if we did not find a SYMTAB section, we would bail out of symbolization early, rather than checking the DYNSYM section.
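
For illustration only, a minimal sketch of the fallback idea (a hypothetical helper, not the actual absl/debugging symbolizer code), using the Linux <elf.h> section types:

  #include <elf.h>
  #include <cstddef>

  // Pick the symbol table to read: prefer SHT_SYMTAB, and only fall back
  // to SHT_DYNSYM when the full symbol table has been stripped.
  const Elf64_Shdr* PickSymbolSection(const Elf64_Shdr* sections,
                                      std::size_t count) {
    const Elf64_Shdr* dynsym = nullptr;
    for (std::size_t i = 0; i < count; ++i) {
      if (sections[i].sh_type == SHT_SYMTAB) return &sections[i];
      if (sections[i].sh_type == SHT_DYNSYM) dynsym = &sections[i];
    }
    // The old behavior was effectively "give up unless SHT_SYMTAB exists";
    // the fix is to use the dynamic symbol table instead.
    return dynsym;  // may still be null if neither section is present
  }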

PiperOrigin-RevId: 256061954

--
4c60ee329b1eeb0b0d10c4f76f282e5fbae2c5b2 by Derek Mauro <dmauro@google.com>:

Update to LLVM r363242 and Bazel 0.27.0

PiperOrigin-RevId: 256024405

--
18e1ba970d33f122026803d8ca90035b9088949d by Eric Fiselier <ericwf@google.com>:

Disable variant tests that break with P0608R3
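
For context, P0608R3 ("A sane variant converting constructor") narrows which alternatives std::variant's converting constructor will consider, so tests written against the old overload-resolution rules can change meaning. The paper's classic example, assuming a P0608R3-conforming standard library:

  #include <string>
  #include <variant>

  void VariantConversionExample() {
    // Pre-P0608R3, const char* -> bool was a viable (and preferred)
    // standard conversion, so this variant could end up holding bool.
    // Post-P0608R3 the bool alternative is excluded and std::string wins.
    std::variant<std::string, bool> v = "abc";
    // v.index() == 0 (std::string) with a P0608R3-conforming library.
    (void)v;
  }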

PiperOrigin-RevId: 255975764

--
0a89858464977e86096b62476faa3b64eb94aa1d by Abseil Team <absl-team@google.com>:

Internal change

PiperOrigin-RevId: 255891019

--
9b7424cac66f5407f0ed74ed288bf3099a4fa330 by CJ Johnson <johnsoncj@google.com>:

Updates the implementation of InlinedVector::insert(...) to be exception safe and adds exception safety tests for insert(...)
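
Abseil has its own exception-safety testing utilities; the standalone sketch below (ThrowingValue and the test body are hypothetical, not the Abseil test types) only illustrates the property being tested: if a copy made during insert(...) throws, the container must remain in a valid, usable state.

  #include <cassert>
  #include <stdexcept>
  #include "absl/container/inlined_vector.h"

  struct ThrowingValue {
    static inline int throw_after = 0;  // throw on the Nth copy when > 0
    int v = 0;
    ThrowingValue() = default;
    explicit ThrowingValue(int x) : v(x) {}
    ThrowingValue(const ThrowingValue& other) : v(other.v) {
      if (--throw_after == 0) throw std::runtime_error("copy failed");
    }
  };

  void InsertIsExceptionSafe() {
    absl::InlinedVector<ThrowingValue, 2> vec(4, ThrowingValue(1));
    ThrowingValue::throw_after = 2;  // fail partway through the insertion
    try {
      vec.insert(vec.begin() + 1, ThrowingValue(7));
    } catch (const std::runtime_error&) {
      // Basic guarantee: the vector is still valid and its elements are
      // still well-formed objects.
      for (const ThrowingValue& e : vec) {
        assert(e.v == 1 || e.v == 7);
      }
    }
  }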

PiperOrigin-RevId: 255881809

--
1288f4ba3883c510d92b09437fb8b051c19aa241 by CJ Johnson <johnsoncj@google.com>:

Updates the implementation of InlinedVector::insert(...) to be exception safe and adds exception safety tests for insert(...)

PiperOrigin-RevId: 255875277

--
39c04f3621491eaff9e2eda619718d5b5f20fbd9 by Abseil Team <absl-team@google.com>:

Use a typedef to allow building with NVCC

Without this change, NVCC fails to compile compressed_tuple.h.  NVCC is relevant
because TensorFlow uses NVCC on Ubuntu and includes Abseil.
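
The fix (visible in the compressed_tuple.h hunk below) names the dependent base class once through an alias instead of spelling the qualified dependent form at every call site. A simplified sketch of the pattern, with illustrative names rather than the real internals:

  template <typename T, int I>
  struct Storage {
    T value{};
    constexpr const T& get() const { return value; }
  };

  template <typename T>
  class Holder : private Storage<T, 0> {
    // Name the dependent base once through an alias template...
    template <int I>
    using StorageT = Storage<T, I>;

   public:
    // ...so members can write StorageT<0>::get() instead of the longer
    // dependent qualified spelling that NVCC failed to parse.
    constexpr const T& get() const { return StorageT<0>::get(); }
  };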

PiperOrigin-RevId: 255850176

--
e23f0309ccad69eb508ca02c9034cd4cdd740da0 by Abseil Team <absl-team@google.com>:

Internal change

PiperOrigin-RevId: 255787167

--
054aafcebf595077054164f1da3703124ab209b4 by Abseil Team <absl-team@google.com>:

Updates the ScopedAllocatorWorks test for InlinedVector so that it does not rely on the byte count allocated by the standard library

In doing so, removes LegacyNextCapacityFrom(...) impl function from InlinedVector

Also applies clang-format to the test file
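
For context, the test observes allocations by threading a counting allocator through both the InlinedVector and the nested std::vector<int>; exact byte counts differ across standard libraries (and between MSVC debug and opt builds), which is what makes hard-coded expectations fragile. A minimal counting-allocator sketch, not Abseil's CountingAllocator:

  #include <cstddef>
  #include <cstdint>
  #include <memory>
  #include <vector>

  template <typename T>
  struct ByteCountingAllocator {
    using value_type = T;
    int64_t* bytes_allocated;

    explicit ByteCountingAllocator(int64_t* counter)
        : bytes_allocated(counter) {}
    template <typename U>
    ByteCountingAllocator(const ByteCountingAllocator<U>& other)
        : bytes_allocated(other.bytes_allocated) {}

    T* allocate(std::size_t n) {
      *bytes_allocated += n * sizeof(T);
      return std::allocator<T>().allocate(n);
    }
    void deallocate(T* p, std::size_t n) {
      *bytes_allocated -= n * sizeof(T);
      std::allocator<T>().deallocate(p, n);
    }
  };

  template <typename T, typename U>
  bool operator==(const ByteCountingAllocator<T>& a,
                  const ByteCountingAllocator<U>& b) {
    return a.bytes_allocated == b.bytes_allocated;
  }
  template <typename T, typename U>
  bool operator!=(const ByteCountingAllocator<T>& a,
                  const ByteCountingAllocator<U>& b) {
    return !(a == b);
  }

  // Measure what a default-constructed vector<int> allocates instead of
  // hard-coding it: MSVC's debug mode, for example, allocates bookkeeping
  // memory that opt mode does not.
  int64_t DefaultVectorAllocationBytes() {
    int64_t bytes = 0;
    ByteCountingAllocator<int> alloc(&bytes);
    std::vector<int, ByteCountingAllocator<int>> v(alloc);
    return bytes;
  }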

PiperOrigin-RevId: 255760356

--
eb05fc9f78e3a163c93f1866e9fe9a8ad0d01622 by Abseil Team <absl-team@google.com>:

Internal change

PiperOrigin-RevId: 255706834

--
97abb824417604c45d9fcbb3e4ff1aa3000836f2 by Jorg Brown <jorg@google.com>:

Enhance compatibility of abseil's strings package with nvcc.

PiperOrigin-RevId: 255688500

--
efc5b9c221ee31e15d10b35d31c8f3ae6eddaa8c by Abseil Team <absl-team@google.com>:

Follow CCTZ's lead and allow GetWeekday() and GetYearDay() to be
called with any civil-time type.  A CivilSecond, for example, has
a weekday just as much as a CivilDay does.
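
A short usage sketch of what this enables, assuming absl/time/civil_time.h and the CCTZ-backed absl::GetWeekday()/absl::GetYearDay():

  #include "absl/time/civil_time.h"

  void CivilFieldExample() {
    absl::CivilSecond cs(2019, 7, 2, 19, 50, 0);
    absl::Weekday wd = absl::GetWeekday(cs);  // Tuesday
    int yd = absl::GetYearDay(cs);            // 183 (July 2 in a non-leap year)
    (void)wd;
    (void)yd;
  }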

PiperOrigin-RevId: 255659840

--
a75acbe954457919d8c6c8f4c2339b543760b375 by Derek Mauro <dmauro@google.com>:

Increase the timeout of randen_engine_test.
It seems to time out under TSAN often enough to justify the increase.

PiperOrigin-RevId: 255628086

--
160976ba47c7c6eb57af08e21f8eb640aa51e91b by Derek Mauro <dmauro@google.com>:

Update CMake documentation
Fixes https://github.com/abseil/abseil-cpp/issues/332

PiperOrigin-RevId: 255607313
GitOrigin-RevId: c2e2e2b21c3fe59b63279e7418c93c7289ee3e27
Change-Id: Iba4ac7ed23cbcdb22965b4958601f689be92cda4
Diffstat (limited to 'absl/container')
-rw-r--r--  absl/container/flat_hash_map.h                2
-rw-r--r--  absl/container/inlined_vector_test.cc       135
-rw-r--r--  absl/container/internal/compressed_tuple.h   11
-rw-r--r--  absl/container/internal/inlined_vector.h     16
4 files changed, 102 insertions(+), 62 deletions(-)
diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h
index 00cc4dcff9fc..0bc501b1143a 100644
--- a/absl/container/flat_hash_map.h
+++ b/absl/container/flat_hash_map.h
@@ -77,7 +77,7 @@ struct FlatHashMapPolicy;
 // NOTE: A `flat_hash_map` stores its value types directly inside its
 // implementation array to avoid memory indirection. Because a `flat_hash_map`
 // is designed to move data when rehashed, map values will not retain pointer
-// stability. If you require pointer stability, or your values are large,
+// stability. If you require pointer stability, or if your values are large,
 // consider using `absl::flat_hash_map<Key, std::unique_ptr<Value>>` instead.
 // If your types are not moveable or you require pointer stability for keys,
 // consider `absl::node_hash_map`.
diff --git a/absl/container/inlined_vector_test.cc b/absl/container/inlined_vector_test.cc
index 50315b83c147..60fe89b28aee 100644
--- a/absl/container/inlined_vector_test.cc
+++ b/absl/container/inlined_vector_test.cc
@@ -76,9 +76,12 @@ TYPED_TEST_SUITE_P(InstanceTest);
 // destroyed in the erase(begin, end) test.
 class RefCounted {
  public:
-  RefCounted(int value, int* count) : value_(value), count_(count) { Ref(); }
+  RefCounted(int value, int* count) : value_(value), count_(count) {
+    Ref();
+  }
 
-  RefCounted(const RefCounted& v) : value_(v.value_), count_(v.count_) {
+  RefCounted(const RefCounted& v)
+      : value_(v.value_), count_(v.count_) {
     Ref();
   }
 
@@ -287,7 +290,7 @@ TEST(RefCountedVec, EraseBeginEnd) {
         }
 
         // Check that the elements at the end are preserved.
-        for (int i = erase_end; i < len; ++i) {
+        for (int i = erase_end; i< len; ++i) {
           EXPECT_EQ(1, counts[i]);
         }
       }
@@ -549,10 +552,10 @@ TEST(IntVec, Resize) {
     static const int kResizeElem = 1000000;
     for (int k = 0; k < 10; k++) {
       // Enlarging resize
-      v.resize(len + k, kResizeElem);
-      EXPECT_EQ(len + k, v.size());
-      EXPECT_LE(len + k, v.capacity());
-      for (int i = 0; i < len + k; i++) {
+      v.resize(len+k, kResizeElem);
+      EXPECT_EQ(len+k, v.size());
+      EXPECT_LE(len+k, v.capacity());
+      for (int i = 0; i < len+k; i++) {
         if (i < len) {
           EXPECT_EQ(i, v[i]);
         } else {
@@ -863,7 +866,7 @@ TYPED_TEST_P(InstanceTest, Swap) {
       auto min_len = std::min(l1, l2);
       auto max_len = std::max(l1, l2);
       for (int i = 0; i < l1; i++) a.push_back(Instance(i));
-      for (int i = 0; i < l2; i++) b.push_back(Instance(100 + i));
+      for (int i = 0; i < l2; i++) b.push_back(Instance(100+i));
       EXPECT_EQ(tracker.instances(), l1 + l2);
       tracker.ResetCopiesMovesSwaps();
       {
@@ -931,7 +934,7 @@ TEST(IntVec, EqualAndNotEqual) {
     EXPECT_FALSE(a == b);
     EXPECT_TRUE(a != b);
 
-    b[i] = b[i] - 1;  // Back to before
+    b[i] = b[i] - 1;    // Back to before
     EXPECT_TRUE(a == b);
     EXPECT_FALSE(a != b);
   }
@@ -998,7 +1001,7 @@ TYPED_TEST_P(InstanceTest, CountConstructorsDestructors) {
 
     // reserve() must not increase the number of initialized objects
     SCOPED_TRACE("reserve");
-    v.reserve(len + 1000);
+    v.reserve(len+1000);
     EXPECT_EQ(tracker.instances(), len);
     EXPECT_EQ(tracker.copies() + tracker.moves(), len);
 
@@ -1244,8 +1247,9 @@ void InstanceCountElemAssignWithAllocationTest() {
     absl::InlinedVector<Instance, 2> v(original_contents.begin(),
                                        original_contents.end());
     v.assign(3, Instance(123));
-    EXPECT_THAT(v, AllOf(SizeIs(3), ElementsAre(ValueIs(123), ValueIs(123),
-                                                ValueIs(123))));
+    EXPECT_THAT(v,
+                AllOf(SizeIs(3),
+                      ElementsAre(ValueIs(123), ValueIs(123), ValueIs(123))));
     EXPECT_LE(v.size(), v.capacity());
   }
 }
@@ -1524,8 +1528,8 @@ TYPED_TEST_P(InstanceTest, InitializerListAssign) {
     SCOPED_TRACE(original_size);
     absl::InlinedVector<Instance, 2> v(original_size, Instance(12345));
     v.assign({Instance(3), Instance(4), Instance(5)});
-    EXPECT_THAT(
-        v, AllOf(SizeIs(3), ElementsAre(ValueIs(3), ValueIs(4), ValueIs(5))));
+    EXPECT_THAT(v, AllOf(SizeIs(3),
+                         ElementsAre(ValueIs(3), ValueIs(4), ValueIs(5))));
     EXPECT_LE(3, v.capacity());
   }
 }
@@ -1550,7 +1554,7 @@ TEST(DynamicVec, DynamicVecCompiles) {
 TEST(AllocatorSupportTest, Constructors) {
   using MyAlloc = CountingAllocator<int>;
   using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
-  const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
+  const int ia[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
   int64_t allocated = 0;
   MyAlloc alloc(&allocated);
   { AllocVec ABSL_ATTRIBUTE_UNUSED v; }
@@ -1566,7 +1570,7 @@ TEST(AllocatorSupportTest, Constructors) {
 TEST(AllocatorSupportTest, CountAllocations) {
   using MyAlloc = CountingAllocator<int>;
   using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
-  const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
+  const int ia[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
   int64_t allocated = 0;
   MyAlloc alloc(&allocated);
   {
@@ -1630,8 +1634,8 @@ TEST(AllocatorSupportTest, SwapBothAllocated) {
   int64_t allocated1 = 0;
   int64_t allocated2 = 0;
   {
-    const int ia1[] = {0, 1, 2, 3, 4, 5, 6, 7};
-    const int ia2[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    const int ia1[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+    const int ia2[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
     MyAlloc a1(&allocated1);
     MyAlloc a2(&allocated2);
     AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1);
@@ -1655,8 +1659,8 @@ TEST(AllocatorSupportTest, SwapOneAllocated) {
   int64_t allocated1 = 0;
   int64_t allocated2 = 0;
   {
-    const int ia1[] = {0, 1, 2, 3, 4, 5, 6, 7};
-    const int ia2[] = {0, 1, 2, 3};
+    const int ia1[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+    const int ia2[] = { 0, 1, 2, 3 };
     MyAlloc a1(&allocated1);
     MyAlloc a2(&allocated2);
     AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1);
@@ -1677,42 +1681,65 @@ TEST(AllocatorSupportTest, SwapOneAllocated) {
 
 TEST(AllocatorSupportTest, ScopedAllocatorWorks) {
   using StdVector = std::vector<int, CountingAllocator<int>>;
-  using Alloc = CountingAllocator<StdVector>;
-  using ScopedAlloc = std::scoped_allocator_adaptor<Alloc>;
-  using AllocVec = absl::InlinedVector<StdVector, 1, ScopedAlloc>;
-
-  {
-    int64_t total_allocated_byte_count = 0;
-
-    AllocVec inlined_case(ScopedAlloc(Alloc(+&total_allocated_byte_count)));
-    inlined_case.emplace_back();
-
-    int64_t absl_responsible_for_count = total_allocated_byte_count;
-    EXPECT_EQ(absl_responsible_for_count, 0);
-
-    inlined_case[0].emplace_back();
-    EXPECT_GT(total_allocated_byte_count, absl_responsible_for_count);
-
-    inlined_case.clear();
-    EXPECT_EQ(total_allocated_byte_count, 0);
-  }
-
-  {
-    int64_t total_allocated_byte_count = 0;
-
-    AllocVec allocated_case(ScopedAlloc(Alloc(+&total_allocated_byte_count)));
-    allocated_case.emplace_back();
-    allocated_case.emplace_back();
-
-    int64_t absl_responsible_for_count = total_allocated_byte_count;
-    EXPECT_GT(absl_responsible_for_count, 0);
+  using MyAlloc =
+      std::scoped_allocator_adaptor<CountingAllocator<StdVector>>;
+  using AllocVec = absl::InlinedVector<StdVector, 4, MyAlloc>;
+
+  // MSVC 2017's std::vector allocates different amounts of memory in debug
+  // versus opt mode.
+  int64_t test_allocated = 0;
+  StdVector v(CountingAllocator<int>{&test_allocated});
+  // The amount of memory allocated by a default constructed vector<int>
+  auto default_std_vec_allocated = test_allocated;
+  v.push_back(1);
+  // The amount of memory allocated by a copy-constructed vector<int> with one
+  // element.
+  int64_t one_element_std_vec_copy_allocated = test_allocated;
 
-    allocated_case[1].emplace_back();
-    EXPECT_GT(total_allocated_byte_count, absl_responsible_for_count);
+  int64_t allocated = 0;
+  AllocVec vec(MyAlloc{CountingAllocator<StdVector>{&allocated}});
+  EXPECT_EQ(allocated, 0);
 
-    allocated_case.clear();
-    EXPECT_EQ(total_allocated_byte_count, 0);
-  }
+  // This default constructs a vector<int>, but the allocator should pass itself
+  // into the vector<int>, so check allocation compared to that.
+  // The absl::InlinedVector does not allocate any memory.
+  // The vector<int> may allocate any memory.
+  auto expected = default_std_vec_allocated;
+  vec.resize(1);
+  EXPECT_EQ(allocated, expected);
+
+  // We make vector<int> allocate memory.
+  // It must go through the allocator even though we didn't construct the
+  // vector directly.  This assumes that vec[0] doesn't need to grow its
+  // allocation.
+  expected += sizeof(int);
+  vec[0].push_back(1);
+  EXPECT_EQ(allocated, expected);
+
+  // Another allocating vector.
+  expected += one_element_std_vec_copy_allocated;
+  vec.push_back(vec[0]);
+  EXPECT_EQ(allocated, expected);
+
+  // Overflow the inlined memory.
+  // The absl::InlinedVector will now allocate.
+  expected += sizeof(StdVector) * 8 + default_std_vec_allocated * 3;
+  vec.resize(5);
+  EXPECT_EQ(allocated, expected);
+
+  // Adding one more in external mode should also work.
+  expected += one_element_std_vec_copy_allocated;
+  vec.push_back(vec[0]);
+  EXPECT_EQ(allocated, expected);
+
+  // And extending these should still work.  This assumes that vec[0] does not
+  // need to grow its allocation.
+  expected += sizeof(int);
+  vec[0].push_back(1);
+  EXPECT_EQ(allocated, expected);
+
+  vec.clear();
+  EXPECT_EQ(allocated, 0);
 }
 
 TEST(AllocatorSupportTest, SizeAllocConstructor) {
diff --git a/absl/container/internal/compressed_tuple.h b/absl/container/internal/compressed_tuple.h
index 1713ad6862c5..c29ab41eb9fd 100644
--- a/absl/container/internal/compressed_tuple.h
+++ b/absl/container/internal/compressed_tuple.h
@@ -188,6 +188,9 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
   template <int I>
   using ElemT = internal_compressed_tuple::ElemT<CompressedTuple, I>;
 
+  template <int I>
+  using StorageT = internal_compressed_tuple::Storage<ElemT<I>, I>;
+
  public:
   constexpr CompressedTuple() = default;
   explicit constexpr CompressedTuple(Ts... base)
@@ -200,19 +203,17 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
 
   template <int I>
   constexpr const ElemT<I>& get() const& {
-    return internal_compressed_tuple::Storage<ElemT<I>, I>::get();
+    return StorageT<I>::get();
   }
 
   template <int I>
   ElemT<I>&& get() && {
-    return std::move(*this)
-        .internal_compressed_tuple::template Storage<ElemT<I>, I>::get();
+    return std::move(*this).StorageT<I>::get();
   }
 
   template <int I>
   constexpr const ElemT<I>&& get() const&& {
-    return absl::move(*this)
-        .internal_compressed_tuple::template Storage<ElemT<I>, I>::get();
+    return absl::move(*this).StorageT<I>::get();
   }
 };
 
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
index 84b97791fa20..0ab2d7daeaf2 100644
--- a/absl/container/internal/inlined_vector.h
+++ b/absl/container/internal/inlined_vector.h
@@ -402,6 +402,16 @@ class Storage {
     return current_capacity * 2;
   }
 
+  static size_type LegacyNextCapacityFrom(size_type current_capacity,
+                                          size_type requested_capacity) {
+    // TODO(johnsoncj): Get rid of this old behavior.
+    size_type new_capacity = current_capacity;
+    while (new_capacity < requested_capacity) {
+      new_capacity *= 2;
+    }
+    return new_capacity;
+  }
+
   using Metadata =
       container_internal::CompressedTuple<allocator_type, size_type>;
 
@@ -512,7 +522,8 @@ auto Storage<T, N, A>::Resize(ValueAdapter values, size_type new_size) -> void {
   absl::Span<value_type> destroy_loop;
 
   if (new_size > storage_view.capacity) {
-    pointer new_data = allocation_tx.Allocate(new_size);
+    pointer new_data = allocation_tx.Allocate(
+        LegacyNextCapacityFrom(storage_view.capacity, new_size));
 
     // Construct new objects in `new_data`
     construct_loop = {new_data + storage_view.size,
@@ -632,7 +643,8 @@ auto Storage<T, N, A>::Reserve(size_type requested_capacity) -> void {
   IteratorValueAdapter<MoveIterator> move_values(
       MoveIterator(storage_view.data));
 
-  pointer new_data = allocation_tx.Allocate(requested_capacity);
+  pointer new_data = allocation_tx.Allocate(
+      LegacyNextCapacityFrom(storage_view.capacity, requested_capacity));
 
   inlined_vector_internal::ConstructElements(GetAllocPtr(), new_data,
                                              &move_values, storage_view.size);