Diffstat (limited to 'absl/container')
-rw-r--r--  absl/container/BUILD.bazel                              |   1
-rw-r--r--  absl/container/CMakeLists.txt                           |   1
-rw-r--r--  absl/container/inlined_vector.h                         | 421
-rw-r--r--  absl/container/inlined_vector_benchmark.cc              | 192
-rw-r--r--  absl/container/inlined_vector_exception_safety_test.cc  |  93
-rw-r--r--  absl/container/inlined_vector_test.cc                   |   6
-rw-r--r--  absl/container/internal/compressed_tuple.h              |  78
-rw-r--r--  absl/container/internal/compressed_tuple_test.cc        |  21
-rw-r--r--  absl/container/internal/inlined_vector.h                | 197
9 files changed, 711 insertions, 299 deletions
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
index 998294c07cab..17d725c18ba9 100644
--- a/absl/container/BUILD.bazel
+++ b/absl/container/BUILD.bazel
@@ -127,6 +127,7 @@ cc_library(
         "//absl/base:core_headers",
         "//absl/memory",
         "//absl/meta:type_traits",
+        "//absl/types:span",
     ],
 )
diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt
index 5196e50346a8..6df331e17165 100644
--- a/absl/container/CMakeLists.txt
+++ b/absl/container/CMakeLists.txt
@@ -126,6 +126,7 @@ absl_cc_library(
     absl::compressed_tuple
     absl::core_headers
     absl::memory
+    absl::span
     absl::type_traits
   PUBLIC
 )
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index 2c96cc37b47a..10881b22a918 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -166,7 +166,7 @@ class InlinedVector {
   InlinedVector(const InlinedVector& other, const allocator_type& alloc)
       : storage_(alloc) {
     if (IsMemcpyOk::value && !other.storage_.GetIsAllocated()) {
-      storage_.MemcpyContents(other.storage_);
+      storage_.MemcpyFrom(other.storage_);
     } else {
       storage_.Initialize(IteratorValueAdapter<const_pointer>(other.data()),
                           other.size());
@@ -193,7 +193,7 @@ class InlinedVector {
           std::is_nothrow_move_constructible<value_type>::value)
       : storage_(*other.storage_.GetAllocPtr()) {
     if (IsMemcpyOk::value) {
-      storage_.MemcpyContents(other.storage_);
+      storage_.MemcpyFrom(other.storage_);
       other.storage_.SetInlinedSize(0);
     } else if (other.storage_.GetIsAllocated()) {
       storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
@@ -227,7 +227,7 @@ class InlinedVector {
           absl::allocator_is_nothrow<allocator_type>::value)
       : storage_(alloc) {
     if (IsMemcpyOk::value) {
-      storage_.MemcpyContents(other.storage_);
+      storage_.MemcpyFrom(other.storage_);
       other.storage_.SetInlinedSize(0);
     } else if ((*storage_.GetAllocPtr() == *other.storage_.GetAllocPtr()) &&
                other.storage_.GetIsAllocated()) {
@@ -464,26 +464,22 @@ class InlinedVector {
   InlinedVector& operator=(InlinedVector&& other) {
     if (ABSL_PREDICT_FALSE(this == std::addressof(other))) return *this;
 
-    if (other.storage_.GetIsAllocated()) {
-      clear();
-      storage_.SetAllocatedSize(other.size());
-      storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
-                                other.storage_.GetAllocatedCapacity());
+    if (IsMemcpyOk::value || other.storage_.GetIsAllocated()) {
+      inlined_vector_internal::DestroyElements(storage_.GetAllocPtr(), data(),
+                                               size());
+      if (storage_.GetIsAllocated()) {
+        AllocatorTraits::deallocate(*storage_.GetAllocPtr(),
+                                    storage_.GetAllocatedData(),
+                                    storage_.GetAllocatedCapacity());
+      }
+      storage_.MemcpyFrom(other.storage_);
       other.storage_.SetInlinedSize(0);
     } else {
-      if (storage_.GetIsAllocated()) clear();
-      // Both are inlined now.
-      if (size() < other.size()) {
-        auto mid = std::make_move_iterator(other.begin() + size());
-        std::copy(std::make_move_iterator(other.begin()), mid, begin());
-        UninitializedCopy(mid, std::make_move_iterator(other.end()), end());
-      } else {
-        auto new_end = std::copy(std::make_move_iterator(other.begin()),
-                                 std::make_move_iterator(other.end()), begin());
-        Destroy(new_end, end());
-      }
-      storage_.SetInlinedSize(other.size());
+      storage_.Assign(IteratorValueAdapter<MoveIterator>(
+                          MoveIterator(other.storage_.GetInlinedData())),
+                      other.size());
     }
+
     return *this;
   }
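The rewritten move assignment has only two paths: steal the representation wholesale (when the payload is memcpy-safe or the source lives on the heap) or move element-wise through the new `Storage::Assign()`. A small usage sketch of the observable effect; the inline capacity, sizes, and element type here are illustrative only:

    #include <cassert>
    #include <string>
    #include <utility>
    #include "absl/container/inlined_vector.h"

    int main() {
      // `a` has spilled to the heap (4 elements > inline capacity 2), so the
      // first branch above steals its allocation via MemcpyFrom().
      absl::InlinedVector<std::string, 2> a = {"w", "x", "y", "z"};
      absl::InlinedVector<std::string, 2> b;
      const std::string* heap = a.data();
      b = std::move(a);
      assert(b.data() == heap);  // b now owns a's old heap buffer

      // A still-inlined, non-memcpy-safe source instead goes through
      // Storage::Assign() with move iterators, element by element.
      absl::InlinedVector<std::string, 2> c = {"inline"};
      b = std::move(c);
      assert(b.size() == 1);
    }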
@@ -491,23 +487,7 @@ class InlinedVector {
   //
   // Replaces the contents of the inlined vector with `n` copies of `v`.
   void assign(size_type n, const_reference v) {
-    if (n <= size()) {  // Possibly shrink
-      std::fill_n(begin(), n, v);
-      erase(begin() + n, end());
-      return;
-    }
-    // Grow
-    reserve(n);
-    std::fill_n(begin(), size(), v);
-    if (storage_.GetIsAllocated()) {
-      UninitializedFill(storage_.GetAllocatedData() + size(),
-                        storage_.GetAllocatedData() + n, v);
-      storage_.SetAllocatedSize(n);
-    } else {
-      UninitializedFill(storage_.GetInlinedData() + size(),
-                        storage_.GetInlinedData() + n, v);
-      storage_.SetInlinedSize(n);
-    }
+    storage_.Assign(CopyValueAdapter(v), n);
   }
 
   // Overload of `InlinedVector::assign()` to replace the contents of the
@@ -522,24 +502,8 @@ class InlinedVector {
   template <typename ForwardIterator,
             EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
   void assign(ForwardIterator first, ForwardIterator last) {
-    auto length = std::distance(first, last);
-
-    // Prefer reassignment to copy construction for elements.
-    if (static_cast<size_type>(length) <= size()) {
-      erase(std::copy(first, last, begin()), end());
-      return;
-    }
-
-    reserve(length);
-    iterator out = begin();
-    for (; out != end(); ++first, ++out) *out = *first;
-    if (storage_.GetIsAllocated()) {
-      UninitializedCopy(first, last, out);
-      storage_.SetAllocatedSize(length);
-    } else {
-      UninitializedCopy(first, last, out);
-      storage_.SetInlinedSize(length);
-    }
+    storage_.Assign(IteratorValueAdapter<ForwardIterator>(first),
+                    std::distance(first, last));
   }
 
   // Overload of `InlinedVector::assign()` to replace the contents of the
@@ -624,7 +588,15 @@ class InlinedVector {
   // of `v` starting at `pos`. Returns an `iterator` pointing to the first of
   // the newly inserted elements.
   iterator insert(const_iterator pos, size_type n, const_reference v) {
-    return InsertWithCount(pos, n, v);
+    assert(pos >= begin() && pos <= end());
+    if (ABSL_PREDICT_FALSE(n == 0)) {
+      return const_cast<iterator>(pos);
+    }
+    value_type copy = v;
+    std::pair<iterator, iterator> it_pair = ShiftRight(pos, n);
+    std::fill(it_pair.first, it_pair.second, copy);
+    UninitializedFill(it_pair.second, it_pair.first + n, copy);
+    return it_pair.first;
   }
 
   // Overload of `InlinedVector::insert()` for copying the contents of the
@@ -644,7 +616,17 @@ class InlinedVector {
             EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
   iterator insert(const_iterator pos, ForwardIterator first,
                   ForwardIterator last) {
-    return InsertWithForwardRange(pos, first, last);
+    assert(pos >= begin() && pos <= end());
+    if (ABSL_PREDICT_FALSE(first == last)) {
+      return const_cast<iterator>(pos);
+    }
+    auto n = std::distance(first, last);
+    std::pair<iterator, iterator> it_pair = ShiftRight(pos, n);
+    size_type used_spots = it_pair.second - it_pair.first;
+    auto open_spot = std::next(first, used_spots);
+    std::copy(first, open_spot, it_pair.first);
+    UninitializedCopy(open_spot, last, it_pair.second);
+    return it_pair.first;
   }
 
   // Overload of `InlinedVector::insert()` for inserting elements constructed
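Both inlined `insert()` bodies rely on `ShiftRight()` opening a gap and reporting where the gap's still-initialized slots end, so the caller can copy-assign into live slots and placement-construct into raw ones. A caller-level sketch with illustrative values:

    #include <cassert>
    #include <iterator>
    #include "absl/container/inlined_vector.h"

    int main() {
      absl::InlinedVector<int, 8> v = {1, 2, 5, 6};
      int more[] = {3, 4};
      // ShiftRight() opens a two-element gap at index 2; the part of the gap
      // that is still initialized is copy-assigned, the raw part constructed.
      auto it = v.insert(v.begin() + 2, std::begin(more), std::end(more));
      assert(*it == 3);  // iterator to the first inserted element
      assert(v.size() == 6 && v[2] == 3 && v[3] == 4 && v[4] == 5);
    }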
@@ -696,17 +678,26 @@ class InlinedVector {
   reference emplace_back(Args&&... args) {
     size_type s = size();
     if (ABSL_PREDICT_FALSE(s == capacity())) {
-      return GrowAndEmplaceBack(std::forward<Args>(args)...);
-    }
-    pointer space;
-    if (storage_.GetIsAllocated()) {
-      storage_.SetAllocatedSize(s + 1);
-      space = storage_.GetAllocatedData();
+      size_type new_capacity = 2 * capacity();
+      pointer new_data =
+          AllocatorTraits::allocate(*storage_.GetAllocPtr(), new_capacity);
+      reference new_element =
+          Construct(new_data + s, std::forward<Args>(args)...);
+      UninitializedCopy(std::make_move_iterator(data()),
+                        std::make_move_iterator(data() + s), new_data);
+      ResetAllocation(new_data, new_capacity, s + 1);
+      return new_element;
     } else {
-      storage_.SetInlinedSize(s + 1);
-      space = storage_.GetInlinedData();
+      pointer space;
+      if (storage_.GetIsAllocated()) {
+        storage_.SetAllocatedSize(s + 1);
+        space = storage_.GetAllocatedData();
+      } else {
+        storage_.SetInlinedSize(s + 1);
+        space = storage_.GetInlinedData();
+      }
+      return Construct(space + s, std::forward<Args>(args)...);
     }
-    return Construct(space + s, std::forward<Args>(args)...);
   }
 
   // `InlinedVector::push_back()`
@@ -727,7 +718,7 @@ class InlinedVector {
   void pop_back() noexcept {
     assert(!empty());
     AllocatorTraits::destroy(*storage_.GetAllocPtr(), data() + (size() - 1));
-    storage_.AddSize(-1);
+    storage_.SubtractSize(1);
   }
 
   // `InlinedVector::erase()`
@@ -794,10 +785,20 @@ class InlinedVector {
   // effects. Otherwise, `reserve()` will reallocate, performing an n-time
   // element-wise move of everything contained.
   void reserve(size_type n) {
-    if (n > capacity()) {
-      // Make room for new elements
-      EnlargeBy(n - size());
+    if (n <= capacity()) {
+      return;
+    }
+    const size_type s = size();
+    size_type target = (std::max)(static_cast<size_type>(N), n);
+    size_type new_capacity = capacity();
+    while (new_capacity < target) {
+      new_capacity <<= 1;
     }
+    pointer new_data =
+        AllocatorTraits::allocate(*storage_.GetAllocPtr(), new_capacity);
+    UninitializedCopy(std::make_move_iterator(data()),
+                      std::make_move_iterator(data() + s), new_data);
+    ResetAllocation(new_data, new_capacity, s);
   }
 
   // `InlinedVector::shrink_to_fit()`
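`reserve()` and the growth path of `emplace_back()` both double the capacity until it covers `max(N, requested)`; `emplace_back()` additionally constructs the new element before relocating the old ones, which keeps aliasing calls such as `v.emplace_back(v[0])` correct across a reallocation. A standalone sketch of the doubling computation (illustrative helper, not part of the header):

    #include <algorithm>
    #include <cstddef>

    // Mirrors the growth loop in reserve(): smallest repeated doubling of the
    // current capacity that reaches max(inline capacity, requested size).
    size_t GrownCapacity(size_t current_capacity, size_t inline_capacity,
                         size_t requested) {
      size_t target = std::max(inline_capacity, requested);
      size_t new_capacity = current_capacity;
      while (new_capacity < target) new_capacity <<= 1;
      return new_capacity;
    }

    // GrownCapacity(4, 4, 9) == 16, via 4 -> 8 -> 16.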
@@ -812,37 +813,105 @@ class InlinedVector {
   // If `size() > N` and `size() < capacity()` the elements will be moved to a
   // smaller heap allocation.
   void shrink_to_fit() {
-    const auto s = size();
-    if (ABSL_PREDICT_FALSE(!storage_.GetIsAllocated() || s == capacity()))
-      return;
-
-    if (s <= N) {
-      // Move the elements to the inlined storage.
-      // We have to do this using a temporary, because `inlined_storage` and
-      // `allocation_storage` are in a union field.
-      auto temp = std::move(*this);
-      assign(std::make_move_iterator(temp.begin()),
-             std::make_move_iterator(temp.end()));
-      return;
+    if (storage_.GetIsAllocated()) {
+      storage_.ShrinkToFit();
     }
-
-    // Reallocate storage and move elements.
-    // We can't simply use the same approach as above, because `assign()` would
-    // call into `reserve()` internally and reserve larger capacity than we need
-    pointer new_data = AllocatorTraits::allocate(*storage_.GetAllocPtr(), s);
-    UninitializedCopy(std::make_move_iterator(storage_.GetAllocatedData()),
-                      std::make_move_iterator(storage_.GetAllocatedData() + s),
-                      new_data);
-    ResetAllocation(new_data, s, s);
   }
 
   // `InlinedVector::swap()`
   //
   // Swaps the contents of this inlined vector with the contents of `other`.
   void swap(InlinedVector& other) {
-    if (ABSL_PREDICT_FALSE(this == std::addressof(other))) return;
+    using std::swap;
+
+    if (ABSL_PREDICT_FALSE(this == std::addressof(other))) {
+      return;
+    }
+
+    bool is_allocated = storage_.GetIsAllocated();
+    bool other_is_allocated = other.storage_.GetIsAllocated();
+
+    if (is_allocated && other_is_allocated) {
+      // Both out of line, so just swap the tag, allocation, and allocator.
+      storage_.SwapSizeAndIsAllocated(std::addressof(other.storage_));
+      storage_.SwapAllocatedSizeAndCapacity(std::addressof(other.storage_));
+      swap(*storage_.GetAllocPtr(), *other.storage_.GetAllocPtr());
+
+      return;
+    }
+
+    if (!is_allocated && !other_is_allocated) {
+      // Both inlined: swap up to smaller size, then move remaining elements.
+      InlinedVector* a = this;
+      InlinedVector* b = std::addressof(other);
+      if (size() < other.size()) {
+        swap(a, b);
+      }
+
+      const size_type a_size = a->size();
+      const size_type b_size = b->size();
+      assert(a_size >= b_size);
+      // `a` is larger. Swap the elements up to the smaller array size.
+      std::swap_ranges(a->storage_.GetInlinedData(),
+                       a->storage_.GetInlinedData() + b_size,
+                       b->storage_.GetInlinedData());
+
+      // Move the remaining elements:
+      //   [`b_size`, `a_size`) from `a` -> [`b_size`, `a_size`) from `b`
+      b->UninitializedCopy(a->storage_.GetInlinedData() + b_size,
+                           a->storage_.GetInlinedData() + a_size,
+                           b->storage_.GetInlinedData() + b_size);
+      a->Destroy(a->storage_.GetInlinedData() + b_size,
+                 a->storage_.GetInlinedData() + a_size);
+
+      storage_.SwapSizeAndIsAllocated(std::addressof(other.storage_));
+      swap(*storage_.GetAllocPtr(), *other.storage_.GetAllocPtr());
+
+      assert(b->size() == a_size);
+      assert(a->size() == b_size);
+      return;
+    }
+
+    // One is out of line, one is inline.
+    // We first move the elements from the inlined vector into the
+    // inlined space in the other vector. We then put the other vector's
+    // pointer/capacity into the originally inlined vector and swap
+    // the tags.
+    InlinedVector* a = this;
+    InlinedVector* b = std::addressof(other);
+    if (a->storage_.GetIsAllocated()) {
+      swap(a, b);
+    }
+
+    assert(!a->storage_.GetIsAllocated());
+    assert(b->storage_.GetIsAllocated());
+
+    const size_type a_size = a->size();
+    const size_type b_size = b->size();
+    // In an optimized build, `b_size` would be unused.
+    static_cast<void>(b_size);
+
+    // Made Local copies of `size()`, these can now be swapped
+    a->storage_.SwapSizeAndIsAllocated(std::addressof(b->storage_));
+
+    // Copy out before `b`'s union gets clobbered by `inline_space`
+    pointer b_data = b->storage_.GetAllocatedData();
+    size_type b_capacity = b->storage_.GetAllocatedCapacity();
+
+    b->UninitializedCopy(a->storage_.GetInlinedData(),
+                         a->storage_.GetInlinedData() + a_size,
+                         b->storage_.GetInlinedData());
+    a->Destroy(a->storage_.GetInlinedData(),
+               a->storage_.GetInlinedData() + a_size);
+
+    a->storage_.SetAllocatedData(b_data, b_capacity);
 
-    SwapImpl(other);
+    if (*a->storage_.GetAllocPtr() != *b->storage_.GetAllocPtr()) {
+      swap(*a->storage_.GetAllocPtr(), *b->storage_.GetAllocPtr());
+    }
+
+    assert(b->size() == a_size);
+    assert(a->size() == b_size);
   }
 
  private:
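`swap()` now distinguishes three cases inline: both heap-allocated (swap pointers, sizes, and allocators), both inlined (element-wise swap plus a tail move), and mixed (move the inlined elements across, then hand over the allocation). A usage sketch of the mixed case; the capacity and sizes are illustrative:

    #include <cassert>
    #include "absl/container/inlined_vector.h"

    int main() {
      absl::InlinedVector<int, 4> inlined = {1, 2};             // fits inline
      absl::InlinedVector<int, 4> allocated = {1, 2, 3, 4, 5};  // on the heap
      const int* heap = allocated.data();

      inlined.swap(allocated);  // mixed case: the third branch above

      assert(inlined.size() == 5);
      assert(inlined.data() == heap);  // took over the heap allocation
      assert(allocated.size() == 2);   // elements moved into inline space
    }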
@@ -900,31 +969,6 @@ class InlinedVector {
 #endif  // !defined(NDEBUG)
   }
 
-  // Enlarge the underlying representation so we can store `size_ + delta` elems
-  // in allocated space. The size is not changed, and any newly added memory is
-  // not initialized.
-  void EnlargeBy(size_type delta) {
-    const size_type s = size();
-    assert(s <= capacity());
-
-    size_type target = (std::max)(static_cast<size_type>(N), s + delta);
-
-    // Compute new capacity by repeatedly doubling current capacity
-    // TODO(psrc): Check and avoid overflow?
-    size_type new_capacity = capacity();
-    while (new_capacity < target) {
-      new_capacity <<= 1;
-    }
-
-    pointer new_data =
-        AllocatorTraits::allocate(*storage_.GetAllocPtr(), new_capacity);
-
-    UninitializedCopy(std::make_move_iterator(data()),
-                      std::make_move_iterator(data() + s), new_data);
-
-    ResetAllocation(new_data, new_capacity, s);
-  }
-
   // Shift all elements from `position` to `end()` by `n` places to the right.
   // If the vector needs to be enlarged, memory will be allocated.
   // Returns `iterator`s pointing to the start of the previously-initialized
@@ -991,147 +1035,6 @@ class InlinedVector {
     return std::make_pair(start_used, start_raw);
   }
 
-  template <typename... Args>
-  reference GrowAndEmplaceBack(Args&&... args) {
-    assert(size() == capacity());
-    const size_type s = size();
-
-    size_type new_capacity = 2 * capacity();
-    pointer new_data =
-        AllocatorTraits::allocate(*storage_.GetAllocPtr(), new_capacity);
-
-    reference new_element =
-        Construct(new_data + s, std::forward<Args>(args)...);
-    UninitializedCopy(std::make_move_iterator(data()),
-                      std::make_move_iterator(data() + s), new_data);
-
-    ResetAllocation(new_data, new_capacity, s + 1);
-
-    return new_element;
-  }
-
-  iterator InsertWithCount(const_iterator position, size_type n,
-                           const_reference v) {
-    assert(position >= begin() && position <= end());
-    if (ABSL_PREDICT_FALSE(n == 0)) return const_cast<iterator>(position);
-
-    value_type copy = v;
-    std::pair<iterator, iterator> it_pair = ShiftRight(position, n);
-    std::fill(it_pair.first, it_pair.second, copy);
-    UninitializedFill(it_pair.second, it_pair.first + n, copy);
-
-    return it_pair.first;
-  }
-
-  template <typename ForwardIt>
-  iterator InsertWithForwardRange(const_iterator position, ForwardIt first,
-                                  ForwardIt last) {
-    static_assert(absl::inlined_vector_internal::IsAtLeastForwardIterator<
-                      ForwardIt>::value,
-                  "");
-    assert(position >= begin() && position <= end());
-
-    if (ABSL_PREDICT_FALSE(first == last))
-      return const_cast<iterator>(position);
-
-    auto n = std::distance(first, last);
-    std::pair<iterator, iterator> it_pair = ShiftRight(position, n);
-    size_type used_spots = it_pair.second - it_pair.first;
-    auto open_spot = std::next(first, used_spots);
-    std::copy(first, open_spot, it_pair.first);
-    UninitializedCopy(open_spot, last, it_pair.second);
-    return it_pair.first;
-  }
-
-  void SwapImpl(InlinedVector& other) {
-    using std::swap;
-
-    bool is_allocated = storage_.GetIsAllocated();
-    bool other_is_allocated = other.storage_.GetIsAllocated();
-
-    if (is_allocated && other_is_allocated) {
-      // Both out of line, so just swap the tag, allocation, and allocator.
-      storage_.SwapSizeAndIsAllocated(std::addressof(other.storage_));
-      storage_.SwapAllocatedSizeAndCapacity(std::addressof(other.storage_));
-      swap(*storage_.GetAllocPtr(), *other.storage_.GetAllocPtr());
-
-      return;
-    }
-
-    if (!is_allocated && !other_is_allocated) {
-      // Both inlined: swap up to smaller size, then move remaining elements.
-      InlinedVector* a = this;
-      InlinedVector* b = std::addressof(other);
-      if (size() < other.size()) {
-        swap(a, b);
-      }
-
-      const size_type a_size = a->size();
-      const size_type b_size = b->size();
-      assert(a_size >= b_size);
-      // `a` is larger. Swap the elements up to the smaller array size.
-      std::swap_ranges(a->storage_.GetInlinedData(),
-                       a->storage_.GetInlinedData() + b_size,
-                       b->storage_.GetInlinedData());
-
-      // Move the remaining elements:
-      //   [`b_size`, `a_size`) from `a` -> [`b_size`, `a_size`) from `b`
-      b->UninitializedCopy(a->storage_.GetInlinedData() + b_size,
-                           a->storage_.GetInlinedData() + a_size,
-                           b->storage_.GetInlinedData() + b_size);
-      a->Destroy(a->storage_.GetInlinedData() + b_size,
-                 a->storage_.GetInlinedData() + a_size);
-
-      storage_.SwapSizeAndIsAllocated(std::addressof(other.storage_));
-      swap(*storage_.GetAllocPtr(), *other.storage_.GetAllocPtr());
-
-      assert(b->size() == a_size);
-      assert(a->size() == b_size);
-      return;
-    }
-
-    // One is out of line, one is inline.
-    // We first move the elements from the inlined vector into the
-    // inlined space in the other vector. We then put the other vector's
-    // pointer/capacity into the originally inlined vector and swap
-    // the tags.
-    InlinedVector* a = this;
-    InlinedVector* b = std::addressof(other);
-    if (a->storage_.GetIsAllocated()) {
-      swap(a, b);
-    }
-
-    assert(!a->storage_.GetIsAllocated());
-    assert(b->storage_.GetIsAllocated());
-
-    const size_type a_size = a->size();
-    const size_type b_size = b->size();
-    // In an optimized build, `b_size` would be unused.
-    static_cast<void>(b_size);
-
-    // Made Local copies of `size()`, these can now be swapped
-    a->storage_.SwapSizeAndIsAllocated(std::addressof(b->storage_));
-
-    // Copy out before `b`'s union gets clobbered by `inline_space`
-    pointer b_data = b->storage_.GetAllocatedData();
-    size_type b_capacity = b->storage_.GetAllocatedCapacity();
-
-    b->UninitializedCopy(a->storage_.GetInlinedData(),
-                         a->storage_.GetInlinedData() + a_size,
-                         b->storage_.GetInlinedData());
-    a->Destroy(a->storage_.GetInlinedData(),
-               a->storage_.GetInlinedData() + a_size);
-
-    a->storage_.SetAllocatedData(b_data, b_capacity);
-
-    if (*a->storage_.GetAllocPtr() != *b->storage_.GetAllocPtr()) {
-      swap(*a->storage_.GetAllocPtr(), *b->storage_.GetAllocPtr());
-    }
-
-    assert(b->size() == a_size);
-    assert(a->size() == b_size);
-  }
-
   Storage storage_;
 };
 
diff --git a/absl/container/inlined_vector_benchmark.cc b/absl/container/inlined_vector_benchmark.cc
index df4d3ce5a2d1..b99bbd62544f 100644
--- a/absl/container/inlined_vector_benchmark.cc
+++ b/absl/container/inlined_vector_benchmark.cc
@@ -599,6 +599,146 @@ void BM_AssignFromMove(benchmark::State& state) {
 ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, TrivialType);
 ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, NontrivialType);
 
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_ResizeSize(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) { vec->resize(ToSize); });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_ResizeSizeRef(benchmark::State& state) {
+  auto t = T();
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t) {
+        benchmark::DoNotOptimize(t);
+        vec->resize(ToSize, t);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, NontrivialType);
+
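All of the new benchmarks share one shape: a `prepare_vec` lambda that resets the vector to `FromSize` outside the timed region, and a `test_vec` lambda containing only the operation under test, instantiated for trivial and nontrivial element types. A rough standalone analogue using plain Google Benchmark, without the file's internal batching helpers:

    #include <cstddef>
    #include <string>
    #include "absl/container/inlined_vector.h"
    #include "benchmark/benchmark.h"

    template <typename T, size_t FromSize, size_t ToSize>
    void BM_Resize(benchmark::State& state) {
      for (auto _ : state) {
        state.PauseTiming();  // setup excluded from the measurement
        absl::InlinedVector<T, 8> vec(FromSize);
        state.ResumeTiming();
        vec.resize(ToSize);
        benchmark::DoNotOptimize(vec);
      }
    }
    BENCHMARK_TEMPLATE(BM_Resize, int, 1, 16);          // trivial elements
    BENCHMARK_TEMPLATE(BM_Resize, std::string, 1, 16);  // nontrivial elements
    BENCHMARK_MAIN();

The file's `BatchedBenchmark` helper presumably exists to amortize exactly this per-iteration pause/resume overhead by preparing a whole batch of vectors at once.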
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_InsertSizeRef(benchmark::State& state) {
+  auto t = T();
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t) {
+        benchmark::DoNotOptimize(t);
+        auto* pos = vec->data() + (vec->size() / 2);
+        vec->insert(pos, t);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_InsertRange(benchmark::State& state) {
+  InlVec<T> other_vec(ToSize);
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t) {
+        benchmark::DoNotOptimize(other_vec);
+        auto* pos = vec->data() + (vec->size() / 2);
+        vec->insert(pos, other_vec.begin(), other_vec.end());
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_EmplaceBack(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) { vec->emplace_back(); });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_PopBack(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) { vec->pop_back(); });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_EraseOne(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) {
+        auto* pos = vec->data() + (vec->size() / 2);
+        vec->erase(pos);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_EraseRange(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) {
+        auto* pos = vec->data() + (vec->size() / 2);
+        vec->erase(pos, pos + 1);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, NontrivialType);
+
 template <typename T, size_t FromSize>
 void BM_Clear(benchmark::State& state) {
   BatchedBenchmark<T>(
@@ -609,4 +749,56 @@ void BM_Clear(benchmark::State& state) {
 ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, TrivialType);
 ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, NontrivialType);
 
+template <typename T, size_t FromSize, size_t ToCapacity>
+void BM_Reserve(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) { vec->reserve(ToCapacity); });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, NontrivialType);
+
+template <typename T, size_t FromCapacity, size_t ToCapacity>
+void BM_ShrinkToFit(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(ToCapacity);
+        vec->reserve(FromCapacity);
+      },
+      /* test_vec = */ [](InlVec<T>* vec, size_t) { vec->shrink_to_fit(); });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_Swap(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  std::array<VecT, kBatchSize> vector_batch{};
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [&](InlVec<T>* vec, size_t i) {
+        vector_batch[i].clear();
+        vector_batch[i].resize(ToSize);
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t i) {
+        using std::swap;
+        benchmark::DoNotOptimize(vector_batch[i]);
+        swap(*vec, vector_batch[i]);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, NontrivialType);
+
 }  // namespace
diff --git a/absl/container/inlined_vector_exception_safety_test.cc b/absl/container/inlined_vector_exception_safety_test.cc
index 0a9649250302..e7c47127df9f 100644
--- a/absl/container/inlined_vector_exception_safety_test.cc
+++ b/absl/container/inlined_vector_exception_safety_test.cc
@@ -12,7 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#include <array>
+#include <initializer_list>
+#include <iterator>
 #include <memory>
+#include <utility>
 
 #include "gtest/gtest.h"
 #include "absl/base/internal/exception_safety_testing.h"
@@ -81,6 +85,24 @@ using OneSizeTestParams =
     TestParams<ThrowAllocMovableThrowerVec, kLargeSize>,
     TestParams<ThrowAllocMovableThrowerVec, kSmallSize>>;
 
+using TwoSizeTestParams = ::testing::Types<
+    TestParams<ThrowerVec, kLargeSize, kLargeSize>,
+    TestParams<ThrowerVec, kLargeSize, kSmallSize>,
+    TestParams<ThrowerVec, kSmallSize, kLargeSize>,
+    TestParams<ThrowerVec, kSmallSize, kSmallSize>,
+    TestParams<MovableThrowerVec, kLargeSize, kLargeSize>,
+    TestParams<MovableThrowerVec, kLargeSize, kSmallSize>,
+    TestParams<MovableThrowerVec, kSmallSize, kLargeSize>,
+    TestParams<MovableThrowerVec, kSmallSize, kSmallSize>,
+    TestParams<ThrowAllocThrowerVec, kLargeSize, kLargeSize>,
+    TestParams<ThrowAllocThrowerVec, kLargeSize, kSmallSize>,
+    TestParams<ThrowAllocThrowerVec, kSmallSize, kLargeSize>,
+    TestParams<ThrowAllocThrowerVec, kSmallSize, kSmallSize>,
+    TestParams<ThrowAllocMovableThrowerVec, kLargeSize, kLargeSize>,
+    TestParams<ThrowAllocMovableThrowerVec, kLargeSize, kSmallSize>,
+    TestParams<ThrowAllocMovableThrowerVec, kSmallSize, kLargeSize>,
+    TestParams<ThrowAllocMovableThrowerVec, kSmallSize, kSmallSize>>;
+
 template <typename>
 struct NoSizeTest : ::testing::Test {};
 TYPED_TEST_SUITE(NoSizeTest, NoSizeTestParams);
@@ -89,6 +111,25 @@ template <typename>
 struct OneSizeTest : ::testing::Test {};
 TYPED_TEST_SUITE(OneSizeTest, OneSizeTestParams);
 
+template <typename>
+struct TwoSizeTest : ::testing::Test {};
+TYPED_TEST_SUITE(TwoSizeTest, TwoSizeTestParams);
+
+template <typename VecT>
+bool InlinedVectorInvariants(VecT* vec) {
+  if (*vec != *vec) return false;
+  if (vec->size() > vec->capacity()) return false;
+  if (vec->size() > vec->max_size()) return false;
+  if (vec->capacity() > vec->max_size()) return false;
+  if (vec->data() != std::addressof(vec->at(0))) return false;
+  if (vec->data() != vec->begin()) return false;
+  if (*vec->data() != *vec->begin()) return false;
+  if (vec->begin() > vec->end()) return false;
+  if ((vec->end() - vec->begin()) != vec->size()) return false;
+  if (std::distance(vec->begin(), vec->end()) != vec->size()) return false;
+  return true;
+}
+
 // Function that always returns false is correct, but refactoring is required
 // for clarity. It's needed to express that, as a contract, certain operations
 // should not throw at all. Execution of this function means an exception was
@@ -179,6 +220,45 @@ TYPED_TEST(OneSizeTest, MoveConstructor) {
   }
 }
 
+TYPED_TEST(TwoSizeTest, Assign) {
+  using VecT = typename TypeParam::VecT;
+  using value_type = typename VecT::value_type;
+  constexpr static auto from_size = TypeParam::GetSizeAt(0);
+  constexpr static auto to_size = TypeParam::GetSizeAt(1);
+
+  auto tester = testing::MakeExceptionSafetyTester()
+                    .WithInitialValue(VecT{from_size})
+                    .WithContracts(InlinedVectorInvariants<VecT>);
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    *vec = ABSL_INTERNAL_MAKE_INIT_LIST(value_type, to_size);
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    VecT other_vec{to_size};
+    *vec = other_vec;
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    VecT other_vec{to_size};
+    *vec = std::move(other_vec);
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    value_type val{};
+    vec->assign(to_size, val);
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    vec->assign(ABSL_INTERNAL_MAKE_INIT_LIST(value_type, to_size));
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    std::array<value_type, to_size> arr{};
+    vec->assign(arr.begin(), arr.end());
+  }));
+}
+
 TYPED_TEST(OneSizeTest, PopBack) {
   using VecT = typename TypeParam::VecT;
   constexpr static auto size = TypeParam::GetSizeAt(0);
@@ -205,4 +285,17 @@ TYPED_TEST(OneSizeTest, Clear) {
   }));
 }
 
+TYPED_TEST(OneSizeTest, ShrinkToFit) {
+  using VecT = typename TypeParam::VecT;
+  constexpr static auto size = TypeParam::GetSizeAt(0);
+
+  auto tester = testing::MakeExceptionSafetyTester()
+                    .WithInitialValue(VecT{size})
+                    .WithContracts(InlinedVectorInvariants<VecT>);
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    vec->shrink_to_fit();  //
+  }));
+}
+
 }  // namespace
diff --git a/absl/container/inlined_vector_test.cc b/absl/container/inlined_vector_test.cc
index 6037001a0473..60fe89b28aee 100644
--- a/absl/container/inlined_vector_test.cc
+++ b/absl/container/inlined_vector_test.cc
@@ -190,6 +190,12 @@ TEST(IntVec, SimpleOps) {
   }
 }
 
+TEST(IntVec, PopBackNoOverflow) {
+  IntVec v = {1};
+  v.pop_back();
+  EXPECT_EQ(v.size(), 0);
+}
+
 TEST(IntVec, AtThrows) {
   IntVec v = {1, 2, 3};
   EXPECT_EQ(v.at(2), 3);
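These tests pair an initial value with a contract (`InlinedVectorInvariants` above) and an operation, and the framework forces a throw at every possible point inside the operation. A condensed sketch of the same pattern; the element type alias, contract, and sizes below are illustrative, not the file's:

    #include "gtest/gtest.h"
    #include "absl/base/internal/exception_safety_testing.h"
    #include "absl/container/inlined_vector.h"

    // Illustrative alias; the real file builds these from TestParams.
    using ThrowerVec = absl::InlinedVector<testing::ThrowingValue<>, 8>;

    // A contract in the same style as InlinedVectorInvariants above.
    bool SizeLeCapacity(ThrowerVec* vec) {
      return vec->size() <= vec->capacity();
    }

    TEST(ExceptionSafetySketch, ResizeUpholdsContract) {
      auto tester = testing::MakeExceptionSafetyTester()
                        .WithInitialValue(ThrowerVec{4})
                        .WithContracts(SizeLeCapacity);
      // Test() reruns the lambda, letting each throw point fire in turn, and
      // checks the contract after every failed run.
      EXPECT_TRUE(tester.Test([](ThrowerVec* vec) { vec->resize(12); }));
    }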
So anytime +// we use type inheritance rather than encapsulation, we mark +// CompressedTupleImpl, to make this easy to detect. +struct uses_inheritance {}; + template <typename T> constexpr bool ShouldUseBase() { - return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>(); + return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>() && + !std::is_base_of<uses_inheritance, T>::value; } // The storage class provides two specializations: // - For empty classes, it stores T as a base class. // - For everything else, it stores T as a member. -template <typename D, size_t I, bool = ShouldUseBase<ElemT<D, I>>()> +template <typename T, size_t I, +#if defined(_MSC_VER) + bool UseBase = + ShouldUseBase<typename std::enable_if<true, T>::type>()> +#else + bool UseBase = ShouldUseBase<T>()> +#endif struct Storage { - using T = ElemT<D, I>; T value; constexpr Storage() = default; explicit constexpr Storage(T&& v) : value(absl::forward<T>(v)) {} @@ -95,10 +109,8 @@ struct Storage { T&& get() && { return std::move(*this).value; } }; -template <typename D, size_t I> -struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<D, I, true> - : ElemT<D, I> { - using T = internal_compressed_tuple::ElemT<D, I>; +template <typename T, size_t I> +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<T, I, true> : T { constexpr Storage() = default; explicit constexpr Storage(T&& v) : T(absl::forward<T>(v)) {} constexpr const T& get() const& { return *this; } @@ -107,29 +119,54 @@ struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<D, I, true> T&& get() && { return std::move(*this); } }; -template <typename D, typename I> +template <typename D, typename I, bool ShouldAnyUseBase> struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl; -template <typename... Ts, size_t... I> -struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC - CompressedTupleImpl<CompressedTuple<Ts...>, absl::index_sequence<I...>> +template <typename... Ts, size_t... I, bool ShouldAnyUseBase> +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl< + CompressedTuple<Ts...>, absl::index_sequence<I...>, ShouldAnyUseBase> // We use the dummy identity function through std::integral_constant to // convince MSVC of accepting and expanding I in that context. Without it // you would get: // error C3548: 'I': parameter pack cannot be used in this context - : Storage<CompressedTuple<Ts...>, - std::integral_constant<size_t, I>::value>... { + : uses_inheritance, + Storage<Ts, std::integral_constant<size_t, I>::value>... { + constexpr CompressedTupleImpl() = default; + explicit constexpr CompressedTupleImpl(Ts&&... args) + : Storage<Ts, I>(absl::forward<Ts>(args))... {} + friend CompressedTuple<Ts...>; +}; + +template <typename... Ts, size_t... I> +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl< + CompressedTuple<Ts...>, absl::index_sequence<I...>, false> + // We use the dummy identity function as above... + : Storage<Ts, std::integral_constant<size_t, I>::value, false>... { constexpr CompressedTupleImpl() = default; explicit constexpr CompressedTupleImpl(Ts&&... args) - : Storage<CompressedTuple<Ts...>, I>(absl::forward<Ts>(args))... {} + : Storage<Ts, I, false>(absl::forward<Ts>(args))... {} + friend CompressedTuple<Ts...>; }; +std::false_type Or(std::initializer_list<std::false_type>); +std::true_type Or(std::initializer_list<bool>); + +// MSVC requires this to be done separately rather than within the declaration +// of CompressedTuple below. +template <typename... 
+template <typename... Ts>
+constexpr bool ShouldAnyUseBase() {
+  return decltype(
+      Or({std::integral_constant<bool, ShouldUseBase<Ts>()>()...})){};
+}
+
 }  // namespace internal_compressed_tuple
 
 // Helper class to perform the Empty Base Class Optimization.
 // Ts can contain classes and non-classes, empty or not. For the ones that
 // are empty classes, we perform the CompressedTuple. If all types in Ts are
-// empty classes, then CompressedTuple<Ts...> is itself an empty class.
+// empty classes, then CompressedTuple<Ts...> is itself an empty class. (This
+// does not apply when one or more of those empty classes is itself an empty
+// CompressedTuple.)
 //
 // To access the members, use member .get<N>() function.
 //
@@ -145,7 +182,8 @@ struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
 template <typename... Ts>
 class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
     : private internal_compressed_tuple::CompressedTupleImpl<
-          CompressedTuple<Ts...>, absl::index_sequence_for<Ts...>> {
+          CompressedTuple<Ts...>, absl::index_sequence_for<Ts...>,
+          internal_compressed_tuple::ShouldAnyUseBase<Ts...>()> {
  private:
   template <int I>
   using ElemT = internal_compressed_tuple::ElemT<CompressedTuple, I>;
@@ -157,24 +195,24 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
 
   template <int I>
   ElemT<I>& get() & {
-    return internal_compressed_tuple::Storage<CompressedTuple, I>::get();
+    return internal_compressed_tuple::Storage<ElemT<I>, I>::get();
   }
 
   template <int I>
   constexpr const ElemT<I>& get() const& {
-    return internal_compressed_tuple::Storage<CompressedTuple, I>::get();
+    return internal_compressed_tuple::Storage<ElemT<I>, I>::get();
   }
 
   template <int I>
   ElemT<I>&& get() && {
    return std::move(*this)
-        .internal_compressed_tuple::template Storage<CompressedTuple, I>::get();
+        .internal_compressed_tuple::template Storage<ElemT<I>, I>::get();
   }
 
   template <int I>
   constexpr const ElemT<I>&& get() const&& {
     return absl::move(*this)
-        .internal_compressed_tuple::template Storage<CompressedTuple, I>::get();
+        .internal_compressed_tuple::template Storage<ElemT<I>, I>::get();
   }
 };
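The property being preserved is that empty, non-final element types still cost no space, while a nested `CompressedTuple` is now deliberately held as a member rather than a base (hence the new `uses_inheritance` tag). A small `sizeof` sketch, assuming the usual empty-class cases:

    #include <type_traits>
    #include "absl/container/internal/compressed_tuple.h"

    using absl::container_internal::CompressedTuple;

    struct Empty {};  // empty and non-final: eligible for the EBCO

    // The empty element is folded into a base class, so it costs no space.
    static_assert(sizeof(CompressedTuple<int*, Empty>) == sizeof(int*), "");
    static_assert(std::is_empty<CompressedTuple<Empty>>::value, "");

    // After this change a CompressedTuple element is never used as a base, so
    // an empty CompressedTuple nested inside another one is stored as a member
    // and the outer tuple is no longer an empty class.
    static_assert(
        !std::is_empty<CompressedTuple<CompressedTuple<Empty>>>::value, "");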
diff --git a/absl/container/internal/compressed_tuple_test.cc b/absl/container/internal/compressed_tuple_test.cc
index 28e7741c9a45..3b0ec4555abf 100644
--- a/absl/container/internal/compressed_tuple_test.cc
+++ b/absl/container/internal/compressed_tuple_test.cc
@@ -22,10 +22,8 @@
 #include "absl/memory/memory.h"
 #include "absl/utility/utility.h"
 
-namespace absl {
-namespace container_internal {
-namespace {
-
+// These are declared at global scope purely so that error messages
+// are smaller and easier to understand.
 enum class CallType { kConstRef, kConstMove };
 
 template <int>
@@ -45,6 +43,10 @@ struct TwoValues {
   U value2;
 };
 
+namespace absl {
+namespace container_internal {
+namespace {
+
 TEST(CompressedTupleTest, Sizeof) {
   EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int>));
   EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int, Empty<0>>));
@@ -120,9 +122,14 @@ TEST(CompressedTupleTest, Nested) {
   EXPECT_EQ(4 * sizeof(char),
             sizeof(CompressedTuple<CompressedTuple<char, char>,
                                    CompressedTuple<char, char>>));
-  EXPECT_TRUE(
-      (std::is_empty<CompressedTuple<CompressedTuple<Empty<0>>,
-                                     CompressedTuple<Empty<1>>>>::value));
+  EXPECT_TRUE((std::is_empty<CompressedTuple<Empty<0>, Empty<1>>>::value));
+
+  // Make sure everything still works when things are nested.
+  struct CT_Empty : CompressedTuple<Empty<0>> {};
+  CompressedTuple<Empty<0>, CT_Empty> nested_empty;
+  auto contained = nested_empty.get<0>();
+  auto nested = nested_empty.get<1>().get<0>();
+  EXPECT_TRUE((std::is_same<decltype(contained), decltype(nested)>::value));
 }
 
 TEST(CompressedTupleTest, Reference) {
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
index 92c21ab96571..f117ee0c75de 100644
--- a/absl/container/internal/inlined_vector.h
+++ b/absl/container/internal/inlined_vector.h
@@ -25,6 +25,7 @@
 #include "absl/container/internal/compressed_tuple.h"
 #include "absl/memory/memory.h"
 #include "absl/meta/type_traits.h"
+#include "absl/types/span.h"
 
 namespace absl {
 namespace inlined_vector_internal {
@@ -78,6 +79,14 @@ void ConstructElements(AllocatorType* alloc_ptr, ValueType* construct_first,
   }
 }
 
+template <typename ValueType, typename ValueAdapter, typename SizeType>
+void AssignElements(ValueType* assign_first, ValueAdapter* values_ptr,
+                    SizeType assign_size) {
+  for (SizeType i = 0; i < assign_size; ++i) {
+    values_ptr->AssignNext(assign_first + i);
+  }
+}
+
 template <typename AllocatorType>
 struct StorageView {
   using pointer = typename AllocatorType::pointer;
@@ -101,6 +110,11 @@ class IteratorValueAdapter {
     ++it_;
   }
 
+  void AssignNext(pointer assign_at) {
+    *assign_at = *it_;
+    ++it_;
+  }
+
  private:
   Iterator it_;
 };
@@ -119,6 +133,8 @@ class CopyValueAdapter {
     AllocatorTraits::construct(*alloc_ptr, construct_at, *ptr_);
   }
 
+  void AssignNext(pointer assign_at) { *assign_at = *ptr_; }
+
 private:
   const_pointer ptr_;
 };
@@ -135,6 +151,44 @@ class DefaultValueAdapter {
   void ConstructNext(AllocatorType* alloc_ptr, pointer construct_at) {
     AllocatorTraits::construct(*alloc_ptr, construct_at);
   }
+
+  void AssignNext(pointer assign_at) { *assign_at = value_type(); }
+};
+
+template <typename AllocatorType>
+class AllocationTransaction {
+  using value_type = typename AllocatorType::value_type;
+  using pointer = typename AllocatorType::pointer;
+  using size_type = typename AllocatorType::size_type;
+  using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+
+ public:
+  explicit AllocationTransaction(AllocatorType* alloc_ptr)
+      : alloc_data_(*alloc_ptr, nullptr) {}
+
+  AllocationTransaction(const AllocationTransaction&) = delete;
+  void operator=(const AllocationTransaction&) = delete;
+
+  AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); }
+  pointer& GetData() { return alloc_data_.template get<1>(); }
+  size_type& GetCapacity() { return capacity_; }
+
+  bool DidAllocate() { return GetData() != nullptr; }
+  pointer Allocate(size_type capacity) {
+    GetData() = AllocatorTraits::allocate(GetAllocator(), capacity);
+    GetCapacity() = capacity;
+    return GetData();
+  }
+
+  ~AllocationTransaction() {
+    if (DidAllocate()) {
+      AllocatorTraits::deallocate(GetAllocator(), GetData(), GetCapacity());
+    }
+  }
+
+ private:
+  container_internal::CompressedTuple<AllocatorType, pointer> alloc_data_;
+  size_type capacity_ = 0;
 };
 
 template <typename T, size_t N, typename A>
@@ -167,6 +221,9 @@ class Storage {
   using DefaultValueAdapter =
       inlined_vector_internal::DefaultValueAdapter<allocator_type>;
 
+  using AllocationTransaction =
+      inlined_vector_internal::AllocationTransaction<allocator_type>;
+
   Storage() : metadata_() {}
 
   explicit Storage(const allocator_type& alloc)
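`AllocationTransaction` is a small RAII guard: a buffer obtained from `Allocate()` is returned to the allocator in the destructor unless an owner takes it over, as `Storage::AcquireAllocation()` below does by nulling out `GetData()`. A condensed usage sketch with `std::allocator`, which still provided the member typedefs this header relies on at the time:

    #include <memory>
    #include "absl/container/internal/inlined_vector.h"

    void Sketch() {
      std::allocator<int> alloc;
      absl::inlined_vector_internal::AllocationTransaction<std::allocator<int>>
          allocation_tx(&alloc);

      int* buf = allocation_tx.Allocate(16);  // owned by the transaction
      buf[0] = 1;
      // If element construction threw here, unwinding would run the
      // destructor, which sees DidAllocate() == true and returns the buffer
      // to the allocator.

      // A committing owner would instead steal GetData()/GetCapacity() and
      // reset them to nullptr/0, which is what Storage::AcquireAllocation()
      // does.
    }  // no one took ownership, so the buffer is deallocated here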
@@ -215,19 +272,48 @@ class Storage {
   void SetIsAllocated() { GetSizeAndIsAllocated() |= 1; }
 
+  void UnsetIsAllocated() {
+    SetIsAllocated();
+    GetSizeAndIsAllocated() -= 1;
+  }
+
   void SetAllocatedSize(size_type size) {
     GetSizeAndIsAllocated() = (size << 1) | static_cast<size_type>(1);
   }
 
   void SetInlinedSize(size_type size) { GetSizeAndIsAllocated() = size << 1; }
 
+  void SetSize(size_type size) {
+    GetSizeAndIsAllocated() =
+        (size << 1) | static_cast<size_type>(GetIsAllocated());
+  }
+
   void AddSize(size_type count) { GetSizeAndIsAllocated() += count << 1; }
 
+  void SubtractSize(size_type count) {
+    assert(count <= GetSize());
+    GetSizeAndIsAllocated() -= count << 1;
+  }
+
   void SetAllocatedData(pointer data, size_type capacity) {
     data_.allocated.allocated_data = data;
     data_.allocated.allocated_capacity = capacity;
   }
 
+  void DeallocateIfAllocated() {
+    if (GetIsAllocated()) {
+      AllocatorTraits::deallocate(*GetAllocPtr(), GetAllocatedData(),
+                                  GetAllocatedCapacity());
+    }
+  }
+
+  void AcquireAllocation(AllocationTransaction* allocation_tx_ptr) {
+    SetAllocatedData(allocation_tx_ptr->GetData(),
+                     allocation_tx_ptr->GetCapacity());
+    allocation_tx_ptr->GetData() = nullptr;
+    allocation_tx_ptr->GetCapacity() = 0;
+  }
+
   void SwapSizeAndIsAllocated(Storage* other) {
     using std::swap;
     swap(GetSizeAndIsAllocated(), other->GetSizeAndIsAllocated());
@@ -238,11 +324,11 @@ class Storage {
     swap(data_.allocated, other->data_.allocated);
   }
 
-  void MemcpyContents(const Storage& other) {
-    assert(IsMemcpyOk::value);
+  void MemcpyFrom(const Storage& other_storage) {
+    assert(IsMemcpyOk::value || other_storage.GetIsAllocated());
 
-    GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated();
-    data_ = other.data_;
+    GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated();
+    data_ = other_storage.data_;
   }
 
   void DestroyAndDeallocate();
@@ -250,6 +336,11 @@ class Storage {
   template <typename ValueAdapter>
   void Initialize(ValueAdapter values, size_type new_size);
 
+  template <typename ValueAdapter>
+  void Assign(ValueAdapter values, size_type new_size);
+
+  void ShrinkToFit();
+
 private:
   size_type& GetSizeAndIsAllocated() { return metadata_.template get<1>(); }
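All of these setters manipulate one word that packs the size together with the allocated tag: the size occupies the high bits and the tag bit 0, which is why every size mutation shifts by one. A standalone sketch of the encoding (illustrative names, not the header's):

    #include <cstddef>

    // word = (size << 1) | is_allocated, exactly as in the setters above.
    struct TaggedSize {
      size_t word = 0;
      size_t size() const { return word >> 1; }
      bool is_allocated() const { return (word & 1) != 0; }
      void set_allocated_size(size_t n) { word = (n << 1) | size_t{1}; }
      void set_inlined_size(size_t n) { word = n << 1; }
      void add_size(size_t n) { word += n << 1; }       // tag bit untouched
      void subtract_size(size_t n) { word -= n << 1; }  // requires n <= size()
    };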
@@ -282,15 +373,10 @@ class Storage {
 
 template <typename T, size_t N, typename A>
 void Storage<T, N, A>::DestroyAndDeallocate() {
-  StorageView storage_view = MakeStorageView();
-
-  inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
-                                           storage_view.size);
-
-  if (GetIsAllocated()) {
-    AllocatorTraits::deallocate(*GetAllocPtr(), storage_view.data,
-                                storage_view.capacity);
-  }
+  inlined_vector_internal::DestroyElements(
+      GetAllocPtr(), (GetIsAllocated() ? GetAllocatedData() : GetInlinedData()),
+      GetSize());
+  DeallocateIfAllocated();
 }
 
 template <typename T, size_t N, typename A>
@@ -323,6 +409,91 @@ auto Storage<T, N, A>::Initialize(ValueAdapter values, size_type new_size)
   AddSize(new_size);
 }
 
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Assign(ValueAdapter values, size_type new_size) -> void {
+  StorageView storage_view = MakeStorageView();
+
+  AllocationTransaction allocation_tx(GetAllocPtr());
+
+  absl::Span<value_type> assign_loop;
+  absl::Span<value_type> construct_loop;
+  absl::Span<value_type> destroy_loop;
+
+  if (new_size > storage_view.capacity) {
+    construct_loop = {allocation_tx.Allocate(new_size), new_size};
+    destroy_loop = {storage_view.data, storage_view.size};
+  } else if (new_size > storage_view.size) {
+    assign_loop = {storage_view.data, storage_view.size};
+    construct_loop = {storage_view.data + storage_view.size,
+                      new_size - storage_view.size};
+  } else {
+    assign_loop = {storage_view.data, new_size};
+    destroy_loop = {storage_view.data + new_size,
+                    storage_view.size - new_size};
+  }
+
+  inlined_vector_internal::AssignElements(assign_loop.data(), &values,
+                                          assign_loop.size());
+  inlined_vector_internal::ConstructElements(
+      GetAllocPtr(), construct_loop.data(), &values, construct_loop.size());
+  inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(),
+                                           destroy_loop.size());
+
+  if (allocation_tx.DidAllocate()) {
+    DeallocateIfAllocated();
+    AcquireAllocation(&allocation_tx);
+    SetIsAllocated();
+  }
+
+  SetSize(new_size);
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::ShrinkToFit() -> void {
+  // May only be called on allocated instances!
+  assert(GetIsAllocated());
+
+  StorageView storage_view = {GetAllocatedData(), GetSize(),
+                              GetAllocatedCapacity()};
+
+  AllocationTransaction allocation_tx(GetAllocPtr());
+
+  IteratorValueAdapter<MoveIterator> move_values(
+      MoveIterator(storage_view.data));
+
+  pointer construct_data;
+
+  if (storage_view.size <= static_cast<size_type>(N)) {
+    construct_data = GetInlinedData();
+  } else if (storage_view.size < GetAllocatedCapacity()) {
+    construct_data = allocation_tx.Allocate(storage_view.size);
+  } else {
+    return;
+  }
+
+  ABSL_INTERNAL_TRY {
+    inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data,
+                                               &move_values,
+                                               storage_view.size);
+  }
+  ABSL_INTERNAL_CATCH_ANY {
+    // Writing to inlined data will trample on the existing state, thus it
+    // needs to be restored when a construction fails.
+    SetAllocatedData(storage_view.data, storage_view.capacity);
+    ABSL_INTERNAL_RETHROW;
+  }
+
+  inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+                                           storage_view.size);
+  AllocatorTraits::deallocate(*GetAllocPtr(), storage_view.data,
+                              storage_view.capacity);
+
+  if (allocation_tx.DidAllocate()) {
+    AcquireAllocation(&allocation_tx);
+  } else {
+    UnsetIsAllocated();
+  }
+}
+
 }  // namespace inlined_vector_internal
 }  // namespace absl
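`Storage::Assign()` reduces every assignment to at most two of three disjoint spans: slots to copy-assign over, raw slots to construct into, and surplus trailing elements to destroy. A worked sketch of the branch logic with plain numbers (hypothetical helper, not part of the header):

    #include <cstddef>

    struct AssignPlan {
      size_t assign_count;     // live elements overwritten via operator=
      size_t construct_count;  // raw slots filled via placement construction
      size_t destroy_count;    // surplus trailing elements destroyed
    };

    // Mirrors the three branches of Storage::Assign() above.
    AssignPlan PlanAssign(size_t size, size_t capacity, size_t new_size) {
      if (new_size > capacity) {
        // Fresh allocation: construct everything there, destroy all old ones.
        return {0, new_size, size};
      }
      if (new_size > size) {
        // Growing in place: assign over live elements, construct the tail.
        return {size, new_size - size, 0};
      }
      // Shrinking in place: assign a prefix, destroy the surplus.
      return {new_size, 0, size - new_size};
    }

    // PlanAssign(5, 8, 3) == {3, 0, 2};  PlanAssign(2, 8, 6) == {2, 4, 0}.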