Diffstat (limited to 'absl/container')
-rw-r--r--  absl/container/BUILD.bazel                          20
-rw-r--r--  absl/container/CMakeLists.txt                        1
-rw-r--r--  absl/container/fixed_array.h                       154
-rw-r--r--  absl/container/fixed_array_test.cc                 213
-rw-r--r--  absl/container/internal/compressed_tuple.h         175
-rw-r--r--  absl/container/internal/compressed_tuple_test.cc   166
6 files changed, 667 insertions, 62 deletions
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
index 07df3675207e..6d5c958f382b 100644
--- a/absl/container/BUILD.bazel
+++ b/absl/container/BUILD.bazel
@@ -26,10 +26,30 @@ package(default_visibility = ["//visibility:public"])
 licenses(["notice"])  # Apache 2.0
 
 cc_library(
+    name = "compressed_tuple",
+    hdrs = ["internal/compressed_tuple.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    deps = [
+        "//absl/utility",
+    ],
+)
+
+cc_test(
+    name = "compressed_tuple_test",
+    srcs = ["internal/compressed_tuple_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":compressed_tuple",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
     name = "fixed_array",
     hdrs = ["fixed_array.h"],
     copts = ABSL_DEFAULT_COPTS,
     deps = [
+        ":compressed_tuple",
         "//absl/algorithm",
         "//absl/base:core_headers",
         "//absl/base:dynamic_annotations",
diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt
index d580b48976cb..123e4c4849aa 100644
--- a/absl/container/CMakeLists.txt
+++ b/absl/container/CMakeLists.txt
@@ -52,7 +52,6 @@ absl_library(
     ${TEST_INSTANCE_TRACKER_LIB_SRC}
   PUBLIC_LIBRARIES
     absl::container
-  DISABLE_INSTALL
 )
diff --git a/absl/container/fixed_array.h b/absl/container/fixed_array.h
index 62600df05be3..f480047a5db4 100644
--- a/absl/container/fixed_array.h
+++ b/absl/container/fixed_array.h
@@ -47,6 +47,7 @@
 #include "absl/base/macros.h"
 #include "absl/base/optimization.h"
 #include "absl/base/port.h"
+#include "absl/container/internal/compressed_tuple.h"
 #include "absl/memory/memory.h"
 
 namespace absl {
@@ -76,73 +77,99 @@ constexpr static auto kFixedArrayUseDefault = static_cast<size_t>(-1);
 // heap allocation, it will do so with global `::operator new[]()` and
 // `::operator delete[]()`, even if T provides class-scope overrides for these
 // operators.
-template <typename T, size_t inlined = kFixedArrayUseDefault>
+template <typename T, size_t N = kFixedArrayUseDefault,
+          typename A = std::allocator<T>>
 class FixedArray {
   static_assert(!std::is_array<T>::value || std::extent<T>::value > 0,
                 "Arrays with unknown bounds cannot be used with FixedArray.");
+  static constexpr size_t kInlineBytesDefault = 256;
+
+  using AllocatorTraits = std::allocator_traits<A>;
+
   // std::iterator_traits isn't guaranteed to be SFINAE-friendly until C++17,
   // but this seems to be mostly pedantic.
   template <typename Iterator>
   using EnableIfForwardIterator = absl::enable_if_t<std::is_convertible<
       typename std::iterator_traits<Iterator>::iterator_category,
       std::forward_iterator_tag>::value>;
+
+  static constexpr bool NoexceptCopyable() {
+    return std::is_nothrow_copy_constructible<StorageElement>::value &&
+           absl::allocator_is_nothrow<allocator_type>::value;
+  }
+
+  static constexpr bool NoexceptMovable() {
+    return std::is_nothrow_move_constructible<StorageElement>::value &&
+           absl::allocator_is_nothrow<allocator_type>::value;
+  }
+
+  static constexpr bool DefaultConstructorIsNonTrivial() {
+    return !absl::is_trivially_default_constructible<StorageElement>::value;
+  }
 
  public:
-  using value_type = T;
-  using iterator = T*;
-  using const_iterator = const T*;
+  using allocator_type = typename AllocatorTraits::allocator_type;
+  using value_type = typename allocator_type::value_type;
+  using pointer = typename allocator_type::pointer;
+  using const_pointer = typename allocator_type::const_pointer;
+  using reference = typename allocator_type::reference;
+  using const_reference = typename allocator_type::const_reference;
+  using size_type = typename allocator_type::size_type;
+  using difference_type = typename allocator_type::difference_type;
+  using iterator = pointer;
+  using const_iterator = const_pointer;
   using reverse_iterator = std::reverse_iterator<iterator>;
   using const_reverse_iterator = std::reverse_iterator<const_iterator>;
-  using reference = T&;
-  using const_reference = const T&;
-  using pointer = T*;
-  using const_pointer = const T*;
-  using difference_type = ptrdiff_t;
-  using size_type = size_t;
 
   static constexpr size_type inline_elements =
-      inlined == kFixedArrayUseDefault
-          ? kInlineBytesDefault / sizeof(value_type)
-          : inlined;
+      (N == kFixedArrayUseDefault ? kInlineBytesDefault / sizeof(value_type)
+                                  : static_cast<size_type>(N));
 
-  FixedArray(const FixedArray& other)
-      : FixedArray(other.begin(), other.end()) {}
+  FixedArray(
+      const FixedArray& other,
+      const allocator_type& a = allocator_type()) noexcept(NoexceptCopyable())
+      : FixedArray(other.begin(), other.end(), a) {}
 
-  FixedArray(FixedArray&& other) noexcept(
-      absl::conjunction<absl::allocator_is_nothrow<std::allocator<value_type>>,
-                        std::is_nothrow_move_constructible<value_type>>::value)
+  FixedArray(
+      FixedArray&& other,
+      const allocator_type& a = allocator_type()) noexcept(NoexceptMovable())
       : FixedArray(std::make_move_iterator(other.begin()),
-                   std::make_move_iterator(other.end())) {}
+                   std::make_move_iterator(other.end()), a) {}
 
   // Creates an array object that can store `n` elements.
   // Note that trivially constructible elements will be uninitialized.
-  explicit FixedArray(size_type n) : storage_(n) {
-    absl::memory_internal::uninitialized_default_construct_n(storage_.begin(),
-                                                             size());
+  explicit FixedArray(size_type n, const allocator_type& a = allocator_type())
+      : storage_(n, a) {
+    if (DefaultConstructorIsNonTrivial()) {
+      memory_internal::ConstructStorage(storage_.alloc(), storage_.begin(),
+                                        storage_.end());
+    }
   }
 
   // Creates an array initialized with `n` copies of `val`.
-  FixedArray(size_type n, const value_type& val) : storage_(n) {
-    std::uninitialized_fill_n(data(), size(), val);
+  FixedArray(size_type n, const value_type& val,
+             const allocator_type& a = allocator_type())
+      : storage_(n, a) {
+    memory_internal::ConstructStorage(storage_.alloc(), storage_.begin(),
+                                      storage_.end(), val);
   }
 
+  // Creates an array initialized with the size and contents of `init_list`.
+  FixedArray(std::initializer_list<value_type> init_list,
+             const allocator_type& a = allocator_type())
+      : FixedArray(init_list.begin(), init_list.end(), a) {}
+
   // Creates an array initialized with the elements from the input
   // range. The array's size will always be `std::distance(first, last)`.
   // REQUIRES: Iterator must be a forward_iterator or better.
   template <typename Iterator, EnableIfForwardIterator<Iterator>* = nullptr>
-  FixedArray(Iterator first, Iterator last)
-      : storage_(std::distance(first, last)) {
-    std::uninitialized_copy(first, last, data());
+  FixedArray(Iterator first, Iterator last,
+             const allocator_type& a = allocator_type())
+      : storage_(std::distance(first, last), a) {
+    memory_internal::CopyToStorageFromRange(storage_.alloc(), storage_.begin(),
+                                            first, last);
   }
 
-  FixedArray(std::initializer_list<value_type> init_list)
-      : FixedArray(init_list.begin(), init_list.end()) {}
-
   ~FixedArray() noexcept {
-    for (const StorageElement& cur : storage_) {
-      cur.~StorageElement();
+    for (auto* cur = storage_.begin(); cur != storage_.end(); ++cur) {
+      AllocatorTraits::destroy(*storage_.alloc(), cur);
     }
   }
 
@@ -332,7 +359,6 @@ class FixedArray {
   friend bool operator>=(const FixedArray& lhs, const FixedArray& rhs) {
     return !(lhs < rhs);
   }
-
  private:
   // StorageElement
   //
@@ -364,6 +390,8 @@ class FixedArray {
   using StorageElement =
       absl::conditional_t<std::is_array<value_type>::value,
                           StorageElementWrapper<value_type>, value_type>;
+  using StorageElementBuffer =
+      absl::aligned_storage_t<sizeof(StorageElement), alignof(StorageElement)>;
 
   static pointer AsValueType(pointer ptr) { return ptr; }
   static pointer AsValueType(StorageElementWrapper<value_type>* ptr) {
@@ -374,9 +402,6 @@ class FixedArray {
   static_assert(alignof(StorageElement) == alignof(value_type), "");
 
   struct NonEmptyInlinedStorage {
-    using StorageElementBuffer =
-        absl::aligned_storage_t<sizeof(StorageElement),
-                                alignof(StorageElement)>;
     StorageElement* data() {
       return reinterpret_cast<StorageElement*>(inlined_storage_.data());
     }
@@ -386,8 +411,8 @@ class FixedArray {
     void* RedzoneEnd() { return &redzone_end_ + 1; }
 #endif  // ADDRESS_SANITIZER
 
-    void AnnotateConstruct(size_t);
-    void AnnotateDestruct(size_t);
+    void AnnotateConstruct(size_type);
+    void AnnotateDestruct(size_type);
 
     ADDRESS_SANITIZER_REDZONE(redzone_begin_);
     std::array<StorageElementBuffer, inline_elements> inlined_storage_;
@@ -396,8 +421,8 @@
 
   struct EmptyInlinedStorage {
     StorageElement* data() { return nullptr; }
-    void AnnotateConstruct(size_t) {}
-    void AnnotateDestruct(size_t) {}
+    void AnnotateConstruct(size_type) {}
+    void AnnotateDestruct(size_type) {}
   };
 
   using InlinedStorage =
@@ -414,48 +439,57 @@ class FixedArray {
   //
   class Storage : public InlinedStorage {
    public:
-    explicit Storage(size_type n) : data_(CreateStorage(n)), size_(n) {}
+    Storage(size_type n, const allocator_type& a)
+        : size_alloc_(n, a), data_(InitializeData()) {}
+
     ~Storage() noexcept {
       if (UsingInlinedStorage(size())) {
-        this->AnnotateDestruct(size());
+        InlinedStorage::AnnotateDestruct(size());
       } else {
-        std::allocator<StorageElement>().deallocate(begin(), size());
+        AllocatorTraits::deallocate(*alloc(), AsValueType(begin()), size());
       }
     }
 
-    size_type size() const { return size_; }
+    size_type size() const { return size_alloc_.template get<0>(); }
     StorageElement* begin() const { return data_; }
     StorageElement* end() const { return begin() + size(); }
+    allocator_type* alloc() {
+      return std::addressof(size_alloc_.template get<1>());
+    }
 
    private:
    static bool UsingInlinedStorage(size_type n) {
      return n <= inline_elements;
    }
 
-    StorageElement* CreateStorage(size_type n) {
-      if (UsingInlinedStorage(n)) {
-        this->AnnotateConstruct(n);
+    StorageElement* InitializeData() {
+      if (UsingInlinedStorage(size())) {
+        InlinedStorage::AnnotateConstruct(size());
         return InlinedStorage::data();
       } else {
-        return std::allocator<StorageElement>().allocate(n);
+        return reinterpret_cast<StorageElement*>(
+            AllocatorTraits::allocate(*alloc(), size()));
       }
     }
 
-    StorageElement* const data_;
-    const size_type size_;
+    // `CompressedTuple` takes advantage of EBCO for stateless `allocator_type`s
+    container_internal::CompressedTuple<size_type, allocator_type> size_alloc_;
+    StorageElement* data_;
   };
 
-  const Storage storage_;
+  Storage storage_;
 };
 
-template <typename T, size_t N>
-constexpr size_t FixedArray<T, N>::inline_elements;
+template <typename T, size_t N, typename A>
+constexpr size_t FixedArray<T, N, A>::kInlineBytesDefault;
 
-template <typename T, size_t N>
-constexpr size_t FixedArray<T, N>::kInlineBytesDefault;
+template <typename T, size_t N, typename A>
+constexpr typename FixedArray<T, N, A>::size_type
+    FixedArray<T, N, A>::inline_elements;
 
-template <typename T, size_t N>
-void FixedArray<T, N>::NonEmptyInlinedStorage::AnnotateConstruct(size_t n) {
+template <typename T, size_t N, typename A>
+void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateConstruct(
+    typename FixedArray<T, N, A>::size_type n) {
 #ifdef ADDRESS_SANITIZER
   if (!n) return;
   ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(), data() + n);
@@ -464,8 +498,9 @@ void FixedArray<T, N>::NonEmptyInlinedStorage::AnnotateConstruct(size_t n) {
   static_cast<void>(n);  // Mark used when not in asan mode
 }
 
-template <typename T, size_t N>
-void FixedArray<T, N>::NonEmptyInlinedStorage::AnnotateDestruct(size_t n) {
+template <typename T, size_t N, typename A>
+void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateDestruct(
+    typename FixedArray<T, N, A>::size_type n) {
 #ifdef ADDRESS_SANITIZER
   if (!n) return;
   ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n, RedzoneEnd());
@@ -473,6 +508,5 @@ void FixedArray<T, N>::NonEmptyInlinedStorage::AnnotateDestruct(size_t n) {
 #endif  // ADDRESS_SANITIZER
   static_cast<void>(n);  // Mark used when not in asan mode
 }
-
 }  // namespace absl
 
 #endif  // ABSL_CONTAINER_FIXED_ARRAY_H_
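The change above makes `FixedArray` allocator-aware: the element count and the allocator travel together in a `CompressedTuple`, and heap storage is requested from the user-supplied allocator only when the element count exceeds `inline_elements`. A minimal usage sketch of the new API (illustrative only, not part of the commit; the sizes and values are arbitrary):

    #include <memory>

    #include "absl/container/fixed_array.h"

    void Demo() {
      // Four elements fit in the 8-element inline buffer: no allocator call.
      absl::FixedArray<int, 8> small(4, 0);

      // 32 elements exceed the inline capacity, so storage is obtained from
      // the allocator passed as the (new) trailing constructor argument.
      std::allocator<int> alloc;
      absl::FixedArray<int, 8, std::allocator<int>> large(32, /*val=*/7, alloc);
    }

Because the allocator is stored via `CompressedTuple`, an empty allocator such as `std::allocator<int>` contributes no storage to the array object.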
diff --git a/absl/container/fixed_array_test.cc b/absl/container/fixed_array_test.cc
index 2142132d1352..b07ebcb6d9ca 100644
--- a/absl/container/fixed_array_test.cc
+++ b/absl/container/fixed_array_test.cc
@@ -15,9 +15,11 @@
 #include "absl/container/fixed_array.h"
 
 #include <stdio.h>
+#include <cstring>
 #include <list>
 #include <memory>
 #include <numeric>
+#include <scoped_allocator>
 #include <stdexcept>
 #include <string>
 #include <vector>
@@ -607,6 +609,216 @@ TEST(FixedArrayTest, Fill) {
   empty.fill(fill_val);
 }
 
+// TODO(johnsoncj): Investigate InlinedStorage default initialization in GCC 4.x
+#ifndef __GNUC__
+TEST(FixedArrayTest, DefaultCtorDoesNotValueInit) {
+  using T = char;
+  constexpr auto capacity = 10;
+  using FixedArrType = absl::FixedArray<T, capacity>;
+  using FixedArrBuffType =
+      absl::aligned_storage_t<sizeof(FixedArrType), alignof(FixedArrType)>;
+  constexpr auto scrubbed_bits = 0x95;
+  constexpr auto length = capacity / 2;
+
+  FixedArrBuffType buff;
+  std::memset(std::addressof(buff), scrubbed_bits, sizeof(FixedArrBuffType));
+
+  FixedArrType* arr =
+      ::new (static_cast<void*>(std::addressof(buff))) FixedArrType(length);
+  EXPECT_THAT(*arr, testing::Each(scrubbed_bits));
+  arr->~FixedArrType();
+}
+#endif  // __GNUC__
+
+// This is a stateful allocator, but the state lives outside of the
+// allocator (in whatever test is using the allocator). This is odd
+// but helps in tests where the allocator is propagated into nested
+// containers - that chain of allocators uses the same state and is
+// thus easier to query for aggregate allocation information.
+template <typename T>
+class CountingAllocator : public std::allocator<T> {
+ public:
+  using Alloc = std::allocator<T>;
+  using pointer = typename Alloc::pointer;
+  using size_type = typename Alloc::size_type;
+
+  CountingAllocator() : bytes_used_(nullptr), instance_count_(nullptr) {}
+  explicit CountingAllocator(int64_t* b)
+      : bytes_used_(b), instance_count_(nullptr) {}
+  CountingAllocator(int64_t* b, int64_t* a)
+      : bytes_used_(b), instance_count_(a) {}
+
+  template <typename U>
+  explicit CountingAllocator(const CountingAllocator<U>& x)
+      : Alloc(x),
+        bytes_used_(x.bytes_used_),
+        instance_count_(x.instance_count_) {}
+
+  pointer allocate(size_type n, const void* const hint = nullptr) {
+    assert(bytes_used_ != nullptr);
+    *bytes_used_ += n * sizeof(T);
+    return Alloc::allocate(n, hint);
+  }
+
+  void deallocate(pointer p, size_type n) {
+    Alloc::deallocate(p, n);
+    assert(bytes_used_ != nullptr);
+    *bytes_used_ -= n * sizeof(T);
+  }
+
+  template <typename... Args>
+  void construct(pointer p, Args&&... args) {
+    Alloc::construct(p, absl::forward<Args>(args)...);
+    if (instance_count_) {
+      *instance_count_ += 1;
+    }
+  }
+
+  void destroy(pointer p) {
+    Alloc::destroy(p);
+    if (instance_count_) {
+      *instance_count_ -= 1;
+    }
+  }
+
+  template <typename U>
+  class rebind {
+   public:
+    using other = CountingAllocator<U>;
+  };
+
+  int64_t* bytes_used_;
+  int64_t* instance_count_;
+};
+
+TEST(AllocatorSupportTest, CountInlineAllocations) {
+  constexpr size_t inlined_size = 4;
+  using Alloc = CountingAllocator<int>;
+  using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
+
+  int64_t allocated = 0;
+  int64_t active_instances = 0;
+
+  {
+    const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+    Alloc alloc(&allocated, &active_instances);
+
+    AllocFxdArr arr(ia, ia + inlined_size, alloc);
+    static_cast<void>(arr);
+  }
+
+  EXPECT_EQ(allocated, 0);
+  EXPECT_EQ(active_instances, 0);
+}
+
+TEST(AllocatorSupportTest, CountOutoflineAllocations) {
+  constexpr size_t inlined_size = 4;
+  using Alloc = CountingAllocator<int>;
+  using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
+
+  int64_t allocated = 0;
+  int64_t active_instances = 0;
+
+  {
+    const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
+    Alloc alloc(&allocated, &active_instances);
+
+    AllocFxdArr arr(ia, ia + ABSL_ARRAYSIZE(ia), alloc);
+
+    EXPECT_EQ(allocated, arr.size() * sizeof(int));
+    static_cast<void>(arr);
+  }
+
+  EXPECT_EQ(active_instances, 0);
+}
+
+TEST(AllocatorSupportTest, CountCopyInlineAllocations) {
+  constexpr size_t inlined_size = 4;
+  using Alloc = CountingAllocator<int>;
+  using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
+
+  int64_t allocated1 = 0;
+  int64_t allocated2 = 0;
+  int64_t active_instances = 0;
+  Alloc alloc(&allocated1, &active_instances);
+  Alloc alloc2(&allocated2, &active_instances);
+
+  {
+    int initial_value = 1;
+
+    AllocFxdArr arr1(inlined_size / 2, initial_value, alloc);
+
+    EXPECT_EQ(allocated1, 0);
+
+    AllocFxdArr arr2(arr1, alloc2);
+
+    EXPECT_EQ(allocated2, 0);
+    static_cast<void>(arr1);
+    static_cast<void>(arr2);
+  }
+
+  EXPECT_EQ(active_instances, 0);
+}
+
+TEST(AllocatorSupportTest, CountCopyOutoflineAllocations) {
+  constexpr size_t inlined_size = 4;
+  using Alloc = CountingAllocator<int>;
+  using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
+
+  int64_t allocated1 = 0;
+  int64_t allocated2 = 0;
+  int64_t active_instances = 0;
+  Alloc alloc(&allocated1, &active_instances);
+  Alloc alloc2(&allocated2, &active_instances);
+
+  {
+    int initial_value = 1;
+
+    AllocFxdArr arr1(inlined_size * 2, initial_value, alloc);
+
+    EXPECT_EQ(allocated1, arr1.size() * sizeof(int));
+
+    AllocFxdArr arr2(arr1, alloc2);
+
+    EXPECT_EQ(allocated2, inlined_size * 2 * sizeof(int));
+    static_cast<void>(arr1);
+    static_cast<void>(arr2);
+  }
+
+  EXPECT_EQ(active_instances, 0);
+}
+
+TEST(AllocatorSupportTest, SizeValAllocConstructor) {
+  using testing::AllOf;
+  using testing::Each;
+  using testing::SizeIs;
+
+  constexpr size_t inlined_size = 4;
+  using Alloc = CountingAllocator<int>;
+  using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
+
+  {
+    auto len = inlined_size / 2;
+    auto val = 0;
+    int64_t allocated = 0;
+    AllocFxdArr arr(len, val, Alloc(&allocated));
+
+    EXPECT_EQ(allocated, 0);
+    EXPECT_THAT(arr, AllOf(SizeIs(len), Each(0)));
+  }
+
+  {
+    auto len = inlined_size * 2;
+    auto val = 0;
+    int64_t allocated = 0;
+    AllocFxdArr arr(len, val, Alloc(&allocated));
+
+    EXPECT_EQ(allocated, len * sizeof(int));
+    EXPECT_THAT(arr, AllOf(SizeIs(len), Each(0)));
+  }
+}
+
 #ifdef ADDRESS_SANITIZER
 TEST(FixedArrayTest, AddressSanitizerAnnotations1) {
   absl::FixedArray<int, 32> a(10);
@@ -655,5 +867,4 @@ TEST(FixedArrayTest, AddressSanitizerAnnotations4) {
   EXPECT_DEATH(raw[21] = ThreeInts(), "container-overflow");
 }
 #endif  // ADDRESS_SANITIZER
-
 }  // namespace
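The `CountingAllocator` in these tests keeps its tallies outside the allocator object so that rebound copies observe the same counters. The same out-of-band-state idea works with any allocator-aware container; here is a standalone sketch (the `TallyAlloc` name and the sizes are this example's own, not from the patch):

    #include <cassert>
    #include <cstdint>
    #include <memory>
    #include <vector>

    // Minimal counting allocator: the byte tally lives outside the allocator,
    // so every copy the container makes shares the same counter.
    template <typename T>
    struct TallyAlloc : std::allocator<T> {
      int64_t* bytes;  // external tally, owned by the caller
      explicit TallyAlloc(int64_t* b) : bytes(b) {}
      template <typename U>
      TallyAlloc(const TallyAlloc<U>& o) : bytes(o.bytes) {}
      template <typename U>
      struct rebind { using other = TallyAlloc<U>; };
      T* allocate(size_t n) {
        *bytes += n * sizeof(T);
        return std::allocator<T>::allocate(n);
      }
      void deallocate(T* p, size_t n) {
        *bytes -= n * sizeof(T);
        std::allocator<T>::deallocate(p, n);
      }
    };

    int main() {
      int64_t bytes = 0;
      {
        TallyAlloc<int> a(&bytes);
        std::vector<int, TallyAlloc<int>> v(a);
        v.resize(100);
        assert(bytes >= 100 * static_cast<int64_t>(sizeof(int)));
      }
      assert(bytes == 0);  // all memory returned on destruction
      return 0;
    }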
diff --git a/absl/container/internal/compressed_tuple.h b/absl/container/internal/compressed_tuple.h
new file mode 100644
index 000000000000..cc52614f5b37
--- /dev/null
+++ b/absl/container/internal/compressed_tuple.h
@@ -0,0 +1,175 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Helper class to perform the Empty Base Optimization.
+// Ts can contain classes and non-classes, empty or not. For the ones that
+// are empty classes, we perform the optimization. If all types in Ts are empty
+// classes, then CompressedTuple<Ts...> is itself an empty class.
+//
+// To access the members, use the member get<N>() function.
+//
+// Eg:
+//   absl::container_internal::CompressedTuple<int, T1, T2, T3> value(7, t1,
+//                                                                    t2, t3);
+//   assert(value.get<0>() == 7);
+//   T1& t1 = value.get<1>();
+//   const T2& t2 = value.get<2>();
+//   ...
+//
+// http://en.cppreference.com/w/cpp/language/ebo
+
+#ifndef ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
+#define ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
+
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/utility/utility.h"
+
+#ifdef _MSC_VER
+// We need to mark these classes with this declspec to ensure that the empty
+// base optimization in CompressedTuple actually happens.
+#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC __declspec(empty_bases)
+#else  // _MSC_VER
+#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
+#endif  // _MSC_VER
+
+namespace absl {
+namespace container_internal {
+
+template <typename... Ts>
+class CompressedTuple;
+
+namespace internal_compressed_tuple {
+
+template <typename D, size_t I>
+struct Elem;
+template <typename... B, size_t I>
+struct Elem<CompressedTuple<B...>, I>
+    : std::tuple_element<I, std::tuple<B...>> {};
+template <typename D, size_t I>
+using ElemT = typename Elem<D, I>::type;
+
+// Use the __is_final intrinsic if available. Where it's not available, classes
+// declared with the 'final' specifier cannot be used as CompressedTuple
+// elements.
+// TODO(sbenza): Replace this with std::is_final in C++14.
+template <typename T>
+constexpr bool IsFinal() {
+#if defined(__clang__) || defined(__GNUC__)
+  return __is_final(T);
+#else
+  return false;
+#endif
+}
+
+template <typename T>
+constexpr bool ShouldUseBase() {
+  return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>();
+}
+
+// The storage class provides two specializations:
+//  - For empty classes, it stores T as a base class.
+//  - For everything else, it stores T as a member.
+template <typename D, size_t I, bool = ShouldUseBase<ElemT<D, I>>()>
+struct Storage {
+  using T = ElemT<D, I>;
+  T value;
+  constexpr Storage() = default;
+  explicit constexpr Storage(T&& v) : value(absl::forward<T>(v)) {}
+  constexpr const T& get() const { return value; }
+  T& get() { return value; }
+};
+
+template <typename D, size_t I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<D, I, true>
+    : ElemT<D, I> {
+  using T = internal_compressed_tuple::ElemT<D, I>;
+  constexpr Storage() = default;
+  explicit constexpr Storage(T&& v) : T(absl::forward<T>(v)) {}
+  constexpr const T& get() const { return *this; }
+  T& get() { return *this; }
+};
+
+template <typename D, typename I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl;
+
+template <typename... Ts, size_t... I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
+    CompressedTupleImpl<CompressedTuple<Ts...>, absl::index_sequence<I...>>
+    // We use the dummy identity function through std::integral_constant to
+    // convince MSVC of accepting and expanding I in that context. Without it
+    // you would get:
+    //   error C3548: 'I': parameter pack cannot be used in this context
+    : Storage<CompressedTuple<Ts...>,
+              std::integral_constant<size_t, I>::value>... {
+  constexpr CompressedTupleImpl() = default;
+  explicit constexpr CompressedTupleImpl(Ts&&... args)
+      : Storage<CompressedTuple<Ts...>, I>(absl::forward<Ts>(args))... {}
+};
+
+}  // namespace internal_compressed_tuple
+
+// Helper class to perform the Empty Base Class Optimization.
+// Ts can contain classes and non-classes, empty or not. For the ones that
+// are empty classes, we perform the optimization. If all types in Ts are
+// empty classes, then CompressedTuple<Ts...> is itself an empty class.
+//
+// To access the members, use the member .get<N>() function.
+//
+// Eg:
+//   absl::container_internal::CompressedTuple<int, T1, T2, T3> value(7, t1,
+//                                                                    t2, t3);
+//   assert(value.get<0>() == 7);
+//   T1& t1 = value.get<1>();
+//   const T2& t2 = value.get<2>();
+//   ...
+//
+// http://en.cppreference.com/w/cpp/language/ebo
+template <typename... Ts>
+class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
+    : private internal_compressed_tuple::CompressedTupleImpl<
+          CompressedTuple<Ts...>, absl::index_sequence_for<Ts...>> {
+ private:
+  template <int I>
+  using ElemT = internal_compressed_tuple::ElemT<CompressedTuple, I>;
+
+ public:
+  constexpr CompressedTuple() = default;
+  explicit constexpr CompressedTuple(Ts... base)
+      : CompressedTuple::CompressedTupleImpl(absl::forward<Ts>(base)...) {}
+
+  template <int I>
+  ElemT<I>& get() {
+    return internal_compressed_tuple::Storage<CompressedTuple, I>::get();
+  }
+
+  template <int I>
+  constexpr const ElemT<I>& get() const {
+    return internal_compressed_tuple::Storage<CompressedTuple, I>::get();
+  }
+};
+
+// Explicit specialization for a zero-element tuple
+// (needed to avoid ambiguous overloads for the default constructor).
+template <>
+class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {};
+
+}  // namespace container_internal
+}  // namespace absl
+
+#undef ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
+
+#endif  // ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
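A quick demonstration of the EBO claim in the header comment: placing an empty, non-final class in a `CompressedTuple` adds no storage. This is a standalone sketch; `PtrWithDeleter` and `FreeDeleter` are illustrative names, not from the commit.

    #include <cassert>

    #include "absl/container/internal/compressed_tuple.h"

    struct FreeDeleter {  // empty, non-final: stored as a base class
      void operator()(void* p) const { ::operator delete(p); }
    };

    int main() {
      using PtrWithDeleter =
          absl::container_internal::CompressedTuple<int*, FreeDeleter>;
      // The empty deleter is folded into a base class, so the whole tuple is
      // pointer-sized; this is the same trick smart pointers use to avoid
      // paying for stateless deleters.
      static_assert(sizeof(PtrWithDeleter) == sizeof(int*),
                    "empty element contributes no storage");

      PtrWithDeleter p(nullptr, FreeDeleter{});
      assert(p.get<0>() == nullptr);
      return 0;
    }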
+ +#include "absl/container/internal/compressed_tuple.h" + +#include <string> + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace absl { +namespace container_internal { +namespace { + +template <int> +struct Empty {}; + +template <typename T> +struct NotEmpty { + T value; +}; + +template <typename T, typename U> +struct TwoValues { + T value1; + U value2; +}; + +TEST(CompressedTupleTest, Sizeof) { + EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int>)); + EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int, Empty<0>>)); + EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int, Empty<0>, Empty<1>>)); + EXPECT_EQ(sizeof(int), + sizeof(CompressedTuple<int, Empty<0>, Empty<1>, Empty<2>>)); + + EXPECT_EQ(sizeof(TwoValues<int, double>), + sizeof(CompressedTuple<int, NotEmpty<double>>)); + EXPECT_EQ(sizeof(TwoValues<int, double>), + sizeof(CompressedTuple<int, Empty<0>, NotEmpty<double>>)); + EXPECT_EQ(sizeof(TwoValues<int, double>), + sizeof(CompressedTuple<int, Empty<0>, NotEmpty<double>, Empty<1>>)); +} + +TEST(CompressedTupleTest, Access) { + struct S { + std::string x; + }; + CompressedTuple<int, Empty<0>, S> x(7, {}, S{"ABC"}); + EXPECT_EQ(sizeof(x), sizeof(TwoValues<int, S>)); + EXPECT_EQ(7, x.get<0>()); + EXPECT_EQ("ABC", x.get<2>().x); +} + +TEST(CompressedTupleTest, NonClasses) { + CompressedTuple<int, const char*> x(7, "ABC"); + EXPECT_EQ(7, x.get<0>()); + EXPECT_STREQ("ABC", x.get<1>()); +} + +TEST(CompressedTupleTest, MixClassAndNonClass) { + CompressedTuple<int, const char*, Empty<0>, NotEmpty<double>> x(7, "ABC", {}, + {1.25}); + struct Mock { + int v; + const char* p; + double d; + }; + EXPECT_EQ(sizeof(x), sizeof(Mock)); + EXPECT_EQ(7, x.get<0>()); + EXPECT_STREQ("ABC", x.get<1>()); + EXPECT_EQ(1.25, x.get<3>().value); +} + +TEST(CompressedTupleTest, Nested) { + CompressedTuple<int, CompressedTuple<int>, + CompressedTuple<int, CompressedTuple<int>>> + x(1, CompressedTuple<int>(2), + CompressedTuple<int, CompressedTuple<int>>(3, CompressedTuple<int>(4))); + EXPECT_EQ(1, x.get<0>()); + EXPECT_EQ(2, x.get<1>().get<0>()); + EXPECT_EQ(3, x.get<2>().get<0>()); + EXPECT_EQ(4, x.get<2>().get<1>().get<0>()); + + CompressedTuple<Empty<0>, Empty<0>, + CompressedTuple<Empty<0>, CompressedTuple<Empty<0>>>> + y; + std::set<Empty<0>*> empties{&y.get<0>(), &y.get<1>(), &y.get<2>().get<0>(), + &y.get<2>().get<1>().get<0>()}; +#ifdef _MSC_VER + // MSVC has a bug where many instances of the same base class are layed out in + // the same address when using __declspec(empty_bases). + // This will be fixed in a future version of MSVC. + int expected = 1; +#else + int expected = 4; +#endif + EXPECT_EQ(expected, sizeof(y)); + EXPECT_EQ(expected, empties.size()); + EXPECT_EQ(sizeof(y), sizeof(Empty<0>) * empties.size()); + + EXPECT_EQ(4 * sizeof(char), + sizeof(CompressedTuple<CompressedTuple<char, char>, + CompressedTuple<char, char>>)); + EXPECT_TRUE( + (std::is_empty<CompressedTuple<CompressedTuple<Empty<0>>, + CompressedTuple<Empty<1>>>>::value)); +} + +TEST(CompressedTupleTest, Reference) { + int i = 7; + std::string s = "Very long std::string that goes in the heap"; + CompressedTuple<int, int&, std::string, std::string&> x(i, i, s, s); + + // Sanity check. 
+  // Sanity check. We should not have moved from `s`.
+  EXPECT_EQ(s, "Very long std::string that goes in the heap");
+
+  EXPECT_EQ(x.get<0>(), x.get<1>());
+  EXPECT_NE(&x.get<0>(), &x.get<1>());
+  EXPECT_EQ(&x.get<1>(), &i);
+
+  EXPECT_EQ(x.get<2>(), x.get<3>());
+  EXPECT_NE(&x.get<2>(), &x.get<3>());
+  EXPECT_EQ(&x.get<3>(), &s);
+}
+
+TEST(CompressedTupleTest, NoElements) {
+  CompressedTuple<> x;
+  static_cast<void>(x);  // Silence -Wunused-variable.
+  EXPECT_TRUE(std::is_empty<CompressedTuple<>>::value);
+}
+
+TEST(CompressedTupleTest, Constexpr) {
+  constexpr CompressedTuple<int, double, CompressedTuple<int>> x(
+      7, 1.25, CompressedTuple<int>(5));
+  constexpr int x0 = x.get<0>();
+  constexpr double x1 = x.get<1>();
+  constexpr int x2 = x.get<2>().get<0>();
+  EXPECT_EQ(x0, 7);
+  EXPECT_EQ(x1, 1.25);
+  EXPECT_EQ(x2, 5);
+}
+
+#if defined(__clang__) || defined(__GNUC__)
+TEST(CompressedTupleTest, EmptyFinalClass) {
+  struct S final {
+    int f() const { return 5; }
+  };
+  CompressedTuple<S> x;
+  EXPECT_EQ(x.get<0>().f(), 5);
+}
+#endif
+
+}  // namespace
+}  // namespace container_internal
+}  // namespace absl