| author    | Abseil Team <absl-team@google.com>  | 2019-06-10 08:57 -0700 |
|-----------|-------------------------------------|------------------------|
| committer | Gennadiy Civil <misterg@google.com> | 2019-06-10 17:31 -0400 |
| commit    | 361cb8a9db2f2130442389fd80593255be26d681 (patch) | |
| tree      | f38eca22b4fbf0933e06abf5d0546ec9c29721b4 /absl/container/inlined_vector_benchmark.cc | |
| parent    | 0238ab0a831f179518c1a814f9584e99da2d75a3 (diff) | |
Export of internal Abseil changes.
--
4a21ad4ffa957d28b770de8717289fab7410f567 by Gennadiy Rozental <rogeeff@google.com>:

Internal cleanup

PiperOrigin-RevId: 252366381

--
b6b0f25439549c54f1537a16625be1fecd3c7d8c by Xiaoyi Zhang <zhangxy@google.com>:

Fix the C4245 compiler warning in Visual Studio. This allows using Abseil headers in code that requires strict warning settings. This is an import of https://github.com/abseil/abseil-cpp/pull/321.

PiperOrigin-RevId: 252101240

--
0543b7861b658a5a665298e1d868e29968ff7b27 by CJ Johnson <johnsoncj@google.com>:

Adds new benchmarks for the constructors of InlinedVector.

PiperOrigin-RevId: 251905349

--
c65a08507917e9f8f6450b8beb235fe1426d7954 by CJ Johnson <johnsoncj@google.com>:

Updates the InlinedVector BatchedBenchmark abstractions to 1) pass the index of the instance back to the prepare and test functions, so that callers may perform extra work on local state with a unique per-instance ID, and 2) reduce the number of manually written calls to BENCHMARK_TEMPLATE.

PiperOrigin-RevId: 251895546

--
99a1ae2d786b80096172f6e018711e15c0c750b9 by Samuel Benzaquen <sbenza@google.com>:

Fix an ambiguous-construction problem in absl::variant<> to bring it in line with std::variant. ImaginaryFun is hiding duplicate objects instead of causing ambiguity. Add a second unique argument to make sure all overloads exist in the final overload set.

PiperOrigin-RevId: 251860694

--
b54d0a12673be6ebb6e77e24a556ce9b758b3a7e by Abseil Team <absl-team@google.com>:

Import of CCTZ from GitHub.

PiperOrigin-RevId: 251739183

--
f51b115e0dc3fc9a9c9c20b33a1f27027a700d48 by Abseil Team <absl-team@google.com>:

Import of CCTZ from GitHub.

PiperOrigin-RevId: 251686812

--
30e868049282dc6a6fc77d923ca7d2a5d35a1658 by Xiaoyi Zhang <zhangxy@google.com>:

Import of CCTZ from GitHub.

PiperOrigin-RevId: 251652119

GitOrigin-RevId: 4a21ad4ffa957d28b770de8717289fab7410f567
Change-Id: I7171cb613793fa90e0eb0143b65ec8264a2a84db
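The BatchedBenchmark change described above (threading the instance index through the callbacks, plus the ABSL_INTERNAL_BENCHMARK_ONE_SIZE macro) is what the diff below implements. As a minimal sketch of the pattern, assuming only the Google Benchmark API (the helper `RunBatched` and the benchmark `BM_MoveFromPerSlotSource` are hypothetical names, not Abseil code), passing the slot index to the prepare/test callbacks lets a benchmark keep matching per-slot source state:

```cpp
#include <array>
#include <cstddef>
#include <utility>
#include <vector>

#include "benchmark/benchmark.h"

namespace {

constexpr size_t kBatchSize = 100;

// Runs prepare/test over a batch of vectors, passing each callback the slot
// index so it can consult per-slot state owned by the caller.
template <typename PrepareFn, typename TestFn>
void RunBatched(benchmark::State& state, PrepareFn prepare, TestFn test) {
  std::array<std::vector<int>, kBatchSize> batch{};
  while (state.KeepRunningBatch(kBatchSize)) {
    state.PauseTiming();
    for (size_t i = 0; i < kBatchSize; ++i) prepare(&batch[i], i);
    benchmark::DoNotOptimize(batch);
    state.ResumeTiming();
    for (size_t i = 0; i < kBatchSize; ++i) test(&batch[i], i);
  }
}

// Example use: each timed iteration moves from a distinct per-slot source,
// which is only expressible because the callbacks receive the slot index.
void BM_MoveFromPerSlotSource(benchmark::State& state) {
  std::array<std::vector<int>, kBatchSize> sources{};
  RunBatched(
      state,
      /* prepare = */
      [&](std::vector<int>* v, size_t i) {
        sources[i].assign(64, 1);  // refill this slot's source, untimed
        v->clear();
      },
      /* test = */
      [&](std::vector<int>* v, size_t i) { *v = std::move(sources[i]); });
}
BENCHMARK(BM_MoveFromPerSlotSource);

}  // namespace
```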
Diffstat (limited to 'absl/container/inlined_vector_benchmark.cc')
-rw-r--r-- | absl/container/inlined_vector_benchmark.cc | 110 |
1 file changed, 100 insertions(+), 10 deletions(-)
diff --git a/absl/container/inlined_vector_benchmark.cc b/absl/container/inlined_vector_benchmark.cc
index b57fc1de9e3b..a8368d418c53 100644
--- a/absl/container/inlined_vector_benchmark.cc
+++ b/absl/container/inlined_vector_benchmark.cc
@@ -379,6 +379,10 @@ constexpr size_t kLargeSize = kInlinedCapacity * 2;
 constexpr size_t kSmallSize = kInlinedCapacity / 2;
 constexpr size_t kBatchSize = 100;
 
+#define ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_FunctionTemplate, T) \
+  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize);        \
+  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize)
+
 template <typename T>
 using InlVec = absl::InlinedVector<T, kInlinedCapacity>;
 
@@ -420,29 +424,115 @@ void BatchedBenchmark(benchmark::State& state, PrepareVecFn prepare_vec,
   while (state.KeepRunningBatch(kBatchSize)) {
     // Prepare batch
     state.PauseTiming();
-    for (auto& vec : vector_batch) {
-      prepare_vec(&vec);
+    for (size_t i = 0; i < kBatchSize; ++i) {
+      prepare_vec(vector_batch.data() + i, i);
     }
     benchmark::DoNotOptimize(vector_batch);
     state.ResumeTiming();
 
     // Test batch
-    for (auto& vec : vector_batch) {
-      test_vec(&vec);
+    for (size_t i = 0; i < kBatchSize; ++i) {
+      test_vec(vector_batch.data() + i, i);
     }
   }
 }
 
+template <typename T, size_t ToSize>
+void BM_ConstructFromSize(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  auto size = ToSize;
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+      /* test_vec = */
+      [&](void* ptr, size_t) {
+        benchmark::DoNotOptimize(size);
+        ::new (ptr) VecT(size);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromSizeRef(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  auto size = ToSize;
+  auto ref = T();
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+      /* test_vec = */
+      [&](void* ptr, size_t) {
+        benchmark::DoNotOptimize(size);
+        benchmark::DoNotOptimize(ref);
+        ::new (ptr) VecT(size, ref);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromRange(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  std::array<T, ToSize> arr{};
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+      /* test_vec = */
+      [&](void* ptr, size_t) {
+        benchmark::DoNotOptimize(arr);
+        ::new (ptr) VecT(arr.begin(), arr.end());
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromCopy(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  VecT other_vec(ToSize);
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+      /* test_vec = */
+      [&](void* ptr, size_t) {
+        benchmark::DoNotOptimize(other_vec);
+        ::new (ptr) VecT(other_vec);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromMove(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  std::array<VecT, kBatchSize> vector_batch{};
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [&](InlVec<T>* vec, size_t i) {
+        vector_batch[i].clear();
+        vector_batch[i].resize(ToSize);
+        vec->~VecT();
+      },
+      /* test_vec = */
+      [&](void* ptr, size_t i) {
+        benchmark::DoNotOptimize(vector_batch[i]);
+        ::new (ptr) VecT(std::move(vector_batch[i]));
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, NontrivialType);
+
 template <typename T, size_t FromSize>
 void BM_Clear(benchmark::State& state) {
   BatchedBenchmark<T>(
       state,
-      /* prepare_vec = */ [](InlVec<T>* vec) { vec->resize(FromSize); },
-      /* test_vec = */ [](InlVec<T>* vec) { vec->clear(); });
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
+      /* test_vec = */ [](InlVec<T>* vec, size_t) { vec->clear(); });
 }
-BENCHMARK_TEMPLATE(BM_Clear, TrivialType, kLargeSize);
-BENCHMARK_TEMPLATE(BM_Clear, TrivialType, kSmallSize);
-BENCHMARK_TEMPLATE(BM_Clear, NontrivialType, kLargeSize);
-BENCHMARK_TEMPLATE(BM_Clear, NontrivialType, kSmallSize);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, NontrivialType);
 
 }  // namespace
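For reference, each ABSL_INTERNAL_BENCHMARK_ONE_SIZE registration above expands, per the macro definition at the top of the diff, to the pair of BENCHMARK_TEMPLATE calls that were previously written out by hand, so one line registers both the small and the large size for a given benchmark and element type:

```cpp
// Expansion of ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, TrivialType);
// following the macro definition shown in the diff above:
BENCHMARK_TEMPLATE(BM_Clear, TrivialType, kLargeSize);
BENCHMARK_TEMPLATE(BM_Clear, TrivialType, kSmallSize);
```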