Diffstat (limited to 'absl/container/inlined_vector_benchmark.cc')
-rw-r--r-- | absl/container/inlined_vector_benchmark.cc | 110
1 file changed, 100 insertions, 10 deletions
diff --git a/absl/container/inlined_vector_benchmark.cc b/absl/container/inlined_vector_benchmark.cc
index b57fc1de9e3b..a8368d418c53 100644
--- a/absl/container/inlined_vector_benchmark.cc
+++ b/absl/container/inlined_vector_benchmark.cc
@@ -379,6 +379,10 @@ constexpr size_t kLargeSize = kInlinedCapacity * 2;
 constexpr size_t kSmallSize = kInlinedCapacity / 2;
 constexpr size_t kBatchSize = 100;
 
+#define ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_FunctionTemplate, T) \
+  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize);        \
+  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize)
+
 template <typename T>
 using InlVec = absl::InlinedVector<T, kInlinedCapacity>;
 
@@ -420,29 +424,115 @@ void BatchedBenchmark(benchmark::State& state, PrepareVecFn prepare_vec,
   while (state.KeepRunningBatch(kBatchSize)) {
     // Prepare batch
     state.PauseTiming();
-    for (auto& vec : vector_batch) {
-      prepare_vec(&vec);
+    for (size_t i = 0; i < kBatchSize; ++i) {
+      prepare_vec(vector_batch.data() + i, i);
     }
     benchmark::DoNotOptimize(vector_batch);
     state.ResumeTiming();
 
     // Test batch
-    for (auto& vec : vector_batch) {
-      test_vec(&vec);
+    for (size_t i = 0; i < kBatchSize; ++i) {
+      test_vec(vector_batch.data() + i, i);
     }
   }
 }
 
+template <typename T, size_t ToSize>
+void BM_ConstructFromSize(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  auto size = ToSize;
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+      /* test_vec = */
+      [&](void* ptr, size_t) {
+        benchmark::DoNotOptimize(size);
+        ::new (ptr) VecT(size);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromSizeRef(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  auto size = ToSize;
+  auto ref = T();
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+      /* test_vec = */
+      [&](void* ptr, size_t) {
+        benchmark::DoNotOptimize(size);
+        benchmark::DoNotOptimize(ref);
+        ::new (ptr) VecT(size, ref);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromRange(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  std::array<T, ToSize> arr{};
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+      /* test_vec = */
+      [&](void* ptr, size_t) {
+        benchmark::DoNotOptimize(arr);
+        ::new (ptr) VecT(arr.begin(), arr.end());
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromCopy(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  VecT other_vec(ToSize);
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+      /* test_vec = */
+      [&](void* ptr, size_t) {
+        benchmark::DoNotOptimize(other_vec);
+        ::new (ptr) VecT(other_vec);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromMove(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  std::array<VecT, kBatchSize> vector_batch{};
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [&](InlVec<T>* vec, size_t i) {
+        vector_batch[i].clear();
+        vector_batch[i].resize(ToSize);
+        vec->~VecT();
+      },
+      /* test_vec = */
+      [&](void* ptr, size_t i) {
+        benchmark::DoNotOptimize(vector_batch[i]);
+        ::new (ptr) VecT(std::move(vector_batch[i]));
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, NontrivialType);
+
 template <typename T, size_t FromSize>
 void BM_Clear(benchmark::State& state) {
   BatchedBenchmark<T>(
       state,
-      /* prepare_vec = */ [](InlVec<T>* vec) { vec->resize(FromSize); },
-      /* test_vec = */ [](InlVec<T>* vec) { vec->clear(); });
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
+      /* test_vec = */ [](InlVec<T>* vec, size_t) { vec->clear(); });
 }
-BENCHMARK_TEMPLATE(BM_Clear, TrivialType, kLargeSize);
-BENCHMARK_TEMPLATE(BM_Clear, TrivialType, kSmallSize);
-BENCHMARK_TEMPLATE(BM_Clear, NontrivialType, kLargeSize);
-BENCHMARK_TEMPLATE(BM_Clear, NontrivialType, kSmallSize);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, NontrivialType);
 
 }  // namespace
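Note: each new BM_ConstructFrom* benchmark above uses the same recycling trick: prepare_vec tears the vector down to raw storage with an explicit destructor call outside the timed region, and test_vec rebuilds it in place with placement new inside the timed region, so only the constructor under test is measured. A minimal standalone sketch of that pattern, assuming Google Benchmark and absl::InlinedVector (the names BM_SketchConstructFromSize and SmallVec are illustrative, not part of this patch):

// Sketch only: measure construction by destroying objects in the untimed
// prepare phase and reconstructing them with placement new in the timed phase.
#include <array>
#include <cstddef>
#include <new>

#include "absl/container/inlined_vector.h"
#include "benchmark/benchmark.h"

namespace {

using SmallVec = absl::InlinedVector<int, 4>;  // illustrative alias

void BM_SketchConstructFromSize(benchmark::State& state) {
  constexpr size_t kBatch = 100;
  size_t size = static_cast<size_t>(state.range(0));
  std::array<SmallVec, kBatch> batch{};
  while (state.KeepRunningBatch(kBatch)) {
    // Untimed: reduce each element to raw storage by calling its destructor.
    state.PauseTiming();
    for (SmallVec& vec : batch) vec.~SmallVec();
    state.ResumeTiming();
    // Timed: reconstruct each element in place; only SmallVec(size) is measured.
    for (SmallVec& vec : batch) {
      benchmark::DoNotOptimize(size);
      ::new (static_cast<void*>(&vec)) SmallVec(size);
    }
  }
}
BENCHMARK(BM_SketchConstructFromSize)->Arg(2)->Arg(8);

}  // namespace

BENCHMARK_MAIN();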