author    Abseil Team <absl-team@google.com>    2019-06-18 20:05 -0700
committer Gennadiy Rozental <rogeeff@google.com>    2019-06-18 20:10 -0400
commit    43ef2148c0936ebf7cb4be6b19927a9d9d145b8f (patch)
tree      8a8ddca3da4d2f667ddfea48a736e80a147d5494 /absl/container/inlined_vector_benchmark.cc
parent    a13d3df2b3ba68aeead92e2d078fba0510d55024 (diff)
Export of internal Abseil changes.
--
635146be541d732fbf2e9c93c6bec89035552484 by Gennadiy Rozental <rogeeff@google.com>:

Merge external PR #324

PiperOrigin-RevId: 253849839

--
7a37f87f0f419ab535e59c7dae7961546586671a by Gennadiy Rozental <rogeeff@google.com>:

Merge external PR #323

PiperOrigin-RevId: 253849558

--
75455e93e1f3987c926f35fbe80a0ea84e4ba35b by CJ Johnson <johnsoncj@google.com>:

Removes the `ivi` namespace alias to reduce reader confusion
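
(For readers who never saw the alias: it was a file-local namespace alias of roughly the following form. The exact target is assumed here, since the alias itself does not appear in this diff.)

  namespace ivi = absl::inlined_vector_internal;  // hypothetical reconstruction

Spelling the namespace out at each use site was judged clearer than the three-letter shorthand.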

PiperOrigin-RevId: 253789534

--
2f99d27194468129767c48ab621b952660427493 by CJ Johnson <johnsoncj@google.com>:

Adds benchmarks for the various overloads of InlinedVector::assign(...) and InlinedVector::operator=(...)
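
All of the new benchmarks funnel through a BatchedBenchmark<T> harness whose definition lies outside the hunks below. Its callback-driven shape is visible from the call sites; the timing details in this sketch are an assumption (InlVec and kBatchSize are the file-local names that do appear in the diff):

  template <typename T, typename PrepareVecFn, typename TestVecFn>
  void BatchedBenchmark(benchmark::State& state, PrepareVecFn prepare_vec,
                        TestVecFn test_vec) {
    std::array<InlVec<T>, kBatchSize> vector_batch{};
    while (state.KeepRunningBatch(kBatchSize)) {
      state.PauseTiming();  // reset the batch outside the timed region
      for (size_t i = 0; i < kBatchSize; ++i) prepare_vec(&vector_batch[i], i);
      state.ResumeTiming();
      // Timed: run the operation under test once per vector in the batch.
      for (size_t i = 0; i < kBatchSize; ++i) test_vec(&vector_batch[i], i);
    }
  }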

PiperOrigin-RevId: 253787316

--
a0949eb100b93aae22b85b4a4820e4bf9a5a2dbb by CJ Johnson <johnsoncj@google.com>:

Updates the definition of `InlinedVector::pop_back(...)` to be cleaner and more direct (hiding the is_allocated branch behind a single call to `data()`)
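
A schematic of the idea (hypothetical member names, not the actual implementation): once data() itself resolves whether the elements live inline or on the heap, pop_back() collapses to a single statement.

  // Before (schematic): pop_back() branches on the storage mode itself.
  void pop_back() {
    if (is_allocated()) {
      allocated_data()[--size_].~T();
    } else {
      inlined_data()[--size_].~T();
    }
  }

  // After (schematic): data() hides the is_allocated branch.
  void pop_back() {
    data()[--size_].~T();
  }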

Adds exception safety test for `InlinedVector::pop_back(...)`

PiperOrigin-RevId: 253607385

--
2dbc728ddf84835dcb6341f9a166f1c9bde103b9 by CJ Johnson <johnsoncj@google.com>:

Adds the remaining constructor exception safety tests for InlinedVector
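
The pop_back test above and these constructor tests share one theme: an operation that throws partway through must neither leak nor leave the vector in an invalid state. The real suite is built on Abseil's exception-safety testing utilities; the following is a simplified, self-contained stand-in for the constructor case (all names here are hypothetical):

  #include <stdexcept>
  #include <vector>
  #include "absl/container/inlined_vector.h"
  #include "gtest/gtest.h"

  namespace {

  // Counts live instances (to detect leaks) and throws on the Nth copy.
  struct Thrower {
    static int live;
    static int copies_allowed;
    Thrower() { ++live; }
    Thrower(const Thrower&) {
      if (copies_allowed-- <= 0) throw std::runtime_error("copy failed");
      ++live;
    }
    ~Thrower() { --live; }
  };
  int Thrower::live = 0;
  int Thrower::copies_allowed = 0;

  TEST(InlinedVectorExceptionSafety, RangeConstructorCleansUpOnThrow) {
    Thrower::copies_allowed = 1 << 30;
    std::vector<Thrower> source(8);
    const int live_before = Thrower::live;

    Thrower::copies_allowed = 4;  // the fifth copy will throw
    EXPECT_THROW(
        (absl::InlinedVector<Thrower, 2>(source.begin(), source.end())),
        std::runtime_error);

    // Every element constructed before the throw must have been destroyed.
    EXPECT_EQ(Thrower::live, live_before);
  }

  }  // namespace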

PiperOrigin-RevId: 253592324

--
40d88e0d6232c93af5e008088f69ad41cb44e4ce by CJ Johnson <johnsoncj@google.com>:

Updates the constructors of InlinedVector with new implementations that are exception-safe and more performant.
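
The underlying pattern, in schematic form (an assumption about the approach, not Abseil's actual code): acquire storage first, construct elements under a guard, and destroy everything already built before rethrowing, so a throwing element constructor never leaks:

  #include <cstddef>
  #include <new>

  // Hypothetical helper illustrating the exception-safe construction pattern.
  template <typename T, typename It>
  T* ConstructRange(It first, It last, std::size_t n) {
    T* storage = static_cast<T*>(::operator new(n * sizeof(T)));
    std::size_t constructed = 0;
    try {
      for (It it = first; it != last; ++it) {
        ::new (static_cast<void*>(storage + constructed)) T(*it);
        ++constructed;
      }
    } catch (...) {
      // Roll back: destroy what was built, free the allocation, rethrow.
      while (constructed > 0) storage[--constructed].~T();
      ::operator delete(storage);
      throw;
    }
    return storage;
  }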

PiperOrigin-RevId: 253294508
GitOrigin-RevId: 635146be541d732fbf2e9c93c6bec89035552484
Change-Id: I7d37a749632084f5d7fa56d42392e622a9d0180d
Diffstat (limited to 'absl/container/inlined_vector_benchmark.cc')
-rw-r--r-- absl/container/inlined_vector_benchmark.cc | 74
1 file changed, 74 insertions(+), 0 deletions(-)
diff --git a/absl/container/inlined_vector_benchmark.cc b/absl/container/inlined_vector_benchmark.cc
index a8368d418c53..df4d3ce5a2d1 100644
--- a/absl/container/inlined_vector_benchmark.cc
+++ b/absl/container/inlined_vector_benchmark.cc
@@ -383,6 +383,12 @@ constexpr size_t kBatchSize = 100;
   BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize);        \
   BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize)
 
+#define ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_FunctionTemplate, T)      \
+  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize, kLargeSize); \
+  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize, kSmallSize); \
+  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize, kLargeSize); \
+  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize, kSmallSize)
+
 template <typename T>
 using InlVec = absl::InlinedVector<T, kInlinedCapacity>;
 
@@ -525,6 +531,74 @@ void BM_ConstructFromMove(benchmark::State& state) {
 ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, TrivialType);
 ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, NontrivialType);
 
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_AssignSizeRef(benchmark::State& state) {
+  auto size = ToSize;
+  auto ref = T();
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t) {
+        benchmark::DoNotOptimize(size);
+        benchmark::DoNotOptimize(ref);
+        vec->assign(size, ref);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignSizeRef, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_AssignRange(benchmark::State& state) {
+  std::array<T, ToSize> arr{};
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t) {
+        benchmark::DoNotOptimize(arr);
+        vec->assign(arr.begin(), arr.end());
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignRange, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_AssignFromCopy(benchmark::State& state) {
+  InlVec<T> other_vec(ToSize);
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t) {
+        benchmark::DoNotOptimize(other_vec);
+        *vec = other_vec;
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromCopy, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromCopy, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_AssignFromMove(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  std::array<VecT, kBatchSize> vector_batch{};
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [&](InlVec<T>* vec, size_t i) {
+        vector_batch[i].clear();
+        vector_batch[i].resize(ToSize);
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t i) {
+        benchmark::DoNotOptimize(vector_batch[i]);
+        *vec = std::move(vector_batch[i]);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, NontrivialType);
+
 template <typename T, size_t FromSize>
 void BM_Clear(benchmark::State& state) {
   BatchedBenchmark<T>(