Diffstat (limited to 'absl/container/internal')
-rw-r--r--  absl/container/internal/counting_allocator.h   79
-rw-r--r--  absl/container/internal/hashtablez_sampler.cc    4
-rw-r--r--  absl/container/internal/hashtablez_sampler.h    11
-rw-r--r--  absl/container/internal/raw_hash_set.h           6
4 files changed, 95 insertions, 5 deletions
diff --git a/absl/container/internal/counting_allocator.h b/absl/container/internal/counting_allocator.h
new file mode 100644
index 000000000000..f4e652d9600e
--- /dev/null
+++ b/absl/container/internal/counting_allocator.h
@@ -0,0 +1,79 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
+#define ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
+
+#include <cassert>
+#include <cstdint>
+#include <memory>
+
+namespace absl {
+namespace container_internal {
+
+// This is a stateful allocator, but the state lives outside of the
+// allocator (in whatever test is using the allocator). This is odd
+// but helps in tests where the allocator is propagated into nested
+// containers - that chain of allocators uses the same state and is
+// thus easier to query for aggregate allocation information.
+template <typename T>
+class CountingAllocator : public std::allocator<T> {
+ public:
+  using Alloc = std::allocator<T>;
+  using pointer = typename Alloc::pointer;
+  using size_type = typename Alloc::size_type;
+
+  CountingAllocator() : bytes_used_(nullptr) {}
+  explicit CountingAllocator(int64_t* b) : bytes_used_(b) {}
+
+  template <typename U>
+  CountingAllocator(const CountingAllocator<U>& x)
+      : Alloc(x), bytes_used_(x.bytes_used_) {}
+
+  pointer allocate(size_type n,
+                   std::allocator<void>::const_pointer hint = nullptr) {
+    assert(bytes_used_ != nullptr);
+    *bytes_used_ += n * sizeof(T);
+    return Alloc::allocate(n, hint);
+  }
+
+  void deallocate(pointer p, size_type n) {
+    Alloc::deallocate(p, n);
+    assert(bytes_used_ != nullptr);
+    *bytes_used_ -= n * sizeof(T);
+  }
+
+  template<typename U>
+  class rebind {
+   public:
+    using other = CountingAllocator<U>;
+  };
+
+  friend bool operator==(const CountingAllocator& a,
+                         const CountingAllocator& b) {
+    return a.bytes_used_ == b.bytes_used_;
+  }
+
+  friend bool operator!=(const CountingAllocator& a,
+                         const CountingAllocator& b) {
+    return !(a == b);
+  }
+
+  int64_t* bytes_used_;
+};
+
+}  // namespace container_internal
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
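
For readers of the new counting_allocator.h above, here is a minimal usage sketch (not part of the commit). Because the counter lives outside the allocator, every copy made as the allocator propagates into nested containers updates the same int64_t, so a test can read aggregate allocation totals from one place. The names below (Example, bytes, v) and the use of std::vector are illustrative assumptions, and the sketch assumes a pre-C++20 standard library, matching the std::allocator typedefs the header relies on.

// Illustrative sketch, not part of this commit.
#include <cstdint>
#include <vector>

#include "absl/container/internal/counting_allocator.h"

void Example() {
  int64_t bytes = 0;
  absl::container_internal::CountingAllocator<int> alloc(&bytes);
  std::vector<int, absl::container_internal::CountingAllocator<int>> v(alloc);

  v.reserve(100);     // allocate() runs: bytes is now at least 100 * sizeof(int)
  v.clear();
  v.shrink_to_fit();  // non-binding, but typically deallocates, moving bytes back toward 0
}
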
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index 6cc10c201c34..e588f24c25c5 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -238,6 +238,10 @@ HashtablezInfo* SampleSlow(int64_t* next_sample) {
   return HashtablezSampler::Global().Register();
 }
 
+#if ABSL_PER_THREAD_TLS == 1
+ABSL_PER_THREAD_TLS_KEYWORD int64_t next_sample = 0;
+#endif  // ABSL_PER_THREAD_TLS == 1
+
 void UnsampleSlow(HashtablezInfo* info) {
   HashtablezSampler::Global().Unregister(info);
 }
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index 4aea3ffa67de..c42f1842ffe7 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -31,6 +31,7 @@
 #include <memory>
 #include <vector>
 
+#include "absl/base/internal/per_thread_tls.h"
 #include "absl/base/optimization.h"
 #include "absl/synchronization/mutex.h"
 #include "absl/utility/utility.h"
@@ -147,14 +148,16 @@ class HashtablezInfoHandle {
 
 // Returns an RAII sampling handle that manages registration and unregistration
 // with the global sampler.
+#if ABSL_PER_THREAD_TLS == 1
+extern ABSL_PER_THREAD_TLS_KEYWORD int64_t next_sample;
+#endif  // ABSL_PER_THREAD_TLS == 1
+
 inline HashtablezInfoHandle Sample() {
-#if ABSL_HAVE_THREAD_LOCAL
-  thread_local int64_t next_sample = 0;
-#else   // ABSL_HAVE_THREAD_LOCAL
+#if ABSL_PER_THREAD_TLS == 0
   static auto* mu = new absl::Mutex;
   static int64_t next_sample = 0;
   absl::MutexLock l(mu);
-#endif  // ABSL_HAVE_THREAD_LOCAL
+#endif  // ABSL_PER_THREAD_TLS == 0
 
   if (ABSL_PREDICT_TRUE(--next_sample > 0)) {
     return HashtablezInfoHandle(nullptr);
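
For context, and not part of the commit: the header change above moves the per-thread countdown counter next_sample out of the inline Sample() body so it can be declared with ABSL_PER_THREAD_TLS_KEYWORD and defined once in the .cc file. The sketch below shows only the countdown-sampling pattern that Sample() implements; ShouldSample() and the fixed reset period are hypothetical stand-ins for the real SampleSlow() logic, and plain thread_local is used where the commit uses the portability macro.

// Illustrative sketch, not part of this commit.
#include <cstdint>

namespace sampling_sketch {

// Each thread keeps its own countdown; starting at 0 forces the slow path
// on a thread's first use.
thread_local int64_t next_sample = 0;

bool ShouldSample() {
  // Fast path taken almost every time: one decrement and a branch.
  if (--next_sample > 0) {
    return false;
  }
  // Slow path: the real code calls SampleSlow(), which draws a new period
  // from a distribution and registers a HashtablezInfo. Here we just reset
  // to a hypothetical fixed period.
  next_sample = 1 << 10;
  return true;
}

}  // namespace sampling_sketch
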
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 34d69d7af2fc..8cdea4ec8cd4 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -785,7 +785,11 @@ class raw_hash_set {
     }
 
     ctrl_t* ctrl_ = nullptr;
-    slot_type* slot_;
+    // To avoid uninitialized member warnings, put slot_ in an anonymous union.
+    // The member is not initialized on singleton and end iterators.
+    union {
+      slot_type* slot_;
+    };
   };
 
   class const_iterator {
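
For context, and not part of the commit: members of an anonymous union are not default-initialized, so wrapping slot_ this way lets constructors of end and singleton iterators leave it unset without triggering uninitialized-member warnings. Below is a standalone sketch of the same trick; IterLike and the int* members are hypothetical names that mirror the iterator above.

// Illustrative sketch, not part of this commit.
struct IterLike {
  IterLike() {}                     // "end" object: slot_ is deliberately left unset
  explicit IterLike(int* s) : slot_(s) {}

  int* ctrl_ = nullptr;             // always initialized, like ctrl_ in the iterator above
  // Placing slot_ in an anonymous union suppresses its default initialization
  // (and the warnings about it) in constructors that never touch it.
  union {
    int* slot_;                     // only meaningful when ctrl_ is non-null
  };
};
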