-rw-r--r--  absl/base/internal/spinlock_linux.inc                               |  17
-rw-r--r--  absl/base/internal/spinlock_wait.cc                                 |  13
-rw-r--r--  absl/container/BUILD.bazel                                          |  28
-rw-r--r--  absl/container/CMakeLists.txt                                       |  25
-rw-r--r--  absl/container/inlined_vector.h                                     | 178
-rw-r--r--  absl/container/internal/hashtablez_force_sampling.cc                |  24
-rw-r--r--  absl/container/internal/hashtablez_force_sampling_test.cc           |  60
-rw-r--r--  absl/container/internal/hashtablez_sampler.cc                       |  16
-rw-r--r--  absl/container/internal/hashtablez_sampler.h                        |  16
-rw-r--r--  absl/container/internal/hashtablez_sampler_force_weak_definition.cc |  27
-rw-r--r--  absl/container/internal/hashtablez_sampler_test.cc                  |  25
-rw-r--r--  absl/container/internal/raw_hash_set.h                              |   2
-rw-r--r--  absl/synchronization/internal/create_thread_identity.cc            |  26
-rw-r--r--  absl/time/clock.cc                                                  |   2
14 files changed, 362 insertions, 97 deletions
diff --git a/absl/base/internal/spinlock_linux.inc b/absl/base/internal/spinlock_linux.inc
index 94c861dc6ca2..3bbd4954f1ed 100644
--- a/absl/base/internal/spinlock_linux.inc
+++ b/absl/base/internal/spinlock_linux.inc
@@ -51,17 +51,12 @@ extern "C" {
 ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
     std::atomic<uint32_t> *w, uint32_t value, int loop,
     absl::base_internal::SchedulingMode) {
-  if (loop != 0) {
-    int save_errno = errno;
-    struct timespec tm;
-    tm.tv_sec = 0;
-    // Increase the delay; we expect (but do not rely on) explicit wakeups.
-    // We don't rely on explicit wakeups because we intentionally allow for
-    // a race on the kSpinLockSleeper bit.
-    tm.tv_nsec = 16 * absl::base_internal::SpinLockSuggestedDelayNS(loop);
-    syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm);
-    errno = save_errno;
-  }
+  int save_errno = errno;
+  struct timespec tm;
+  tm.tv_sec = 0;
+  tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop);
+  syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm);
+  errno = save_errno;
 }
 
 ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(std::atomic<uint32_t> *w,
diff --git a/absl/base/internal/spinlock_wait.cc b/absl/base/internal/spinlock_wait.cc
index 365a7939494f..7e4f435245a7 100644
--- a/absl/base/internal/spinlock_wait.cc
+++ b/absl/base/internal/spinlock_wait.cc
@@ -65,17 +65,14 @@ int SpinLockSuggestedDelayNS(int loop) {
   r = 0x5deece66dLL * r + 0xb;  // numbers from nrand48()
   delay_rand.store(r, std::memory_order_relaxed);
 
-  r <<= 16;  // 48-bit random number now in top 48-bits.
   if (loop < 0 || loop > 32) {  // limit loop to 0..32
     loop = 32;
   }
-  // loop>>3 cannot exceed 4 because loop cannot exceed 32.
-  // Select top 20..24 bits of lower 48 bits,
-  // giving approximately 0ms to 16ms.
-  // Mean is exponential in loop for first 32 iterations, then 8ms.
-  // The futex path multiplies this by 16, since we expect explicit wakeups
-  // almost always on that path.
-  return static_cast<int>(r >> (44 - (loop >> 3)));
+  const int kMinDelay = 128 << 10;  // 128us
+  // Double delay every 8 iterations, up to 16x (2ms).
+  int delay = kMinDelay << (loop / 8);
+  // Randomize in delay..2*delay range, for resulting 128us..4ms range.
+  return delay | ((delay - 1) & static_cast<int>(r));
 }
 
 }  // namespace base_internal
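The new backoff is easier to read in isolation: the futex path no longer multiplies the delay by 16, and SpinLockSuggestedDelayNS itself now produces an exponentially growing, jittered delay. Because `delay` is a power of two, `delay - 1` masks its low bits, so OR-ing in random bits yields a value uniform in [delay, 2 * delay). A standalone sketch (the free function and demo driver are illustrative, not part of the patch):

#include <cstdint>
#include <cstdio>

int SuggestedDelayNS(int loop, uint64_t r) {
  if (loop < 0 || loop > 32) loop = 32;     // clamp, as the patch does
  const int kMinDelayNS = 128 << 10;        // ~128us floor
  int delay = kMinDelayNS << (loop / 8);    // doubles every 8 spins, up to ~2ms
  return delay | ((delay - 1) & static_cast<int>(r));  // jitter: 128us..4ms
}

int main() {
  // With r == 0 only the deterministic exponential part is visible.
  const int loops[] = {0, 8, 16, 24, 32};
  for (int loop : loops) {
    std::printf("loop=%2d -> %d ns\n", loop, SuggestedDelayNS(loop, 0));
  }
}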
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
index 87fc7349618a..acdbc473d961 100644
--- a/absl/container/BUILD.bazel
+++ b/absl/container/BUILD.bazel
@@ -447,8 +447,20 @@ cc_library(
 )
 
 cc_library(
+    name = "hashtablez_force_sampling",
+    srcs = ["internal/hashtablez_force_sampling.cc"],
+    copts = ABSL_DEFAULT_COPTS,
+    deps = [
+        ":hashtablez_sampler",
+    ],
+)
+
+cc_library(
     name = "hashtablez_sampler",
-    srcs = ["internal/hashtablez_sampler.cc"],
+    srcs = [
+        "internal/hashtablez_sampler.cc",
+        "internal/hashtablez_sampler_force_weak_definition.cc",
+    ],
     hdrs = ["internal/hashtablez_sampler.h"],
     copts = ABSL_DEFAULT_COPTS,
     deps = [
@@ -476,6 +488,20 @@ cc_test(
     ],
 )
 
+cc_test(
+    name = "hashtablez_force_sampling_test",
+    srcs = ["internal/hashtablez_force_sampling_test.cc"],
+    tags = [
+        "no_test_darwin_x86_64",
+        "no_test_msvc_x64",
+    ],
+    deps = [
+        ":hashtablez_force_sampling",
+        ":hashtablez_sampler",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
 cc_library(
     name = "node_hash_policy",
     hdrs = ["internal/node_hash_policy.h"],
diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt
index 21c9cb95cfda..822388bd5518 100644
--- a/absl/container/CMakeLists.txt
+++ b/absl/container/CMakeLists.txt
@@ -444,6 +444,7 @@ absl_cc_library(
     "internal/hashtablez_sampler.h"
   SRCS
     "internal/hashtablez_sampler.cc"
+    "internal/hashtablez_sampler_force_weak_definition.cc"
  COPTS
    ${ABSL_DEFAULT_COPTS}
  DEPS
@@ -465,6 +466,30 @@ absl_cc_test(
 
 absl_cc_library(
   NAME
+    hashtablez_force_sampling
+  SRCS
+    "internal/hashtablez_force_sampling.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::base
+    absl::have_sse
+    absl::synchronization
+)
+
+absl_cc_test(
+  NAME
+    hashtablez_force_sampling_test
+  SRCS
+    "internal/hashtablez_force_sampling_test.cc"
+  DEPS
+    absl::hashtablez_force_sampling
+    absl::hashtablez_sampler
+    gmock_main
+)
+
+absl_cc_library(
+  NAME
     hashtable_debug
   HDRS
     "internal/hashtable_debug.h"
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index 649e904a9704..6c67862e2df1 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -82,10 +82,6 @@ class InlinedVector {
       std::forward_iterator_tag>;
 
   template <typename Iterator>
-  using DisableIfIntegral =
-      absl::enable_if_t<!std::is_integral<Iterator>::value>;
-
-  template <typename Iterator>
   using EnableIfAtLeastInputIterator =
       absl::enable_if_t<IsAtLeastInputIterator<Iterator>::value>;
 
@@ -139,24 +135,35 @@ class InlinedVector {
     InitAssign(n, v);
   }
 
-  // Creates an inlined vector of copies of the values in `init_list`.
-  InlinedVector(std::initializer_list<value_type> init_list,
+  // Creates an inlined vector of copies of the values in `list`.
+  InlinedVector(std::initializer_list<value_type> list,
                 const allocator_type& alloc = allocator_type())
       : allocator_and_tag_(alloc) {
-    AppendRange(init_list.begin(), init_list.end());
+    AppendForwardRange(list.begin(), list.end());
   }
 
   // Creates an inlined vector with elements constructed from the provided
-  // Iterator range [`first`, `last`).
+  // forward iterator range [`first`, `last`).
   //
   // NOTE: The `enable_if` prevents ambiguous interpretation between a call to
   // this constructor with two integral arguments and a call to the above
   // `InlinedVector(size_type, const_reference)` constructor.
-  template <typename InputIterator, DisableIfIntegral<InputIterator>* = nullptr>
+  template <typename ForwardIterator,
+            EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
+  InlinedVector(ForwardIterator first, ForwardIterator last,
+                const allocator_type& alloc = allocator_type())
+      : allocator_and_tag_(alloc) {
+    AppendForwardRange(first, last);
+  }
+
+  // Creates an inlined vector with elements constructed from the provided input
+  // iterator range [`first`, `last`).
+  template <typename InputIterator,
+            DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
   InlinedVector(InputIterator first, InputIterator last,
                 const allocator_type& alloc = allocator_type())
       : allocator_and_tag_(alloc) {
-    AppendRange(first, last);
+    AppendInputRange(first, last);
   }
 
   // Creates a copy of `other` using `other`'s allocator.
@@ -433,8 +440,8 @@ class InlinedVector {
   //
   // Replaces the contents of the inlined vector with copies of the elements in
   // the provided `std::initializer_list`.
-  InlinedVector& operator=(std::initializer_list<value_type> init_list) {
-    AssignRange(init_list.begin(), init_list.end());
+  InlinedVector& operator=(std::initializer_list<value_type> list) {
+    AssignForwardRange(list.begin(), list.end());
     return *this;
   }
 
@@ -510,15 +517,24 @@ class InlinedVector {
   // Overload of `InlinedVector::assign()` to replace the contents of the
   // inlined vector with copies of the values in the provided
   // `std::initializer_list`.
-  void assign(std::initializer_list<value_type> init_list) {
-    AssignRange(init_list.begin(), init_list.end());
+  void assign(std::initializer_list<value_type> list) {
+    AssignForwardRange(list.begin(), list.end());
+  }
+
+  // Overload of `InlinedVector::assign()` to replace the contents of the
+  // inlined vector with the forward iterator range [`first`, `last`).
+  template <typename ForwardIterator,
+            EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
+  void assign(ForwardIterator first, ForwardIterator last) {
+    AssignForwardRange(first, last);
   }
 
   // Overload of `InlinedVector::assign()` to replace the contents of the
-  // inlined vector with values constructed from the range [`first`, `last`).
-  template <typename InputIterator, DisableIfIntegral<InputIterator>* = nullptr>
+  // inlined vector with the input iterator range [`first`, `last`).
+  template <typename InputIterator,
+            DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
   void assign(InputIterator first, InputIterator last) {
-    AssignRange(first, last);
+    AssignInputRange(first, last);
  }
 
   // `InlinedVector::resize()`
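The `EnableIfAtLeastForwardIterator`/`DisableIfAtLeastForwardIterator` pair replaces the old integral-vs-not test with a dispatch on iterator category: multi-pass ranges can be sized up front, single-pass ranges cannot. A minimal sketch of the technique, with illustrative names rather than Abseil's internals:

#include <iostream>
#include <iterator>
#include <sstream>
#include <type_traits>
#include <vector>

// True when Iterator's category is forward_iterator_tag or stronger.
template <typename Iterator>
using IsAtLeastForward = std::is_convertible<
    typename std::iterator_traits<Iterator>::iterator_category,
    std::forward_iterator_tag>;

template <typename Iterator,
          typename std::enable_if<IsAtLeastForward<Iterator>::value,
                                  int>::type = 0>
const char* Classify(Iterator, Iterator) {
  return "forward or stronger: size the container up front";
}

template <typename Iterator,
          typename std::enable_if<!IsAtLeastForward<Iterator>::value,
                                  int>::type = 0>
const char* Classify(Iterator, Iterator) {
  return "input only: single pass, append as you go";
}

int main() {
  std::vector<int> v = {1, 2, 3};
  std::istringstream stream("4 5 6");
  std::istream_iterator<int> in(stream), end;
  std::cout << Classify(v.begin(), v.end()) << "\n";  // forward path
  std::cout << Classify(in, end) << "\n";             // input path
}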
@@ -569,62 +585,70 @@ class InlinedVector {
 
   // `InlinedVector::insert()`
   //
-  // Copies `v` into `position`, returning an `iterator` pointing to the newly
+  // Copies `v` into `pos`, returning an `iterator` pointing to the newly
   // inserted element.
-  iterator insert(const_iterator position, const_reference v) {
-    return emplace(position, v);
+  iterator insert(const_iterator pos, const_reference v) {
+    return emplace(pos, v);
   }
 
-  // Overload of `InlinedVector::insert()` for moving `v` into `position`,
-  // returning an iterator pointing to the newly inserted element.
-  iterator insert(const_iterator position, rvalue_reference v) {
-    return emplace(position, std::move(v));
+  // Overload of `InlinedVector::insert()` for moving `v` into `pos`, returning
+  // an iterator pointing to the newly inserted element.
+  iterator insert(const_iterator pos, rvalue_reference v) {
+    return emplace(pos, std::move(v));
   }
 
   // Overload of `InlinedVector::insert()` for inserting `n` contiguous copies
-  // of `v` starting at `position`. Returns an `iterator` pointing to the first
-  // of the newly inserted elements.
-  iterator insert(const_iterator position, size_type n, const_reference v) {
-    return InsertWithCount(position, n, v);
+  // of `v` starting at `pos`. Returns an `iterator` pointing to the first of
+  // the newly inserted elements.
+  iterator insert(const_iterator pos, size_type n, const_reference v) {
+    return InsertWithCount(pos, n, v);
   }
 
   // Overload of `InlinedVector::insert()` for copying the contents of the
-  // `std::initializer_list` into the vector starting at `position`. Returns an
+  // `std::initializer_list` into the vector starting at `pos`. Returns an
   // `iterator` pointing to the first of the newly inserted elements.
-  iterator insert(const_iterator position,
-                  std::initializer_list<value_type> init_list) {
-    return insert(position, init_list.begin(), init_list.end());
+  iterator insert(const_iterator pos, std::initializer_list<value_type> list) {
+    return insert(pos, list.begin(), list.end());
   }
 
   // Overload of `InlinedVector::insert()` for inserting elements constructed
-  // from the range [`first`, `last`). Returns an `iterator` pointing to the
-  // first of the newly inserted elements.
+  // from the forward iterator range [`first`, `last`). Returns an `iterator`
+  // pointing to the first of the newly inserted elements.
   //
   // NOTE: The `enable_if` is intended to disambiguate the two three-argument
   // overloads of `insert()`.
+  template <typename ForwardIterator,
+            EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
+  iterator insert(const_iterator pos, ForwardIterator first,
+                  ForwardIterator last) {
+    return InsertWithForwardRange(pos, first, last);
+  }
+
+  // Overload of `InlinedVector::insert()` for inserting elements constructed
+  // from the input iterator range [`first`, `last`). Returns an `iterator`
+  // pointing to the first of the newly inserted elements.
   template <typename InputIterator,
-            EnableIfAtLeastInputIterator<InputIterator>* = nullptr>
-  iterator insert(const_iterator position, InputIterator first,
-                  InputIterator last) {
-    return InsertWithRange(position, first, last);
+            DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
+  iterator insert(const_iterator pos, InputIterator first, InputIterator last) {
+    return InsertWithInputRange(pos, first, last);
   }
 
   // `InlinedVector::emplace()`
   //
-  // Constructs and inserts an object in the inlined vector at the given
-  // `position`, returning an `iterator` pointing to the newly emplaced element.
+  // Constructs and inserts an object in the inlined vector at the given `pos`,
+  // returning an `iterator` pointing to the newly emplaced element.
   template <typename... Args>
-  iterator emplace(const_iterator position, Args&&... args) {
-    assert(position >= begin());
-    assert(position <= end());
-    if (ABSL_PREDICT_FALSE(position == end())) {
+  iterator emplace(const_iterator pos, Args&&... args) {
+    assert(pos >= begin());
+    assert(pos <= end());
+    if (ABSL_PREDICT_FALSE(pos == end())) {
       emplace_back(std::forward<Args>(args)...);
       return end() - 1;
     }
 
     T new_t = T(std::forward<Args>(args)...);
-    auto range = ShiftRight(position, 1);
+    auto range = ShiftRight(pos, 1);
     if (range.first == range.second) {
       // constructing into uninitialized memory
       Construct(range.first, std::move(new_t));
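The NOTE above is about a real ambiguity: with two integral arguments, both `insert(pos, size_type, const_reference)` and an unconstrained iterator-pair template could match. `std::vector` is constrained the same way, so it can stand in for a quick demonstration:

#include <iostream>
#include <vector>

int main() {
  std::vector<int> v = {1, 2, 3};

  // Matches insert(pos, count, value), not the iterator-pair template, even
  // though 2 and 65 are both ints: the template requires real iterators.
  v.insert(v.begin(), 2, 65);  // v == {65, 65, 1, 2, 3}

  std::vector<int> src = {7, 8};
  v.insert(v.end(), src.begin(), src.end());  // iterator pair: v gains 7, 8

  for (int x : v) std::cout << x << ' ';  // 65 65 1 2 3 7 8
}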
@@ -689,18 +713,18 @@ class InlinedVector {
 
   // `InlinedVector::erase()`
   //
-  // Erases the element at `position` of the inlined vector, returning an
-  // `iterator` pointing to the first element following the erased element.
+  // Erases the element at `pos` of the inlined vector, returning an `iterator`
+  // pointing to the first element following the erased element.
   //
   // NOTE: May return the end iterator, which is not dereferencable.
-  iterator erase(const_iterator position) {
-    assert(position >= begin());
-    assert(position < end());
+  iterator erase(const_iterator pos) {
+    assert(pos >= begin());
+    assert(pos < end());
 
-    iterator pos = const_cast<iterator>(position);
-    std::move(pos + 1, end(), pos);
+    iterator position = const_cast<iterator>(pos);
+    std::move(position + 1, end(), position);
     pop_back();
-    return pos;
+    return position;
   }
 
   // Overload of `InlinedVector::erase()` for erasing all elements in the
@@ -1086,15 +1110,18 @@ class InlinedVector {
     }
   }
 
-  template <typename ForwardIterator,
-            EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
-  void AssignRange(ForwardIterator first, ForwardIterator last) {
+  template <typename ForwardIterator>
+  void AssignForwardRange(ForwardIterator first, ForwardIterator last) {
+    static_assert(IsAtLeastForwardIterator<ForwardIterator>::value, "");
+
     auto length = std::distance(first, last);
+
     // Prefer reassignment to copy construction for elements.
     if (static_cast<size_type>(length) <= size()) {
       erase(std::copy(first, last, begin()), end());
       return;
     }
+
     reserve(length);
     iterator out = begin();
     for (; out != end(); ++first, ++out) *out = *first;
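The forward-range assign above overwrites elements that are already alive and only constructs (or erases) the tail, rather than clearing and rebuilding everything. A sketch of the same strategy written against `std::vector` (function name is illustrative):

#include <algorithm>
#include <iostream>
#include <iterator>
#include <vector>

template <typename Container, typename ForwardIterator>
void AssignForwardRangeSketch(Container& c, ForwardIterator first,
                              ForwardIterator last) {
  // Legal for forward iterators: multiple passes are allowed.
  auto length = std::distance(first, last);
  if (static_cast<typename Container::size_type>(length) <= c.size()) {
    // Overwrite a prefix, then trim the leftover suffix.
    c.erase(std::copy(first, last, c.begin()), c.end());
    return;
  }
  c.reserve(length);
  auto out = c.begin();
  for (; out != c.end(); ++first, ++out) *out = *first;  // reassign prefix
  std::copy(first, last, std::back_inserter(c));         // construct the rest
}

int main() {
  std::vector<int> dst = {9, 9, 9, 9};
  const std::vector<int> src = {1, 2};
  AssignForwardRangeSketch(dst, src.begin(), src.end());
  for (int x : dst) std::cout << x << ' ';  // 1 2
}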
@@ -1107,9 +1134,10 @@ class InlinedVector {
     }
   }
 
-  template <typename InputIterator,
-            DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
-  void AssignRange(InputIterator first, InputIterator last) {
+  template <typename InputIterator>
+  void AssignInputRange(InputIterator first, InputIterator last) {
+    static_assert(IsAtLeastInputIterator<InputIterator>::value, "");
+
     // Optimized to avoid reallocation.
     // Prefer reassignment to copy construction for elements.
     iterator out = begin();
@@ -1120,9 +1148,10 @@ class InlinedVector {
     std::copy(first, last, std::back_inserter(*this));
   }
 
-  template <typename ForwardIterator,
-            EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
-  void AppendRange(ForwardIterator first, ForwardIterator last) {
+  template <typename ForwardIterator>
+  void AppendForwardRange(ForwardIterator first, ForwardIterator last) {
+    static_assert(IsAtLeastForwardIterator<ForwardIterator>::value, "");
+
     auto length = std::distance(first, last);
     reserve(size() + length);
     if (allocated()) {
@@ -1134,9 +1163,10 @@ class InlinedVector {
     }
   }
 
-  template <typename InputIterator,
-            DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
-  void AppendRange(InputIterator first, InputIterator last) {
+  template <typename InputIterator>
+  void AppendInputRange(InputIterator first, InputIterator last) {
+    static_assert(IsAtLeastInputIterator<InputIterator>::value, "");
+
     std::copy(first, last, std::back_inserter(*this));
   }
 
@@ -1153,11 +1183,12 @@ class InlinedVector {
     return it_pair.first;
   }
 
-  template <typename ForwardIterator,
-            EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
-  iterator InsertWithRange(const_iterator position, ForwardIterator first,
-                           ForwardIterator last) {
+  template <typename ForwardIterator>
+  iterator InsertWithForwardRange(const_iterator position,
+                                  ForwardIterator first, ForwardIterator last) {
+    static_assert(IsAtLeastForwardIterator<ForwardIterator>::value, "");
     assert(position >= begin() && position <= end());
+
     if (ABSL_PREDICT_FALSE(first == last))
       return const_cast<iterator>(position);
@@ -1170,11 +1201,12 @@ class InlinedVector {
     return it_pair.first;
   }
 
-  template <typename InputIterator,
-            DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
-  iterator InsertWithRange(const_iterator position, InputIterator first,
-                           InputIterator last) {
+  template <typename InputIterator>
+  iterator InsertWithInputRange(const_iterator position, InputIterator first,
+                                InputIterator last) {
+    static_assert(IsAtLeastInputIterator<InputIterator>::value, "");
    assert(position >= begin() && position <= end());
+
     size_type index = position - cbegin();
     size_type i = index;
     while (first != last) insert(begin() + i++, *first++);
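The input-iterator paths above cannot call `std::distance()` or `reserve()` up front, because a single-pass iterator is consumed by traversal; they must pull one element at a time through `std::back_inserter` or repeated `insert()`. For example, inserting from an `std::istream_iterator` (shown here with `std::vector`, which follows the same iterator-category rules):

#include <iostream>
#include <iterator>
#include <sstream>
#include <vector>

int main() {
  std::vector<int> v = {1, 5};
  std::istringstream stream("2 3 4");
  std::istream_iterator<int> first(stream), last;
  v.insert(v.begin() + 1, first, last);   // reads the stream one int at a time
  for (int x : v) std::cout << x << ' ';  // 1 2 3 4 5
}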
+extern "C" const bool kAbslContainerInternalSampleEverything = true; + +} // namespace container_internal +} // namespace absl diff --git a/absl/container/internal/hashtablez_force_sampling_test.cc b/absl/container/internal/hashtablez_force_sampling_test.cc new file mode 100644 index 000000000000..9ff1046a9ad3 --- /dev/null +++ b/absl/container/internal/hashtablez_force_sampling_test.cc @@ -0,0 +1,60 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include <cstddef> + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/hashtablez_sampler.h" + +namespace absl { +namespace container_internal { + +class HashtablezInfoHandlePeer { + public: + static bool IsSampled(const HashtablezInfoHandle& h) { + return h.info_ != nullptr; + } +}; + +namespace { + +bool samples[3]{true, true, true}; + +// We do this test in a global object to test that this works even before main. +struct Global { + Global() { + // By default it is sampled. + samples[0] = HashtablezInfoHandlePeer::IsSampled(Sample()); + + // Even with a large parameter, it is sampled. + SetHashtablezSampleParameter(100); + samples[1] = HashtablezInfoHandlePeer::IsSampled(Sample()); + + // Even if we turn it off, it is still sampled. 
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index 1ba9564513e2..7c411140be49 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -116,6 +116,11 @@ HashtablezSampler& HashtablezSampler::Global() {
   return *sampler;
 }
 
+HashtablezSampler::DisposeCallback HashtablezSampler::SetDisposeCallback(
+    DisposeCallback f) {
+  return dispose_.exchange(f, std::memory_order_relaxed);
+}
+
 HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
 
 HashtablezInfo::~HashtablezInfo() = default;
@@ -138,7 +143,7 @@ void HashtablezInfo::PrepareForSampling() {
 }
 
 HashtablezSampler::HashtablezSampler()
-    : dropped_samples_(0), size_estimate_(0), all_(nullptr) {
+    : dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) {
   absl::MutexLock l(&graveyard_.init_mu);
   graveyard_.dead = &graveyard_;
 }
@@ -161,6 +166,10 @@ void HashtablezSampler::PushNew(HashtablezInfo* sample) {
 }
 
 void HashtablezSampler::PushDead(HashtablezInfo* sample) {
+  if (auto* dispose = dispose_.load(std::memory_order_relaxed)) {
+    dispose(*sample);
+  }
+
   absl::MutexLock graveyard_lock(&graveyard_.init_mu);
   absl::MutexLock sample_lock(&sample->init_mu);
   sample->dead = graveyard_.dead;
@@ -220,6 +229,11 @@ int64_t HashtablezSampler::Iterate(
 }
 
 HashtablezInfo* SampleSlow(int64_t* next_sample) {
+  if (kAbslContainerInternalSampleEverything) {
+    *next_sample = 1;
+    return HashtablezSampler::Global().Register();
+  }
+
   bool first = *next_sample < 0;
   *next_sample = GetGeometricVariable(
       g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
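`SetDisposeCallback()` stores a plain function pointer in a `std::atomic` and swaps it with `exchange()`, so the caller gets the previous callback back and `PushDead()` can read it without taking a lock. A reduced sketch of that mechanism with illustrative names (the real `HashtablezSampler` state is elided):

#include <atomic>
#include <cstdio>

struct Sample { int size; };

using DisposeCallback = void (*)(const Sample&);
std::atomic<DisposeCallback> dispose{nullptr};

DisposeCallback SetDisposeCallback(DisposeCallback f) {
  // exchange() returns the previous callback so callers can restore it.
  return dispose.exchange(f, std::memory_order_relaxed);
}

void PushDead(Sample* s) {
  if (auto* cb = dispose.load(std::memory_order_relaxed)) {
    (*cb)(*s);  // observe the sample before it is recycled
  }
  // ... recycle *s ...
}

int main() {
  DisposeCallback prev = SetDisposeCallback(
      +[](const Sample& s) { std::printf("disposed size=%d\n", s.size); });
  Sample s{42};
  PushDead(&s);              // invokes the callback once
  SetDisposeCallback(prev);  // restore the previous callback (nullptr here)
}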
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index c42f1842ffe7..126a0ade431e 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -183,6 +183,13 @@ class HashtablezSampler {
   // Unregisters the sample.
   void Unregister(HashtablezInfo* sample);
 
+  // The dispose callback will be called on all samples the moment they are
+  // being unregistered. Only affects samples that are unregistered after the
+  // callback has been set.
+  // Returns the previous callback.
+  using DisposeCallback = void (*)(const HashtablezInfo&);
+  DisposeCallback SetDisposeCallback(DisposeCallback f);
+
   // Iterates over all the registered `StackInfo`s.  Returning the number of
   // samples that have been dropped.
   int64_t Iterate(const std::function<void(const HashtablezInfo& stack)>& f);
@@ -222,6 +229,8 @@ class HashtablezSampler {
   //
   std::atomic<HashtablezInfo*> all_;
   HashtablezInfo graveyard_;
+
+  std::atomic<DisposeCallback> dispose_;
 };
 
 // Enables or disables sampling for Swiss tables.
@@ -233,6 +242,13 @@ void SetHashtablezSampleParameter(int32_t rate);
 // Sets a soft max for the number of samples that will be kept.
 void SetHashtablezMaxSamples(int32_t max);
 
+// Configuration override.
+// This allows process-wide sampling without depending on order of
+// initialization of static storage duration objects.
+// The definition of this constant is weak, which allows us to inject a
+// different value for it at link time.
+extern "C" const bool kAbslContainerInternalSampleEverything;
+
 }  // namespace container_internal
 }  // namespace absl
diff --git a/absl/container/internal/hashtablez_sampler_force_weak_definition.cc b/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
new file mode 100644
index 000000000000..38a3f2601c08
--- /dev/null
+++ b/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
@@ -0,0 +1,27 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hashtablez_sampler.h"
+
+#include "absl/base/attributes.h"
+
+namespace absl {
+namespace container_internal {
+
+// See hashtablez_sampler.h for details.
+extern "C" ABSL_ATTRIBUTE_WEAK const bool
+    kAbslContainerInternalSampleEverything = false;
+
+}  // namespace container_internal
+}  // namespace absl
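The pattern in these two files: the weak definition is the process-wide default, and any translation unit that provides a strong definition of the same `extern "C"` symbol wins at link time, before any static initializer runs. The `extern "C"` linkage pins an unmangled symbol name, so the default and the override refer to the same symbol regardless of namespace. A compilable sketch (GCC/Clang attribute syntax; `kForceFeature` is an illustrative name):

#include <cstdio>

// Weak default, analogous to hashtablez_sampler_force_weak_definition.cc.
// Abseil spells the attribute as ABSL_ATTRIBUTE_WEAK.
extern "C" __attribute__((weak)) const bool kForceFeature = false;

int main() {
  // Prints "forced: 0" by default. Linking an object file that defines
  //   extern "C" const bool kForceFeature = true;
  // flips this to 1 process-wide, with no initialization-order issues,
  // because the linker (not runtime code) picks the strong definition.
  std::printf("forced: %d\n", kForceFeature);
}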
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
index 31e7641a1222..f9ee941a015c 100644
--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -302,6 +302,31 @@ TEST(HashtablezSamplerTest, MultiThreaded) {
   stop.Notify();
 }
 
+TEST(HashtablezSamplerTest, Callback) {
+  HashtablezSampler sampler;
+
+  auto* info1 = Register(&sampler, 1);
+  auto* info2 = Register(&sampler, 2);
+
+  static const HashtablezInfo* expected;
+
+  auto callback = [](const HashtablezInfo& info) {
+    // We can't use `info` outside of this callback because the object will be
+    // disposed as soon as we return from here.
+    EXPECT_EQ(&info, expected);
+  };
+
+  // Set the callback.
+  EXPECT_EQ(sampler.SetDisposeCallback(callback), nullptr);
+  expected = info1;
+  sampler.Unregister(info1);
+
+  // Unset the callback.
+  EXPECT_EQ(callback, sampler.SetDisposeCallback(nullptr));
+  expected = nullptr;  // no more calls.
+  sampler.Unregister(info2);
+}
+
 }  // namespace
 }  // namespace container_internal
 }  // namespace absl
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 8f6469fff7f3..8e3fa02d7e42 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -1035,7 +1035,7 @@ class raw_hash_set {
   size_t capacity() const { return capacity_; }
   size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
 
-  void clear() {
+  ABSL_ATTRIBUTE_REINITIALIZES void clear() {
     // Iterating over this container is O(bucket_count()). When bucket_count()
     // is much greater than size(), iteration becomes prohibitively expensive.
     // For clear() it is more important to reuse the allocated array when the
diff --git a/absl/synchronization/internal/create_thread_identity.cc b/absl/synchronization/internal/create_thread_identity.cc
index e7a65cd884a1..60be25c9f4a0 100644
--- a/absl/synchronization/internal/create_thread_identity.cc
+++ b/absl/synchronization/internal/create_thread_identity.cc
@@ -67,6 +67,30 @@ static intptr_t RoundUp(intptr_t addr, intptr_t align) {
   return (addr + align - 1) & ~(align - 1);
 }
 
+static void ResetThreadIdentity(base_internal::ThreadIdentity* identity) {
+  base_internal::PerThreadSynch* pts = &identity->per_thread_synch;
+  pts->next = nullptr;
+  pts->skip = nullptr;
+  pts->may_skip = false;
+  pts->waitp = nullptr;
+  pts->suppress_fatal_errors = false;
+  pts->readers = 0;
+  pts->priority = 0;
+  pts->next_priority_read_cycles = 0;
+  pts->state.store(base_internal::PerThreadSynch::State::kAvailable,
+                   std::memory_order_relaxed);
+  pts->maybe_unlocking = false;
+  pts->wake = false;
+  pts->cond_waiter = false;
+  pts->all_locks = nullptr;
+  identity->waiter_state = {};
+  identity->blocked_count_ptr = nullptr;
+  identity->ticker.store(0, std::memory_order_relaxed);
+  identity->wait_start.store(0, std::memory_order_relaxed);
+  identity->is_idle.store(false, std::memory_order_relaxed);
+  identity->next = nullptr;
+}
+
 static base_internal::ThreadIdentity* NewThreadIdentity() {
   base_internal::ThreadIdentity* identity = nullptr;
 
@@ -90,7 +114,7 @@ static base_internal::ThreadIdentity* NewThreadIdentity() {
       RoundUp(reinterpret_cast<intptr_t>(allocation),
               base_internal::PerThreadSynch::kAlignment));
   }
-  memset(identity, 0, sizeof(*identity));
+  ResetThreadIdentity(identity);
   return identity;
 }
 
diff --git a/absl/time/clock.cc b/absl/time/clock.cc
index 74ee1401b98c..4863f643c2e0 100644
--- a/absl/time/clock.cc
+++ b/absl/time/clock.cc
@@ -379,7 +379,7 @@ static uint64_t UpdateLastSample(
 //
 // Manually mark this 'noinline' to minimize stack frame size of the fast
 // path.  Without this, sometimes a compiler may inline this big block of code
-// into the fast past.  That causes lots of register spills and reloads that
+// into the fast path.  That causes lots of register spills and reloads that
 // are unnecessary unless the slow path is taken.
 //
 // TODO(absl-team): Remove this attribute when our compiler is smart enough
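On the `raw_hash_set` change: `ABSL_ATTRIBUTE_REINITIALIZES` expands to `[[clang::reinitializes]]` where Clang supports it, telling clang-tidy's `bugprone-use-after-move` check that calling `clear()` returns a moved-from object to a known state. A sketch of the same idea with a hypothetical macro name standing in for Abseil's:

#include <utility>
#include <vector>

// MY_ATTRIBUTE_REINITIALIZES is a stand-in; the expansion below matches how
// such a macro is typically defined.
#if defined(__clang__) && defined(__has_cpp_attribute)
#if __has_cpp_attribute(clang::reinitializes)
#define MY_ATTRIBUTE_REINITIALIZES [[clang::reinitializes]]
#endif
#endif
#ifndef MY_ATTRIBUTE_REINITIALIZES
#define MY_ATTRIBUTE_REINITIALIZES
#endif

struct Buffer {
  std::vector<int> data;
  // Marks clear() as restoring a moved-from Buffer to a known state.
  MY_ATTRIBUTE_REINITIALIZES void clear() { data.clear(); }
};

int main() {
  Buffer a, b;
  a.data = {1, 2, 3};
  b = std::move(a);
  a.clear();            // without the attribute, clang-tidy's
  a.data.push_back(4);  // bugprone-use-after-move would flag this use of `a`
}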