Diffstat (limited to 'absl')
 -rw-r--r--  absl/container/flat_hash_map.h                  8
 -rw-r--r--  absl/container/flat_hash_set.h                  8
 -rw-r--r--  absl/container/inlined_vector.h                28
 -rw-r--r--  absl/container/inlined_vector_benchmark.cc    118
 -rw-r--r--  absl/container/internal/inlined_vector.h       16
 -rw-r--r--  absl/container/internal/raw_hash_set_test.cc   19
 -rw-r--r--  absl/meta/type_traits.h                        36
 -rw-r--r--  absl/synchronization/mutex.cc                 107
 8 files changed, 195 insertions(+), 145 deletions(-)
diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h
index dfc497b564b4..00cc4dcff9fc 100644
--- a/absl/container/flat_hash_map.h
+++ b/absl/container/flat_hash_map.h
@@ -219,8 +219,12 @@ class flat_hash_map : public absl::container_internal::raw_hash_map<
   // Erases the element at `position` of the `flat_hash_map`, returning
   // `void`.
   //
-  // NOTE: this return behavior is different than that of STL containers in
-  // general and `std::unordered_map` in particular.
+  // NOTE: returning `void` in this case is different than that of STL
+  // containers in general and `std::unordered_map` in particular (which
+  // return an iterator to the element following the erased element). If that
+  // iterator is needed, simply post increment the iterator:
+  //
+  //   map.erase(it++);
   //
   // iterator erase(const_iterator first, const_iterator last):
   //
diff --git a/absl/container/flat_hash_set.h b/absl/container/flat_hash_set.h
index f27f174c1745..6bf51833fca6 100644
--- a/absl/container/flat_hash_set.h
+++ b/absl/container/flat_hash_set.h
@@ -212,8 +212,12 @@ class flat_hash_set
   // Erases the element at `position` of the `flat_hash_set`, returning
   // `void`.
   //
-  // NOTE: this return behavior is different than that of STL containers in
-  // general and `std::unordered_map` in particular.
+  // NOTE: returning `void` in this case is different than that of STL
+  // containers in general and `std::unordered_set` in particular (which
+  // return an iterator to the element following the erased element). If that
+  // iterator is needed, simply post increment the iterator:
+  //
+  //   set.erase(it++);
   //
   // iterator erase(const_iterator first, const_iterator last):
   //
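The erase(it++) idiom these new comments document, as a minimal sketch; the function name and the filter predicate are illustrative, not part of the commit:

#include "absl/container/flat_hash_map.h"

// Erase while iterating: advance `it` before erase runs, so the loop never
// touches an invalidated iterator. `erase(const_iterator)` returns `void`.
void DropNegatives(absl::flat_hash_map<int, int> &map) {
  for (auto it = map.begin(); it != map.end();) {
    if (it->second < 0) {
      map.erase(it++);  // post increment: `it` already points past the victim
    } else {
      ++it;
    }
  }
}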
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index 683087502295..77988058e913 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -66,7 +66,10 @@ namespace absl {
 // designed to cover the same API footprint as covered by `std::vector`.
 template <typename T, size_t N, typename A = std::allocator<T>>
 class InlinedVector {
-  using Storage = inlined_vector_internal::InlinedVectorStorage<T, N, A>;
+  static_assert(
+      N > 0, "InlinedVector cannot be instantiated with `0` inlined elements.");
+
+  using Storage = inlined_vector_internal::Storage<InlinedVector>;
   using Tag = typename Storage::Tag;
   using AllocatorAndTag = typename Storage::AllocatorAndTag;
   using Allocation = typename Storage::Allocation;
@@ -283,8 +286,7 @@ class InlinedVector {
   // will no longer be inlined and `capacity()` will equal its capacity on the
   // allocated heap.
   size_type capacity() const noexcept {
-    return allocated() ? allocation().capacity()
-                       : Storage::GetInlinedCapacity();
+    return allocated() ? allocation().capacity() : static_cast<size_type>(N);
   }
 
   // `InlinedVector::data()`
@@ -802,19 +804,19 @@ class InlinedVector {
   // `InlinedVector::shrink_to_fit()`
   //
   // Reduces memory usage by freeing unused memory. After this call, calls to
-  // `capacity()` will be equal to `max(Storage::GetInlinedCapacity(), size())`.
+  // `capacity()` will be equal to `max(N, size())`.
   //
-  // If `size() <= Storage::GetInlinedCapacity()` and the elements are currently
-  // stored on the heap, they will be moved to the inlined storage and the heap
-  // memory will be deallocated.
+  // If `size() <= N` and the elements are currently stored on the heap, they
+  // will be moved to the inlined storage and the heap memory will be
+  // deallocated.
   //
-  // If `size() > Storage::GetInlinedCapacity()` and `size() < capacity()` the
-  // elements will be moved to a smaller heap allocation.
+  // If `size() > N` and `size() < capacity()` the elements will be moved to a
+  // smaller heap allocation.
   void shrink_to_fit() {
     const auto s = size();
     if (ABSL_PREDICT_FALSE(!allocated() || s == capacity())) return;
 
-    if (s <= Storage::GetInlinedCapacity()) {
+    if (s <= N) {
       // Move the elements to the inlined storage.
       // We have to do this using a temporary, because `inlined_storage` and
       // `allocation_storage` are in a union field.
@@ -943,7 +945,7 @@ class InlinedVector {
     const size_type s = size();
     assert(s <= capacity());
 
-    size_type target = (std::max)(Storage::GetInlinedCapacity(), s + delta);
+    size_type target = (std::max)(N, s + delta);
 
     // Compute new capacity by repeatedly doubling current capacity
     // TODO(psrc): Check and avoid overflow?
@@ -1046,7 +1048,7 @@ class InlinedVector {
   }
 
   void InitAssign(size_type n) {
-    if (n > Storage::GetInlinedCapacity()) {
+    if (n > N) {
       Allocation new_allocation(allocator(), n);
       init_allocation(new_allocation);
       UninitializedFill(allocated_space(), allocated_space() + n);
@@ -1058,7 +1060,7 @@ class InlinedVector {
   }
 
   void InitAssign(size_type n, const_reference v) {
-    if (n > Storage::GetInlinedCapacity()) {
+    if (n > N) {
       Allocation new_allocation(allocator(), n);
       init_allocation(new_allocation);
       UninitializedFill(allocated_space(), allocated_space() + n, v);
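A short sketch of the capacity semantics the reworded comments describe, assuming an inline capacity of 8; the demo function is hypothetical:

#include <cassert>
#include "absl/container/inlined_vector.h"

void CapacityDemo() {
  absl::InlinedVector<int, 8> v;
  assert(v.capacity() == 8);  // inline storage: capacity() == N
  for (int i = 0; i < 9; ++i) v.push_back(i);
  assert(v.capacity() > 8);   // ninth element spilled to the heap
  v.resize(4);
  v.shrink_to_fit();          // size() <= N: moves back to inline storage
  assert(v.capacity() == 8);
}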
diff --git a/absl/container/inlined_vector_benchmark.cc b/absl/container/inlined_vector_benchmark.cc
index fc928afe5d65..867a29ea32c1 100644
--- a/absl/container/inlined_vector_benchmark.cc
+++ b/absl/container/inlined_vector_benchmark.cc
@@ -23,17 +23,13 @@
 
 namespace {
 
-using IntVec = absl::InlinedVector<int, 8>;
-
 void BM_InlinedVectorFill(benchmark::State& state) {
-  const int len = state.range(0);
+  absl::InlinedVector<int, 8> v;
+  int val = 10;
   for (auto _ : state) {
-    IntVec v;
-    for (int i = 0; i < len; i++) {
-      v.push_back(i);
-    }
+    benchmark::DoNotOptimize(v);
+    v.push_back(val);
   }
-  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
 }
 BENCHMARK(BM_InlinedVectorFill)->Range(0, 1024);
 
@@ -43,23 +39,25 @@ void BM_InlinedVectorFillRange(benchmark::State& state) {
   for (int i = 0; i < len; i++) {
     ia[i] = i;
   }
+  auto* from = ia.get();
+  auto* to = from + len;
   for (auto _ : state) {
-    IntVec v(ia.get(), ia.get() + len);
+    benchmark::DoNotOptimize(from);
+    benchmark::DoNotOptimize(to);
+    absl::InlinedVector<int, 8> v(from, to);
     benchmark::DoNotOptimize(v);
   }
-  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
 }
 BENCHMARK(BM_InlinedVectorFillRange)->Range(0, 1024);
 
 void BM_StdVectorFill(benchmark::State& state) {
-  const int len = state.range(0);
+  std::vector<int> v;
+  int val = 10;
   for (auto _ : state) {
-    std::vector<int> v;
-    for (int i = 0; i < len; i++) {
-      v.push_back(i);
-    }
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(val);
+    v.push_back(val);
  }
-  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
 }
 BENCHMARK(BM_StdVectorFill)->Range(0, 1024);
 
@@ -124,7 +122,7 @@ struct Buffer {  // some arbitrary structure for benchmarking.
   void* user_data;
 };
 
-void BM_InlinedVectorTenAssignments(benchmark::State& state) {
+void BM_InlinedVectorAssignments(benchmark::State& state) {
   const int len = state.range(0);
 
   using BufferVec = absl::InlinedVector<Buffer, 2>;
@@ -133,18 +131,25 @@ void BM_InlinedVectorAssignments(benchmark::State& state) {
   BufferVec dst;
 
   for (auto _ : state) {
-    for (int i = 0; i < 10; ++i) {
-      dst = src;
-    }
+    benchmark::DoNotOptimize(dst);
+    benchmark::DoNotOptimize(src);
+    dst = src;
   }
 }
-BENCHMARK(BM_InlinedVectorTenAssignments)
-    ->Arg(0)->Arg(1)->Arg(2)->Arg(3)->Arg(4)->Arg(20);
+BENCHMARK(BM_InlinedVectorAssignments)
+    ->Arg(0)
+    ->Arg(1)
+    ->Arg(2)
+    ->Arg(3)
+    ->Arg(4)
+    ->Arg(20);
 
 void BM_CreateFromContainer(benchmark::State& state) {
   for (auto _ : state) {
-    absl::InlinedVector<int, 4> x(absl::InlinedVector<int, 4>{1, 2, 3});
-    benchmark::DoNotOptimize(x);
+    absl::InlinedVector<int, 4> src{1, 2, 3};
+    benchmark::DoNotOptimize(src);
+    absl::InlinedVector<int, 4> dst(std::move(src));
+    benchmark::DoNotOptimize(dst);
   }
 }
 BENCHMARK(BM_CreateFromContainer);
 
@@ -214,6 +219,8 @@ void BM_SwapElements(benchmark::State& state) {
   Vec b;
   for (auto _ : state) {
     using std::swap;
+    benchmark::DoNotOptimize(a);
+    benchmark::DoNotOptimize(b);
     swap(a, b);
   }
 }
@@ -259,60 +266,44 @@ BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 8>);
 
 void BM_InlinedVectorIndexInlined(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
   for (auto _ : state) {
-    for (int i = 0; i < 1000; ++i) {
-      benchmark::DoNotOptimize(v);
-      benchmark::DoNotOptimize(v[4]);
-    }
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v[4]);
   }
-  state.SetItemsProcessed(1000 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorIndexInlined);
 
 void BM_InlinedVectorIndexExternal(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    for (int i = 0; i < 1000; ++i) {
-      benchmark::DoNotOptimize(v);
-      benchmark::DoNotOptimize(v[4]);
-    }
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v[4]);
   }
-  state.SetItemsProcessed(1000 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorIndexExternal);
 
 void BM_StdVectorIndex(benchmark::State& state) {
   std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    for (int i = 0; i < 1000; ++i) {
-      benchmark::DoNotOptimize(v);
-      benchmark::DoNotOptimize(v[4]);
-    }
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v[4]);
   }
-  state.SetItemsProcessed(1000 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_StdVectorIndex);
 
-#define UNROLL_2(x)              \
-  benchmark::DoNotOptimize(x);   \
-  benchmark::DoNotOptimize(x);
-
-#define UNROLL_4(x) UNROLL_2(x) UNROLL_2(x)
-#define UNROLL_8(x) UNROLL_4(x) UNROLL_4(x)
-#define UNROLL_16(x) UNROLL_8(x) UNROLL_8(x);
-
 void BM_InlinedVectorDataInlined(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
   for (auto _ : state) {
-    UNROLL_16(v.data());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.data());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorDataInlined);
 
 void BM_InlinedVectorDataExternal(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.data());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.data());
   }
   state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
@@ -321,7 +312,8 @@ BENCHMARK(BM_InlinedVectorDataExternal);
 
 void BM_StdVectorData(benchmark::State& state) {
   std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.data());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.data());
   }
   state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
@@ -330,54 +322,54 @@ BENCHMARK(BM_StdVectorData);
 
 void BM_InlinedVectorSizeInlined(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
   for (auto _ : state) {
-    UNROLL_16(v.size());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.size());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorSizeInlined);
 
 void BM_InlinedVectorSizeExternal(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.size());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.size());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorSizeExternal);
 
 void BM_StdVectorSize(benchmark::State& state) {
   std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.size());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.size());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_StdVectorSize);
 
 void BM_InlinedVectorEmptyInlined(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
   for (auto _ : state) {
-    UNROLL_16(v.empty());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.empty());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorEmptyInlined);
 
 void BM_InlinedVectorEmptyExternal(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.empty());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.empty());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorEmptyExternal);
 
 void BM_StdVectorEmpty(benchmark::State& state) {
   std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.empty());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.empty());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_StdVectorEmpty);
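The rewritten benchmarks all follow one pattern: a single operation per iteration, with benchmark::DoNotOptimize defeating hoisting and constant folding instead of the old manual unroll macros and SetItemsProcessed bookkeeping. A minimal sketch of that pattern; BM_AccessorDemo is illustrative, not part of the commit:

#include <vector>
#include "benchmark/benchmark.h"

void BM_AccessorDemo(benchmark::State& state) {
  std::vector<int> v = {1, 2, 3, 4};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);         // compiler must assume `v` may change
    benchmark::DoNotOptimize(v.size());  // result must be materialized
  }
}
BENCHMARK(BM_AccessorDemo);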
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
index 308506093300..24059d94c876 100644
--- a/absl/container/internal/inlined_vector.h
+++ b/absl/container/internal/inlined_vector.h
@@ -24,11 +24,12 @@ namespace absl {
 
 namespace inlined_vector_internal {
 
-template <typename T, size_t N, typename A>
-class InlinedVectorStorage {
-  static_assert(
-      N > 0, "InlinedVector cannot be instantiated with `0` inline elements.");
+template <typename InlinedVector>
+class Storage;
 
+template <template <typename, size_t, typename> class InlinedVector, typename T,
+          size_t N, typename A>
+class Storage<InlinedVector<T, N, A>> {
  public:
   using allocator_type = A;
   using value_type = typename allocator_type::value_type;
@@ -44,12 +45,7 @@ class InlinedVectorStorage {
   using reverse_iterator = std::reverse_iterator<iterator>;
   using const_reverse_iterator = std::reverse_iterator<const_iterator>;
 
-  constexpr static size_type GetInlinedCapacity() {
-    return static_cast<size_type>(N);
-  }
-
-  explicit InlinedVectorStorage(const allocator_type& a)
-      : allocator_and_tag_(a) {}
+  explicit Storage(const allocator_type& a) : allocator_and_tag_(a) {}
 
   // TODO(johnsoncj): Make the below types and members private after migration
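The new Storage template relies on partial specialization over a template-template parameter: the primary template is left undefined, and the specialization pattern-matches a container instantiation to recover its T, N, and A. Inside the container, the injected-class-name lets it write Storage<InlinedVector> for its own instantiation. A self-contained sketch of the pattern with illustrative names, not Abseil's:

#include <cstddef>
#include <memory>

template <typename Container>
class Storage;  // primary template: intentionally never defined

template <template <typename, std::size_t, typename> class Container,
          typename T, std::size_t N, typename A>
class Storage<Container<T, N, A>> {  // matches any Container<T, N, A>
 public:
  using value_type = T;
  static constexpr std::size_t inlined_capacity() { return N; }
};

template <typename T, std::size_t N, typename A = std::allocator<T>>
class MiniVec {
  // `MiniVec` here is the injected-class-name, i.e. MiniVec<T, N, A>.
  using MyStorage = Storage<MiniVec>;
};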
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 87511148d04a..02fd0bfa0d93 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -845,6 +845,25 @@ TEST(Table, Erase) {
   EXPECT_TRUE(t.find(0) == t.end());
 }
 
+TEST(Table, EraseMaintainsValidIterator) {
+  IntTable t;
+  const int kNumElements = 100;
+  for (int i = 0; i < kNumElements; i++) {
+    EXPECT_TRUE(t.emplace(i).second);
+  }
+  EXPECT_EQ(t.size(), kNumElements);
+
+  int num_erase_calls = 0;
+  auto it = t.begin();
+  while (it != t.end()) {
+    t.erase(it++);
+    num_erase_calls++;
+  }
+
+  EXPECT_TRUE(t.empty());
+  EXPECT_EQ(num_erase_calls, kNumElements);
+}
+
 // Collect N bad keys by following algorithm:
 // 1. Create an empty table and reserve it to 2 * N.
 // 2. Insert N random elements.
diff --git a/absl/meta/type_traits.h b/absl/meta/type_traits.h
index b1b149146b44..fbdc921f7816 100644
--- a/absl/meta/type_traits.h
+++ b/absl/meta/type_traits.h
@@ -483,16 +483,17 @@ inline void AssertHashEnabled() {
 
 }  // namespace type_traits_internal
 
-}  // namespace absl
-
 // An internal namespace that is required to implement the C++17 swap traits.
-//
-// NOTE: This is its own top-level namespace to avoid subtleties due to
-// functions named "swap" that may appear in the absl namespace.
-namespace absl_internal_swap {
+// It is not further nested in type_traits_internal to avoid long symbol names.
+namespace swap_internal {
 
+// Necessary for the traits.
 using std::swap;
 
+// This declaration prevents global `swap` and `absl::swap` overloads from being
+// considered unless ADL picks them up.
+void swap();
+
 template <class T>
 using IsSwappableImpl = decltype(swap(std::declval<T&>(), std::declval<T&>()));
@@ -520,22 +521,13 @@ struct IsNothrowSwappable
 
 // Swap()
 //
-// Performs the swap idiom from a namespace with no additional `swap` overloads.
+// Performs the swap idiom from a namespace where valid candidates may only be
+// found in `std` or via ADL.
 template <class T, absl::enable_if_t<IsSwappable<T>::value, int> = 0>
 void Swap(T& lhs, T& rhs) noexcept(IsNothrowSwappable<T>::value) {
   swap(lhs, rhs);
 }
 
-}  // namespace absl_internal_swap
-
-namespace absl {
-namespace type_traits_internal {
-
-// Make the swap-related traits/function accessible from this namespace.
-using absl_internal_swap::IsNothrowSwappable;
-using absl_internal_swap::IsSwappable;
-using absl_internal_swap::Swap;
-
 // StdSwapIsUnconstrained
 //
 // Some standard library implementations are broken in that they do not
@@ -543,6 +535,16 @@ using absl_internal_swap::Swap;
 // one of those implementations.
 using StdSwapIsUnconstrained = IsSwappable<void()>;
 
+}  // namespace swap_internal
+
+namespace type_traits_internal {
+
+// Make the swap-related traits/function accessible from this namespace.
+using swap_internal::IsNothrowSwappable;
+using swap_internal::IsSwappable;
+using swap_internal::Swap;
+using swap_internal::StdSwapIsUnconstrained;
+
 }  // namespace type_traits_internal
 
 }  // namespace absl
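The machinery being relocated here is a classic detection idiom: import std::swap, declare a never-defined swap() to narrow the candidate set, then SFINAE-probe whether an unqualified call compiles. A standalone sketch of that idiom under those assumptions; the names are illustrative, not Abseil's (std::void_t needs C++17, where Abseil's own traits work on older standards):

#include <type_traits>
#include <utility>

namespace swap_detail {
using std::swap;
void swap();  // never defined; keeps unrelated non-ADL overloads out

template <class T, class = void>
struct IsSwappable : std::false_type {};

template <class T>
struct IsSwappable<
    T, std::void_t<decltype(swap(std::declval<T&>(), std::declval<T&>()))>>
    : std::true_type {};
}  // namespace swap_detail

static_assert(swap_detail::IsSwappable<int>::value, "int is swappable");
static_assert(!swap_detail::IsSwappable<const int>::value,
              "const int is not swappable");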
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 6b2eb3314796..37ffff3da872 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -118,6 +118,10 @@ ABSL_CONST_INIT absl::base_internal::AtomicHook<
 
 }  // namespace
 
+static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
+                                          bool locking, bool trylock,
+                                          bool read_lock);
+
 void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp)) {
   submit_profile_data.Store(fn);
 }
@@ -233,15 +237,14 @@ enum {  // Mutex and CondVar events passed as "ev" to PostSynchEvent
   SYNCH_EV_SIGNALALL,
 };
 
-enum {                 // Event flags
-  SYNCH_F_R = 0x01,    // reader event
-  SYNCH_F_LCK = 0x02,  // PostSynchEvent called with mutex held
-  SYNCH_F_ACQ = 0x04,  // event is an acquire
+enum {                    // Event flags
+  SYNCH_F_R = 0x01,       // reader event
+  SYNCH_F_LCK = 0x02,     // PostSynchEvent called with mutex held
+  SYNCH_F_TRY = 0x04,     // TryLock or ReaderTryLock
+  SYNCH_F_UNLOCK = 0x08,  // Unlock or ReaderUnlock
 
   SYNCH_F_LCK_W = SYNCH_F_LCK,
   SYNCH_F_LCK_R = SYNCH_F_LCK | SYNCH_F_R,
-  SYNCH_F_ACQ_W = SYNCH_F_ACQ,
-  SYNCH_F_ACQ_R = SYNCH_F_ACQ | SYNCH_F_R,
 };
 }  // anonymous namespace
@@ -250,20 +253,20 @@ static const struct {
   int flags;
   const char *msg;
 } event_properties[] = {
-    { SYNCH_F_LCK_W|SYNCH_F_ACQ_W, "TryLock succeeded " },
-    { 0,                           "TryLock failed " },
-    { SYNCH_F_LCK_R|SYNCH_F_ACQ_R, "ReaderTryLock succeeded " },
-    { 0,                           "ReaderTryLock failed " },
-    { SYNCH_F_ACQ_W,               "Lock blocking " },
-    { SYNCH_F_LCK_W,               "Lock returning " },
-    { SYNCH_F_ACQ_R,               "ReaderLock blocking " },
-    { SYNCH_F_LCK_R,               "ReaderLock returning " },
-    { SYNCH_F_LCK_W,               "Unlock " },
-    { SYNCH_F_LCK_R,               "ReaderUnlock " },
-    { 0,                           "Wait on " },
-    { 0,                           "Wait unblocked " },
-    { 0,                           "Signal on " },
-    { 0,                           "SignalAll on " },
+    {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
+    {0, "TryLock failed "},
+    {SYNCH_F_LCK_R | SYNCH_F_TRY, "ReaderTryLock succeeded "},
+    {0, "ReaderTryLock failed "},
+    {0, "Lock blocking "},
+    {SYNCH_F_LCK_W, "Lock returning "},
+    {0, "ReaderLock blocking "},
+    {SYNCH_F_LCK_R, "ReaderLock returning "},
+    {SYNCH_F_LCK_W | SYNCH_F_UNLOCK, "Unlock "},
+    {SYNCH_F_LCK_R | SYNCH_F_UNLOCK, "ReaderUnlock "},
+    {0, "Wait on "},
+    {0, "Wait unblocked "},
+    {0, "Signal on "},
+    {0, "SignalAll on "},
 };
 
 static absl::base_internal::SpinLock synch_event_mu(
@@ -415,9 +418,26 @@ static void PostSynchEvent(void *obj, int ev) {
     ABSL_RAW_LOG(INFO, "%s%p %s %s", event_properties[ev].msg, obj,
                  (e == nullptr ? "" : e->name), buffer);
   }
-  if ((event_properties[ev].flags & SYNCH_F_LCK) != 0 && e != nullptr &&
-      e->invariant != nullptr) {
-    (*e->invariant)(e->arg);
+  const int flags = event_properties[ev].flags;
+  if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) {
+    // Calling the invariant as is causes problems under ThreadSanitizer.
+    // We are currently inside of Mutex Lock/Unlock and are ignoring all
+    // memory accesses and synchronization. If the invariant transitively
+    // synchronizes something else and we ignore the synchronization, we will
+    // get false positive race reports later.
+    // Reuse EvalConditionAnnotated to properly call into user code.
+    struct local {
+      static bool pred(SynchEvent *ev) {
+        (*ev->invariant)(ev->arg);
+        return false;
+      }
+    };
+    Condition cond(&local::pred, e);
+    Mutex *mu = static_cast<Mutex *>(obj);
+    const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
+    const bool trylock = (flags & SYNCH_F_TRY) != 0;
+    const bool read_lock = (flags & SYNCH_F_R) != 0;
+    EvalConditionAnnotated(&cond, mu, locking, trylock, read_lock);
   }
   UnrefSynchEvent(e);
 }
@@ -1553,7 +1573,7 @@ bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
   ABSL_TSAN_MUTEX_PRE_LOCK(this, TsanFlags(how));
   this->LockSlowLoop(&waitp, flags);
   bool res = waitp.cond != nullptr ||  // => cond known true from LockSlowLoop
-             cond.Eval();
+             EvalConditionAnnotated(&cond, this, true, false, how == kShared);
   ABSL_TSAN_MUTEX_POST_LOCK(this, TsanFlags(how), 0);
   return res;
 }
@@ -1731,12 +1751,17 @@ void Mutex::LockSlow(MuHow how, const Condition *cond, int flags) {
 
 // Compute cond->Eval() and tell race detectors that we do it under mutex mu.
 static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
-                                          bool locking, Mutex::MuHow how) {
+                                          bool locking, bool trylock,
+                                          bool read_lock) {
   // Delicate annotation dance.
   // We are currently inside of read/write lock/unlock operation.
   // All memory accesses are ignored inside of mutex operations + for unlock
   // operation tsan considers that we've already released the mutex.
   bool res = false;
+#ifdef THREAD_SANITIZER
+  const int flags = read_lock ? __tsan_mutex_read_lock : 0;
+  const int tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
+#endif
   if (locking) {
     // For lock we pretend that we have finished the operation,
    // evaluate the predicate, then unlock the mutex and start locking it again
@@ -1744,24 +1769,26 @@ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
     // Note: we can't simply do POST_LOCK, Eval, PRE_LOCK, because then tsan
     // will think the lock acquisition is recursive which will trigger
     // deadlock detector.
-    ABSL_TSAN_MUTEX_POST_LOCK(mu, TsanFlags(how), 0);
+    ABSL_TSAN_MUTEX_POST_LOCK(mu, tryflags, 0);
     res = cond->Eval();
-    ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, TsanFlags(how));
-    ABSL_TSAN_MUTEX_POST_UNLOCK(mu, TsanFlags(how));
-    ABSL_TSAN_MUTEX_PRE_LOCK(mu, TsanFlags(how));
+    // There is no "try" version of Unlock, so use flags instead of tryflags.
+    ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
+    ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
+    ABSL_TSAN_MUTEX_PRE_LOCK(mu, tryflags);
   } else {
     // Similarly, for unlock we pretend that we have unlocked the mutex,
     // lock the mutex, evaluate the predicate, and start unlocking it again
     // to match the annotation at the end of outer unlock operation.
-    ABSL_TSAN_MUTEX_POST_UNLOCK(mu, TsanFlags(how));
-    ABSL_TSAN_MUTEX_PRE_LOCK(mu, TsanFlags(how));
-    ABSL_TSAN_MUTEX_POST_LOCK(mu, TsanFlags(how), 0);
+    ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
+    ABSL_TSAN_MUTEX_PRE_LOCK(mu, flags);
+    ABSL_TSAN_MUTEX_POST_LOCK(mu, flags, 0);
     res = cond->Eval();
-    ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, TsanFlags(how));
+    ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
   }
   // Prevent unused param warnings in non-TSAN builds.
   static_cast<void>(mu);
-  static_cast<void>(how);
+  static_cast<void>(trylock);
+  static_cast<void>(read_lock);
   return res;
 }
@@ -1807,7 +1834,8 @@ bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
           v, (how->fast_or | (v & zap_desig_waker[flags & kMuHasBlocked])) +
                  how->fast_add,
           std::memory_order_acquire, std::memory_order_relaxed)) {
-    if (cond == nullptr || EvalConditionAnnotated(cond, this, true, how)) {
+    if (cond == nullptr ||
+        EvalConditionAnnotated(cond, this, true, false, how == kShared)) {
       return true;
     }
     unlock = true;
@@ -1825,7 +1853,8 @@ bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
   }
   this->LockSlowLoop(&waitp, flags);
   return waitp.cond != nullptr ||  // => cond known true from LockSlowLoop
-         cond == nullptr || EvalConditionAnnotated(cond, this, true, how);
+         cond == nullptr ||
+         EvalConditionAnnotated(cond, this, true, false, how == kShared);
 }
 
 // RAW_CHECK_FMT() takes a condition, a printf-style format string, and
@@ -1881,7 +1910,8 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
                 waitp->how->fast_add,
             std::memory_order_acquire, std::memory_order_relaxed)) {
       if (waitp->cond == nullptr ||
-          EvalConditionAnnotated(waitp->cond, this, true, waitp->how)) {
+          EvalConditionAnnotated(waitp->cond, this, true, false,
+                                 waitp->how == kShared)) {
         break;  // we timed out, or condition true, so return
       }
       this->UnlockSlow(waitp);  // got lock but condition false
@@ -1924,7 +1954,8 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
                 std::memory_order_release, std::memory_order_relaxed));
         if (waitp->cond == nullptr ||
-            EvalConditionAnnotated(waitp->cond, this, true, waitp->how)) {
+            EvalConditionAnnotated(waitp->cond, this, true, false,
+                                   waitp->how == kShared)) {
           break;  // we timed out, or condition true, so return
         }
         this->UnlockSlow(waitp);  // got lock but condition false
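For context, the invariant that PostSynchEvent now routes through EvalConditionAnnotated is the one a caller registers via Mutex::EnableInvariantDebugging; wrapping it in a Condition gives TSan consistent lock annotations around the user callback. A hedged sketch of that user-facing flow, assuming the public API in absl/synchronization/mutex.h; the Counter type is illustrative:

#include <cassert>
#include "absl/synchronization/mutex.h"

struct Counter {
  absl::Mutex mu;
  int a = 0;
  int b = 0;  // invariant: a == b whenever mu is not held

  Counter() {
    absl::EnableMutexInvariantDebugging(true);  // process-wide opt-in
    mu.EnableInvariantDebugging(
        [](void *arg) {
          auto *c = static_cast<Counter *>(arg);
          assert(c->a == c->b);  // checked around lock/unlock events
        },
        this);
  }

  void Increment() {
    absl::MutexLock l(&mu);
    ++a;
    ++b;  // invariant restored before the unlock-time check runs
  }
};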