author    | Vincent Ambo <mail@tazj.in> | 2020-11-21T13·43+0100
committer | Vincent Ambo <mail@tazj.in> | 2020-11-21T14·48+0100
commit    | 082c006c04343a78d87b6c6ab3608c25d6213c3f (patch)
tree      | 16e6f04f8d1d1d2d67e8e917d5e7bb48c1b60375 /third_party/abseil_cpp/absl/synchronization
parent    | cc27324d0226953943f408ce3c69ad7d648e005e (diff)
merge(3p/absl): subtree merge of Abseil up to e19260f (r/1889)
Notably, this includes Abseil's own StatusOr type, which conflicted with our implementation (that was taken from TensorFlow).

Change-Id: Ie7d6764b64055caaeb8dc7b6b9d066291e6b538f
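For context on the StatusOr conflict mentioned above, this is roughly what the vendored absl::StatusOr API looks like to callers — a minimal sketch with a hypothetical ParseInt helper, not code from this change:

```cpp
#include <string>

#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"

// Hypothetical helper, used only to illustrate the vendored absl::StatusOr API.
absl::StatusOr<int> ParseInt(const std::string& s) {
  int value = 0;
  if (!absl::SimpleAtoi(s, &value)) {
    // On failure, return an absl::Status instead of a value.
    return absl::InvalidArgumentError("not a number: " + s);
  }
  return value;  // Implicitly wraps the int in an ok StatusOr.
}

// Callers branch on ok() and read either the value or the status:
//   absl::StatusOr<int> n = ParseInt("42");
//   if (n.ok()) { Use(*n); } else { Log(n.status()); }
```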
Diffstat (limited to 'third_party/abseil_cpp/absl/synchronization')
15 files changed, 379 insertions, 788 deletions
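Among the changes in the diff below, mutex.h gains scope-guard constructors that take an absl::Condition (e.g. MutexLock(Mutex*, const Condition&)). A minimal sketch of how such a guard is used, with illustrative names that are not part of this commit:

```cpp
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

// Illustrative state guarded by a Mutex; the names are not from this commit.
struct Queue {
  absl::Mutex mu;
  int size ABSL_GUARDED_BY(mu) = 0;
};

// Blocks until size > 0, then holds the lock for the rest of the scope,
// using the MutexLock(Mutex*, const Condition&) constructor added below.
void DrainOne(Queue* q) {
  absl::MutexLock lock(&q->mu,
                       absl::Condition(+[](int* size) { return *size > 0; },
                                       &q->size));
  --q->size;  // Safe: the lock is held and the condition was true on entry.
}
```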
diff --git a/third_party/abseil_cpp/absl/synchronization/BUILD.bazel b/third_party/abseil_cpp/absl/synchronization/BUILD.bazel index 3f876b9fdc90..cd4009a15739 100644 --- a/third_party/abseil_cpp/absl/synchronization/BUILD.bazel +++ b/third_party/abseil_cpp/absl/synchronization/BUILD.bazel @@ -24,7 +24,7 @@ load( package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) # Apache 2.0 +licenses(["notice"]) # Internal data structure for efficiently detecting mutex dependency cycles cc_library( @@ -73,15 +73,14 @@ cc_library( "internal/create_thread_identity.cc", "internal/per_thread_sem.cc", "internal/waiter.cc", + "mutex.cc", "notification.cc", - ] + select({ - "//conditions:default": ["mutex.cc"], - }), + ], hdrs = [ "barrier.h", "blocking_counter.h", "internal/create_thread_identity.h", - "internal/mutex_nonprod.inc", + "internal/futex.h", "internal/per_thread_sem.h", "internal/waiter.h", "mutex.h", @@ -90,6 +89,7 @@ cc_library( copts = ABSL_DEFAULT_COPTS, linkopts = select({ "//absl:windows": [], + "//absl:wasm": [], "//conditions:default": ["-pthread"], }) + ABSL_DEFAULT_LINKOPTS, deps = [ @@ -189,6 +189,7 @@ cc_test( ":synchronization", ":thread_pool", "//absl/base", + "//absl/base:config", "//absl/base:core_headers", "//absl/base:raw_logging_internal", "//absl/memory", @@ -210,6 +211,7 @@ cc_library( ":synchronization", ":thread_pool", "//absl/base", + "//absl/base:config", "@com_github_google_benchmark//:benchmark_main", ], alwayslink = 1, @@ -248,6 +250,7 @@ cc_library( deps = [ ":synchronization", "//absl/base", + "//absl/base:config", "//absl/strings", "//absl/time", "@com_google_googletest//:gtest", diff --git a/third_party/abseil_cpp/absl/synchronization/CMakeLists.txt b/third_party/abseil_cpp/absl/synchronization/CMakeLists.txt index dfe5d05df237..e633d0bf5312 100644 --- a/third_party/abseil_cpp/absl/synchronization/CMakeLists.txt +++ b/third_party/abseil_cpp/absl/synchronization/CMakeLists.txt @@ -52,7 +52,7 @@ absl_cc_library( "barrier.h" "blocking_counter.h" "internal/create_thread_identity.h" - "internal/mutex_nonprod.inc" + "internal/futex.h" "internal/per_thread_sem.h" "internal/waiter.h" "mutex.h" @@ -149,6 +149,7 @@ absl_cc_test( absl::synchronization absl::thread_pool absl::base + absl::config absl::core_headers absl::memory absl::raw_logging_internal @@ -179,6 +180,7 @@ absl_cc_library( DEPS absl::synchronization absl::base + absl::config absl::strings absl::time gmock diff --git a/third_party/abseil_cpp/absl/synchronization/internal/futex.h b/third_party/abseil_cpp/absl/synchronization/internal/futex.h new file mode 100644 index 000000000000..06fbd6d072d1 --- /dev/null +++ b/third_party/abseil_cpp/absl/synchronization/internal/futex.h @@ -0,0 +1,154 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_ +#define ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_ + +#include "absl/base/config.h" + +#ifdef _WIN32 +#include <windows.h> +#else +#include <sys/time.h> +#include <unistd.h> +#endif + +#ifdef __linux__ +#include <linux/futex.h> +#include <sys/syscall.h> +#endif + +#include <errno.h> +#include <stdio.h> +#include <time.h> + +#include <atomic> +#include <cstdint> + +#include "absl/base/optimization.h" +#include "absl/synchronization/internal/kernel_timeout.h" + +#ifdef ABSL_INTERNAL_HAVE_FUTEX +#error ABSL_INTERNAL_HAVE_FUTEX may not be set on the command line +#elif defined(__BIONIC__) +// Bionic supports all the futex operations we need even when some of the futex +// definitions are missing. +#define ABSL_INTERNAL_HAVE_FUTEX +#elif defined(__linux__) && defined(FUTEX_CLOCK_REALTIME) +// FUTEX_CLOCK_REALTIME requires Linux >= 2.6.28. +#define ABSL_INTERNAL_HAVE_FUTEX +#endif + +#ifdef ABSL_INTERNAL_HAVE_FUTEX + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace synchronization_internal { + +// Some Android headers are missing these definitions even though they +// support these futex operations. +#ifdef __BIONIC__ +#ifndef SYS_futex +#define SYS_futex __NR_futex +#endif +#ifndef FUTEX_WAIT_BITSET +#define FUTEX_WAIT_BITSET 9 +#endif +#ifndef FUTEX_PRIVATE_FLAG +#define FUTEX_PRIVATE_FLAG 128 +#endif +#ifndef FUTEX_CLOCK_REALTIME +#define FUTEX_CLOCK_REALTIME 256 +#endif +#ifndef FUTEX_BITSET_MATCH_ANY +#define FUTEX_BITSET_MATCH_ANY 0xFFFFFFFF +#endif +#endif + +#if defined(__NR_futex_time64) && !defined(SYS_futex_time64) +#define SYS_futex_time64 __NR_futex_time64 +#endif + +#if defined(SYS_futex_time64) && !defined(SYS_futex) +#define SYS_futex SYS_futex_time64 +#endif + +class FutexImpl { + public: + static int WaitUntil(std::atomic<int32_t> *v, int32_t val, + KernelTimeout t) { + int err = 0; + if (t.has_timeout()) { + // https://locklessinc.com/articles/futex_cheat_sheet/ + // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time. + struct timespec abs_timeout = t.MakeAbsTimespec(); + // Atomically check that the futex value is still 0, and if it + // is, sleep until abs_timeout or until woken by FUTEX_WAKE. + err = syscall( + SYS_futex, reinterpret_cast<int32_t *>(v), + FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val, + &abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY); + } else { + // Atomically check that the futex value is still 0, and if it + // is, sleep until woken by FUTEX_WAKE. 
+ err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v), + FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr); + } + if (ABSL_PREDICT_FALSE(err != 0)) { + err = -errno; + } + return err; + } + + static int WaitBitsetAbsoluteTimeout(std::atomic<int32_t> *v, int32_t val, + int32_t bits, + const struct timespec *abstime) { + int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v), + FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime, + nullptr, bits); + if (ABSL_PREDICT_FALSE(err != 0)) { + err = -errno; + } + return err; + } + + static int Wake(std::atomic<int32_t> *v, int32_t count) { + int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v), + FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count); + if (ABSL_PREDICT_FALSE(err < 0)) { + err = -errno; + } + return err; + } + + // FUTEX_WAKE_BITSET + static int WakeBitset(std::atomic<int32_t> *v, int32_t count, int32_t bits) { + int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v), + FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr, + nullptr, bits); + if (ABSL_PREDICT_FALSE(err < 0)) { + err = -errno; + } + return err; + } +}; + +class Futex : public FutexImpl {}; + +} // namespace synchronization_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_INTERNAL_HAVE_FUTEX + +#endif // ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_ diff --git a/third_party/abseil_cpp/absl/synchronization/internal/graphcycles.cc b/third_party/abseil_cpp/absl/synchronization/internal/graphcycles.cc index 19f9aab5b1a5..27fec21681dc 100644 --- a/third_party/abseil_cpp/absl/synchronization/internal/graphcycles.cc +++ b/third_party/abseil_cpp/absl/synchronization/internal/graphcycles.cc @@ -37,6 +37,7 @@ #include <algorithm> #include <array> +#include <limits> #include "absl/base/internal/hide_ptr.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/internal/spinlock.h" diff --git a/third_party/abseil_cpp/absl/synchronization/internal/kernel_timeout.h b/third_party/abseil_cpp/absl/synchronization/internal/kernel_timeout.h index d6ac5db0af10..bbd4d2d70f44 100644 --- a/third_party/abseil_cpp/absl/synchronization/internal/kernel_timeout.h +++ b/third_party/abseil_cpp/absl/synchronization/internal/kernel_timeout.h @@ -26,6 +26,7 @@ #define ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_ #include <time.h> + #include <algorithm> #include <limits> @@ -57,6 +58,10 @@ class KernelTimeout { bool has_timeout() const { return ns_ != 0; } + // Convert to parameter for sem_timedwait/futex/similar. Only for approved + // users. Do not call if !has_timeout. + struct timespec MakeAbsTimespec(); + private: // internal rep, not user visible: ns after unix epoch. // zero = no timeout. @@ -82,34 +87,6 @@ class KernelTimeout { return x; } - // Convert to parameter for sem_timedwait/futex/similar. Only for approved - // users. Do not call if !has_timeout. - struct timespec MakeAbsTimespec() { - int64_t n = ns_; - static const int64_t kNanosPerSecond = 1000 * 1000 * 1000; - if (n == 0) { - ABSL_RAW_LOG( - ERROR, - "Tried to create a timespec from a non-timeout; never do this."); - // But we'll try to continue sanely. no-timeout ~= saturated timeout. - n = (std::numeric_limits<int64_t>::max)(); - } - - // Kernel APIs validate timespecs as being at or after the epoch, - // despite the kernel time type being signed. However, no one can - // tell the difference between a timeout at or before the epoch (since - // all such timeouts have expired!) 
- if (n < 0) n = 0; - - struct timespec abstime; - int64_t seconds = (std::min)(n / kNanosPerSecond, - int64_t{(std::numeric_limits<time_t>::max)()}); - abstime.tv_sec = static_cast<time_t>(seconds); - abstime.tv_nsec = - static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond); - return abstime; - } - #ifdef _WIN32 // Converts to milliseconds from now, or INFINITE when // !has_timeout(). For use by SleepConditionVariableSRW on @@ -148,6 +125,30 @@ class KernelTimeout { friend class Waiter; }; +inline struct timespec KernelTimeout::MakeAbsTimespec() { + int64_t n = ns_; + static const int64_t kNanosPerSecond = 1000 * 1000 * 1000; + if (n == 0) { + ABSL_RAW_LOG( + ERROR, "Tried to create a timespec from a non-timeout; never do this."); + // But we'll try to continue sanely. no-timeout ~= saturated timeout. + n = (std::numeric_limits<int64_t>::max)(); + } + + // Kernel APIs validate timespecs as being at or after the epoch, + // despite the kernel time type being signed. However, no one can + // tell the difference between a timeout at or before the epoch (since + // all such timeouts have expired!) + if (n < 0) n = 0; + + struct timespec abstime; + int64_t seconds = (std::min)(n / kNanosPerSecond, + int64_t{(std::numeric_limits<time_t>::max)()}); + abstime.tv_sec = static_cast<time_t>(seconds); + abstime.tv_nsec = static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond); + return abstime; +} + } // namespace synchronization_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/third_party/abseil_cpp/absl/synchronization/internal/mutex_nonprod.cc b/third_party/abseil_cpp/absl/synchronization/internal/mutex_nonprod.cc deleted file mode 100644 index 1732f8365005..000000000000 --- a/third_party/abseil_cpp/absl/synchronization/internal/mutex_nonprod.cc +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright 2017 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Implementation of a small subset of Mutex and CondVar functionality -// for platforms where the production implementation hasn't been fully -// ported yet. - -#include "absl/synchronization/mutex.h" - -#if defined(_WIN32) -#include <chrono> // NOLINT(build/c++11) -#else -#include <sys/time.h> -#include <time.h> -#endif - -#include <algorithm> - -#include "absl/base/internal/raw_logging.h" -#include "absl/time/time.h" - -namespace absl { -ABSL_NAMESPACE_BEGIN - -void SetMutexDeadlockDetectionMode(OnDeadlockCycle) {} -void EnableMutexInvariantDebugging(bool) {} - -namespace synchronization_internal { - -namespace { - -// Return the current time plus the timeout. -absl::Time DeadlineFromTimeout(absl::Duration timeout) { - return absl::Now() + timeout; -} - -// Limit the deadline to a positive, 32-bit time_t value to accommodate -// implementation restrictions. This also deals with InfinitePast and -// InfiniteFuture. 
-absl::Time LimitedDeadline(absl::Time deadline) { - deadline = std::max(absl::FromTimeT(0), deadline); - deadline = std::min(deadline, absl::FromTimeT(0x7fffffff)); - return deadline; -} - -} // namespace - -#if defined(_WIN32) - -MutexImpl::MutexImpl() {} - -MutexImpl::~MutexImpl() { - if (locked_) { - std_mutex_.unlock(); - } -} - -void MutexImpl::Lock() { - std_mutex_.lock(); - locked_ = true; -} - -bool MutexImpl::TryLock() { - bool locked = std_mutex_.try_lock(); - if (locked) locked_ = true; - return locked; -} - -void MutexImpl::Unlock() { - locked_ = false; - released_.SignalAll(); - std_mutex_.unlock(); -} - -CondVarImpl::CondVarImpl() {} - -CondVarImpl::~CondVarImpl() {} - -void CondVarImpl::Signal() { std_cv_.notify_one(); } - -void CondVarImpl::SignalAll() { std_cv_.notify_all(); } - -void CondVarImpl::Wait(MutexImpl* mu) { - mu->released_.SignalAll(); - std_cv_.wait(mu->std_mutex_); -} - -bool CondVarImpl::WaitWithDeadline(MutexImpl* mu, absl::Time deadline) { - mu->released_.SignalAll(); - time_t when = ToTimeT(deadline); - int64_t nanos = ToInt64Nanoseconds(deadline - absl::FromTimeT(when)); - std::chrono::system_clock::time_point deadline_tp = - std::chrono::system_clock::from_time_t(when) + - std::chrono::duration_cast<std::chrono::system_clock::duration>( - std::chrono::nanoseconds(nanos)); - auto deadline_since_epoch = - std::chrono::duration_cast<std::chrono::duration<double>>( - deadline_tp - std::chrono::system_clock::from_time_t(0)); - return std_cv_.wait_until(mu->std_mutex_, deadline_tp) == - std::cv_status::timeout; -} - -#else // ! _WIN32 - -MutexImpl::MutexImpl() { - ABSL_RAW_CHECK(pthread_mutex_init(&pthread_mutex_, nullptr) == 0, - "pthread error"); -} - -MutexImpl::~MutexImpl() { - if (locked_) { - ABSL_RAW_CHECK(pthread_mutex_unlock(&pthread_mutex_) == 0, "pthread error"); - } - ABSL_RAW_CHECK(pthread_mutex_destroy(&pthread_mutex_) == 0, "pthread error"); -} - -void MutexImpl::Lock() { - ABSL_RAW_CHECK(pthread_mutex_lock(&pthread_mutex_) == 0, "pthread error"); - locked_ = true; -} - -bool MutexImpl::TryLock() { - bool locked = (0 == pthread_mutex_trylock(&pthread_mutex_)); - if (locked) locked_ = true; - return locked; -} - -void MutexImpl::Unlock() { - locked_ = false; - released_.SignalAll(); - ABSL_RAW_CHECK(pthread_mutex_unlock(&pthread_mutex_) == 0, "pthread error"); -} - -CondVarImpl::CondVarImpl() { - ABSL_RAW_CHECK(pthread_cond_init(&pthread_cv_, nullptr) == 0, - "pthread error"); -} - -CondVarImpl::~CondVarImpl() { - ABSL_RAW_CHECK(pthread_cond_destroy(&pthread_cv_) == 0, "pthread error"); -} - -void CondVarImpl::Signal() { - ABSL_RAW_CHECK(pthread_cond_signal(&pthread_cv_) == 0, "pthread error"); -} - -void CondVarImpl::SignalAll() { - ABSL_RAW_CHECK(pthread_cond_broadcast(&pthread_cv_) == 0, "pthread error"); -} - -void CondVarImpl::Wait(MutexImpl* mu) { - mu->released_.SignalAll(); - ABSL_RAW_CHECK(pthread_cond_wait(&pthread_cv_, &mu->pthread_mutex_) == 0, - "pthread error"); -} - -bool CondVarImpl::WaitWithDeadline(MutexImpl* mu, absl::Time deadline) { - mu->released_.SignalAll(); - struct timespec ts = ToTimespec(deadline); - int rc = pthread_cond_timedwait(&pthread_cv_, &mu->pthread_mutex_, &ts); - if (rc == ETIMEDOUT) return true; - ABSL_RAW_CHECK(rc == 0, "pthread error"); - return false; -} - -#endif // ! 
_WIN32 - -void MutexImpl::Await(const Condition& cond) { - if (cond.Eval()) return; - released_.SignalAll(); - do { - released_.Wait(this); - } while (!cond.Eval()); -} - -bool MutexImpl::AwaitWithDeadline(const Condition& cond, absl::Time deadline) { - if (cond.Eval()) return true; - released_.SignalAll(); - while (true) { - if (released_.WaitWithDeadline(this, deadline)) return false; - if (cond.Eval()) return true; - } -} - -} // namespace synchronization_internal - -Mutex::Mutex() {} - -Mutex::~Mutex() {} - -void Mutex::Lock() { impl()->Lock(); } - -void Mutex::Unlock() { impl()->Unlock(); } - -bool Mutex::TryLock() { return impl()->TryLock(); } - -void Mutex::ReaderLock() { Lock(); } - -void Mutex::ReaderUnlock() { Unlock(); } - -void Mutex::Await(const Condition& cond) { impl()->Await(cond); } - -void Mutex::LockWhen(const Condition& cond) { - Lock(); - Await(cond); -} - -bool Mutex::AwaitWithDeadline(const Condition& cond, absl::Time deadline) { - return impl()->AwaitWithDeadline( - cond, synchronization_internal::LimitedDeadline(deadline)); -} - -bool Mutex::AwaitWithTimeout(const Condition& cond, absl::Duration timeout) { - return AwaitWithDeadline( - cond, synchronization_internal::DeadlineFromTimeout(timeout)); -} - -bool Mutex::LockWhenWithDeadline(const Condition& cond, absl::Time deadline) { - Lock(); - return AwaitWithDeadline(cond, deadline); -} - -bool Mutex::LockWhenWithTimeout(const Condition& cond, absl::Duration timeout) { - return LockWhenWithDeadline( - cond, synchronization_internal::DeadlineFromTimeout(timeout)); -} - -void Mutex::ReaderLockWhen(const Condition& cond) { - ReaderLock(); - Await(cond); -} - -bool Mutex::ReaderLockWhenWithTimeout(const Condition& cond, - absl::Duration timeout) { - return LockWhenWithTimeout(cond, timeout); -} -bool Mutex::ReaderLockWhenWithDeadline(const Condition& cond, - absl::Time deadline) { - return LockWhenWithDeadline(cond, deadline); -} - -void Mutex::EnableDebugLog(const char*) {} -void Mutex::EnableInvariantDebugging(void (*)(void*), void*) {} -void Mutex::ForgetDeadlockInfo() {} -void Mutex::AssertHeld() const {} -void Mutex::AssertReaderHeld() const {} -void Mutex::AssertNotHeld() const {} - -CondVar::CondVar() {} - -CondVar::~CondVar() {} - -void CondVar::Signal() { impl()->Signal(); } - -void CondVar::SignalAll() { impl()->SignalAll(); } - -void CondVar::Wait(Mutex* mu) { return impl()->Wait(mu->impl()); } - -bool CondVar::WaitWithDeadline(Mutex* mu, absl::Time deadline) { - return impl()->WaitWithDeadline( - mu->impl(), synchronization_internal::LimitedDeadline(deadline)); -} - -bool CondVar::WaitWithTimeout(Mutex* mu, absl::Duration timeout) { - return WaitWithDeadline(mu, absl::Now() + timeout); -} - -void CondVar::EnableDebugLog(const char*) {} - -#ifdef THREAD_SANITIZER -extern "C" void __tsan_read1(void *addr); -#else -#define __tsan_read1(addr) // do nothing if TSan not enabled -#endif - -// A function that just returns its argument, dereferenced -static bool Dereference(void *arg) { - // ThreadSanitizer does not instrument this file for memory accesses. - // This function dereferences a user variable that can participate - // in a data race, so we need to manually tell TSan about this memory access. 
- __tsan_read1(arg); - return *(static_cast<bool *>(arg)); -} - -Condition::Condition() {} // null constructor, used for kTrue only -const Condition Condition::kTrue; - -Condition::Condition(bool (*func)(void *), void *arg) - : eval_(&CallVoidPtrFunction), - function_(func), - method_(nullptr), - arg_(arg) {} - -bool Condition::CallVoidPtrFunction(const Condition *c) { - return (*c->function_)(c->arg_); -} - -Condition::Condition(const bool *cond) - : eval_(CallVoidPtrFunction), - function_(Dereference), - method_(nullptr), - // const_cast is safe since Dereference does not modify arg - arg_(const_cast<bool *>(cond)) {} - -bool Condition::Eval() const { - // eval_ == null for kTrue - return (this->eval_ == nullptr) || (*this->eval_)(this); -} - -void RegisterSymbolizer(bool (*)(const void*, char*, int)) {} - -ABSL_NAMESPACE_END -} // namespace absl diff --git a/third_party/abseil_cpp/absl/synchronization/internal/mutex_nonprod.inc b/third_party/abseil_cpp/absl/synchronization/internal/mutex_nonprod.inc deleted file mode 100644 index d83bc8a94c75..000000000000 --- a/third_party/abseil_cpp/absl/synchronization/internal/mutex_nonprod.inc +++ /dev/null @@ -1,249 +0,0 @@ -// Do not include. This is an implementation detail of base/mutex.h. -// -// Declares three classes: -// -// base::internal::MutexImpl - implementation helper for Mutex -// base::internal::CondVarImpl - implementation helper for CondVar -// base::internal::SynchronizationStorage<T> - implementation helper for -// Mutex, CondVar - -#include <type_traits> - -#if defined(_WIN32) -#include <condition_variable> -#include <mutex> -#else -#include <pthread.h> -#endif - -#include "absl/base/call_once.h" -#include "absl/time/time.h" - -// Declare that Mutex::ReaderLock is actually Lock(). Intended primarily -// for tests, and even then as a last resort. -#ifdef ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE -#error ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE cannot be directly set -#else -#define ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE 1 -#endif - -// Declare that Mutex::EnableInvariantDebugging is not implemented. -// Intended primarily for tests, and even then as a last resort. -#ifdef ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED -#error ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED cannot be directly set -#else -#define ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED 1 -#endif - -namespace absl { -ABSL_NAMESPACE_BEGIN -class Condition; - -namespace synchronization_internal { - -class MutexImpl; - -// Do not use this implementation detail of CondVar. Provides most of the -// implementation, but should not be placed directly in static storage -// because it will not linker initialize properly. See -// SynchronizationStorage<T> below for what we mean by linker -// initialization. -class CondVarImpl { - public: - CondVarImpl(); - CondVarImpl(const CondVarImpl&) = delete; - CondVarImpl& operator=(const CondVarImpl&) = delete; - ~CondVarImpl(); - - void Signal(); - void SignalAll(); - void Wait(MutexImpl* mutex); - bool WaitWithDeadline(MutexImpl* mutex, absl::Time deadline); - - private: -#if defined(_WIN32) - std::condition_variable_any std_cv_; -#else - pthread_cond_t pthread_cv_; -#endif -}; - -// Do not use this implementation detail of Mutex. Provides most of the -// implementation, but should not be placed directly in static storage -// because it will not linker initialize properly. See -// SynchronizationStorage<T> below for what we mean by linker -// initialization. 
-class MutexImpl { - public: - MutexImpl(); - MutexImpl(const MutexImpl&) = delete; - MutexImpl& operator=(const MutexImpl&) = delete; - ~MutexImpl(); - - void Lock(); - bool TryLock(); - void Unlock(); - void Await(const Condition& cond); - bool AwaitWithDeadline(const Condition& cond, absl::Time deadline); - - private: - friend class CondVarImpl; - -#if defined(_WIN32) - std::mutex std_mutex_; -#else - pthread_mutex_t pthread_mutex_; -#endif - - // True if the underlying mutex is locked. If the destructor is entered - // while locked_, the underlying mutex is unlocked. Mutex supports - // destruction while locked, but the same is undefined behavior for both - // pthread_mutex_t and std::mutex. - bool locked_ = false; - - // Signaled before releasing the lock, in support of Await. - CondVarImpl released_; -}; - -// Do not use this implementation detail of CondVar and Mutex. A storage -// space for T that supports a LinkerInitialized constructor. T must -// have a default constructor, which is called by the first call to -// get(). T's destructor is never called if the LinkerInitialized -// constructor is called. -// -// Objects constructed with the default constructor are constructed and -// destructed like any other object, and should never be allocated in -// static storage. -// -// Objects constructed with the LinkerInitialized constructor should -// always be in static storage. For such objects, calls to get() are always -// valid, except from signal handlers. -// -// Note that this implementation relies on undefined language behavior that -// are known to hold for the set of supported compilers. An analysis -// follows. -// -// From the C++11 standard: -// -// [basic.life] says an object has non-trivial initialization if it is of -// class type and it is initialized by a constructor other than a trivial -// default constructor. (the LinkerInitialized constructor is -// non-trivial) -// -// [basic.life] says the lifetime of an object with a non-trivial -// constructor begins when the call to the constructor is complete. -// -// [basic.life] says the lifetime of an object with non-trivial destructor -// ends when the call to the destructor begins. -// -// [basic.life] p5 specifies undefined behavior when accessing non-static -// members of an instance outside its -// lifetime. (SynchronizationStorage::get() access non-static members) -// -// So, LinkerInitialized object of SynchronizationStorage uses a -// non-trivial constructor, which is called at some point during dynamic -// initialization, and is therefore subject to order of dynamic -// initialization bugs, where get() is called before the object's -// constructor is, resulting in undefined behavior. -// -// Similarly, a LinkerInitialized SynchronizationStorage object has a -// non-trivial destructor, and so its lifetime ends at some point during -// destruction of objects with static storage duration [basic.start.term] -// p4. There is a window where other exit code could call get() after this -// occurs, resulting in undefined behavior. -// -// Combined, these statements imply that LinkerInitialized instances -// of SynchronizationStorage<T> rely on undefined behavior. -// -// However, in practice, the implementation works on all supported -// compilers. Specifically, we rely on: -// -// a) zero-initialization being sufficient to initialize -// LinkerInitialized instances for the purposes of calling -// get(), regardless of when the constructor is called. 
This is -// because the is_dynamic_ boolean is correctly zero-initialized to -// false. -// -// b) the LinkerInitialized constructor is a NOP, and immaterial to -// even to concurrent calls to get(). -// -// c) the destructor being a NOP for LinkerInitialized objects -// (guaranteed by a check for !is_dynamic_), and so any concurrent and -// subsequent calls to get() functioning as if the destructor were not -// called, by virtue of the instances' storage remaining valid after the -// destructor runs. -// -// d) That a-c apply transitively when SynchronizationStorage<T> is the -// only member of a class allocated in static storage. -// -// Nothing in the language standard guarantees that a-d hold. In practice, -// these hold in all supported compilers. -// -// Future direction: -// -// Ideally, we would simply use std::mutex or a similar class, which when -// allocated statically would support use immediately after static -// initialization up until static storage is reclaimed (i.e. the properties -// we require of all "linker initialized" instances). -// -// Regarding construction in static storage, std::mutex is required to -// provide a constexpr default constructor [thread.mutex.class], which -// ensures the instance's lifetime begins with static initialization -// [basic.start.init], and so is immune to any problems caused by the order -// of dynamic initialization. However, as of this writing Microsoft's -// Visual Studio does not provide a constexpr constructor for std::mutex. -// See -// https://blogs.msdn.microsoft.com/vcblog/2015/06/02/constexpr-complete-for-vs-2015-rtm-c11-compiler-c17-stl/ -// -// Regarding destruction of instances in static storage, [basic.life] does -// say an object ends when storage in which the occupies is released, in -// the case of non-trivial destructor. However, std::mutex is not specified -// to have a trivial destructor. -// -// So, we would need a class with a constexpr default constructor and a -// trivial destructor. Today, we can achieve neither desired property using -// std::mutex directly. -template <typename T> -class SynchronizationStorage { - public: - // Instances allocated on the heap or on the stack should use the default - // constructor. - SynchronizationStorage() - : destruct_(true), once_() {} - - constexpr explicit SynchronizationStorage(absl::ConstInitType) - : destruct_(false), once_(), space_{{0}} {} - - SynchronizationStorage(SynchronizationStorage&) = delete; - SynchronizationStorage& operator=(SynchronizationStorage&) = delete; - - ~SynchronizationStorage() { - if (destruct_) { - get()->~T(); - } - } - - // Retrieve the object in storage. This is fast and thread safe, but does - // incur the cost of absl::call_once(). - T* get() { - absl::call_once(once_, SynchronizationStorage::Construct, this); - return reinterpret_cast<T*>(&space_); - } - - private: - static void Construct(SynchronizationStorage<T>* self) { - new (&self->space_) T(); - } - - // When true, T's destructor is run when this is destructed. - const bool destruct_; - - absl::once_flag once_; - - // An aligned space for the T. 
- alignas(T) unsigned char space_[sizeof(T)]; -}; - -} // namespace synchronization_internal -ABSL_NAMESPACE_END -} // namespace absl diff --git a/third_party/abseil_cpp/absl/synchronization/internal/per_thread_sem.h b/third_party/abseil_cpp/absl/synchronization/internal/per_thread_sem.h index 8ab439153ac1..2228b6e8ea91 100644 --- a/third_party/abseil_cpp/absl/synchronization/internal/per_thread_sem.h +++ b/third_party/abseil_cpp/absl/synchronization/internal/per_thread_sem.h @@ -78,7 +78,7 @@ class PerThreadSem { // !t.has_timeout() => Wait(t) will return true. static inline bool Wait(KernelTimeout t); - // White-listed callers. + // Permitted callers. friend class PerThreadSemTest; friend class absl::Mutex; friend absl::base_internal::ThreadIdentity* CreateThreadIdentity(); diff --git a/third_party/abseil_cpp/absl/synchronization/internal/per_thread_sem_test.cc b/third_party/abseil_cpp/absl/synchronization/internal/per_thread_sem_test.cc index b5a2f6d4b5cb..8cf59e64e961 100644 --- a/third_party/abseil_cpp/absl/synchronization/internal/per_thread_sem_test.cc +++ b/third_party/abseil_cpp/absl/synchronization/internal/per_thread_sem_test.cc @@ -23,6 +23,7 @@ #include <thread> // NOLINT(build/c++11) #include "gtest/gtest.h" +#include "absl/base/config.h" #include "absl/base/internal/cycleclock.h" #include "absl/base/internal/thread_identity.h" #include "absl/strings/str_cat.h" diff --git a/third_party/abseil_cpp/absl/synchronization/internal/waiter.cc b/third_party/abseil_cpp/absl/synchronization/internal/waiter.cc index b6150b9b2bf1..2123be60f54f 100644 --- a/third_party/abseil_cpp/absl/synchronization/internal/waiter.cc +++ b/third_party/abseil_cpp/absl/synchronization/internal/waiter.cc @@ -48,6 +48,7 @@ #include "absl/base/optimization.h" #include "absl/synchronization/internal/kernel_timeout.h" + namespace absl { ABSL_NAMESPACE_BEGIN namespace synchronization_internal { @@ -66,71 +67,6 @@ static void MaybeBecomeIdle() { #if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX -// Some Android headers are missing these definitions even though they -// support these futex operations. -#ifdef __BIONIC__ -#ifndef SYS_futex -#define SYS_futex __NR_futex -#endif -#ifndef FUTEX_WAIT_BITSET -#define FUTEX_WAIT_BITSET 9 -#endif -#ifndef FUTEX_PRIVATE_FLAG -#define FUTEX_PRIVATE_FLAG 128 -#endif -#ifndef FUTEX_CLOCK_REALTIME -#define FUTEX_CLOCK_REALTIME 256 -#endif -#ifndef FUTEX_BITSET_MATCH_ANY -#define FUTEX_BITSET_MATCH_ANY 0xFFFFFFFF -#endif -#endif - -#if defined(__NR_futex_time64) && !defined(SYS_futex_time64) -#define SYS_futex_time64 __NR_futex_time64 -#endif - -#if defined(SYS_futex_time64) && !defined(SYS_futex) -#define SYS_futex SYS_futex_time64 -#endif - -class Futex { - public: - static int WaitUntil(std::atomic<int32_t> *v, int32_t val, - KernelTimeout t) { - int err = 0; - if (t.has_timeout()) { - // https://locklessinc.com/articles/futex_cheat_sheet/ - // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time. - struct timespec abs_timeout = t.MakeAbsTimespec(); - // Atomically check that the futex value is still 0, and if it - // is, sleep until abs_timeout or until woken by FUTEX_WAKE. - err = syscall( - SYS_futex, reinterpret_cast<int32_t *>(v), - FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val, - &abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY); - } else { - // Atomically check that the futex value is still 0, and if it - // is, sleep until woken by FUTEX_WAKE. 
- err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v), - FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr); - } - if (err != 0) { - err = -errno; - } - return err; - } - - static int Wake(std::atomic<int32_t> *v, int32_t count) { - int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v), - FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count); - if (ABSL_PREDICT_FALSE(err < 0)) { - err = -errno; - } - return err; - } -}; - Waiter::Waiter() { futex_.store(0, std::memory_order_relaxed); } diff --git a/third_party/abseil_cpp/absl/synchronization/internal/waiter.h b/third_party/abseil_cpp/absl/synchronization/internal/waiter.h index ae83907b1ca4..be3df180d4e2 100644 --- a/third_party/abseil_cpp/absl/synchronization/internal/waiter.h +++ b/third_party/abseil_cpp/absl/synchronization/internal/waiter.h @@ -36,6 +36,7 @@ #include <cstdint> #include "absl/base/internal/thread_identity.h" +#include "absl/synchronization/internal/futex.h" #include "absl/synchronization/internal/kernel_timeout.h" // May be chosen at compile time via -DABSL_FORCE_WAITER_MODE=<index> @@ -48,12 +49,7 @@ #define ABSL_WAITER_MODE ABSL_FORCE_WAITER_MODE #elif defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA #define ABSL_WAITER_MODE ABSL_WAITER_MODE_WIN32 -#elif defined(__BIONIC__) -// Bionic supports all the futex operations we need even when some of the futex -// definitions are missing. -#define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX -#elif defined(__linux__) && defined(FUTEX_CLOCK_REALTIME) -// FUTEX_CLOCK_REALTIME requires Linux >= 2.6.28. +#elif defined(ABSL_INTERNAL_HAVE_FUTEX) #define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX #elif defined(ABSL_HAVE_SEMAPHORE_H) #define ABSL_WAITER_MODE ABSL_WAITER_MODE_SEM @@ -100,7 +96,7 @@ class Waiter { } // How many periods to remain idle before releasing resources -#ifndef THREAD_SANITIZER +#ifndef ABSL_HAVE_THREAD_SANITIZER static constexpr int kIdlePeriods = 60; #else // Memory consumption under ThreadSanitizer is a serious concern, diff --git a/third_party/abseil_cpp/absl/synchronization/mutex.cc b/third_party/abseil_cpp/absl/synchronization/mutex.cc index 1f8a696e08ee..9e01393ca4df 100644 --- a/third_party/abseil_cpp/absl/synchronization/mutex.cc +++ b/third_party/abseil_cpp/absl/synchronization/mutex.cc @@ -39,6 +39,7 @@ #include <thread> // NOLINT(build/c++11) #include "absl/base/attributes.h" +#include "absl/base/call_once.h" #include "absl/base/config.h" #include "absl/base/dynamic_annotations.h" #include "absl/base/internal/atomic_hook.h" @@ -49,6 +50,7 @@ #include "absl/base/internal/spinlock.h" #include "absl/base/internal/sysinfo.h" #include "absl/base/internal/thread_identity.h" +#include "absl/base/internal/tsan_mutex_interface.h" #include "absl/base/port.h" #include "absl/debugging/stacktrace.h" #include "absl/debugging/symbolize.h" @@ -58,6 +60,7 @@ using absl::base_internal::CurrentThreadIdentityIfPresent; using absl::base_internal::PerThreadSynch; +using absl::base_internal::SchedulingGuard; using absl::base_internal::ThreadIdentity; using absl::synchronization_internal::GetOrCreateCurrentThreadIdentity; using absl::synchronization_internal::GraphCycles; @@ -75,7 +78,7 @@ ABSL_NAMESPACE_BEGIN namespace { -#if defined(THREAD_SANITIZER) +#if defined(ABSL_HAVE_THREAD_SANITIZER) constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore; #else constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort; @@ -85,31 +88,9 @@ ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection( kDeadlockDetectionDefault); 
ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false); -// ------------------------------------------ spinlock support - -// Make sure read-only globals used in the Mutex code are contained on the -// same cacheline and cacheline aligned to eliminate any false sharing with -// other globals from this and other modules. -static struct MutexGlobals { - MutexGlobals() { - // Find machine-specific data needed for Delay() and - // TryAcquireWithSpinning(). This runs in the global constructor - // sequence, and before that zeros are safe values. - num_cpus = absl::base_internal::NumCPUs(); - spinloop_iterations = num_cpus > 1 ? 1500 : 0; - } - int num_cpus; - int spinloop_iterations; - // Pad this struct to a full cacheline to prevent false sharing. - char padding[ABSL_CACHELINE_SIZE - 2 * sizeof(int)]; -} ABSL_CACHELINE_ALIGNED mutex_globals; -static_assert( - sizeof(MutexGlobals) == ABSL_CACHELINE_SIZE, - "MutexGlobals must occupy an entire cacheline to prevent false sharing"); - ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES - absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)> - submit_profile_data; +absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)> + submit_profile_data; ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)( const char *msg, const void *obj, int64_t wait_cycles)> mutex_tracer; @@ -143,33 +124,55 @@ void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) { symbolizer.Store(fn); } -// spinlock delay on iteration c. Returns new c. +struct ABSL_CACHELINE_ALIGNED MutexGlobals { + absl::once_flag once; + int num_cpus = 0; + int spinloop_iterations = 0; +}; + +static const MutexGlobals& GetMutexGlobals() { + ABSL_CONST_INIT static MutexGlobals data; + absl::base_internal::LowLevelCallOnce(&data.once, [&]() { + data.num_cpus = absl::base_internal::NumCPUs(); + data.spinloop_iterations = data.num_cpus > 1 ? 1500 : 0; + }); + return data; +} + +// Spinlock delay on iteration c. Returns new c. namespace { enum DelayMode { AGGRESSIVE, GENTLE }; }; -static int Delay(int32_t c, DelayMode mode) { + +namespace synchronization_internal { +int MutexDelay(int32_t c, int mode) { // If this a uniprocessor, only yield/sleep. Otherwise, if the mode is // aggressive then spin many times before yielding. If the mode is // gentle then spin only a few times before yielding. Aggressive spinning is // used to ensure that an Unlock() call, which must get the spin lock for // any thread to make progress gets it without undue delay. - int32_t limit = (mutex_globals.num_cpus > 1) ? - ((mode == AGGRESSIVE) ? 5000 : 250) : 0; + const int32_t limit = + GetMutexGlobals().num_cpus > 1 ? (mode == AGGRESSIVE ? 5000 : 250) : 0; if (c < limit) { - c++; // spin + // Spin. + c++; } else { + SchedulingGuard::ScopedEnable enable_rescheduling; ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0); - if (c == limit) { // yield once + if (c == limit) { + // Yield once. AbslInternalMutexYield(); c++; - } else { // then wait + } else { + // Then wait. absl::SleepFor(absl::Microseconds(10)); c = 0; } ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0); } - return (c); + return c; } +} // namespace synchronization_internal // --------------------------Generic atomic ops // Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to @@ -489,7 +492,7 @@ struct SynchWaitParams { std::atomic<intptr_t> *cv_word; int64_t contention_start_cycles; // Time (in cycles) when this thread started - // to contend for the mutex. + // to contend for the mutex. 
}; struct SynchLocksHeld { @@ -703,7 +706,7 @@ static constexpr bool kDebugMode = false; static constexpr bool kDebugMode = true; #endif -#ifdef THREAD_SANITIZER +#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE static unsigned TsanFlags(Mutex::MuHow how) { return how == kShared ? __tsan_mutex_read_lock : 0; } @@ -1054,6 +1057,7 @@ static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head, // Try to remove thread s from the list of waiters on this mutex. // Does nothing if s is not on the waiter list. void Mutex::TryRemove(PerThreadSynch *s) { + SchedulingGuard::ScopedDisable disable_rescheduling; intptr_t v = mu_.load(std::memory_order_relaxed); // acquire spinlock & lock if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait && @@ -1118,7 +1122,7 @@ ABSL_XRAY_LOG_ARGS(1) void Mutex::Block(PerThreadSynch *s) { this->TryRemove(s); int c = 0; while (s->next != nullptr) { - c = Delay(c, GENTLE); + c = synchronization_internal::MutexDelay(c, GENTLE); this->TryRemove(s); } if (kDebugMode) { @@ -1437,7 +1441,7 @@ void Mutex::AssertNotHeld() const { // Attempt to acquire *mu, and return whether successful. The implementation // may spin for a short while if the lock cannot be acquired immediately. static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) { - int c = mutex_globals.spinloop_iterations; + int c = GetMutexGlobals().spinloop_iterations; do { // do/while somewhat faster on AMD intptr_t v = mu->load(std::memory_order_relaxed); if ((v & (kMuReader|kMuEvent)) != 0) { @@ -1764,7 +1768,7 @@ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu, // All memory accesses are ignored inside of mutex operations + for unlock // operation tsan considers that we've already released the mutex. bool res = false; -#ifdef THREAD_SANITIZER +#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE const int flags = read_lock ? __tsan_mutex_read_lock : 0; const int tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0); #endif @@ -1814,9 +1818,9 @@ static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) { // So we "divert" (which un-ignores both memory accesses and synchronization) // and then separately turn on ignores of memory accesses. ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0); - ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN(); + ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN(); bool res = cond->Eval(); - ANNOTATE_IGNORE_READS_AND_WRITES_END(); + ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END(); ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0); static_cast<void>(mu); // Prevent unused param warning in non-TSAN builds. return res; @@ -1897,6 +1901,7 @@ static void CheckForMutexCorruption(intptr_t v, const char* label) { } void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { + SchedulingGuard::ScopedDisable disable_rescheduling; int c = 0; intptr_t v = mu_.load(std::memory_order_relaxed); if ((v & kMuEvent) != 0) { @@ -1998,7 +2003,8 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { ABSL_RAW_CHECK( waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors, "detected illegal recursion into Mutex code"); - c = Delay(c, GENTLE); // delay, then try again + // delay, then try again + c = synchronization_internal::MutexDelay(c, GENTLE); } ABSL_RAW_CHECK( waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors, @@ -2016,6 +2022,7 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { // or it is in the process of blocking on a condition variable; it must requeue // itself on the mutex/condvar to wait for its condition to become true. 
ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { + SchedulingGuard::ScopedDisable disable_rescheduling; intptr_t v = mu_.load(std::memory_order_relaxed); this->AssertReaderHeld(); CheckForMutexCorruption(v, "Unlock"); @@ -2292,7 +2299,8 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { mu_.store(nv, std::memory_order_release); break; // out of for(;;)-loop } - c = Delay(c, AGGRESSIVE); // aggressive here; no one can proceed till we do + // aggressive here; no one can proceed till we do + c = synchronization_internal::MutexDelay(c, AGGRESSIVE); } // end of for(;;)-loop if (wake_list != kPerThreadSynchNull) { @@ -2304,7 +2312,8 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { if (!cond_waiter) { // Sample lock contention events only if the (first) waiter was trying to // acquire the lock, not waiting on a condition variable or Condition. - int64_t wait_cycles = base_internal::CycleClock::Now() - enqueue_timestamp; + int64_t wait_cycles = + base_internal::CycleClock::Now() - enqueue_timestamp; mutex_tracer("slow release", this, wait_cycles); ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0); submit_profile_data(enqueue_timestamp); @@ -2331,6 +2340,7 @@ void Mutex::Trans(MuHow how) { // It will later acquire the mutex with high probability. Otherwise, we // enqueue thread w on this mutex. void Mutex::Fer(PerThreadSynch *w) { + SchedulingGuard::ScopedDisable disable_rescheduling; int c = 0; ABSL_RAW_CHECK(w->waitp->cond == nullptr, "Mutex::Fer while waiting on Condition"); @@ -2380,7 +2390,7 @@ void Mutex::Fer(PerThreadSynch *w) { return; } } - c = Delay(c, GENTLE); + c = synchronization_internal::MutexDelay(c, GENTLE); } } @@ -2429,6 +2439,7 @@ CondVar::~CondVar() { // Remove thread s from the list of waiters on this condition variable. 
void CondVar::Remove(PerThreadSynch *s) { + SchedulingGuard::ScopedDisable disable_rescheduling; intptr_t v; int c = 0; for (v = cv_.load(std::memory_order_relaxed);; @@ -2457,7 +2468,8 @@ void CondVar::Remove(PerThreadSynch *s) { std::memory_order_release); return; } else { - c = Delay(c, GENTLE); // try again after a delay + // try again after a delay + c = synchronization_internal::MutexDelay(c, GENTLE); } } } @@ -2490,7 +2502,7 @@ static void CondVarEnqueue(SynchWaitParams *waitp) { !cv_word->compare_exchange_weak(v, v | kCvSpin, std::memory_order_acquire, std::memory_order_relaxed)) { - c = Delay(c, GENTLE); + c = synchronization_internal::MutexDelay(c, GENTLE); v = cv_word->load(std::memory_order_relaxed); } ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be"); @@ -2589,6 +2601,7 @@ void CondVar::Wakeup(PerThreadSynch *w) { } void CondVar::Signal() { + SchedulingGuard::ScopedDisable disable_rescheduling; ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0); intptr_t v; int c = 0; @@ -2621,7 +2634,7 @@ void CondVar::Signal() { ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0); return; } else { - c = Delay(c, GENTLE); + c = synchronization_internal::MutexDelay(c, GENTLE); } } ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0); @@ -2658,7 +2671,8 @@ void CondVar::SignalAll () { ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0); return; } else { - c = Delay(c, GENTLE); // try again after a delay + // try again after a delay + c = synchronization_internal::MutexDelay(c, GENTLE); } } ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0); @@ -2671,7 +2685,7 @@ void ReleasableMutexLock::Release() { this->mu_ = nullptr; } -#ifdef THREAD_SANITIZER +#ifdef ABSL_HAVE_THREAD_SANITIZER extern "C" void __tsan_read1(void *addr); #else #define __tsan_read1(addr) // do nothing if TSan not enabled diff --git a/third_party/abseil_cpp/absl/synchronization/mutex.h b/third_party/abseil_cpp/absl/synchronization/mutex.h index 876698ca5f62..598d1e0617cf 100644 --- a/third_party/abseil_cpp/absl/synchronization/mutex.h +++ b/third_party/abseil_cpp/absl/synchronization/mutex.h @@ -31,22 +31,23 @@ // // MutexLock - An RAII wrapper to acquire and release a `Mutex` for exclusive/ // write access within the current scope. +// // ReaderMutexLock // - An RAII wrapper to acquire and release a `Mutex` for shared/read // access within the current scope. // // WriterMutexLock -// - Alias for `MutexLock` above, designed for use in distinguishing -// reader and writer locks within code. +// - Effectively an alias for `MutexLock` above, designed for use in +// distinguishing reader and writer locks within code. // // In addition to simple mutex locks, this file also defines ways to perform // locking under certain conditions. // -// Condition - (Preferred) Used to wait for a particular predicate that -// depends on state protected by the `Mutex` to become true. -// CondVar - A lower-level variant of `Condition` that relies on -// application code to explicitly signal the `CondVar` when -// a condition has been met. +// Condition - (Preferred) Used to wait for a particular predicate that +// depends on state protected by the `Mutex` to become true. +// CondVar - A lower-level variant of `Condition` that relies on +// application code to explicitly signal the `CondVar` when +// a condition has been met. // // See below for more information on using `Condition` or `CondVar`. 
// @@ -72,15 +73,6 @@ #include "absl/synchronization/internal/per_thread_sem.h" #include "absl/time/time.h" -// Decide if we should use the non-production implementation because -// the production implementation hasn't been fully ported yet. -#ifdef ABSL_INTERNAL_USE_NONPROD_MUTEX -#error ABSL_INTERNAL_USE_NONPROD_MUTEX cannot be directly set -#elif defined(ABSL_LOW_LEVEL_ALLOC_MISSING) -#define ABSL_INTERNAL_USE_NONPROD_MUTEX 1 -#include "absl/synchronization/internal/mutex_nonprod.inc" -#endif - namespace absl { ABSL_NAMESPACE_BEGIN @@ -461,15 +453,6 @@ class ABSL_LOCKABLE Mutex { static void InternalAttemptToUseMutexInFatalSignalHandler(); private: -#ifdef ABSL_INTERNAL_USE_NONPROD_MUTEX - friend class CondVar; - - synchronization_internal::MutexImpl *impl() { return impl_.get(); } - - synchronization_internal::SynchronizationStorage< - synchronization_internal::MutexImpl> - impl_; -#else std::atomic<intptr_t> mu_; // The Mutex state. // Post()/Wait() versus associated PerThreadSem; in class for required @@ -504,7 +487,6 @@ class ABSL_LOCKABLE Mutex { void Trans(MuHow how); // used for CondVar->Mutex transfer void Fer( base_internal::PerThreadSynch *w); // used for CondVar->Mutex transfer -#endif // Catch the error of writing Mutex when intending MutexLock. Mutex(const volatile Mutex * /*ignored*/) {} // NOLINT(runtime/explicit) @@ -525,22 +507,36 @@ class ABSL_LOCKABLE Mutex { // Example: // // Class Foo { -// +// public: // Foo::Bar* Baz() { -// MutexLock l(&lock_); +// MutexLock lock(&mu_); // ... // return bar; // } // // private: -// Mutex lock_; +// Mutex mu_; // }; class ABSL_SCOPED_LOCKABLE MutexLock { public: + // Constructors + + // Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is + // guaranteed to be locked when this object is constructed. Requires that + // `mu` be dereferenceable. explicit MutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) { this->mu_->Lock(); } + // Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to + // the above, the condition given by `cond` is also guaranteed to hold when + // this object is constructed. + explicit MutexLock(Mutex *mu, const Condition &cond) + ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) + : mu_(mu) { + this->mu_->LockWhen(cond); + } + MutexLock(const MutexLock &) = delete; // NOLINT(runtime/mutex) MutexLock(MutexLock&&) = delete; // NOLINT(runtime/mutex) MutexLock& operator=(const MutexLock&) = delete; @@ -562,6 +558,12 @@ class ABSL_SCOPED_LOCKABLE ReaderMutexLock { mu->ReaderLock(); } + explicit ReaderMutexLock(Mutex *mu, const Condition &cond) + ABSL_SHARED_LOCK_FUNCTION(mu) + : mu_(mu) { + mu->ReaderLockWhen(cond); + } + ReaderMutexLock(const ReaderMutexLock&) = delete; ReaderMutexLock(ReaderMutexLock&&) = delete; ReaderMutexLock& operator=(const ReaderMutexLock&) = delete; @@ -584,6 +586,12 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock { mu->WriterLock(); } + explicit WriterMutexLock(Mutex *mu, const Condition &cond) + ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) + : mu_(mu) { + mu->WriterLockWhen(cond); + } + WriterMutexLock(const WriterMutexLock&) = delete; WriterMutexLock(WriterMutexLock&&) = delete; WriterMutexLock& operator=(const WriterMutexLock&) = delete; @@ -622,16 +630,26 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock { // `noexcept`; until then this requirement cannot be enforced in the // type system.) // -// Note: to use a `Condition`, you need only construct it and pass it within the -// appropriate `Mutex' member function, such as `Mutex::Await()`. 
+// Note: to use a `Condition`, you need only construct it and pass it to a +// suitable `Mutex' member function, such as `Mutex::Await()`, or to the +// constructor of one of the scope guard classes. // -// Example: +// Example using LockWhen/Unlock: // // // assume count_ is not internal reference count // int count_ ABSL_GUARDED_BY(mu_); +// Condition count_is_zero(+[](int *count) { return *count == 0; }, &count_); +// +// mu_.LockWhen(count_is_zero); +// // ... +// mu_.Unlock(); // -// mu_.LockWhen(Condition(+[](int* count) { return *count == 0; }, -// &count_)); +// Example using a scope guard: +// +// { +// MutexLock lock(&mu_, count_is_zero); +// // ... +// } // // When multiple threads are waiting on exactly the same condition, make sure // that they are constructed with the same parameters (same pointer to function @@ -685,6 +703,11 @@ class Condition { // return processed_ >= current; // }; // mu_.Await(Condition(&reached)); + // + // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReaderHeld()" in + // the lambda as it may be called when the mutex is being unlocked from a + // scope holding only a reader lock, which will make the assertion not + // fulfilled and crash the binary. // See class comment for performance advice. In particular, if there // might be more than one waiter for the same condition, make sure @@ -833,17 +856,10 @@ class CondVar { void EnableDebugLog(const char *name); private: -#ifdef ABSL_INTERNAL_USE_NONPROD_MUTEX - synchronization_internal::CondVarImpl *impl() { return impl_.get(); } - synchronization_internal::SynchronizationStorage< - synchronization_internal::CondVarImpl> - impl_; -#else bool WaitCommon(Mutex *mutex, synchronization_internal::KernelTimeout t); void Remove(base_internal::PerThreadSynch *s); void Wakeup(base_internal::PerThreadSynch *w); std::atomic<intptr_t> cv_; // Condition variable state. -#endif CondVar(const CondVar&) = delete; CondVar& operator=(const CondVar&) = delete; }; @@ -865,6 +881,15 @@ class ABSL_SCOPED_LOCKABLE MutexLockMaybe { this->mu_->Lock(); } } + + explicit MutexLockMaybe(Mutex *mu, const Condition &cond) + ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) + : mu_(mu) { + if (this->mu_ != nullptr) { + this->mu_->LockWhen(cond); + } + } + ~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() { if (this->mu_ != nullptr) { this->mu_->Unlock(); } } @@ -887,6 +912,13 @@ class ABSL_SCOPED_LOCKABLE ReleasableMutexLock { : mu_(mu) { this->mu_->Lock(); } + + explicit ReleasableMutexLock(Mutex *mu, const Condition &cond) + ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) + : mu_(mu) { + this->mu_->LockWhen(cond); + } + ~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() { if (this->mu_ != nullptr) { this->mu_->Unlock(); } } @@ -901,12 +933,6 @@ class ABSL_SCOPED_LOCKABLE ReleasableMutexLock { ReleasableMutexLock& operator=(ReleasableMutexLock&&) = delete; }; -#ifdef ABSL_INTERNAL_USE_NONPROD_MUTEX - -inline constexpr Mutex::Mutex(absl::ConstInitType) : impl_(absl::kConstInit) {} - -#else - inline Mutex::Mutex() : mu_(0) { ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static); } @@ -915,8 +941,6 @@ inline constexpr Mutex::Mutex(absl::ConstInitType) : mu_(0) {} inline CondVar::CondVar() : cv_(0) {} -#endif // ABSL_INTERNAL_USE_NONPROD_MUTEX - // static template <typename T> bool Condition::CastAndCallMethod(const Condition *c) { @@ -983,7 +1007,7 @@ void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp)); // // This has the same memory ordering concerns as RegisterMutexProfiler() above. 
void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj, - int64_t wait_cycles)); + int64_t wait_cycles)); // TODO(gfalcon): Combine RegisterMutexProfiler() and RegisterMutexTracer() // into a single interface, since they are only ever called in pairs. diff --git a/third_party/abseil_cpp/absl/synchronization/mutex_benchmark.cc b/third_party/abseil_cpp/absl/synchronization/mutex_benchmark.cc index ab1880012a0c..933ea14f8f1d 100644 --- a/third_party/abseil_cpp/absl/synchronization/mutex_benchmark.cc +++ b/third_party/abseil_cpp/absl/synchronization/mutex_benchmark.cc @@ -16,6 +16,7 @@ #include <mutex> // NOLINT(build/c++11) #include <vector> +#include "absl/base/config.h" #include "absl/base/internal/cycleclock.h" #include "absl/base/internal/spinlock.h" #include "absl/synchronization/blocking_counter.h" @@ -213,7 +214,7 @@ void BM_ConditionWaiters(benchmark::State& state) { } // Some configurations have higher thread limits than others. -#if defined(__linux__) && !defined(THREAD_SANITIZER) +#if defined(__linux__) && !defined(ABSL_HAVE_THREAD_SANITIZER) constexpr int kMaxConditionWaiters = 8192; #else constexpr int kMaxConditionWaiters = 1024; diff --git a/third_party/abseil_cpp/absl/synchronization/mutex_test.cc b/third_party/abseil_cpp/absl/synchronization/mutex_test.cc index afb363af6127..058f757b482f 100644 --- a/third_party/abseil_cpp/absl/synchronization/mutex_test.cc +++ b/third_party/abseil_cpp/absl/synchronization/mutex_test.cc @@ -30,6 +30,7 @@ #include "gtest/gtest.h" #include "absl/base/attributes.h" +#include "absl/base/config.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/internal/sysinfo.h" #include "absl/memory/memory.h" @@ -706,6 +707,40 @@ TEST(Mutex, LockWhen) { t.join(); } +TEST(Mutex, LockWhenGuard) { + absl::Mutex mu; + int n = 30; + bool done = false; + + // We don't inline the lambda because the conversion is ambiguous in MSVC. + bool (*cond_eq_10)(int *) = [](int *p) { return *p == 10; }; + bool (*cond_lt_10)(int *) = [](int *p) { return *p < 10; }; + + std::thread t1([&mu, &n, &done, cond_eq_10]() { + absl::ReaderMutexLock lock(&mu, absl::Condition(cond_eq_10, &n)); + done = true; + }); + + std::thread t2[10]; + for (std::thread &t : t2) { + t = std::thread([&mu, &n, cond_lt_10]() { + absl::WriterMutexLock lock(&mu, absl::Condition(cond_lt_10, &n)); + ++n; + }); + } + + { + absl::MutexLock lock(&mu); + n = 0; + } + + for (std::thread &t : t2) t.join(); + t1.join(); + + EXPECT_TRUE(done); + EXPECT_EQ(n, 10); +} + // -------------------------------------------------------- // The following test requires Mutex::ReaderLock to be a real shared // lock, which is not the case in all builds. @@ -815,7 +850,7 @@ TEST(Mutex, MutexReaderDecrementBug) ABSL_NO_THREAD_SAFETY_ANALYSIS { // Test that we correctly handle the situation when a lock is // held and then destroyed (w/o unlocking). -#ifdef THREAD_SANITIZER +#ifdef ABSL_HAVE_THREAD_SANITIZER // TSAN reports errors when locked Mutexes are destroyed. TEST(Mutex, DISABLED_LockedMutexDestructionBug) NO_THREAD_SAFETY_ANALYSIS { #else @@ -1001,9 +1036,6 @@ TEST(Mutex, AcquireFromCondition) { x.mu0.Unlock(); } -// The deadlock detector is not part of non-prod builds, so do not test it. 
-#if !defined(ABSL_INTERNAL_USE_NONPROD_MUTEX) - TEST(Mutex, DeadlockDetector) { absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort); @@ -1067,7 +1099,7 @@ class ScopedDisableBazelTestWarnings { const char ScopedDisableBazelTestWarnings::kVarName[] = "TEST_WARNINGS_OUTPUT_FILE"; -#ifdef THREAD_SANITIZER +#ifdef ABSL_HAVE_THREAD_SANITIZER // This test intentionally creates deadlocks to test the deadlock detector. TEST(Mutex, DISABLED_DeadlockDetectorBazelWarning) { #else @@ -1101,7 +1133,7 @@ TEST(Mutex, DeadlockDetectorBazelWarning) { // annotation-based static thread-safety analysis is not currently // predicate-aware and cannot tell if the two for-loops that acquire and // release the locks have the same predicates. -TEST(Mutex, DeadlockDetectorStessTest) ABSL_NO_THREAD_SAFETY_ANALYSIS { +TEST(Mutex, DeadlockDetectorStressTest) ABSL_NO_THREAD_SAFETY_ANALYSIS { // Stress test: Here we create a large number of locks and use all of them. // If a deadlock detector keeps a full graph of lock acquisition order, // it will likely be too slow for this test to pass. @@ -1119,7 +1151,7 @@ TEST(Mutex, DeadlockDetectorStessTest) ABSL_NO_THREAD_SAFETY_ANALYSIS { } } -#ifdef THREAD_SANITIZER +#ifdef ABSL_HAVE_THREAD_SANITIZER // TSAN reports errors when locked Mutexes are destroyed. TEST(Mutex, DISABLED_DeadlockIdBug) NO_THREAD_SAFETY_ANALYSIS { #else @@ -1157,7 +1189,6 @@ TEST(Mutex, DeadlockIdBug) ABSL_NO_THREAD_SAFETY_ANALYSIS { c.Lock(); c.Unlock(); } -#endif // !defined(ABSL_INTERNAL_USE_NONPROD_MUTEX) // -------------------------------------------------------- // Test for timeouts/deadlines on condition waits that are specified using |