Diffstat (limited to 'absl/synchronization')
-rw-r--r--  absl/synchronization/BUILD.bazel              17
-rw-r--r--  absl/synchronization/CMakeLists.txt           15
-rw-r--r--  absl/synchronization/barrier.h                 4
-rw-r--r--  absl/synchronization/blocking_counter.h        4
-rw-r--r--  absl/synchronization/internal/thread_pool.h    4
-rw-r--r--  absl/synchronization/lifetime_test.cc          4
-rw-r--r--  absl/synchronization/mutex.cc                 14
-rw-r--r--  absl/synchronization/mutex.h                  86
-rw-r--r--  absl/synchronization/mutex_test.cc            16
9 files changed, 96 insertions(+), 68 deletions(-)
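
The bulk of this change renames the unprefixed thread-safety annotation macros (GUARDED_BY, EXCLUSIVE_LOCK_FUNCTION, NO_THREAD_SAFETY_ANALYSIS, and friends) to their ABSL_-prefixed equivalents, and splits internal/kernel_timeout.h out into its own kernel_timeout_internal build target. As a minimal illustrative sketch of the new spellings only (the class and members below are hypothetical and not part of this diff), user code annotated against the prefixed macros looks like:

  #include "absl/base/thread_annotations.h"
  #include "absl/synchronization/mutex.h"

  class TickCounter {
   public:
    // ABSL_LOCKS_EXCLUDED replaces the old LOCKS_EXCLUDED spelling.
    void Increment() ABSL_LOCKS_EXCLUDED(mu_) {
      absl::MutexLock lock(&mu_);  // RAII acquire/release of mu_.
      ++ticks_;
    }

    int ticks() const ABSL_LOCKS_EXCLUDED(mu_) {
      absl::MutexLock lock(&mu_);
      return ticks_;
    }

   private:
    mutable absl::Mutex mu_;
    int ticks_ ABSL_GUARDED_BY(mu_) = 0;  // Previously spelled GUARDED_BY(mu_).
  };
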
diff --git a/absl/synchronization/BUILD.bazel b/absl/synchronization/BUILD.bazel
index b48133883c..36dc98f39e 100644
--- a/absl/synchronization/BUILD.bazel
+++ b/absl/synchronization/BUILD.bazel
@@ -50,6 +50,21 @@ cc_library(
 )
 
 cc_library(
+    name = "kernel_timeout_internal",
+    hdrs = ["internal/kernel_timeout.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl/synchronization:__pkg__",
+    ],
+    deps = [
+        "//absl/base:core_headers",
+        "//absl/base:raw_logging_internal",
+        "//absl/time",
+    ],
+)
+
+cc_library(
     name = "synchronization",
     srcs = [
         "barrier.cc",
@@ -65,7 +80,6 @@ cc_library(
         "barrier.h",
         "blocking_counter.h",
         "internal/create_thread_identity.h",
-        "internal/kernel_timeout.h",
         "internal/mutex_nonprod.inc",
         "internal/per_thread_sem.h",
         "internal/waiter.h",
@@ -79,6 +93,7 @@ cc_library(
     }) + ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":graphcycles_internal",
+        ":kernel_timeout_internal",
         "//absl/base",
         "//absl/base:atomic_hook",
         "//absl/base:base_internal",
diff --git a/absl/synchronization/CMakeLists.txt b/absl/synchronization/CMakeLists.txt
index ce669c52c6..3c47a1bca5 100644
--- a/absl/synchronization/CMakeLists.txt
+++ b/absl/synchronization/CMakeLists.txt
@@ -33,12 +33,24 @@ absl_cc_library(
 
 absl_cc_library(
   NAME
+    kernel_timeout_internal
+  HDRS
+    "internal/kernel_timeout.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::core_headers
+    absl::raw_logging_internal
+    absl::time
+)
+
+absl_cc_library(
+  NAME
     synchronization
   HDRS
     "barrier.h"
     "blocking_counter.h"
     "internal/create_thread_identity.h"
-    "internal/kernel_timeout.h"
     "internal/mutex_nonprod.inc"
     "internal/per_thread_sem.h"
     "internal/waiter.h"
@@ -56,6 +68,7 @@ absl_cc_library(
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::graphcycles_internal
+    absl::kernel_timeout_internal
     absl::atomic_hook
     absl::base
     absl::base_internal
diff --git a/absl/synchronization/barrier.h b/absl/synchronization/barrier.h
index 23bb2f58c7..cb5d821a21 100644
--- a/absl/synchronization/barrier.h
+++ b/absl/synchronization/barrier.h
@@ -69,8 +69,8 @@ class Barrier {
 
  private:
   Mutex lock_;
-  int num_to_block_ GUARDED_BY(lock_);
-  int num_to_exit_ GUARDED_BY(lock_);
+  int num_to_block_ ABSL_GUARDED_BY(lock_);
+  int num_to_exit_ ABSL_GUARDED_BY(lock_);
 };
 
 }  // namespace absl
diff --git a/absl/synchronization/blocking_counter.h b/absl/synchronization/blocking_counter.h
index 4c66e0abf9..77560fc056 100644
--- a/absl/synchronization/blocking_counter.h
+++ b/absl/synchronization/blocking_counter.h
@@ -88,8 +88,8 @@ class BlockingCounter {
 
  private:
   Mutex lock_;
-  int count_ GUARDED_BY(lock_);
-  int num_waiting_ GUARDED_BY(lock_);
+  int count_ ABSL_GUARDED_BY(lock_);
+  int num_waiting_ ABSL_GUARDED_BY(lock_);
 };
 
 }  // namespace absl
diff --git a/absl/synchronization/internal/thread_pool.h b/absl/synchronization/internal/thread_pool.h
index 7f458f5a3a..a00f2be8e8 100644
--- a/absl/synchronization/internal/thread_pool.h
+++ b/absl/synchronization/internal/thread_pool.h
@@ -60,7 +60,7 @@ class ThreadPool {
   }
 
  private:
-  bool WorkAvailable() const EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+  bool WorkAvailable() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
     return !queue_.empty();
   }
 
@@ -81,7 +81,7 @@ class ThreadPool {
   }
 
   absl::Mutex mu_;
-  std::queue<std::function<void()>> queue_ GUARDED_BY(mu_);
+  std::queue<std::function<void()>> queue_ ABSL_GUARDED_BY(mu_);
   std::vector<std::thread> threads_;
 };
 
diff --git a/absl/synchronization/lifetime_test.cc b/absl/synchronization/lifetime_test.cc
index 0279c8f846..34b8875b4b 100644
--- a/absl/synchronization/lifetime_test.cc
+++ b/absl/synchronization/lifetime_test.cc
@@ -143,11 +143,11 @@ ABSL_CONST_INIT absl::Mutex early_const_init_mutex(absl::kConstInit);
 // constructors of globals "happen at link time"; memory is pre-initialized,
 // before the constructors of either grab_lock or check_still_locked are run.)
 extern absl::Mutex const_init_sanity_mutex;
-OnConstruction grab_lock([]() NO_THREAD_SAFETY_ANALYSIS {
+OnConstruction grab_lock([]() ABSL_NO_THREAD_SAFETY_ANALYSIS {
   const_init_sanity_mutex.Lock();
 });
 ABSL_CONST_INIT absl::Mutex const_init_sanity_mutex(absl::kConstInit);
-OnConstruction check_still_locked([]() NO_THREAD_SAFETY_ANALYSIS {
+OnConstruction check_still_locked([]() ABSL_NO_THREAD_SAFETY_ANALYSIS {
   const_init_sanity_mutex.AssertHeld();
   const_init_sanity_mutex.Unlock();
 });
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 76bd307a4d..100def2dd8 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -207,8 +207,8 @@ static absl::base_internal::SpinLock deadlock_graph_mu(
     absl::base_internal::kLinkerInitialized);
 
 // graph used to detect deadlocks.
-static GraphCycles *deadlock_graph GUARDED_BY(deadlock_graph_mu)
-    PT_GUARDED_BY(deadlock_graph_mu);
+static GraphCycles *deadlock_graph ABSL_GUARDED_BY(deadlock_graph_mu)
+    ABSL_PT_GUARDED_BY(deadlock_graph_mu);
 
 //------------------------------------------------------------------
 // An event mechanism for debugging mutex use.
@@ -279,10 +279,10 @@ static const uint32_t kNSynchEvent = 1031;
 
 static struct SynchEvent {     // this is a trivial hash table for the events
   // struct is freed when refcount reaches 0
-  int refcount GUARDED_BY(synch_event_mu);
+  int refcount ABSL_GUARDED_BY(synch_event_mu);
 
   // buckets have linear, 0-terminated  chains
-  SynchEvent *next GUARDED_BY(synch_event_mu);
+  SynchEvent *next ABSL_GUARDED_BY(synch_event_mu);
 
   // Constant after initialization
   uintptr_t masked_addr;  // object at this address is called "name"
@@ -296,7 +296,7 @@ static struct SynchEvent {     // this is a trivial hash table for the events
 
   // Constant after initialization
   char name[1];         // actually longer---null-terminated std::string
-} *synch_event[kNSynchEvent] GUARDED_BY(synch_event_mu);
+} * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
 
 // Ensure that the object at "addr" has a SynchEvent struct associated with it,
 // set "bits" in the word there (waiting until lockbit is clear before doing
@@ -1143,7 +1143,7 @@ PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
 }
 
 static GraphId GetGraphIdLocked(Mutex *mu)
-    EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
+    ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
   if (!deadlock_graph) {  // (re)create the deadlock graph.
     deadlock_graph =
         new (base_internal::LowLevelAlloc::Alloc(sizeof(*deadlock_graph)))
@@ -1152,7 +1152,7 @@ static GraphId GetGraphIdLocked(Mutex *mu)
   return deadlock_graph->GetId(mu);
 }
 
-static GraphId GetGraphId(Mutex *mu) LOCKS_EXCLUDED(deadlock_graph_mu) {
+static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
   deadlock_graph_mu.Lock();
   GraphId id = GetGraphIdLocked(mu);
   deadlock_graph_mu.Unlock();
diff --git a/absl/synchronization/mutex.h b/absl/synchronization/mutex.h
index c38e35616f..d33318d347 100644
--- a/absl/synchronization/mutex.h
+++ b/absl/synchronization/mutex.h
@@ -135,7 +135,7 @@ struct SynchWaitParams;
 //
 // See also `MutexLock`, below, for scoped `Mutex` acquisition.
 
-class LOCKABLE Mutex {
+class ABSL_LOCKABLE Mutex {
  public:
   // Creates a `Mutex` that is not held by anyone. This constructor is
   // typically used for Mutexes allocated on the heap or the stack.
@@ -164,27 +164,27 @@ class LOCKABLE Mutex {
   //
   // Blocks the calling thread, if necessary, until this `Mutex` is free, and
   // then acquires it exclusively. (This lock is also known as a "write lock.")
-  void Lock() EXCLUSIVE_LOCK_FUNCTION();
+  void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION();
 
   // Mutex::Unlock()
   //
   // Releases this `Mutex` and returns it from the exclusive/write state to the
   // free state. Caller must hold the `Mutex` exclusively.
-  void Unlock() UNLOCK_FUNCTION();
+  void Unlock() ABSL_UNLOCK_FUNCTION();
 
   // Mutex::TryLock()
   //
   // If the mutex can be acquired without blocking, does so exclusively and
   // returns `true`. Otherwise, returns `false`. Returns `true` with high
   // probability if the `Mutex` was free.
-  bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true);
+  bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);
 
   // Mutex::AssertHeld()
   //
   // Return immediately if this thread holds the `Mutex` exclusively (in write
   // mode). Otherwise, may report an error (typically by crashing with a
   // diagnostic), or may return immediately.
-  void AssertHeld() const ASSERT_EXCLUSIVE_LOCK();
+  void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK();
 
   // ---------------------------------------------------------------------------
   // Reader-Writer Locking
@@ -225,28 +225,28 @@ class LOCKABLE Mutex {
   // `ReaderLock()` will block if some other thread has an exclusive/writer lock
   // on the mutex.
 
-  void ReaderLock() SHARED_LOCK_FUNCTION();
+  void ReaderLock() ABSL_SHARED_LOCK_FUNCTION();
 
   // Mutex::ReaderUnlock()
   //
   // Releases a read share of this `Mutex`. `ReaderUnlock` may return a mutex to
   // the free state if this thread holds the last reader lock on the mutex. Note
   // that you cannot call `ReaderUnlock()` on a mutex held in write mode.
-  void ReaderUnlock() UNLOCK_FUNCTION();
+  void ReaderUnlock() ABSL_UNLOCK_FUNCTION();
 
   // Mutex::ReaderTryLock()
   //
   // If the mutex can be acquired without blocking, acquires this mutex for
   // shared access and returns `true`. Otherwise, returns `false`. Returns
   // `true` with high probability if the `Mutex` was free or shared.
-  bool ReaderTryLock() SHARED_TRYLOCK_FUNCTION(true);
+  bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true);
 
   // Mutex::AssertReaderHeld()
   //
   // Returns immediately if this thread holds the `Mutex` in at least shared
   // mode (read mode). Otherwise, may report an error (typically by
   // crashing with a diagnostic), or may return immediately.
-  void AssertReaderHeld() const ASSERT_SHARED_LOCK();
+  void AssertReaderHeld() const ABSL_ASSERT_SHARED_LOCK();
 
   // Mutex::WriterLock()
   // Mutex::WriterUnlock()
@@ -257,11 +257,11 @@ class LOCKABLE Mutex {
   // These methods may be used (along with the complementary `Reader*()`
   // methods) to distingish simple exclusive `Mutex` usage (`Lock()`,
   // etc.) from reader/writer lock usage.
-  void WriterLock() EXCLUSIVE_LOCK_FUNCTION() { this->Lock(); }
+  void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { this->Lock(); }
 
-  void WriterUnlock() UNLOCK_FUNCTION() { this->Unlock(); }
+  void WriterUnlock() ABSL_UNLOCK_FUNCTION() { this->Unlock(); }
 
-  bool WriterTryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+  bool WriterTryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
     return this->TryLock();
   }
 
@@ -315,11 +315,11 @@ class LOCKABLE Mutex {
   // be acquired, then atomically acquires this `Mutex`. `LockWhen()` is
   // logically equivalent to `*Lock(); Await();` though they may have different
   // performance characteristics.
-  void LockWhen(const Condition &cond) EXCLUSIVE_LOCK_FUNCTION();
+  void LockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION();
 
-  void ReaderLockWhen(const Condition &cond) SHARED_LOCK_FUNCTION();
+  void ReaderLockWhen(const Condition &cond) ABSL_SHARED_LOCK_FUNCTION();
 
-  void WriterLockWhen(const Condition &cond) EXCLUSIVE_LOCK_FUNCTION() {
+  void WriterLockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
     this->LockWhen(cond);
   }
 
@@ -361,11 +361,11 @@ class LOCKABLE Mutex {
   //
   // Negative timeouts are equivalent to a zero timeout.
   bool LockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
-      EXCLUSIVE_LOCK_FUNCTION();
+      ABSL_EXCLUSIVE_LOCK_FUNCTION();
   bool ReaderLockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
-      SHARED_LOCK_FUNCTION();
+      ABSL_SHARED_LOCK_FUNCTION();
   bool WriterLockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
-      EXCLUSIVE_LOCK_FUNCTION() {
+      ABSL_EXCLUSIVE_LOCK_FUNCTION() {
     return this->LockWhenWithTimeout(cond, timeout);
   }
 
@@ -381,11 +381,11 @@ class LOCKABLE Mutex {
   //
   // Deadlines in the past are equivalent to an immediate deadline.
   bool LockWhenWithDeadline(const Condition &cond, absl::Time deadline)
-      EXCLUSIVE_LOCK_FUNCTION();
+      ABSL_EXCLUSIVE_LOCK_FUNCTION();
   bool ReaderLockWhenWithDeadline(const Condition &cond, absl::Time deadline)
-      SHARED_LOCK_FUNCTION();
+      ABSL_SHARED_LOCK_FUNCTION();
   bool WriterLockWhenWithDeadline(const Condition &cond, absl::Time deadline)
-      EXCLUSIVE_LOCK_FUNCTION() {
+      ABSL_EXCLUSIVE_LOCK_FUNCTION() {
     return this->LockWhenWithDeadline(cond, deadline);
   }
 
@@ -535,9 +535,9 @@ class LOCKABLE Mutex {
 // private:
 //   Mutex lock_;
 // };
-class SCOPED_LOCKABLE MutexLock {
+class ABSL_SCOPED_LOCKABLE MutexLock {
  public:
-  explicit MutexLock(Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
+  explicit MutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
     this->mu_->Lock();
   }
 
@@ -546,7 +546,7 @@ class SCOPED_LOCKABLE MutexLock {
   MutexLock& operator=(const MutexLock&) = delete;
   MutexLock& operator=(MutexLock&&) = delete;
 
-  ~MutexLock() UNLOCK_FUNCTION() { this->mu_->Unlock(); }
+  ~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); }
 
  private:
   Mutex *const mu_;
@@ -556,10 +556,9 @@ class SCOPED_LOCKABLE MutexLock {
 //
 // The `ReaderMutexLock` is a helper class, like `MutexLock`, which acquires and
 // releases a shared lock on a `Mutex` via RAII.
-class SCOPED_LOCKABLE ReaderMutexLock {
+class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
  public:
-  explicit ReaderMutexLock(Mutex *mu) SHARED_LOCK_FUNCTION(mu)
-      :  mu_(mu) {
+  explicit ReaderMutexLock(Mutex *mu) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
     mu->ReaderLock();
   }
 
@@ -568,9 +567,7 @@ class SCOPED_LOCKABLE ReaderMutexLock {
   ReaderMutexLock& operator=(const ReaderMutexLock&) = delete;
   ReaderMutexLock& operator=(ReaderMutexLock&&) = delete;
 
-  ~ReaderMutexLock() UNLOCK_FUNCTION() {
-    this->mu_->ReaderUnlock();
-  }
+  ~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->ReaderUnlock(); }
 
  private:
   Mutex *const mu_;
@@ -580,9 +577,9 @@ class SCOPED_LOCKABLE ReaderMutexLock {
 //
 // The `WriterMutexLock` is a helper class, like `MutexLock`, which acquires and
 // releases a write (exclusive) lock on a `Mutex` via RAII.
-class SCOPED_LOCKABLE WriterMutexLock {
+class ABSL_SCOPED_LOCKABLE WriterMutexLock {
  public:
-  explicit WriterMutexLock(Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu)
+  explicit WriterMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
       : mu_(mu) {
     mu->WriterLock();
   }
@@ -592,9 +589,7 @@ class SCOPED_LOCKABLE WriterMutexLock {
   WriterMutexLock& operator=(const WriterMutexLock&) = delete;
   WriterMutexLock& operator=(WriterMutexLock&&) = delete;
 
-  ~WriterMutexLock() UNLOCK_FUNCTION() {
-    this->mu_->WriterUnlock();
-  }
+  ~WriterMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->WriterUnlock(); }
 
  private:
   Mutex *const mu_;
@@ -860,13 +855,18 @@ class CondVar {
 // MutexLockMaybe
 //
 // MutexLockMaybe is like MutexLock, but is a no-op when mu is null.
-class SCOPED_LOCKABLE MutexLockMaybe {
+class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
  public:
-  explicit MutexLockMaybe(Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu)
-      : mu_(mu) { if (this->mu_ != nullptr) { this->mu_->Lock(); } }
-  ~MutexLockMaybe() UNLOCK_FUNCTION() {
+  explicit MutexLockMaybe(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+      : mu_(mu) {
+    if (this->mu_ != nullptr) {
+      this->mu_->Lock();
+    }
+  }
+  ~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() {
     if (this->mu_ != nullptr) { this->mu_->Unlock(); }
   }
+
  private:
   Mutex *const mu_;
   MutexLockMaybe(const MutexLockMaybe&) = delete;
@@ -879,17 +879,17 @@ class SCOPED_LOCKABLE MutexLockMaybe {
 //
 // ReleasableMutexLock is like MutexLock, but permits `Release()` of its
 // mutex before destruction. `Release()` may be called at most once.
-class SCOPED_LOCKABLE ReleasableMutexLock {
+class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
  public:
-  explicit ReleasableMutexLock(Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu)
+  explicit ReleasableMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
       : mu_(mu) {
     this->mu_->Lock();
   }
-  ~ReleasableMutexLock() UNLOCK_FUNCTION() {
+  ~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() {
     if (this->mu_ != nullptr) { this->mu_->Unlock(); }
   }
 
-  void Release() UNLOCK_FUNCTION();
+  void Release() ABSL_UNLOCK_FUNCTION();
 
  private:
   Mutex *mu_;
diff --git a/absl/synchronization/mutex_test.cc b/absl/synchronization/mutex_test.cc
index 9851ac1982..afb363af61 100644
--- a/absl/synchronization/mutex_test.cc
+++ b/absl/synchronization/mutex_test.cc
@@ -425,10 +425,10 @@ TEST(Mutex, CondVarWaitSignalsAwait) {
   // Use a struct so the lock annotations apply.
   struct {
     absl::Mutex barrier_mu;
-    bool barrier GUARDED_BY(barrier_mu) = false;
+    bool barrier ABSL_GUARDED_BY(barrier_mu) = false;
 
     absl::Mutex release_mu;
-    bool release GUARDED_BY(release_mu) = false;
+    bool release ABSL_GUARDED_BY(release_mu) = false;
     absl::CondVar released_cv;
   } state;
 
@@ -466,10 +466,10 @@ TEST(Mutex, CondVarWaitWithTimeoutSignalsAwait) {
   // Use a struct so the lock annotations apply.
   struct {
     absl::Mutex barrier_mu;
-    bool barrier GUARDED_BY(barrier_mu) = false;
+    bool barrier ABSL_GUARDED_BY(barrier_mu) = false;
 
     absl::Mutex release_mu;
-    bool release GUARDED_BY(release_mu) = false;
+    bool release ABSL_GUARDED_BY(release_mu) = false;
     absl::CondVar released_cv;
   } state;
 
@@ -770,7 +770,7 @@ static void GetReadLock(ReaderDecrementBugStruct *x) {
 
 // Test for reader counter being decremented incorrectly by waiter
 // with false condition.
-TEST(Mutex, MutexReaderDecrementBug) NO_THREAD_SAFETY_ANALYSIS {
+TEST(Mutex, MutexReaderDecrementBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
   ReaderDecrementBugStruct x;
   x.cond = false;
   x.waiting_on_cond = false;
@@ -819,7 +819,7 @@ TEST(Mutex, MutexReaderDecrementBug) NO_THREAD_SAFETY_ANALYSIS {
 // TSAN reports errors when locked Mutexes are destroyed.
 TEST(Mutex, DISABLED_LockedMutexDestructionBug) NO_THREAD_SAFETY_ANALYSIS {
 #else
-TEST(Mutex, LockedMutexDestructionBug) NO_THREAD_SAFETY_ANALYSIS {
+TEST(Mutex, LockedMutexDestructionBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
 #endif
   for (int i = 0; i != 10; i++) {
     // Create, lock and destroy 10 locks.
@@ -1101,7 +1101,7 @@ TEST(Mutex, DeadlockDetectorBazelWarning) {
 // annotation-based static thread-safety analysis is not currently
 // predicate-aware and cannot tell if the two for-loops that acquire and
 // release the locks have the same predicates.
-TEST(Mutex, DeadlockDetectorStessTest) NO_THREAD_SAFETY_ANALYSIS {
+TEST(Mutex, DeadlockDetectorStessTest) ABSL_NO_THREAD_SAFETY_ANALYSIS {
   // Stress test: Here we create a large number of locks and use all of them.
   // If a deadlock detector keeps a full graph of lock acquisition order,
   // it will likely be too slow for this test to pass.
@@ -1123,7 +1123,7 @@ TEST(Mutex, DeadlockDetectorStessTest) NO_THREAD_SAFETY_ANALYSIS {
 // TSAN reports errors when locked Mutexes are destroyed.
 TEST(Mutex, DISABLED_DeadlockIdBug) NO_THREAD_SAFETY_ANALYSIS {
 #else
-TEST(Mutex, DeadlockIdBug) NO_THREAD_SAFETY_ANALYSIS {
+TEST(Mutex, DeadlockIdBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
 #endif
   // Test a scenario where a cached deadlock graph node id in the
   // list of held locks is not invalidated when the corresponding