Diffstat (limited to 'absl/base/call_once.h')
 absl/base/call_once.h | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
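
In brief: this change relaxes the memory order of the debug-only sanity check on the control word from acquire to relaxed, and drops the explicit acquire success order from the fast-path compare_exchange_strong, leaving relaxed ordering for both success and failure. The added comments record why this is safe: when the fast path loses the race, base_internal::SpinLockWait() only returns kOnceDone after an acquire load has observed that value, and the relaxed load followed by a release store that publishes kOnceDone is preferred over a single release exchange because it is slightly faster when there are no waiters.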
diff --git a/absl/base/call_once.h b/absl/base/call_once.h
index 4aa6360cffc5..e1614e517a8d 100644
--- a/absl/base/call_once.h
+++ b/absl/base/call_once.h
@@ -148,7 +148,7 @@ void CallOnceImpl(std::atomic<uint32_t>* control,
                   Args&&... args) {
 #ifndef NDEBUG
   {
-    uint32_t old_control = control->load(std::memory_order_acquire);
+    uint32_t old_control = control->load(std::memory_order_relaxed);
     if (old_control != kOnceInit &&
         old_control != kOnceRunning &&
         old_control != kOnceWaiter &&
@@ -166,14 +166,23 @@ void CallOnceImpl(std::atomic<uint32_t>* control,
   // Must do this before potentially modifying control word's state.
   base_internal::SchedulingHelper maybe_disable_scheduling(scheduling_mode);
   // Short circuit the simplest case to avoid procedure call overhead.
+  // The base_internal::SpinLockWait() call returns either kOnceInit or
+  // kOnceDone. If it returns kOnceDone, it must have loaded the control word
+  // with std::memory_order_acquire and seen a value of kOnceDone.
   uint32_t old_control = kOnceInit;
   if (control->compare_exchange_strong(old_control, kOnceRunning,
-                                       std::memory_order_acquire,
                                        std::memory_order_relaxed) ||
       base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans,
                                   scheduling_mode) == kOnceInit) {
     base_internal::Invoke(std::forward<Callable>(fn),
                           std::forward<Args>(args)...);
+    // The call to SpinLockWake below is an optimization, because the waiter
+    // in SpinLockWait is waiting with a short timeout. The atomic load/store
+    // sequence is slightly faster than an atomic exchange:
+    //   old_control = control->exchange(base_internal::kOnceDone,
+    //                                   std::memory_order_release);
+    // We opt for a slightly faster case when there are no waiters, in spite
+    // of longer tail latency when there are waiters.
     old_control = control->load(std::memory_order_relaxed);
     control->store(base_internal::kOnceDone, std::memory_order_release);
     if (old_control == base_internal::kOnceWaiter) {
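
To make the protocol concrete, below is a minimal, self-contained sketch of the same control-word scheme written against plain std::atomic. It is an illustration under stated assumptions, not Abseil's implementation: the sketch namespace and CallOnceSketch are hypothetical names, the busy-wait loop merely stands in for base_internal::SpinLockWait() (which parks with a short timeout), and the kOnceWaiter bookkeeping and SpinLockWake() call are reduced to comments.

    #include <atomic>
    #include <cstdint>
    #include <thread>
    #include <utility>

    namespace sketch {

    // Control-word states, mirroring the values named in the diff.
    enum : uint32_t {
      kOnceInit = 0,     // no call has started
      kOnceRunning = 1,  // some thread is running the callable
      kOnceWaiter = 2,   // running, with at least one parked waiter
      kOnceDone = 3,     // the callable finished; its effects are published
    };

    template <typename Callable>
    void CallOnceSketch(std::atomic<uint32_t>* control, Callable&& fn) {
      uint32_t old_control = kOnceInit;
      // Relaxed is sufficient on the fast path: the winner publishes with
      // the release store below, and losers get their ordering from the
      // acquire load in the wait loop -- the same argument as the diff's
      // comment on SpinLockWait().
      if (control->compare_exchange_strong(old_control, kOnceRunning,
                                           std::memory_order_relaxed)) {
        std::forward<Callable>(fn)();
        // Relaxed load + release store rather than a release exchange: the
        // trade-off named in the diff -- a bit faster when there are no
        // waiters, at the cost of tail latency when there are.
        old_control = control->load(std::memory_order_relaxed);
        control->store(kOnceDone, std::memory_order_release);
        if (old_control == kOnceWaiter) {
          // Abseil calls base_internal::SpinLockWake() here. It is only an
          // optimization: waiters park with a short timeout, so they make
          // progress even without a wake.
        }
        return;
      }
      // Stand-in for base_internal::SpinLockWait(): the acquire load is
      // what makes the winner's initialization visible to this thread.
      // (The real code also transitions kOnceRunning -> kOnceWaiter before
      // parking; that bookkeeping is omitted here.)
      while (control->load(std::memory_order_acquire) != kOnceDone) {
        std::this_thread::yield();
      }
    }

    }  // namespace sketch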
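
Given a shared control word, e.g. a static std::atomic<uint32_t> initialized to kOnceInit, any number of threads may call CallOnceSketch concurrently: fn runs exactly once, and every caller returns only after fn's effects are visible, since each non-winning caller's acquire load synchronizes with the winner's release store of kOnceDone.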