Diffstat (limited to 'absl/base/internal')
-rw-r--r--   absl/base/internal/spinlock.cc   | 19
-rw-r--r--   absl/base/internal/spinlock.h    | 13
2 files changed, 17 insertions, 15 deletions
diff --git a/absl/base/internal/spinlock.cc b/absl/base/internal/spinlock.cc
index 830d4729af3b..fd0c733e234b 100644
--- a/absl/base/internal/spinlock.cc
+++ b/absl/base/internal/spinlock.cc
@@ -190,30 +190,32 @@ void SpinLock::SlowUnlock(uint32_t lock_value) {
 // We use the upper 29 bits of the lock word to store the time spent waiting to
 // acquire this lock. This is reported by contentionz profiling. Since the
 // lower bits of the cycle counter wrap very quickly on high-frequency
-// processors we divide to reduce the granularity to 2^PROFILE_TIMESTAMP_SHIFT
+// processors we divide to reduce the granularity to 2^kProfileTimestampShift
 // sized units. On a 4Ghz machine this will lose track of wait times greater
 // than (2^29/4 Ghz)*128 =~ 17.2 seconds. Such waits should be extremely rare.
-enum { PROFILE_TIMESTAMP_SHIFT = 7 };
-enum { LOCKWORD_RESERVED_SHIFT = 3 };  // We currently reserve the lower 3 bits.
+static constexpr int kProfileTimestampShift = 7;
+
+// We currently reserve the lower 3 bits.
+static constexpr int kLockwordReservedShift = 3;
 
 uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
                                     int64_t wait_end_time) {
   static const int64_t kMaxWaitTime =
-      std::numeric_limits<uint32_t>::max() >> LOCKWORD_RESERVED_SHIFT;
+      std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
   int64_t scaled_wait_time =
-      (wait_end_time - wait_start_time) >> PROFILE_TIMESTAMP_SHIFT;
+      (wait_end_time - wait_start_time) >> kProfileTimestampShift;
 
   // Return a representation of the time spent waiting that can be stored in
   // the lock word's upper bits.
   uint32_t clamped = static_cast<uint32_t>(
-      std::min(scaled_wait_time, kMaxWaitTime) << LOCKWORD_RESERVED_SHIFT);
+      std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift);
 
   if (clamped == 0) {
     return kSpinLockSleeper;  // Just wake waiters, but don't record contention.
   }
   // Bump up value if necessary to avoid returning kSpinLockSleeper.
   const uint32_t kMinWaitTime =
-      kSpinLockSleeper + (1 << LOCKWORD_RESERVED_SHIFT);
+      kSpinLockSleeper + (1 << kLockwordReservedShift);
   if (clamped == kSpinLockSleeper) {
     return kMinWaitTime;
   }
@@ -224,8 +226,7 @@ uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
   // Cast to uint32_t first to ensure bits [63:32] are cleared.
   const uint64_t scaled_wait_time =
       static_cast<uint32_t>(lock_value & kWaitTimeMask);
-  return scaled_wait_time
-      << (PROFILE_TIMESTAMP_SHIFT - LOCKWORD_RESERVED_SHIFT);
+  return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
 }
 
 }  // namespace base_internal
diff --git a/absl/base/internal/spinlock.h b/absl/base/internal/spinlock.h
index 24e2e9a6f820..89e93aad0bd0 100644
--- a/absl/base/internal/spinlock.h
+++ b/absl/base/internal/spinlock.h
@@ -148,12 +148,13 @@ class ABSL_LOCKABLE SpinLock {
   // bit[1] encodes whether a lock uses cooperative scheduling.
   // bit[2] encodes whether a lock disables scheduling.
   // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
-  enum { kSpinLockHeld = 1 };
-  enum { kSpinLockCooperative = 2 };
-  enum { kSpinLockDisabledScheduling = 4 };
-  enum { kSpinLockSleeper = 8 };
-  enum { kWaitTimeMask =  // Includes kSpinLockSleeper.
-    ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling) };
+  static constexpr uint32_t kSpinLockHeld = 1;
+  static constexpr uint32_t kSpinLockCooperative = 2;
+  static constexpr uint32_t kSpinLockDisabledScheduling = 4;
+  static constexpr uint32_t kSpinLockSleeper = 8;
+  // Includes kSpinLockSleeper.
+  static constexpr uint32_t kWaitTimeMask =
+      ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);
 
   // Returns true if the provided scheduling mode is cooperative.
   static constexpr bool IsCooperative(
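For reference, below is a minimal standalone sketch (not part of the commit) that mirrors the wait-time encoding shown in the hunks above: the lower 3 bits of the lock word hold the flag constants, and the upper 29 bits hold the wait time in units of 2^kProfileTimestampShift cycles. The constants and the two function bodies are copied from the diff; the enclosing namespace, the free-function wrappers standing in for the SpinLock members, and the main() driver are added purely for illustration.

// Standalone sketch of the encode/decode arithmetic from the diff above.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <limits>

namespace {

constexpr int kProfileTimestampShift = 7;  // 1 stored unit = 2^7 = 128 cycles.
constexpr int kLockwordReservedShift = 3;  // Lower 3 bits hold the lock flags.

constexpr uint32_t kSpinLockHeld = 1;
constexpr uint32_t kSpinLockCooperative = 2;
constexpr uint32_t kSpinLockDisabledScheduling = 4;
constexpr uint32_t kSpinLockSleeper = 8;
// Includes kSpinLockSleeper.
constexpr uint32_t kWaitTimeMask =
    ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);

// Same body as SpinLock::EncodeWaitCycles in the hunk above.
uint32_t EncodeWaitCycles(int64_t wait_start_time, int64_t wait_end_time) {
  static const int64_t kMaxWaitTime =
      std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
  int64_t scaled_wait_time =
      (wait_end_time - wait_start_time) >> kProfileTimestampShift;
  uint32_t clamped = static_cast<uint32_t>(
      std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift);
  if (clamped == 0) {
    return kSpinLockSleeper;  // Just wake waiters, but don't record contention.
  }
  // Bump up value if necessary to avoid returning kSpinLockSleeper.
  const uint32_t kMinWaitTime =
      kSpinLockSleeper + (1 << kLockwordReservedShift);
  if (clamped == kSpinLockSleeper) {
    return kMinWaitTime;
  }
  return clamped;
}

// Same body as SpinLock::DecodeWaitCycles in the hunk above.
uint64_t DecodeWaitCycles(uint32_t lock_value) {
  const uint64_t scaled_wait_time =
      static_cast<uint32_t>(lock_value & kWaitTimeMask);
  return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
}

}  // namespace

int main() {
  // A 1,000,000-cycle wait round-trips to 999,936 cycles: the encoder keeps
  // only whole multiples of 2^kProfileTimestampShift (128) cycles.
  const uint32_t encoded = EncodeWaitCycles(0, 1000000);
  std::printf("encoded lock bits: 0x%08x\n", static_cast<unsigned>(encoded));
  std::printf("decoded wait:      %llu cycles\n",
              static_cast<unsigned long long>(DecodeWaitCycles(encoded)));
  return 0;
}

The renaming in the commit is purely mechanical: the enum constants become static constexpr values with typed storage (int for the shifts, uint32_t for the lock-word bits), which is why the insertion and deletion counts in the diffstat nearly balance.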