author: Abseil Team <absl-team@google.com> (2020-03-23 20:16 -0700)
committer: Xiaoyi Zhang <zhangxy@google.com> (2020-03-23 20:24 -0400)
commit: 518f17501e6156f7921fbb9b68a1e420bcb10bc5 (patch)
tree: 615cb6be894145feaa79ff3e341f2d483022f336 /absl/base/internal/spinlock.cc
parent: 092ed9793a1ad0e7e418f32c057bf3159a71cd04 (diff)
Export of internal Abseil changes
--
79913a12f0cad4baf948430315aabf53f03b6475 by Abseil Team <absl-team@google.com>:

Don't inline (Un)LockSlow.

PiperOrigin-RevId: 302502344

--
6b340e80f0690655f24799c8de6707b3a95b8579 by Derek Mauro <dmauro@google.com>:

Add hardening assertions to absl::optional's dereference operators

PiperOrigin-RevId: 302492862

--
a9951bf4852d8c1aec472cb4b539830411270e4c by Derek Mauro <dmauro@google.com>:

Correctly add hardware AES compiler flags under Linux X86-64
Fixes #643

PiperOrigin-RevId: 302490673

--
314c3621ee4d57b6bc8d64338a1f1d48a69741d1 by Derek Mauro <dmauro@google.com>:

Upgrade to hardening assertions in absl::Span::remove_prefix and absl::Span::remove_suffix

PiperOrigin-RevId: 302481191

--
a142b8c6c62705c5f0d4fe3113150f0c0b7822b9 by Derek Mauro <dmauro@google.com>:

Update Docker containers to Bazel 2.2.0, GCC 9.3, and a new Clang snapshot

PiperOrigin-RevId: 302454042

--
afedeb70a2adc87010030c9ba6f06fe35ec26407 by Derek Mauro <dmauro@google.com>:

Add hardening assertions for the preconditions of absl::FixedArray

PiperOrigin-RevId: 302441767

--
44442bfbc0a9a742df32f07cee86a47712efb8b4 by Derek Mauro <dmauro@google.com>:

Fix new Clang warning about SpinLock doing operations on enums of different types

PiperOrigin-RevId: 302430387

--
69eaff7f97231779f696321c2ba8b88debf6dd9e by Derek Mauro <dmauro@google.com>:

Convert precondition assertions to ABSL_HARDENING_ASSERT for
absl::InlinedVector

PiperOrigin-RevId: 302427894

--
26b6db906a0942fd18583dc2cdd1bab32919d964 by Gennadiy Rozental <rogeeff@google.com>:

Internal change

PiperOrigin-RevId: 302425283

--
e62e81422979e922505d2cd9000e1de58123c088 by Derek Mauro <dmauro@google.com>:

Add an option to build Abseil in hardened mode

In hardened mode, the ABSL_HARDENING_ASSERT() macro is active even
when NDEBUG is defined. This allows Abseil to perform runtime checks
even in release mode. It is intended for checks, such as bounds checks,
whose absence could otherwise lead to security vulnerabilities.

Use the new assertion in absl::string_view and absl::Span to test it
(a minimal sketch of such a macro appears after this commit message).

PiperOrigin-RevId: 302119187
GitOrigin-RevId: 79913a12f0cad4baf948430315aabf53f03b6475
Change-Id: I0cc3341fd333a1df313167bab72dc5a759c4a048
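
For illustration, here is a minimal sketch of how such a hardening assertion
can be wired up. The names MY_HARDENED, MY_HARDENING_ASSERT, and
MyHardeningAbort are hypothetical placeholders, not Abseil's actual option or
macro names; the sketch only mirrors the behavior described above (the check
stays active in hardened release builds and compiles away in ordinary NDEBUG
builds).

// hardening_assert_sketch.h -- hypothetical example, not Abseil's implementation.
#include <cstdlib>

// Unconditionally aborts the process; kept out of line so the checked fast
// path stays small.
[[noreturn]] inline void MyHardeningAbort() { std::abort(); }

#if defined(MY_HARDENED) || !defined(NDEBUG)
// Hardened or debug build: evaluate the condition and abort on failure,
// even when NDEBUG is defined (the hardened case).
#define MY_HARDENING_ASSERT(expr) \
  (static_cast<bool>(expr) ? static_cast<void>(0) : MyHardeningAbort())
#else
// Ordinary release build: the check compiles away entirely.
#define MY_HARDENING_ASSERT(expr) static_cast<void>(0)
#endif

// Example use, mirroring a bounds-checked accessor such as Span::operator[]:
//   T& at(size_t i) { MY_HARDENING_ASSERT(i < size()); return ptr_[i]; }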
Diffstat (limited to 'absl/base/internal/spinlock.cc')
-rw-r--r--  absl/base/internal/spinlock.cc  19
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/absl/base/internal/spinlock.cc b/absl/base/internal/spinlock.cc
index 830d4729af3b..fd0c733e234b 100644
--- a/absl/base/internal/spinlock.cc
+++ b/absl/base/internal/spinlock.cc
@@ -190,30 +190,32 @@ void SpinLock::SlowUnlock(uint32_t lock_value) {
 // We use the upper 29 bits of the lock word to store the time spent waiting to
 // acquire this lock.  This is reported by contentionz profiling.  Since the
 // lower bits of the cycle counter wrap very quickly on high-frequency
-// processors we divide to reduce the granularity to 2^PROFILE_TIMESTAMP_SHIFT
+// processors we divide to reduce the granularity to 2^kProfileTimestampShift
 // sized units.  On a 4Ghz machine this will lose track of wait times greater
 // than (2^29/4 Ghz)*128 =~ 17.2 seconds.  Such waits should be extremely rare.
-enum { PROFILE_TIMESTAMP_SHIFT = 7 };
-enum { LOCKWORD_RESERVED_SHIFT = 3 };  // We currently reserve the lower 3 bits.
+static constexpr int kProfileTimestampShift = 7;
+
+// We currently reserve the lower 3 bits.
+static constexpr int kLockwordReservedShift = 3;
 
 uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
                                     int64_t wait_end_time) {
   static const int64_t kMaxWaitTime =
-      std::numeric_limits<uint32_t>::max() >> LOCKWORD_RESERVED_SHIFT;
+      std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
   int64_t scaled_wait_time =
-      (wait_end_time - wait_start_time) >> PROFILE_TIMESTAMP_SHIFT;
+      (wait_end_time - wait_start_time) >> kProfileTimestampShift;
 
   // Return a representation of the time spent waiting that can be stored in
   // the lock word's upper bits.
   uint32_t clamped = static_cast<uint32_t>(
-      std::min(scaled_wait_time, kMaxWaitTime) << LOCKWORD_RESERVED_SHIFT);
+      std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift);
 
   if (clamped == 0) {
     return kSpinLockSleeper;  // Just wake waiters, but don't record contention.
   }
   // Bump up value if necessary to avoid returning kSpinLockSleeper.
   const uint32_t kMinWaitTime =
-      kSpinLockSleeper + (1 << LOCKWORD_RESERVED_SHIFT);
+      kSpinLockSleeper + (1 << kLockwordReservedShift);
   if (clamped == kSpinLockSleeper) {
     return kMinWaitTime;
   }
@@ -224,8 +226,7 @@ uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
   // Cast to uint32_t first to ensure bits [63:32] are cleared.
   const uint64_t scaled_wait_time =
       static_cast<uint32_t>(lock_value & kWaitTimeMask);
-  return scaled_wait_time
-      << (PROFILE_TIMESTAMP_SHIFT - LOCKWORD_RESERVED_SHIFT);
+  return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
 }
 
 }  // namespace base_internal
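
For readers following the encoding above, here is a self-contained sketch of
the encode/decode round-trip together with the worked arithmetic behind the
~17.2 second limit. The values assumed here for kSpinLockSleeper and
kWaitTimeMask are illustrative placeholders; the real constants are declared
in spinlock.h.

// wait_cycles_sketch.cc -- standalone illustration of the scheme above.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>

namespace {

constexpr int kProfileTimestampShift = 7;  // Divide cycle counts by 2^7 = 128.
constexpr int kLockwordReservedShift = 3;  // Lower 3 bits hold lock state.
constexpr uint32_t kSpinLockSleeper = 8;          // Assumed value for illustration.
constexpr uint32_t kWaitTimeMask = ~uint32_t{7};  // Assumed: all bits above the state bits.

// Maximum representable wait: (2^29 - 1) units of 128 cycles each,
// i.e. roughly 2^36 cycles =~ 17.2 seconds on a 4 GHz machine.
uint32_t EncodeWaitCycles(int64_t wait_start_time, int64_t wait_end_time) {
  static const int64_t kMaxWaitTime =
      std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
  int64_t scaled_wait_time =
      (wait_end_time - wait_start_time) >> kProfileTimestampShift;
  uint32_t clamped = static_cast<uint32_t>(
      std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift);
  if (clamped == 0) {
    return kSpinLockSleeper;  // Wake waiters without recording contention.
  }
  // Bump up the value if necessary so it is never mistaken for kSpinLockSleeper.
  const uint32_t kMinWaitTime =
      kSpinLockSleeper + (1 << kLockwordReservedShift);
  if (clamped == kSpinLockSleeper) {
    return kMinWaitTime;
  }
  return clamped;
}

uint64_t DecodeWaitCycles(uint32_t lock_value) {
  const uint64_t scaled_wait_time =
      static_cast<uint32_t>(lock_value & kWaitTimeMask);
  return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
}

}  // namespace

int main() {
  // A 1,000,000-cycle wait loses its low 7 bits of granularity:
  // 1,000,000 >> 7 = 7812 units, and decoding gives 7812 * 128 = 999,936 cycles.
  const uint32_t encoded = EncodeWaitCycles(/*wait_start_time=*/0,
                                            /*wait_end_time=*/1000000);
  std::cout << DecodeWaitCycles(encoded) << "\n";  // Prints 999936.
}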