author | Abseil Team <absl-team@google.com> | 2019-04-09T15:22-0700
committer | Shaindel Schwartz <shaindel@google.com> | 2019-04-09T17:34-0400
commit | dbae8764fbd429bf7d7745e24bcf73962177a7c0 (patch)
tree | 9e0b90d17192c02952632d619616f00ec4f68580 /absl/base/internal
parent | 044da8a29c923506af0f0b46bc46f43c1e1300b5 (diff)
Export of internal Abseil changes.
--
3f04cd3c25a99df91ff913977b8c5b343532db5d by Abseil Team <absl-team@google.com>:

Stricter memory order constraints for CycleClock callback.

PiperOrigin-RevId: 242670115

--
216db48375306490f1722a11aaf33080939d9f2f by Abseil Team <absl-team@google.com>:

internal/optional.h: move macro from types/optional.h

ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS is only used within this file. Additionally, check the macro with #ifdef rather than #if; this fixes the -Wundef warning "'ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS' is not defined, evaluates to 0".

PiperOrigin-RevId: 242548205

--
fbe22e7d8dc5c0b3d43ac26297e97ddbaeab3d39 by Samuel Benzaquen <sbenza@google.com>:

Implement %f natively for any input. It evaluates the input at runtime and allocates stack space accordingly. This removes a potential fallback into snprintf, improves performance, and removes all memory allocations in this formatting path.

PiperOrigin-RevId: 242531736

--
1458f9ba2a79ef0534e46527cd34770dee54164d by Greg Falcon <gfalcon@google.com>:

Add explicit check for NVCC in compressed_tuple.h. NVCC claims to be MSVC, but does not implement this MSVC attribute.

PiperOrigin-RevId: 242513453

GitOrigin-RevId: 3f04cd3c25a99df91ff913977b8c5b343532db5d
Change-Id: I0742e8619c5248c7607961113e406486bc0e279b
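The first item above is the change carried by this page's diff (see the Diffstat below). As a minimal, self-contained sketch of the pattern it describes, using made-up names (CallbackFunc, RegisterCallback, ReadClock, g_calibration) rather than Abseil's actual declarations: a relaxed load serves as a cheap null check on the common fast path, and only when a callback has been registered does an acquire load pair with the registering thread's release store, making state written before registration visible to the thread that invokes the callback.

```cpp
#include <atomic>
#include <cstdint>

// Illustrative stand-ins, not Abseil's code: a nullable function-pointer
// callback plus some state the callback path depends on.
using CallbackFunc = int64_t (*)();
static std::atomic<CallbackFunc> g_callback{nullptr};
static int64_t g_calibration = 0;  // Written before the callback is published.

void RegisterCallback(CallbackFunc fn) {
  g_calibration = 42;
  // Release store: pairs with the acquire load below, so the write to
  // g_calibration is visible to any thread that observes fn via that load.
  g_callback.store(fn, std::memory_order_release);
}

int64_t ReadClock() {
  // Fast path: a relaxed load is enough to detect "no callback registered"
  // and avoids acquire ordering on every call (cheaper on non-x86 hardware).
  if (g_callback.load(std::memory_order_relaxed) == nullptr) {
    return 0;  // Would fall back to the hardware cycle counter.
  }
  // Slow path: re-load with acquire before using the callback and the state
  // published alongside it.
  CallbackFunc fn = g_callback.load(std::memory_order_acquire);
  return fn() + g_calibration;
}
```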
Diffstat (limited to 'absl/base/internal')
-rw-r--r-- | absl/base/internal/cycleclock.cc | 18 |
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/absl/base/internal/cycleclock.cc b/absl/base/internal/cycleclock.cc
index 4b553c29a2c3..e9844b7177a1 100644
--- a/absl/base/internal/cycleclock.cc
+++ b/absl/base/internal/cycleclock.cc
@@ -55,10 +55,23 @@ static constexpr int32_t kShift = 2;
 static constexpr double kFrequencyScale = 1.0 / (1 << kShift);
 static std::atomic<CycleClockSourceFunc> cycle_clock_source;
 
+CycleClockSourceFunc LoadCycleClockSource() {
+  // Optimize for the common case (no callback) by first doing a relaxed load;
+  // this is significantly faster on non-x86 platforms.
+  if (cycle_clock_source.load(std::memory_order_relaxed) == nullptr) {
+    return nullptr;
+  }
+  // This corresponds to the store(std::memory_order_release) in
+  // CycleClockSource::Register, and makes sure that any updates made prior to
+  // registering the callback are visible to this thread before the callback is
+  // invoked.
+  return cycle_clock_source.load(std::memory_order_acquire);
+}
+
 }  // namespace
 
 int64_t CycleClock::Now() {
-  auto fn = cycle_clock_source.load(std::memory_order_relaxed);
+  auto fn = LoadCycleClockSource();
   if (fn == nullptr) {
     return base_internal::UnscaledCycleClock::Now() >> kShift;
   }
@@ -70,7 +83,8 @@ double CycleClock::Frequency() {
 }
 
 void CycleClockSource::Register(CycleClockSourceFunc source) {
-  cycle_clock_source.store(source, std::memory_order_relaxed);
+  // Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource.
+  cycle_clock_source.store(source, std::memory_order_release);
 }
 
 #else
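As a usage note, not part of the patch: a caller that registers a cycle-clock callback can initialize any state the callback reads before calling Register, and the release store above (paired with the acquire load in LoadCycleClockSource) makes that state visible to the thread that later invokes the callback. The sketch below assumes the header path absl/base/internal/cycleclock.h, the absl::base_internal namespace, and that CycleClockSourceFunc is a plain int64_t() function pointer callable through Register from this code; this is an internal API, so access may be more restricted in the real tree, and FakeCycleSource / InstallFakeCycleClock / g_fake_cycles are hypothetical names.

```cpp
#include <cstdint>

#include "absl/base/internal/cycleclock.h"  // Assumed header for this file.

namespace {

int64_t g_fake_cycles = 0;  // Hypothetical state the callback reads.

// Hypothetical callback matching the assumed CycleClockSourceFunc signature.
int64_t FakeCycleSource() { return g_fake_cycles; }

}  // namespace

void InstallFakeCycleClock() {
  // Initialize state before registering. The release store in Register()
  // plus the acquire load in LoadCycleClockSource() guarantees this write is
  // visible to whichever thread later calls CycleClock::Now() and ends up
  // invoking FakeCycleSource().
  g_fake_cycles = 1000000;
  absl::base_internal::CycleClockSource::Register(&FakeCycleSource);
}
```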