Diffstat (limited to 'absl')
-rw-r--r-- | absl/base/config.h | 2
-rw-r--r-- | absl/container/btree_test.cc | 2
-rw-r--r-- | absl/container/inlined_vector_benchmark.cc | 37
-rw-r--r-- | absl/meta/type_traits_test.cc | 22
-rw-r--r-- | absl/random/internal/BUILD.bazel | 5
-rw-r--r-- | absl/random/internal/randen_hwaes.cc | 128
-rw-r--r-- | absl/random/internal/uniform_helper.h | 3
-rw-r--r-- | absl/strings/string_view.h | 2
-rw-r--r-- | absl/time/internal/cctz/src/time_zone_impl.cc | 13
9 files changed, 98 insertions, 116 deletions
diff --git a/absl/base/config.h b/absl/base/config.h
index 1c3cb08e746d..90d98214d09d 100644
--- a/absl/base/config.h
+++ b/absl/base/config.h
@@ -125,7 +125,7 @@
 #error ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot directly set
 #elif (defined(__clang__) && defined(_LIBCPP_VERSION)) ||        \
     (!defined(__clang__) && defined(__GNUC__) &&                 \
-     (__GNUC__ > 5 || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)) && \
+     (__GNUC__ > 7 || (__GNUC__ == 7 && __GNUC_MINOR__ >= 4)) && \
     (defined(_LIBCPP_VERSION) || defined(__GLIBCXX__))) ||       \
    (defined(_MSC_VER) && !defined(__NVCC__))
 #define ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1
diff --git a/absl/container/btree_test.cc b/absl/container/btree_test.cc
index a330cca32b90..3ac97f89a8b7 100644
--- a/absl/container/btree_test.cc
+++ b/absl/container/btree_test.cc
@@ -323,6 +323,7 @@ class unique_checker : public base_checker<TreeType, CheckerType> {
   unique_checker(const unique_checker &x) : super_type(x) {}
   template <class InputIterator>
   unique_checker(InputIterator b, InputIterator e) : super_type(b, e) {}
+  unique_checker& operator=(const unique_checker&) = default;
 
   // Insertion routines.
   std::pair<iterator, bool> insert(const value_type &x) {
@@ -370,6 +371,7 @@ class multi_checker : public base_checker<TreeType, CheckerType> {
   multi_checker(const multi_checker &x) : super_type(x) {}
   template <class InputIterator>
   multi_checker(InputIterator b, InputIterator e) : super_type(b, e) {}
+  multi_checker& operator=(const multi_checker&) = default;
 
   // Insertion routines.
   iterator insert(const value_type &x) {
diff --git a/absl/container/inlined_vector_benchmark.cc b/absl/container/inlined_vector_benchmark.cc
index b99bbd62544f..3f2b4ed28a57 100644
--- a/absl/container/inlined_vector_benchmark.cc
+++ b/absl/container/inlined_vector_benchmark.cc
@@ -25,42 +25,45 @@ namespace {
 
 void BM_InlinedVectorFill(benchmark::State& state) {
+  const int len = state.range(0);
   absl::InlinedVector<int, 8> v;
-  int val = 10;
+  v.reserve(len);
   for (auto _ : state) {
+    v.resize(0);  // Use resize(0) as InlinedVector releases storage on clear().
+    for (int i = 0; i < len; ++i) {
+      v.push_back(i);
+    }
     benchmark::DoNotOptimize(v);
-    v.push_back(val);
   }
 }
-BENCHMARK(BM_InlinedVectorFill)->Range(0, 1024);
+BENCHMARK(BM_InlinedVectorFill)->Range(1, 256);
 
 void BM_InlinedVectorFillRange(benchmark::State& state) {
   const int len = state.range(0);
-  std::unique_ptr<int[]> ia(new int[len]);
-  for (int i = 0; i < len; i++) {
-    ia[i] = i;
-  }
-  auto* from = ia.get();
-  auto* to = from + len;
+  const std::vector<int> src(len, len);
+  absl::InlinedVector<int, 8> v;
+  v.reserve(len);
   for (auto _ : state) {
-    benchmark::DoNotOptimize(from);
-    benchmark::DoNotOptimize(to);
-    absl::InlinedVector<int, 8> v(from, to);
+    benchmark::DoNotOptimize(src);
+    v.assign(src.begin(), src.end());
     benchmark::DoNotOptimize(v);
   }
 }
-BENCHMARK(BM_InlinedVectorFillRange)->Range(0, 1024);
+BENCHMARK(BM_InlinedVectorFillRange)->Range(1, 256);
 
 void BM_StdVectorFill(benchmark::State& state) {
+  const int len = state.range(0);
   std::vector<int> v;
-  int val = 10;
+  v.reserve(len);
   for (auto _ : state) {
+    v.clear();
+    for (int i = 0; i < len; ++i) {
+      v.push_back(i);
+    }
     benchmark::DoNotOptimize(v);
-    benchmark::DoNotOptimize(val);
-    v.push_back(val);
   }
 }
-BENCHMARK(BM_StdVectorFill)->Range(0, 1024);
+BENCHMARK(BM_StdVectorFill)->Range(1, 256);
 
 // The purpose of the next two benchmarks is to verify that
 // absl::InlinedVector is efficient when moving is more efficent than
diff --git a/absl/meta/type_traits_test.cc b/absl/meta/type_traits_test.cc
index a7a9c5c917ff..6fbb42f82e06 100644
--- a/absl/meta/type_traits_test.cc
+++ b/absl/meta/type_traits_test.cc
@@ -546,6 +546,28 @@ TEST(TypeTraitsTest, TestTrivialDefaultCtor) {
 #endif
 }
 
+// GCC prior to 7.4 had a bug in its trivially-constructible traits
+// (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80654).
+// This test makes sure that we do not depend on the trait in these cases when
+// implementing absl triviality traits.
+
+template <class T>
+struct BadConstructors {
+  BadConstructors() { static_assert(T::value, ""); }
+
+  BadConstructors(BadConstructors&&) { static_assert(T::value, ""); }
+
+  BadConstructors(const BadConstructors&) { static_assert(T::value, ""); }
+};
+
+TEST(TypeTraitsTest, TestTrivialityBadConstructors) {
+  using BadType = BadConstructors<int>;
+
+  EXPECT_FALSE(absl::is_trivially_default_constructible<BadType>::value);
+  EXPECT_FALSE(absl::is_trivially_move_constructible<BadType>::value);
+  EXPECT_FALSE(absl::is_trivially_copy_constructible<BadType>::value);
+}
+
 TEST(TypeTraitsTest, TestTrivialMoveCtor) {
   // Verify that arithmetic types and pointers have trivial move
   // constructors.
diff --git a/absl/random/internal/BUILD.bazel b/absl/random/internal/BUILD.bazel
index 7416e93642bd..d9581d00e587 100644
--- a/absl/random/internal/BUILD.bazel
+++ b/absl/random/internal/BUILD.bazel
@@ -306,7 +306,10 @@ cc_library(
     # anyway and thus there wouldn't be any gain from using it as a module.
     features = ["-header_modules"],
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    deps = [":platform"],
+    deps = [
+        ":platform",
+        "//absl/base:core_headers",
+    ],
 )
 
 cc_binary(
diff --git a/absl/random/internal/randen_hwaes.cc b/absl/random/internal/randen_hwaes.cc
index 6b82d1d07ad8..7d5b2b74128b 100644
--- a/absl/random/internal/randen_hwaes.cc
+++ b/absl/random/internal/randen_hwaes.cc
@@ -22,39 +22,9 @@
 #include <cstdint>
 #include <cstring>
 
+#include "absl/base/attributes.h"
 #include "absl/random/internal/platform.h"
 
-// ABSL_HAVE_ATTRIBUTE
-#if !defined(ABSL_HAVE_ATTRIBUTE)
-#ifdef __has_attribute
-#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x)
-#else
-#define ABSL_HAVE_ATTRIBUTE(x) 0
-#endif
-#endif
-
-#if ABSL_HAVE_ATTRIBUTE(always_inline) || \
-    (defined(__GNUC__) && !defined(__clang__))
-#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE \
-  __attribute__((always_inline))
-#elif defined(_MSC_VER)
-// We can achieve something similar to attribute((always_inline)) with MSVC by
-// using the __forceinline keyword, however this is not perfect. MSVC is
-// much less aggressive about inlining, and even with the __forceinline keyword.
-#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE __forceinline
-#else
-#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE
-#endif
-
-// ABSL_ATTRIBUTE_FLATTEN enables much more aggressive inlining within
-// the indicated function.
-#undef ABSL_ATTRIBUTE_FLATTEN
-#if ABSL_HAVE_ATTRIBUTE(flatten) || (defined(__GNUC__) && !defined(__clang__))
-#define ABSL_ATTRIBUTE_FLATTEN __attribute__((flatten))
-#else
-#define ABSL_ATTRIBUTE_FLATTEN
-#endif
-
 // ABSL_RANDEN_HWAES_IMPL indicates whether this file will contain
 // a hardware accelerated implementation of randen, or whether it
 // will contain stubs that exit the process.
@@ -146,18 +116,6 @@ void RandenHwAes::Generate(const void*, void*) {
 
 #include "absl/random/internal/randen_traits.h"
 
-// ABSL_FUNCTION_ALIGN32 defines a 32-byte alignment attribute
-// for the functions in this file.
-//
-// NOTE: Determine whether we actually have any wins from ALIGN32
-// using microbenchmarks. If not, remove.
-#undef ABSL_FUNCTION_ALIGN32
-#if ABSL_HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__))
-#define ABSL_FUNCTION_ALIGN32 __attribute__((aligned(32)))
-#else
-#define ABSL_FUNCTION_ALIGN32
-#endif
-
 // TARGET_CRYPTO defines a crypto attribute for each architecture.
 //
 // NOTE: Evaluate whether we should eliminate ABSL_TARGET_CRYPTO.
@@ -191,8 +149,7 @@ using Vector128 = __vector unsigned long long;  // NOLINT(runtime/int)
 
 namespace {
 
-inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
-ReverseBytes(const Vector128& v) {
+inline ABSL_TARGET_CRYPTO Vector128 ReverseBytes(const Vector128& v) {
   // Reverses the bytes of the vector.
   const __vector unsigned char perm = {15, 14, 13, 12, 11, 10, 9, 8,
                                        7, 6, 5, 4, 3, 2, 1, 0};
@@ -202,26 +159,26 @@ ReverseBytes(const Vector128& v) {
 // WARNING: these load/store in native byte order. It is OK to load and then
 // store an unchanged vector, but interpreting the bits as a number or input
 // to AES will have undefined results.
-inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
+inline ABSL_TARGET_CRYPTO Vector128
 Vector128Load(const void* ABSL_RANDOM_INTERNAL_RESTRICT from) {
   return vec_vsx_ld(0, reinterpret_cast<const Vector128*>(from));
 }
 
-inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
-Vector128Store(const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
+inline ABSL_TARGET_CRYPTO void Vector128Store(
+    const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
   vec_vsx_st(v, 0, reinterpret_cast<Vector128*>(to));
 }
 
 // One round of AES. "round_key" is a public constant for breaking the
 // symmetry of AES (ensures previously equal columns differ afterwards).
-inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
-AesRound(const Vector128& state, const Vector128& round_key) {
+inline ABSL_TARGET_CRYPTO Vector128 AesRound(const Vector128& state,
+                                             const Vector128& round_key) {
   return Vector128(__builtin_crypto_vcipher(state, round_key));
 }
 
 // Enables native loads in the round loop by pre-swapping.
-inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
-SwapEndian(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
+inline ABSL_TARGET_CRYPTO void SwapEndian(
+    uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
   using absl::random_internal::RandenTraits;
   constexpr size_t kLanes = 2;
   constexpr size_t kFeistelBlocks = RandenTraits::kFeistelBlocks;
@@ -273,20 +230,20 @@ using Vector128 = uint8x16_t;
 
 namespace {
 
-inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
+inline ABSL_TARGET_CRYPTO Vector128
 Vector128Load(const void* ABSL_RANDOM_INTERNAL_RESTRICT from) {
   return vld1q_u8(reinterpret_cast<const uint8_t*>(from));
 }
 
-inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
-Vector128Store(const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
+inline ABSL_TARGET_CRYPTO void Vector128Store(
+    const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
   vst1q_u8(reinterpret_cast<uint8_t*>(to), v);
 }
 
 // One round of AES. "round_key" is a public constant for breaking the
 // symmetry of AES (ensures previously equal columns differ afterwards).
-inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
-AesRound(const Vector128& state, const Vector128& round_key) {
+inline ABSL_TARGET_CRYPTO Vector128 AesRound(const Vector128& state,
+                                             const Vector128& round_key) {
   // It is important to always use the full round function - omitting the
   // final MixColumns reduces security [https://eprint.iacr.org/2010/041.pdf]
   // and does not help because we never decrypt.
@@ -297,8 +254,8 @@ AesRound(const Vector128& state, const Vector128& round_key) {
   return vaesmcq_u8(vaeseq_u8(state, uint8x16_t{})) ^ round_key;
 }
 
-inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
-SwapEndian(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT) {}
+inline ABSL_TARGET_CRYPTO void SwapEndian(
+    uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT) {}
 
 }  // namespace
 
@@ -313,16 +270,11 @@ namespace {
 class Vector128 {
  public:
   // Convert from/to intrinsics.
-  inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE explicit Vector128(
-      const __m128i& Vector128)
-      : data_(Vector128) {}
+  inline explicit Vector128(const __m128i& Vector128) : data_(Vector128) {}
 
-  inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE __m128i data() const {
-    return data_;
-  }
+  inline __m128i data() const { return data_; }
 
-  inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128& operator^=(
-      const Vector128& other) {
+  inline Vector128& operator^=(const Vector128& other) {
     data_ = _mm_xor_si128(data_, other.data());
     return *this;
   }
@@ -331,29 +283,29 @@ class Vector128 {
   __m128i data_;
 };
 
-inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
+inline ABSL_TARGET_CRYPTO Vector128
 Vector128Load(const void* ABSL_RANDOM_INTERNAL_RESTRICT from) {
   return Vector128(_mm_load_si128(reinterpret_cast<const __m128i*>(from)));
 }
 
-inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
-Vector128Store(const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
+inline ABSL_TARGET_CRYPTO void Vector128Store(
+    const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
   _mm_store_si128(reinterpret_cast<__m128i * ABSL_RANDOM_INTERNAL_RESTRICT>(to),
                   v.data());
 }
 
 // One round of AES. "round_key" is a public constant for breaking the
 // symmetry of AES (ensures previously equal columns differ afterwards).
-inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
-AesRound(const Vector128& state, const Vector128& round_key) {
+inline ABSL_TARGET_CRYPTO Vector128 AesRound(const Vector128& state,
+                                             const Vector128& round_key) {
   // It is important to always use the full round function - omitting the
   // final MixColumns reduces security [https://eprint.iacr.org/2010/041.pdf]
   // and does not help because we never decrypt.
   return Vector128(_mm_aesenc_si128(state.data(), round_key.data()));
 }
 
-inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
-SwapEndian(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT) {}
+inline ABSL_TARGET_CRYPTO void SwapEndian(
+    uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT) {}
 
 }  // namespace
 
@@ -450,8 +402,8 @@ constexpr size_t kLanes = 2;
 
 // Block shuffles applies a shuffle to the entire state between AES rounds.
 // Improved odd-even shuffle from "New criterion for diffusion property".
-inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO void
-BlockShuffle(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
+inline ABSL_TARGET_CRYPTO void BlockShuffle(
+    uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
   static_assert(kFeistelBlocks == 16, "Expecting 16 FeistelBlocks.");
 
   constexpr size_t shuffle[kFeistelBlocks] = {7, 2, 13, 4, 11, 8, 3, 6,
@@ -499,10 +451,9 @@ BlockShuffle(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
 // per 16 bytes (vs. 10 for AES-CTR). Computing eight round functions in
 // parallel hides the 7-cycle AESNI latency on HSW. Note that the Feistel
 // XORs are 'free' (included in the second AES instruction).
-inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO const
-    u64x2*
-    FeistelRound(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state,
-                 const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
+inline ABSL_TARGET_CRYPTO const u64x2* FeistelRound(
+    uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state,
+    const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
   static_assert(kFeistelBlocks == 16, "Expecting 16 FeistelBlocks.");
 
   // MSVC does a horrible job at unrolling loops.
@@ -561,9 +512,9 @@ inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO const
 
 // Indistinguishable from ideal by chosen-ciphertext adversaries using less than
 // 2^64 queries if the round function is a PRF. This is similar to the b=8 case
 // of Simpira v2, but more efficient than its generic construction for b=16.
-inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO void
-Permute(const void* ABSL_RANDOM_INTERNAL_RESTRICT keys,
-        uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
+inline ABSL_TARGET_CRYPTO void Permute(
+    const void* ABSL_RANDOM_INTERNAL_RESTRICT keys,
+    uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
   const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys128 =
       static_cast<const u64x2*>(keys);
@@ -584,16 +535,15 @@ namespace random_internal {
 
 bool HasRandenHwAesImplementation() { return true; }
 
-const void* ABSL_TARGET_CRYPTO ABSL_FUNCTION_ALIGN32 ABSL_ATTRIBUTE_FLATTEN
-RandenHwAes::GetKeys() {
+const void* ABSL_TARGET_CRYPTO RandenHwAes::GetKeys() {
   // Round keys for one AES per Feistel round and branch.
   // The canonical implementation uses first digits of Pi.
   return round_keys;
 }
 
 // NOLINTNEXTLINE
-void ABSL_TARGET_CRYPTO ABSL_FUNCTION_ALIGN32 ABSL_ATTRIBUTE_FLATTEN
-RandenHwAes::Absorb(const void* seed_void, void* state_void) {
+void ABSL_TARGET_CRYPTO RandenHwAes::Absorb(const void* seed_void,
+                                            void* state_void) {
   uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state =
       reinterpret_cast<uint64_t*>(state_void);
   const uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT seed =
@@ -669,8 +619,8 @@ RandenHwAes::Absorb(const void* seed_void, void* state_void) {
 }
 
 // NOLINTNEXTLINE
-void ABSL_TARGET_CRYPTO ABSL_FUNCTION_ALIGN32 ABSL_ATTRIBUTE_FLATTEN
-RandenHwAes::Generate(const void* keys, void* state_void) {
+void ABSL_TARGET_CRYPTO RandenHwAes::Generate(const void* keys,
+                                              void* state_void) {
   static_assert(kCapacityBytes == sizeof(Vector128), "Capacity mismatch");
 
   uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state =
diff --git a/absl/random/internal/uniform_helper.h b/absl/random/internal/uniform_helper.h
index 21646489643a..ebcc3744b188 100644
--- a/absl/random/internal/uniform_helper.h
+++ b/absl/random/internal/uniform_helper.h
@@ -35,9 +35,6 @@ struct IntervalClosedClosedT {};
 struct IntervalClosedOpenT {};
 struct IntervalOpenClosedT {};
 struct IntervalOpenOpenT {};
-}  // namespace random_internal
-
-namespace random_internal {
 
 // The functions
 // uniform_lower_bound(tag, a, b)
diff --git a/absl/strings/string_view.h b/absl/strings/string_view.h
index 25a4d1edeafe..a1b5a17b52b3 100644
--- a/absl/strings/string_view.h
+++ b/absl/strings/string_view.h
@@ -342,7 +342,7 @@ class string_view {
     size_type rlen = (std::min)(length_ - pos, n);
     if (rlen > 0) {
       const char* start = ptr_ + pos;
-      std::copy(start, start + rlen, buf);
+      traits_type::copy(buf, start, rlen);
     }
     return rlen;
   }
diff --git a/absl/time/internal/cctz/src/time_zone_impl.cc b/absl/time/internal/cctz/src/time_zone_impl.cc
index 3cbc674653dd..a26151d3e917 100644
--- a/absl/time/internal/cctz/src/time_zone_impl.cc
+++ b/absl/time/internal/cctz/src/time_zone_impl.cc
@@ -33,7 +33,12 @@ using TimeZoneImplByName =
 TimeZoneImplByName* time_zone_map = nullptr;
 
 // Mutual exclusion for time_zone_map.
-std::mutex time_zone_mutex;
+std::mutex& TimeZoneMutex() {
+  // This mutex is intentionally "leaked" to avoid the static deinitialization
+  // order fiasco (std::mutex's destructor is not trivial on many platforms).
+  static std::mutex* time_zone_mutex = new std::mutex;
+  return *time_zone_mutex;
+}
 
 }  // namespace
 
@@ -54,7 +59,7 @@ bool time_zone::Impl::LoadTimeZone(const std::string& name, time_zone* tz) {
   // Then check, under a shared lock, whether the time zone has already
   // been loaded. This is the common path. TODO: Move to shared_mutex.
   {
-    std::lock_guard<std::mutex> lock(time_zone_mutex);
+    std::lock_guard<std::mutex> lock(TimeZoneMutex());
     if (time_zone_map != nullptr) {
       TimeZoneImplByName::const_iterator itr = time_zone_map->find(name);
       if (itr != time_zone_map->end()) {
@@ -65,7 +70,7 @@ bool time_zone::Impl::LoadTimeZone(const std::string& name, time_zone* tz) {
   }
 
   // Now check again, under an exclusive lock.
-  std::lock_guard<std::mutex> lock(time_zone_mutex);
+  std::lock_guard<std::mutex> lock(TimeZoneMutex());
   if (time_zone_map == nullptr) time_zone_map = new TimeZoneImplByName;
   const Impl*& impl = (*time_zone_map)[name];
   if (impl == nullptr) {
@@ -84,7 +89,7 @@ bool time_zone::Impl::LoadTimeZone(const std::string& name, time_zone* tz) {
 }
 
 void time_zone::Impl::ClearTimeZoneMapTestOnly() {
-  std::lock_guard<std::mutex> lock(time_zone_mutex);
+  std::lock_guard<std::mutex> lock(TimeZoneMutex());
   if (time_zone_map != nullptr) {
     // Existing time_zone::Impl* entries are in the wild, so we simply
     // leak them. Future requests will result in reloading the data.
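
The TimeZoneMutex() accessor introduced above is the construct-on-first-use idiom: the mutex is created the first time it is needed and deliberately never destroyed, so it remains usable even while other static objects are being torn down. A minimal sketch of the pattern under that reading (the caller shown here is illustrative, not part of the commit):

#include <mutex>

// Returns a mutex that is created on first use and intentionally leaked,
// so locking it during static destruction is still safe.
std::mutex& TimeZoneMutex() {
  static std::mutex* mu = new std::mutex;  // never deleted, by design
  return *mu;
}

// Hypothetical caller, analogous to LoadTimeZone()/ClearTimeZoneMapTestOnly():
void WithTimeZoneMapLocked() {
  std::lock_guard<std::mutex> lock(TimeZoneMutex());
  // ... read or mutate the guarded map while the lock is held ...
}

The leak is the point: a namespace-scope std::mutex would run a non-trivial destructor at shutdown, and any code that still needed the lock afterwards would hit the static deinitialization order fiasco mentioned in the new comment.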
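The new TestTrivialityBadConstructors test (together with the GCC >= 7.4 requirement added to config.h) guards against compilers whose trivially-constructible builtins report true for user-provided constructors. A condensed sketch of the same check, assuming absl/meta/type_traits.h is available; BadCtors is an illustrative name:

#include "absl/meta/type_traits.h"

// A type whose constructors are user-provided, so no triviality trait may
// report true. The bodies would not even compile if instantiated, because
// int has no nested ::value, but evaluating the traits never instantiates them.
template <class T>
struct BadCtors {
  BadCtors() { static_assert(T::value, ""); }
  BadCtors(const BadCtors&) { static_assert(T::value, ""); }
};

static_assert(!absl::is_trivially_default_constructible<BadCtors<int>>::value,
              "a user-provided default constructor is never trivial");
static_assert(!absl::is_trivially_copy_constructible<BadCtors<int>>::value,
              "a user-provided copy constructor is never trivial");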
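The reworked fill benchmarks above follow a common Google Benchmark shape: size the container once, rebuild its contents on every iteration, and pass the result through DoNotOptimize so the compiler cannot discard the work. A sketch of that shape with std::vector, assuming the Google Benchmark library (BM_VectorFill is an illustrative name):

#include <vector>

#include "benchmark/benchmark.h"

// Fills a vector with state.range(0) elements on every iteration, reusing
// the capacity reserved up front so allocation noise stays out of the loop.
void BM_VectorFill(benchmark::State& state) {
  const int len = state.range(0);
  std::vector<int> v;
  v.reserve(len);
  for (auto _ : state) {
    v.clear();
    for (int i = 0; i < len; ++i) v.push_back(i);
    benchmark::DoNotOptimize(v);
  }
}
BENCHMARK(BM_VectorFill)->Range(1, 256);

BENCHMARK_MAIN();

The earlier versions of these benchmarks kept pushing into the same vector across iterations, so later iterations measured an ever-larger container; bounding the per-iteration work is what makes the timings comparable.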