author    | misterg <misterg@google.com> | 2017-09-19T20:54-0400
committer | misterg <misterg@google.com> | 2017-09-19T20:54-0400
commit    | c2e754829628d1e9b7a16b3389cfdace76950fdf (patch)
tree      | 5a7f056f44e27c30e10025113b644f0b3b5801fc /absl/base/internal
Initial Commit
Diffstat (limited to 'absl/base/internal')
45 files changed, 7378 insertions, 0 deletions
diff --git a/absl/base/internal/atomic_hook.h b/absl/base/internal/atomic_hook.h new file mode 100644 index 000000000000..8eee367e4f9c --- /dev/null +++ b/absl/base/internal/atomic_hook.h @@ -0,0 +1,122 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_ +#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_ + +#include <cassert> +#include <atomic> +#include <utility> + +namespace absl { +namespace base_internal { + +// In current versions of MSVC (as of July 2017), a std::atomic<T> where T is a +// pointer to function cannot be constant-initialized with an address constant +// expression. That is, the following code does not compile: +// void NoOp() {} +// constexpr std::atomic<void(*)()> ptr(NoOp); +// +// This is the only compiler we support that seems to have this issue. We +// conditionalize on MSVC here to use a fallback implementation. But we +// should revisit this occasionally. If MSVC fixes this compiler bug, we +// can then change this to be conditionalized on the value on _MSC_FULL_VER +// instead. +#ifdef _MSC_FULL_VER +#define ABSL_HAVE_FUNCTION_ADDRESS_CONSTANT_EXPRESSION 0 +#else +#define ABSL_HAVE_FUNCTION_ADDRESS_CONSTANT_EXPRESSION 1 +#endif + +template <typename T> +class AtomicHook; + +// AtomicHook is a helper class, templatized on a raw function pointer type, for +// implementing Abseil customization hooks. It is a callable object that +// dispatches to the registered hook, or performs a no-op (and returns a default +// constructed object) if no hook has been registered. +// +// Reads and writes guarantee memory_order_acquire/memory_order_release +// semantics. +template <typename ReturnType, typename... Args> +class AtomicHook<ReturnType (*)(Args...)> { + public: + using FnPtr = ReturnType (*)(Args...); + + constexpr AtomicHook() : hook_(DummyFunction) {} + + // Stores the provided function pointer as the value for this hook. + // + // This is intended to be called once. Multiple calls are legal only if the + // same function pointer is provided for each call. The store is implemented + // as a memory_order_release operation, and read accesses are implemented as + // memory_order_acquire. + void Store(FnPtr fn) { + assert(fn); + FnPtr expected = DummyFunction; + hook_.compare_exchange_strong(expected, fn, std::memory_order_acq_rel, + std::memory_order_acquire); + // If the compare and exchange failed, make sure that's because hook_ was + // already set to `fn` by an earlier call. Any other state reflects an API + // violation (calling Store() multiple times with different values). + // + // Avoid ABSL_RAW_CHECK, since raw logging depends on AtomicHook. + assert(expected == DummyFunction || expected == fn); + } + + // Invokes the registered callback. If no callback has yet been registered, a + // default-constructed object of the appropriate type is returned instead. + template <typename... CallArgs> + ReturnType operator()(CallArgs&&... 
args) const { + FnPtr hook = hook_.load(std::memory_order_acquire); + if (ABSL_HAVE_FUNCTION_ADDRESS_CONSTANT_EXPRESSION || hook) { + return hook(std::forward<CallArgs>(args)...); + } else { + return ReturnType(); + } + } + + // Returns the registered callback, or nullptr if none has been registered. + // Useful if client code needs to conditionalize behavior based on whether a + // callback was registered. + // + // Note that atomic_hook.Load()() and atomic_hook() have different semantics: + // operator()() will perform a no-op if no callback was registered, while + // Load()() will dereference a null function pointer. Prefer operator()() to + // Load()() unless you must conditionalize behavior on whether a hook was + // registered. + FnPtr Load() const { + FnPtr ptr = hook_.load(std::memory_order_acquire); + return (ptr == DummyFunction) ? nullptr : ptr; + } + + private: +#if ABSL_HAVE_FUNCTION_ADDRESS_CONSTANT_EXPRESSION + static ReturnType DummyFunction(Args...) { + return ReturnType(); + } +#else + static constexpr FnPtr DummyFunction = nullptr; +#endif + + std::atomic<FnPtr> hook_; +}; + +#undef ABSL_HAVE_FUNCTION_ADDRESS_CONSTANT_EXPRESSION + +} // namespace base_internal +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_ diff --git a/absl/base/internal/cycleclock.cc b/absl/base/internal/cycleclock.cc new file mode 100644 index 000000000000..a742df01f94a --- /dev/null +++ b/absl/base/internal/cycleclock.cc @@ -0,0 +1,81 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The implementation of CycleClock::Frequency. +// +// NOTE: only i386 and x86_64 have been well tested. +// PPC, sparc, alpha, and ia64 are based on +// http://peter.kuscsik.com/wordpress/?p=14 +// with modifications by m3b. See also +// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h + +#include "absl/base/internal/cycleclock.h" + +#include <chrono> // NOLINT(build/c++11) + +#include "absl/base/internal/unscaledcycleclock.h" + +namespace absl { +namespace base_internal { + +#if ABSL_USE_UNSCALED_CYCLECLOCK + +namespace { + +#ifdef NDEBUG +#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY +// Not debug mode and the UnscaledCycleClock frequency is the CPU +// frequency. Scale the CycleClock to prevent overflow if someone +// tries to represent the time as cycles since the Unix epoch. +static constexpr int32_t kShift = 1; +#else +// Not debug mode and the UnscaledCycleClock isn't operating at the +// raw CPU frequency. There is no need to do any scaling, so don't +// needlessly sacrifice precision. +static constexpr int32_t kShift = 0; +#endif +#else +// In debug mode use a different shift to discourage depending on a +// particular shift value. 
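// [Editorial note, illustrative only, not part of this commit] Whichever
// shift is chosen, the two public functions stay consistent with each other:
// with kShift = 2, a raw reading of 4,000,000,000 unscaled cycles is reported
// by Now() as 1,000,000,000, and a raw frequency of 4e9 is reported by
// Frequency() as 1e9, so Now() / Frequency() yields the same elapsed seconds
// in every build mode.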
+static constexpr int32_t kShift = 2; +#endif + +static constexpr double kFrequencyScale = 1.0 / (1 << kShift); + +} // namespace + +int64_t CycleClock::Now() { + return base_internal::UnscaledCycleClock::Now() >> kShift; +} + +double CycleClock::Frequency() { + return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency(); +} + +#else + +int64_t CycleClock::Now() { + return std::chrono::duration_cast<std::chrono::nanoseconds>( + std::chrono::steady_clock::now().time_since_epoch()) + .count(); +} + +double CycleClock::Frequency() { + return 1e9; +} + +#endif + +} // namespace base_internal +} // namespace absl diff --git a/absl/base/internal/cycleclock.h b/absl/base/internal/cycleclock.h new file mode 100644 index 000000000000..60e971583c57 --- /dev/null +++ b/absl/base/internal/cycleclock.h @@ -0,0 +1,77 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// ----------------------------------------------------------------------------- +// File: cycleclock.h +// ----------------------------------------------------------------------------- +// +// This header file defines a `CycleClock`, which yields the value and frequency +// of a cycle counter that increments at a rate that is approximately constant. +// +// NOTE: +// +// The cycle counter frequency is not necessarily related to the core clock +// frequency and should not be treated as such. That is, `CycleClock` cycles are +// not necessarily "CPU cycles" and code should not rely on that behavior, even +// if experimentally observed. +// +// +// An arbitrary offset may have been added to the counter at power on. +// +// On some platforms, the rate and offset of the counter may differ +// slightly when read from different CPUs of a multiprocessor. Usually, +// we try to ensure that the operating system adjusts values periodically +// so that values agree approximately. If you need stronger guarantees, +// consider using alternate interfaces. +// +// The CPU is not required to maintain the ordering of a cycle counter read +// with respect to surrounding instructions. + +#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_ +#define ABSL_BASE_INTERNAL_CYCLECLOCK_H_ + +#include <cstdint> + +namespace absl { +namespace base_internal { + +// ----------------------------------------------------------------------------- +// CycleClock +// ----------------------------------------------------------------------------- +class CycleClock { + public: + // CycleClock::Now() + // + // Returns the value of a cycle counter that counts at a rate that is + // approximately constant. + static int64_t Now(); + + // CycleClock::Frequency() + // + // Returns the amount by which `CycleClock::Now()` increases per second. Note + // that this value may not necessarily match the core CPU clock frequency. 
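  // [Editorial example, illustrative only, not part of this commit] A sketch
  // of the intended use, assuming only the two declarations in this class:
  // divide a difference of Now() readings by Frequency() to convert the
  // arbitrary-rate cycle count into seconds.
  //
  //   int64_t start = absl::base_internal::CycleClock::Now();
  //   // ... work being timed ...
  //   double seconds =
  //       (absl::base_internal::CycleClock::Now() - start) /
  //       absl::base_internal::CycleClock::Frequency();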
+ static double Frequency(); + + private: + CycleClock() = delete; // no instances + CycleClock(const CycleClock&) = delete; + CycleClock& operator=(const CycleClock&) = delete; +}; + +} // namespace base_internal +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_CYCLECLOCK_H_ diff --git a/absl/base/internal/endian.h b/absl/base/internal/endian.h new file mode 100644 index 000000000000..602129eed207 --- /dev/null +++ b/absl/base/internal/endian.h @@ -0,0 +1,267 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_ +#define ABSL_BASE_INTERNAL_ENDIAN_H_ + +// The following guarantees declaration of the byte swap functions +#ifdef _MSC_VER +#include <stdlib.h> // NOLINT(build/include) +#elif defined(__APPLE__) +// Mac OS X / Darwin features +#include <libkern/OSByteOrder.h> +#elif defined(__GLIBC__) +#include <byteswap.h> // IWYU pragma: export +#endif + +#include <cstdint> +#include "absl/base/config.h" +#include "absl/base/internal/unaligned_access.h" +#include "absl/base/port.h" + +namespace absl { + +// Use compiler byte-swapping intrinsics if they are available. 32-bit +// and 64-bit versions are available in Clang and GCC as of GCC 4.3.0. +// The 16-bit version is available in Clang and GCC only as of GCC 4.8.0. +// For simplicity, we enable them all only for GCC 4.8.0 or later. +#if defined(__clang__) || \ + (defined(__GNUC__) && \ + ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5)) +inline uint64_t gbswap_64(uint64_t host_int) { + return __builtin_bswap64(host_int); +} +inline uint32_t gbswap_32(uint32_t host_int) { + return __builtin_bswap32(host_int); +} +inline uint16_t gbswap_16(uint16_t host_int) { + return __builtin_bswap16(host_int); +} + +#elif defined(_MSC_VER) +inline uint64_t gbswap_64(uint64_t host_int) { + return _byteswap_uint64(host_int); +} +inline uint32_t gbswap_32(uint32_t host_int) { + return _byteswap_ulong(host_int); +} +inline uint16_t gbswap_16(uint16_t host_int) { + return _byteswap_ushort(host_int); +} + +#elif defined(__APPLE__) +inline uint64_t gbswap_64(uint64_t host_int) { return OSSwapInt16(host_int); } +inline uint32_t gbswap_32(uint32_t host_int) { return OSSwapInt32(host_int); } +inline uint16_t gbswap_16(uint16_t host_int) { return OSSwapInt64(host_int); } + +#else +inline uint64_t gbswap_64(uint64_t host_int) { +#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__) + // Adapted from /usr/include/byteswap.h. Not available on Mac. 
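  // [Editorial note, not part of this commit] For example, byte-swapping the
  // 64-bit value 0x0123456789abcdef yields 0xefcdab8967452301, and swapping
  // the result again restores the original value.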
+ if (__builtin_constant_p(host_int)) { + return __bswap_constant_64(host_int); + } else { + register uint64_t result; + __asm__("bswap %0" : "=r"(result) : "0"(host_int)); + return result; + } +#elif defined(__GLIBC__) + return bswap_64(host_int); +#else + return (((x & uint64_t{(0xFF}) << 56) | + ((x & uint64_t{(0xFF00}) << 40) | + ((x & uint64_t{(0xFF0000}) << 24) | + ((x & uint64_t{(0xFF000000}) << 8) | + ((x & uint64_t{(0xFF00000000}) >> 8) | + ((x & uint64_t{(0xFF0000000000}) >> 24) | + ((x & uint64_t{(0xFF000000000000}) >> 40) | + ((x & uint64_t{(0xFF00000000000000}) >> 56)); +#endif // bswap_64 +} + +inline uint32_t gbswap_32(uint32_t host_int) { +#if defined(__GLIBC__) + return bswap_32(host_int); +#else + return (((x & 0xFF) << 24) | ((x & 0xFF00) << 8) | ((x & 0xFF0000) >> 8) | + ((x & 0xFF000000) >> 24)); +#endif +} + +inline uint16_t gbswap_16(uint16_t host_int) { +#if defined(__GLIBC__) + return bswap_16(host_int); +#else + return uint16_t{((x & 0xFF) << 8) | ((x & 0xFF00) >> 8)}; +#endif +} + +#endif // intrinics available + +#ifdef ABSL_IS_LITTLE_ENDIAN + +// Definitions for ntohl etc. that don't require us to include +// netinet/in.h. We wrap gbswap_32 and gbswap_16 in functions rather +// than just #defining them because in debug mode, gcc doesn't +// correctly handle the (rather involved) definitions of bswap_32. +// gcc guarantees that inline functions are as fast as macros, so +// this isn't a performance hit. +inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); } +inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); } +inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); } + +#elif defined ABSL_IS_BIG_ENDIAN + +// These definitions are simpler on big-endian machines +// These are functions instead of macros to avoid self-assignment warnings +// on calls such as "i = ghtnol(i);". This also provides type checking. +inline uint16_t ghtons(uint16_t x) { return x; } +inline uint32_t ghtonl(uint32_t x) { return x; } +inline uint64_t ghtonll(uint64_t x) { return x; } + +#else +#error \ + "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \ + "ABSL_IS_LITTLE_ENDIAN must be defined" +#endif // byte order + +inline uint16_t gntohs(uint16_t x) { return ghtons(x); } +inline uint32_t gntohl(uint32_t x) { return ghtonl(x); } +inline uint64_t gntohll(uint64_t x) { return ghtonll(x); } + +// Utilities to convert numbers between the current hosts's native byte +// order and little-endian byte order +// +// Load/Store methods are alignment safe +namespace little_endian { +// Conversion functions. 
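// [Editorial example, illustrative only, not part of this commit] A minimal
// sketch of the intended use, assuming only the Store32()/Load32() helpers
// declared below; `buf` is a hypothetical caller-provided buffer. The stored
// bytes are identical on every host:
//
//   char buf[sizeof(uint32_t)];
//   absl::little_endian::Store32(buf, 0x01234567);  // bytes: 67 45 23 01
//   uint32_t v = absl::little_endian::Load32(buf);  // v == 0x01234567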
+#ifdef ABSL_IS_LITTLE_ENDIAN + +inline uint16_t FromHost16(uint16_t x) { return x; } +inline uint16_t ToHost16(uint16_t x) { return x; } + +inline uint32_t FromHost32(uint32_t x) { return x; } +inline uint32_t ToHost32(uint32_t x) { return x; } + +inline uint64_t FromHost64(uint64_t x) { return x; } +inline uint64_t ToHost64(uint64_t x) { return x; } + +inline constexpr bool IsLittleEndian() { return true; } + +#elif defined ABSL_IS_BIG_ENDIAN + +inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); } +inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); } + +inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); } +inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); } + +inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); } +inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); } + +inline constexpr bool IsLittleEndian() { return false; } + +#endif /* ENDIAN */ + +// Functions to do unaligned loads and stores in little-endian order. +inline uint16_t Load16(const void *p) { + return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); +} + +inline void Store16(void *p, uint16_t v) { + ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v)); +} + +inline uint32_t Load32(const void *p) { + return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); +} + +inline void Store32(void *p, uint32_t v) { + ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v)); +} + +inline uint64_t Load64(const void *p) { + return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); +} + +inline void Store64(void *p, uint64_t v) { + ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v)); +} + +} // namespace little_endian + +// Utilities to convert numbers between the current hosts's native byte +// order and big-endian byte order (same as network byte order) +// +// Load/Store methods are alignment safe +namespace big_endian { +#ifdef ABSL_IS_LITTLE_ENDIAN + +inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); } +inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); } + +inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); } +inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); } + +inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); } +inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); } + +inline constexpr bool IsLittleEndian() { return true; } + +#elif defined ABSL_IS_BIG_ENDIAN + +inline uint16_t FromHost16(uint16_t x) { return x; } +inline uint16_t ToHost16(uint16_t x) { return x; } + +inline uint32_t FromHost32(uint32_t x) { return x; } +inline uint32_t ToHost32(uint32_t x) { return x; } + +inline uint64_t FromHost64(uint64_t x) { return x; } +inline uint64_t ToHost64(uint64_t x) { return x; } + +inline constexpr bool IsLittleEndian() { return false; } + +#endif /* ENDIAN */ + +// Functions to do unaligned loads and stores in big-endian order. 
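// [Editorial example, illustrative only, not part of this commit] Because
// big-endian order is network byte order, the Store16()/Load16() helpers
// below can emit and parse protocol fields directly; `frame` and
// `payload_size` are hypothetical names:
//
//   char frame[64];
//   absl::big_endian::Store16(frame, payload_size);     // bytes: hi, lo
//   uint16_t parsed = absl::big_endian::Load16(frame);  // == payload_size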
+inline uint16_t Load16(const void *p) { + return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); +} + +inline void Store16(void *p, uint16_t v) { + ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v)); +} + +inline uint32_t Load32(const void *p) { + return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); +} + +inline void Store32(void *p, uint32_t v) { + ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v)); +} + +inline uint64_t Load64(const void *p) { + return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); +} + +inline void Store64(void *p, uint64_t v) { + ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v)); +} + +} // namespace big_endian + +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_ENDIAN_H_ diff --git a/absl/base/internal/endian_test.cc b/absl/base/internal/endian_test.cc new file mode 100644 index 000000000000..6812214ef045 --- /dev/null +++ b/absl/base/internal/endian_test.cc @@ -0,0 +1,281 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/endian.h" + +#include <algorithm> +#include <cstdint> +#include <cstdio> +#include <limits> +#include <random> +#include <vector> + +#include "gtest/gtest.h" +#include "absl/base/casts.h" +#include "absl/base/config.h" + +namespace absl { +namespace { + +const uint64_t kInitialNumber{0x0123456789abcdef}; +const uint64_t k64Value{kInitialNumber}; +const uint32_t k32Value{0x01234567}; +const uint16_t k16Value{0x0123}; +const int kNumValuesToTest = 1000000; +const int kRandomSeed = 12345; + +#ifdef ABSL_IS_BIG_ENDIAN +const uint64_t kInitialInNetworkOrder{kInitialNumber}; +const uint64_t k64ValueLE{0xefcdab8967452301}; +const uint32_t k32ValueLE{0x67452301}; +const uint16_t k16ValueLE{0x2301}; +const uint8_t k8ValueLE{k8Value}; +const uint64_t k64IValueLE{0xefcdab89674523a1}; +const uint32_t k32IValueLE{0x67452391}; +const uint16_t k16IValueLE{0x85ff}; +const uint8_t k8IValueLE{0xff}; +const uint64_t kDoubleValueLE{0x6e861bf0f9210940}; +const uint32_t kFloatValueLE{0xd00f4940}; +const uint8_t kBoolValueLE{0x1}; + +const uint64_t k64ValueBE{kInitialNumber}; +const uint32_t k32ValueBE{k32Value}; +const uint16_t k16ValueBE{k16Value}; +const uint8_t k8ValueBE{k8Value}; +const uint64_t k64IValueBE{0xa123456789abcdef}; +const uint32_t k32IValueBE{0x91234567}; +const uint16_t k16IValueBE{0xff85}; +const uint8_t k8IValueBE{0xff}; +const uint64_t kDoubleValueBE{0x400921f9f01b866e}; +const uint32_t kFloatValueBE{0x40490fd0}; +const uint8_t kBoolValueBE{0x1}; +#elif defined ABSL_IS_LITTLE_ENDIAN +const uint64_t kInitialInNetworkOrder{0xefcdab8967452301}; +const uint64_t k64ValueLE{kInitialNumber}; +const uint32_t k32ValueLE{k32Value}; +const uint16_t k16ValueLE{k16Value}; + +const uint64_t k64ValueBE{0xefcdab8967452301}; +const uint32_t k32ValueBE{0x67452301}; +const uint16_t k16ValueBE{0x2301}; +#endif + +template<typename T> +std::vector<T> GenerateAllValuesForType() { + std::vector<T> result; + T next = std::numeric_limits<T>::min(); + while (true) { + result.push_back(next); + if (next 
== std::numeric_limits<T>::max()) { + return result; + } + ++next; + } +} + +template<typename T> +std::vector<T> GenerateRandomIntegers(size_t numValuesToTest) { + std::vector<T> result; + std::mt19937_64 rng(kRandomSeed); + for (size_t i = 0; i < numValuesToTest; ++i) { + result.push_back(rng()); + } + return result; +} + +void ManualByteSwap(char* bytes, int length) { + if (length == 1) + return; + + EXPECT_EQ(0, length % 2); + for (int i = 0; i < length / 2; ++i) { + int j = (length - 1) - i; + using std::swap; + swap(bytes[i], bytes[j]); + } +} + +template<typename T> +inline T UnalignedLoad(const char* p) { + static_assert( + sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8, + "Unexpected type size"); + + switch (sizeof(T)) { + case 1: return *reinterpret_cast<const T*>(p); + case 2: + return ABSL_INTERNAL_UNALIGNED_LOAD16(p); + case 4: + return ABSL_INTERNAL_UNALIGNED_LOAD32(p); + case 8: + return ABSL_INTERNAL_UNALIGNED_LOAD64(p); + default: + // Suppresses invalid "not all control paths return a value" on MSVC + return {}; + } +} + +template <typename T, typename ByteSwapper> +static void GBSwapHelper(const std::vector<T>& host_values_to_test, + const ByteSwapper& byte_swapper) { + // Test byte_swapper against a manual byte swap. + for (typename std::vector<T>::const_iterator it = host_values_to_test.begin(); + it != host_values_to_test.end(); ++it) { + T host_value = *it; + + char actual_value[sizeof(host_value)]; + memcpy(actual_value, &host_value, sizeof(host_value)); + byte_swapper(actual_value); + + char expected_value[sizeof(host_value)]; + memcpy(expected_value, &host_value, sizeof(host_value)); + ManualByteSwap(expected_value, sizeof(host_value)); + + ASSERT_EQ(0, memcmp(actual_value, expected_value, sizeof(host_value))) + << "Swap output for 0x" << std::hex << host_value << " does not match. " + << "Expected: 0x" << UnalignedLoad<T>(expected_value) << "; " + << "actual: 0x" << UnalignedLoad<T>(actual_value); + } +} + +void Swap16(char* bytes) { + ABSL_INTERNAL_UNALIGNED_STORE16( + bytes, gbswap_16(ABSL_INTERNAL_UNALIGNED_LOAD16(bytes))); +} + +void Swap32(char* bytes) { + ABSL_INTERNAL_UNALIGNED_STORE32( + bytes, gbswap_32(ABSL_INTERNAL_UNALIGNED_LOAD32(bytes))); +} + +void Swap64(char* bytes) { + ABSL_INTERNAL_UNALIGNED_STORE64( + bytes, gbswap_64(ABSL_INTERNAL_UNALIGNED_LOAD64(bytes))); +} + +TEST(EndianessTest, Uint16) { + GBSwapHelper(GenerateAllValuesForType<uint16_t>(), &Swap16); +} + +TEST(EndianessTest, Uint32) { + GBSwapHelper(GenerateRandomIntegers<uint32_t>(kNumValuesToTest), &Swap32); +} + +TEST(EndianessTest, Uint64) { + GBSwapHelper(GenerateRandomIntegers<uint64_t>(kNumValuesToTest), &Swap64); +} + +TEST(EndianessTest, ghtonll_gntohll) { + // Test that absl::ghtonl compiles correctly + uint32_t test = 0x01234567; + EXPECT_EQ(absl::gntohl(absl::ghtonl(test)), test); + + uint64_t comp = absl::ghtonll(kInitialNumber); + EXPECT_EQ(comp, kInitialInNetworkOrder); + comp = absl::gntohll(kInitialInNetworkOrder); + EXPECT_EQ(comp, kInitialNumber); + + // Test that htonll and ntohll are each others' inverse functions on a + // somewhat assorted batch of numbers. 37 is chosen to not be anything + // particularly nice base 2. + uint64_t value = 1; + for (int i = 0; i < 100; ++i) { + comp = absl::ghtonll(absl::gntohll(value)); + EXPECT_EQ(value, comp); + comp = absl::gntohll(absl::ghtonll(value)); + EXPECT_EQ(value, comp); + value *= 37; + } +} + +TEST(EndianessTest, little_endian) { + // Check little_endian uint16_t. 
+ uint64_t comp = little_endian::FromHost16(k16Value); + EXPECT_EQ(comp, k16ValueLE); + comp = little_endian::ToHost16(k16ValueLE); + EXPECT_EQ(comp, k16Value); + + // Check little_endian uint32_t. + comp = little_endian::FromHost32(k32Value); + EXPECT_EQ(comp, k32ValueLE); + comp = little_endian::ToHost32(k32ValueLE); + EXPECT_EQ(comp, k32Value); + + // Check little_endian uint64_t. + comp = little_endian::FromHost64(k64Value); + EXPECT_EQ(comp, k64ValueLE); + comp = little_endian::ToHost64(k64ValueLE); + EXPECT_EQ(comp, k64Value); + + // Check little-endian Load and store functions. + uint16_t u16Buf; + uint32_t u32Buf; + uint64_t u64Buf; + + little_endian::Store16(&u16Buf, k16Value); + EXPECT_EQ(u16Buf, k16ValueLE); + comp = little_endian::Load16(&u16Buf); + EXPECT_EQ(comp, k16Value); + + little_endian::Store32(&u32Buf, k32Value); + EXPECT_EQ(u32Buf, k32ValueLE); + comp = little_endian::Load32(&u32Buf); + EXPECT_EQ(comp, k32Value); + + little_endian::Store64(&u64Buf, k64Value); + EXPECT_EQ(u64Buf, k64ValueLE); + comp = little_endian::Load64(&u64Buf); + EXPECT_EQ(comp, k64Value); +} + +TEST(EndianessTest, big_endian) { + // Check big-endian Load and store functions. + uint16_t u16Buf; + uint32_t u32Buf; + uint64_t u64Buf; + + unsigned char buffer[10]; + big_endian::Store16(&u16Buf, k16Value); + EXPECT_EQ(u16Buf, k16ValueBE); + uint64_t comp = big_endian::Load16(&u16Buf); + EXPECT_EQ(comp, k16Value); + + big_endian::Store32(&u32Buf, k32Value); + EXPECT_EQ(u32Buf, k32ValueBE); + comp = big_endian::Load32(&u32Buf); + EXPECT_EQ(comp, k32Value); + + big_endian::Store64(&u64Buf, k64Value); + EXPECT_EQ(u64Buf, k64ValueBE); + comp = big_endian::Load64(&u64Buf); + EXPECT_EQ(comp, k64Value); + + big_endian::Store16(buffer + 1, k16Value); + EXPECT_EQ(u16Buf, k16ValueBE); + comp = big_endian::Load16(buffer + 1); + EXPECT_EQ(comp, k16Value); + + big_endian::Store32(buffer + 1, k32Value); + EXPECT_EQ(u32Buf, k32ValueBE); + comp = big_endian::Load32(buffer + 1); + EXPECT_EQ(comp, k32Value); + + big_endian::Store64(buffer + 1, k64Value); + EXPECT_EQ(u64Buf, k64ValueBE); + comp = big_endian::Load64(buffer + 1); + EXPECT_EQ(comp, k64Value); +} + +} // namespace +} // namespace absl diff --git a/absl/base/internal/exception_testing.h b/absl/base/internal/exception_testing.h new file mode 100644 index 000000000000..99a10734842b --- /dev/null +++ b/absl/base/internal/exception_testing.h @@ -0,0 +1,24 @@ +// Testing utilities for ABSL types which throw exceptions. + +#ifndef ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_ +#define ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_ + +#include "gtest/gtest.h" +#include "absl/base/config.h" + +// ABSL_BASE_INTERNAL_EXPECT_FAIL tests either for a specified thrown exception +// if exceptions are enabled, or for death with a specified text in the error +// message +#ifdef ABSL_HAVE_EXCEPTIONS + +#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ + EXPECT_THROW(expr, exception_t) + +#else + +#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ + EXPECT_DEATH(expr, text) + +#endif + +#endif // ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_ diff --git a/absl/base/internal/identity.h b/absl/base/internal/identity.h new file mode 100644 index 000000000000..a6734b4d3537 --- /dev/null +++ b/absl/base/internal/identity.h @@ -0,0 +1,33 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_IDENTITY_H_ +#define ABSL_BASE_INTERNAL_IDENTITY_H_ + +namespace absl { +namespace internal { + +template <typename T> +struct identity { + typedef T type; +}; + +template <typename T> +using identity_t = typename identity<T>::type; + +} // namespace internal +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_IDENTITY_H_ diff --git a/absl/base/internal/invoke.h b/absl/base/internal/invoke.h new file mode 100644 index 000000000000..8c3f4f60637c --- /dev/null +++ b/absl/base/internal/invoke.h @@ -0,0 +1,188 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// absl::base_internal::Invoke(f, args...) is an implementation of +// INVOKE(f, args...) from section [func.require] of the C++ standard. +// +// [func.require] +// Define INVOKE (f, t1, t2, ..., tN) as follows: +// 1. (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T +// and t1 is an object of type T or a reference to an object of type T or a +// reference to an object of a type derived from T; +// 2. ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a +// class T and t1 is not one of the types described in the previous item; +// 3. t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is +// an object of type T or a reference to an object of type T or a reference +// to an object of a type derived from T; +// 4. (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1 +// is not one of the types described in the previous item; +// 5. f(t1, t2, ..., tN) in all other cases. +// +// The implementation is SFINAE-friendly: substitution failure within Invoke() +// isn't an error. + +#ifndef ABSL_BASE_INTERNAL_INVOKE_H_ +#define ABSL_BASE_INTERNAL_INVOKE_H_ + +#include <algorithm> +#include <type_traits> +#include <utility> + +// The following code is internal implementation detail. See the comment at the +// top of this file for the API documentation. + +namespace absl { +namespace base_internal { + +// The five classes below each implement one of the clauses from the definition +// of INVOKE. The inner class template Accept<F, Args...> checks whether the +// clause is applicable; static function template Invoke(f, args...) does the +// invocation. +// +// By separating the clause selection logic from invocation we make sure that +// Invoke() does exactly what the standard says. + +template <typename Derived> +struct StrippedAccept { + template <typename... 
Args> + struct Accept : Derived::template AcceptImpl<typename std::remove_cv< + typename std::remove_reference<Args>::type>::type...> {}; +}; + +// (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T +// and t1 is an object of type T or a reference to an object of type T or a +// reference to an object of a type derived from T. +struct MemFunAndRef : StrippedAccept<MemFunAndRef> { + template <typename... Args> + struct AcceptImpl : std::false_type {}; + + template <typename R, typename C, typename... Params, typename Obj, + typename... Args> + struct AcceptImpl<R (C::*)(Params...), Obj, Args...> + : std::is_base_of<C, Obj> {}; + + template <typename R, typename C, typename... Params, typename Obj, + typename... Args> + struct AcceptImpl<R (C::*)(Params...) const, Obj, Args...> + : std::is_base_of<C, Obj> {}; + + template <typename MemFun, typename Obj, typename... Args> + static decltype((std::declval<Obj>().* + std::declval<MemFun>())(std::declval<Args>()...)) + Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) { + return (std::forward<Obj>(obj).* + std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...); + } +}; + +// ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a +// class T and t1 is not one of the types described in the previous item. +struct MemFunAndPtr : StrippedAccept<MemFunAndPtr> { + template <typename... Args> + struct AcceptImpl : std::false_type {}; + + template <typename R, typename C, typename... Params, typename Ptr, + typename... Args> + struct AcceptImpl<R (C::*)(Params...), Ptr, Args...> + : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value> {}; + + template <typename R, typename C, typename... Params, typename Ptr, + typename... Args> + struct AcceptImpl<R (C::*)(Params...) const, Ptr, Args...> + : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value> {}; + + template <typename MemFun, typename Ptr, typename... Args> + static decltype(((*std::declval<Ptr>()).* + std::declval<MemFun>())(std::declval<Args>()...)) + Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... args) { + return ((*std::forward<Ptr>(ptr)).* + std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...); + } +}; + +// t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is +// an object of type T or a reference to an object of type T or a reference +// to an object of a type derived from T. +struct DataMemAndRef : StrippedAccept<DataMemAndRef> { + template <typename... Args> + struct AcceptImpl : std::false_type {}; + + template <typename R, typename C, typename Obj> + struct AcceptImpl<R C::*, Obj> : std::is_base_of<C, Obj> {}; + + template <typename DataMem, typename Ref> + static decltype(std::declval<Ref>().*std::declval<DataMem>()) Invoke( + DataMem&& data_mem, Ref&& ref) { + return std::forward<Ref>(ref).*std::forward<DataMem>(data_mem); + } +}; + +// (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1 +// is not one of the types described in the previous item. +struct DataMemAndPtr : StrippedAccept<DataMemAndPtr> { + template <typename... 
Args> + struct AcceptImpl : std::false_type {}; + + template <typename R, typename C, typename Ptr> + struct AcceptImpl<R C::*, Ptr> + : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value> {}; + + template <typename DataMem, typename Ptr> + static decltype((*std::declval<Ptr>()).*std::declval<DataMem>()) Invoke( + DataMem&& data_mem, Ptr&& ptr) { + return (*std::forward<Ptr>(ptr)).*std::forward<DataMem>(data_mem); + } +}; + +// f(t1, t2, ..., tN) in all other cases. +struct Callable { + // Callable doesn't have Accept because it's the last clause that gets picked + // when none of the previous clauses are applicable. + template <typename F, typename... Args> + static decltype(std::declval<F>()(std::declval<Args>()...)) Invoke( + F&& f, Args&&... args) { + return std::forward<F>(f)(std::forward<Args>(args)...); + } +}; + +// Resolves to the first matching clause. +template <typename... Args> +struct Invoker { + typedef typename std::conditional< + MemFunAndRef::Accept<Args...>::value, MemFunAndRef, + typename std::conditional< + MemFunAndPtr::Accept<Args...>::value, MemFunAndPtr, + typename std::conditional< + DataMemAndRef::Accept<Args...>::value, DataMemAndRef, + typename std::conditional<DataMemAndPtr::Accept<Args...>::value, + DataMemAndPtr, Callable>::type>::type>:: + type>::type type; +}; + +// The result type of Invoke<F, Args...>. +template <typename F, typename... Args> +using InvokeT = decltype(Invoker<F, Args...>::type::Invoke( + std::declval<F>(), std::declval<Args>()...)); + +// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section +// [func.require] of the C++ standard. +template <typename F, typename... Args> +InvokeT<F, Args...> Invoke(F&& f, Args&&... args) { + return Invoker<F, Args...>::type::Invoke(std::forward<F>(f), + std::forward<Args>(args)...); +} +} // namespace base_internal +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_INVOKE_H_ diff --git a/absl/base/internal/log_severity.cc b/absl/base/internal/log_severity.cc new file mode 100644 index 000000000000..430ac45848a0 --- /dev/null +++ b/absl/base/internal/log_severity.cc @@ -0,0 +1,15 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/log_severity.h" diff --git a/absl/base/internal/log_severity.h b/absl/base/internal/log_severity.h new file mode 100644 index 000000000000..deaf6a578925 --- /dev/null +++ b/absl/base/internal/log_severity.h @@ -0,0 +1,52 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ +#define ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ + +#include "absl/base/attributes.h" + +namespace absl { + +enum class LogSeverity : int { + kInfo = 0, + kWarning = 1, + kError = 2, + kFatal = 3, +}; + +constexpr const char* LogSeverityName(absl::LogSeverity s) { + return s == absl::LogSeverity::kInfo + ? "INFO" + : s == absl::LogSeverity::kWarning + ? "WARNING" + : s == absl::LogSeverity::kError + ? "ERROR" + : s == absl::LogSeverity::kFatal ? "FATAL" : "UNKNOWN"; +} + +// Note that out-of-range large severities normalize to kError, not kFatal. +constexpr absl::LogSeverity NormalizeLogSeverity(absl::LogSeverity s) { + return s < absl::LogSeverity::kInfo + ? absl::LogSeverity::kInfo + : s > absl::LogSeverity::kFatal ? absl::LogSeverity::kError : s; +} +constexpr absl::LogSeverity NormalizeLogSeverity(int s) { + return NormalizeLogSeverity(static_cast<absl::LogSeverity>(s)); +} + +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ diff --git a/absl/base/internal/low_level_alloc.cc b/absl/base/internal/low_level_alloc.cc new file mode 100644 index 000000000000..c55201b38bad --- /dev/null +++ b/absl/base/internal/low_level_alloc.cc @@ -0,0 +1,598 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A low-level allocator that can be used by other low-level +// modules without introducing dependency cycles. +// This allocator is slow and wasteful of memory; +// it should not be used when performance is key. + +#include "absl/base/config.h" + +#include "absl/base/internal/low_level_alloc.h" + +// LowLevelAlloc requires that the platform support low-level +// allocation of virtual memory. Platforms lacking this cannot use +// LowLevelAlloc. +#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING + +#ifndef _WIN32 +#include <pthread.h> +#include <signal.h> +#include <sys/mman.h> +#include <unistd.h> +#else +#include <windows.h> +#endif + +#include <string.h> +#include <algorithm> +#include <atomic> +#include <cstddef> +#include <cerrno> +#include <new> // for placement-new + +#include "absl/base/dynamic_annotations.h" +#include "absl/base/internal/malloc_hook.h" +#include "absl/base/internal/malloc_hook_invoke.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/internal/spinlock.h" + +// MAP_ANONYMOUS +#if defined(__APPLE__) +// For mmap, Linux defines both MAP_ANONYMOUS and MAP_ANON and says MAP_ANON is +// deprecated. In Darwin, MAP_ANON is all there is. +#if !defined MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif // !MAP_ANONYMOUS +#endif // __APPLE__ + +namespace absl { +namespace base_internal { + +// A first-fit allocator with amortized logarithmic free() time. + +// --------------------------------------------------------------------------- +static const int kMaxLevel = 30; + +namespace { +// This struct describes one allocated block, or one free block. 
+struct AllocList { + struct Header { + // Size of entire region, including this field. Must be + // first. Valid in both allocated and unallocated blocks. + uintptr_t size; + + // kMagicAllocated or kMagicUnallocated xor this. + uintptr_t magic; + + // Pointer to parent arena. + LowLevelAlloc::Arena *arena; + + // Aligns regions to 0 mod 2*sizeof(void*). + void *dummy_for_alignment; + } header; + + // Next two fields: in unallocated blocks: freelist skiplist data + // in allocated blocks: overlaps with client data + + // Levels in skiplist used. + int levels; + + // Actually has levels elements. The AllocList node may not have room + // for all kMaxLevel entries. See max_fit in LLA_SkiplistLevels(). + AllocList *next[kMaxLevel]; +}; +} // namespace + +// --------------------------------------------------------------------------- +// A trivial skiplist implementation. This is used to keep the freelist +// in address order while taking only logarithmic time per insert and delete. + +// An integer approximation of log2(size/base) +// Requires size >= base. +static int IntLog2(size_t size, size_t base) { + int result = 0; + for (size_t i = size; i > base; i >>= 1) { // i == floor(size/2**result) + result++; + } + // floor(size / 2**result) <= base < floor(size / 2**(result-1)) + // => log2(size/(base+1)) <= result < 1+log2(size/base) + // => result ~= log2(size/base) + return result; +} + +// Return a random integer n: p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1. +static int Random(uint32_t *state) { + uint32_t r = *state; + int result = 1; + while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) { + result++; + } + *state = r; + return result; +} + +// Return a number of skiplist levels for a node of size bytes, where +// base is the minimum node size. Compute level=log2(size / base)+n +// where n is 1 if random is false and otherwise a random number generated with +// the standard distribution for a skiplist: See Random() above. +// Bigger nodes tend to have more skiplist levels due to the log2(size / base) +// term, so first-fit searches touch fewer nodes. "level" is clipped so +// level<kMaxLevel and next[level-1] will fit in the node. +// 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel +static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) { + // max_fit is the maximum number of levels that will fit in a node for the + // given size. We can't return more than max_fit, no matter what the + // random number generator says. + size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *); + int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1); + if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit); + if (level > kMaxLevel-1) level = kMaxLevel - 1; + ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level"); + return level; +} + +// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e. +// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater +// points to the last element at level i in the AllocList less than *e, or is +// head if no such element exists. +static AllocList *LLA_SkiplistSearch(AllocList *head, + AllocList *e, AllocList **prev) { + AllocList *p = head; + for (int level = head->levels - 1; level >= 0; level--) { + for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) { + } + prev[level] = p; + } + return (head->levels == 0) ? nullptr : prev[0]->next[0]; +} + +// Insert element *e into AllocList *head. 
Set prev[] as LLA_SkiplistSearch. +// Requires that e->levels be previously set by the caller (using +// LLA_SkiplistLevels()) +static void LLA_SkiplistInsert(AllocList *head, AllocList *e, + AllocList **prev) { + LLA_SkiplistSearch(head, e, prev); + for (; head->levels < e->levels; head->levels++) { // extend prev pointers + prev[head->levels] = head; // to all *e's levels + } + for (int i = 0; i != e->levels; i++) { // add element to list + e->next[i] = prev[i]->next[i]; + prev[i]->next[i] = e; + } +} + +// Remove element *e from AllocList *head. Set prev[] as LLA_SkiplistSearch(). +// Requires that e->levels be previous set by the caller (using +// LLA_SkiplistLevels()) +static void LLA_SkiplistDelete(AllocList *head, AllocList *e, + AllocList **prev) { + AllocList *found = LLA_SkiplistSearch(head, e, prev); + ABSL_RAW_CHECK(e == found, "element not in freelist"); + for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) { + prev[i]->next[i] = e->next[i]; + } + while (head->levels > 0 && head->next[head->levels - 1] == nullptr) { + head->levels--; // reduce head->levels if level unused + } +} + +// --------------------------------------------------------------------------- +// Arena implementation + +struct LowLevelAlloc::Arena { + // This constructor does nothing, and relies on zero-initialization to get + // the proper initial state. + Arena() : mu(base_internal::kLinkerInitialized) {} // NOLINT + explicit Arena(int) // NOLINT(readability/casting) + : // Avoid recursive cooperative scheduling w/ kernel scheduling. + mu(base_internal::SCHEDULE_KERNEL_ONLY), + // Set pagesize to zero explicitly for non-static init. + pagesize(0), + random(0) {} + + base_internal::SpinLock mu; // protects freelist, allocation_count, + // pagesize, roundup, min_size + AllocList freelist; // head of free list; sorted by addr (under mu) + int32_t allocation_count; // count of allocated blocks (under mu) + std::atomic<uint32_t> flags; // flags passed to NewArena (ro after init) + size_t pagesize; // ==getpagesize() (init under mu, then ro) + size_t roundup; // lowest 2^n >= max(16,sizeof (AllocList)) + // (init under mu, then ro) + size_t min_size; // smallest allocation block size + // (init under mu, then ro) + uint32_t random; // PRNG state +}; + +// The default arena, which is used when 0 is passed instead of an Arena +// pointer. +static struct LowLevelAlloc::Arena default_arena; // NOLINT + +// Non-malloc-hooked arenas: used only to allocate metadata for arenas that +// do not want malloc hook reporting, so that for them there's no malloc hook +// reporting even during arena creation. 
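// [Editorial example, illustrative only, not part of this commit] A sketch of
// how a client selects these behaviors through NewArena(), using the flag
// constants referenced throughout this file: the resulting arena blocks
// signals around its internal lock, skips malloc hook reporting, and has its
// own metadata carved from unhooked_async_sig_safe_arena.
//
//   LowLevelAlloc::Arena* arena = LowLevelAlloc::NewArena(
//       LowLevelAlloc::kAsyncSignalSafe, LowLevelAlloc::DefaultArena());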
+static struct LowLevelAlloc::Arena unhooked_arena; // NOLINT + +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING +static struct LowLevelAlloc::Arena unhooked_async_sig_safe_arena; // NOLINT +#endif + +// magic numbers to identify allocated and unallocated blocks +static const uintptr_t kMagicAllocated = 0x4c833e95U; +static const uintptr_t kMagicUnallocated = ~kMagicAllocated; + +namespace { +class SCOPED_LOCKABLE ArenaLock { + public: + explicit ArenaLock(LowLevelAlloc::Arena *arena) + EXCLUSIVE_LOCK_FUNCTION(arena->mu) + : arena_(arena) { +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + if (arena == &unhooked_async_sig_safe_arena || + (arena->flags.load(std::memory_order_relaxed) & + LowLevelAlloc::kAsyncSignalSafe) != 0) { + sigset_t all; + sigfillset(&all); + mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0; + } +#endif + arena_->mu.Lock(); + } + ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); } + void Leave() UNLOCK_FUNCTION() { + arena_->mu.Unlock(); +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + if (mask_valid_) { + pthread_sigmask(SIG_SETMASK, &mask_, nullptr); + } +#endif + left_ = true; + } + + private: + bool left_ = false; // whether left region +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + bool mask_valid_ = false; + sigset_t mask_; // old mask of blocked signals +#endif + LowLevelAlloc::Arena *arena_; + ArenaLock(const ArenaLock &) = delete; + ArenaLock &operator=(const ArenaLock &) = delete; +}; +} // namespace + +// create an appropriate magic number for an object at "ptr" +// "magic" should be kMagicAllocated or kMagicUnallocated +inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) { + return magic ^ reinterpret_cast<uintptr_t>(ptr); +} + +// Initialize the fields of an Arena +static void ArenaInit(LowLevelAlloc::Arena *arena) { + if (arena->pagesize == 0) { +#ifdef _WIN32 + SYSTEM_INFO system_info; + GetSystemInfo(&system_info); + arena->pagesize = std::max(system_info.dwPageSize, + system_info.dwAllocationGranularity); +#else + arena->pagesize = getpagesize(); +#endif + // Round up block sizes to a power of two close to the header size. + arena->roundup = 16; + while (arena->roundup < sizeof (arena->freelist.header)) { + arena->roundup += arena->roundup; + } + // Don't allocate blocks less than twice the roundup size to avoid tiny + // free blocks. + arena->min_size = 2 * arena->roundup; + arena->freelist.header.size = 0; + arena->freelist.header.magic = + Magic(kMagicUnallocated, &arena->freelist.header); + arena->freelist.header.arena = arena; + arena->freelist.levels = 0; + memset(arena->freelist.next, 0, sizeof (arena->freelist.next)); + arena->allocation_count = 0; + if (arena == &default_arena) { + // Default arena should be hooked, e.g. for heap-checker to trace + // pointer chains through objects in the default arena. + arena->flags.store(LowLevelAlloc::kCallMallocHook, + std::memory_order_relaxed); + } +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + else if (arena == // NOLINT(readability/braces) + &unhooked_async_sig_safe_arena) { + arena->flags.store(LowLevelAlloc::kAsyncSignalSafe, + std::memory_order_relaxed); + } +#endif + else { // NOLINT(readability/braces) + // other arenas' flags may be overridden by client, + // but unhooked_arena will have 0 in 'flags'. 
+ arena->flags.store(0, std::memory_order_relaxed); + } + } +} + +// L < meta_data_arena->mu +LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32_t flags, + Arena *meta_data_arena) { + ABSL_RAW_CHECK(meta_data_arena != nullptr, "must pass a valid arena"); + if (meta_data_arena == &default_arena) { +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) { + meta_data_arena = &unhooked_async_sig_safe_arena; + } else // NOLINT(readability/braces) +#endif + if ((flags & LowLevelAlloc::kCallMallocHook) == 0) { + meta_data_arena = &unhooked_arena; + } + } + // Arena(0) uses the constructor for non-static contexts + Arena *result = + new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(0); + ArenaInit(result); + result->flags.store(flags, std::memory_order_relaxed); + return result; +} + +// L < arena->mu, L < arena->arena->mu +bool LowLevelAlloc::DeleteArena(Arena *arena) { + ABSL_RAW_CHECK( + arena != nullptr && arena != &default_arena && arena != &unhooked_arena, + "may not delete default arena"); + ArenaLock section(arena); + bool empty = (arena->allocation_count == 0); + section.Leave(); + if (empty) { + while (arena->freelist.next[0] != nullptr) { + AllocList *region = arena->freelist.next[0]; + size_t size = region->header.size; + arena->freelist.next[0] = region->next[0]; + ABSL_RAW_CHECK( + region->header.magic == Magic(kMagicUnallocated, ®ion->header), + "bad magic number in DeleteArena()"); + ABSL_RAW_CHECK(region->header.arena == arena, + "bad arena pointer in DeleteArena()"); + ABSL_RAW_CHECK(size % arena->pagesize == 0, + "empty arena has non-page-aligned block size"); + ABSL_RAW_CHECK(reinterpret_cast<uintptr_t>(region) % arena->pagesize == 0, + "empty arena has non-page-aligned block"); + int munmap_result; +#ifdef _WIN32 + munmap_result = VirtualFree(region, 0, MEM_RELEASE); + ABSL_RAW_CHECK(munmap_result != 0, + "LowLevelAlloc::DeleteArena: VitualFree failed"); +#else + if ((arena->flags.load(std::memory_order_relaxed) & + LowLevelAlloc::kAsyncSignalSafe) == 0) { + munmap_result = munmap(region, size); + } else { + munmap_result = MallocHook::UnhookedMUnmap(region, size); + } + if (munmap_result != 0) { + ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d", + errno); + } +#endif + } + Free(arena); + } + return empty; +} + +// --------------------------------------------------------------------------- + +// Addition, checking for overflow. The intent is to die if an external client +// manages to push through a request that would cause arithmetic to fail. +static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) { + uintptr_t sum = a + b; + ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow"); + return sum; +} + +// Return value rounded up to next multiple of align. +// align must be a power of two. +static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) { + return CheckedAdd(addr, align - 1) & ~(align - 1); +} + +// Equivalent to "return prev->next[i]" but with sanity checking +// that the freelist is in the correct order, that it +// consists of regions marked "unallocated", and that no two regions +// are adjacent in memory (they should have been coalesced). 
+// L < arena->mu +static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) { + ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()"); + AllocList *next = prev->next[i]; + if (next != nullptr) { + ABSL_RAW_CHECK( + next->header.magic == Magic(kMagicUnallocated, &next->header), + "bad magic number in Next()"); + ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()"); + if (prev != &arena->freelist) { + ABSL_RAW_CHECK(prev < next, "unordered freelist"); + ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size < + reinterpret_cast<char *>(next), + "malformed freelist"); + } + } + return next; +} + +// Coalesce list item "a" with its successor if they are adjacent. +static void Coalesce(AllocList *a) { + AllocList *n = a->next[0]; + if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size == + reinterpret_cast<char *>(n)) { + LowLevelAlloc::Arena *arena = a->header.arena; + a->header.size += n->header.size; + n->header.magic = 0; + n->header.arena = nullptr; + AllocList *prev[kMaxLevel]; + LLA_SkiplistDelete(&arena->freelist, n, prev); + LLA_SkiplistDelete(&arena->freelist, a, prev); + a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size, + &arena->random); + LLA_SkiplistInsert(&arena->freelist, a, prev); + } +} + +// Adds block at location "v" to the free list +// L >= arena->mu +static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) { + AllocList *f = reinterpret_cast<AllocList *>( + reinterpret_cast<char *>(v) - sizeof (f->header)); + ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header), + "bad magic number in AddToFreelist()"); + ABSL_RAW_CHECK(f->header.arena == arena, + "bad arena pointer in AddToFreelist()"); + f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size, + &arena->random); + AllocList *prev[kMaxLevel]; + LLA_SkiplistInsert(&arena->freelist, f, prev); + f->header.magic = Magic(kMagicUnallocated, &f->header); + Coalesce(f); // maybe coalesce with successor + Coalesce(prev[0]); // maybe coalesce with predecessor +} + +// Frees storage allocated by LowLevelAlloc::Alloc(). 
+// L < arena->mu +void LowLevelAlloc::Free(void *v) { + if (v != nullptr) { + AllocList *f = reinterpret_cast<AllocList *>( + reinterpret_cast<char *>(v) - sizeof (f->header)); + ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header), + "bad magic number in Free()"); + LowLevelAlloc::Arena *arena = f->header.arena; + if ((arena->flags.load(std::memory_order_relaxed) & kCallMallocHook) != 0) { + MallocHook::InvokeDeleteHook(v); + } + ArenaLock section(arena); + AddToFreelist(v, arena); + ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free"); + arena->allocation_count--; + section.Leave(); + } +} + +// allocates and returns a block of size bytes, to be freed with Free() +// L < arena->mu +static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) { + void *result = nullptr; + if (request != 0) { + AllocList *s; // will point to region that satisfies request + ArenaLock section(arena); + ArenaInit(arena); + // round up with header + size_t req_rnd = RoundUp(CheckedAdd(request, sizeof (s->header)), + arena->roundup); + for (;;) { // loop until we find a suitable region + // find the minimum levels that a block of this size must have + int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1; + if (i < arena->freelist.levels) { // potential blocks exist + AllocList *before = &arena->freelist; // predecessor of s + while ((s = Next(i, before, arena)) != nullptr && + s->header.size < req_rnd) { + before = s; + } + if (s != nullptr) { // we found a region + break; + } + } + // we unlock before mmap() both because mmap() may call a callback hook, + // and because it may be slow. + arena->mu.Unlock(); + // mmap generous 64K chunks to decrease + // the chances/impact of fragmentation: + size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16); + void *new_pages; +#ifdef _WIN32 + new_pages = VirtualAlloc(0, new_pages_size, + MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); + ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed"); +#else + if ((arena->flags.load(std::memory_order_relaxed) & + LowLevelAlloc::kAsyncSignalSafe) != 0) { + new_pages = MallocHook::UnhookedMMap(nullptr, new_pages_size, + PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); + } else { + new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + } + if (new_pages == MAP_FAILED) { + ABSL_RAW_LOG(FATAL, "mmap error: %d", errno); + } +#endif + arena->mu.Lock(); + s = reinterpret_cast<AllocList *>(new_pages); + s->header.size = new_pages_size; + // Pretend the block is allocated; call AddToFreelist() to free it. 
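+      // (Marking the region "allocated" first lets AddToFreelist()'s magic
+      // check pass; AddToFreelist() then links it into the skiplist freelist,
+      // stamps it kMagicUnallocated, and coalesces it with any neighbour.)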
+ s->header.magic = Magic(kMagicAllocated, &s->header); + s->header.arena = arena; + AddToFreelist(&s->levels, arena); // insert new region into free list + } + AllocList *prev[kMaxLevel]; + LLA_SkiplistDelete(&arena->freelist, s, prev); // remove from free list + // s points to the first free region that's big enough + if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) { + // big enough to split + AllocList *n = reinterpret_cast<AllocList *> + (req_rnd + reinterpret_cast<char *>(s)); + n->header.size = s->header.size - req_rnd; + n->header.magic = Magic(kMagicAllocated, &n->header); + n->header.arena = arena; + s->header.size = req_rnd; + AddToFreelist(&n->levels, arena); + } + s->header.magic = Magic(kMagicAllocated, &s->header); + ABSL_RAW_CHECK(s->header.arena == arena, ""); + arena->allocation_count++; + section.Leave(); + result = &s->levels; + } + ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request); + return result; +} + +void *LowLevelAlloc::Alloc(size_t request) { + void *result = DoAllocWithArena(request, &default_arena); + if ((default_arena.flags.load(std::memory_order_relaxed) & + kCallMallocHook) != 0) { + // this call must be directly in the user-called allocator function + // for MallocHook::GetCallerStackTrace to work properly + MallocHook::InvokeNewHook(result, request); + } + return result; +} + +void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) { + ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena"); + void *result = DoAllocWithArena(request, arena); + if ((arena->flags.load(std::memory_order_relaxed) & kCallMallocHook) != 0) { + // this call must be directly in the user-called allocator function + // for MallocHook::GetCallerStackTrace to work properly + MallocHook::InvokeNewHook(result, request); + } + return result; +} + +LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() { + return &default_arena; +} + +} // namespace base_internal +} // namespace absl + +#endif // ABSL_LOW_LEVEL_ALLOC_MISSING diff --git a/absl/base/internal/low_level_alloc.h b/absl/base/internal/low_level_alloc.h new file mode 100644 index 000000000000..d6eb813f0d6a --- /dev/null +++ b/absl/base/internal/low_level_alloc.h @@ -0,0 +1,120 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_ +#define ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_ + +// A simple thread-safe memory allocator that does not depend on +// mutexes or thread-specific data. It is intended to be used +// sparingly, and only when malloc() would introduce an unwanted +// dependency, such as inside the heap-checker, or the Mutex +// implementation. + +// IWYU pragma: private, include "base/low_level_alloc.h" + +#include <cstdint> + +#include "absl/base/config.h" + +// LowLevelAlloc requires that the platform support low-level +// allocation of virtual memory. Platforms lacking this cannot use +// LowLevelAlloc. 
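Code that depends on LowLevelAlloc can key off the ABSL_LOW_LEVEL_ALLOC_MISSING feature macro configured just below. A minimal, hypothetical guard (the operator new fallback is illustrative only, not something this header provides):

#include <cstddef>
#include <new>

#include "absl/base/internal/low_level_alloc.h"

void* AllocateScratch(std::size_t n) {
#ifdef ABSL_LOW_LEVEL_ALLOC_MISSING
  // No low-level virtual-memory support on this platform; fall back to the
  // global allocator (hypothetical policy, for illustration only).
  return ::operator new(n);
#else
  return absl::base_internal::LowLevelAlloc::Alloc(n);
#endif
}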
+#ifdef ABSL_LOW_LEVEL_ALLOC_MISSING +#error ABSL_LOW_LEVEL_ALLOC_MISSING cannot be directly set +#elif !defined(ABSL_HAVE_MMAP) && !defined(_WIN32) +#define ABSL_LOW_LEVEL_ALLOC_MISSING 1 +#endif + +// Using LowLevelAlloc with kAsyncSignalSafe isn't supported on Windows. +#ifdef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING +#error ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING cannot be directly set +#elif defined(_WIN32) +#define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1 +#endif + +#include <cstddef> + +#include "absl/base/port.h" + +namespace absl { +namespace base_internal { + +class LowLevelAlloc { + public: + struct Arena; // an arena from which memory may be allocated + + // Returns a pointer to a block of at least "request" bytes + // that have been newly allocated from the specific arena. + // for Alloc() call the DefaultArena() is used. + // Returns 0 if passed request==0. + // Does not return 0 under other circumstances; it crashes if memory + // is not available. + static void *Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook); + static void *AllocWithArena(size_t request, Arena *arena) + ABSL_ATTRIBUTE_SECTION(malloc_hook); + + // Deallocates a region of memory that was previously allocated with + // Alloc(). Does nothing if passed 0. "s" must be either 0, + // or must have been returned from a call to Alloc() and not yet passed to + // Free() since that call to Alloc(). The space is returned to the arena + // from which it was allocated. + static void Free(void *s) ABSL_ATTRIBUTE_SECTION(malloc_hook); + + // ABSL_ATTRIBUTE_SECTION(malloc_hook) for Alloc* and Free + // are to put all callers of MallocHook::Invoke* in this module + // into special section, + // so that MallocHook::GetCallerStackTrace can function accurately. + + // Create a new arena. + // The root metadata for the new arena is allocated in the + // meta_data_arena; the DefaultArena() can be passed for meta_data_arena. + // These values may be ored into flags: + enum { + // Report calls to Alloc() and Free() via the MallocHook interface. + // Set in the DefaultArena. + kCallMallocHook = 0x0001, + +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + // Make calls to Alloc(), Free() be async-signal-safe. Not set in + // DefaultArena(). Not supported on all platforms. + kAsyncSignalSafe = 0x0002, +#endif + + // When used with DefaultArena(), the NewArena() and DeleteArena() calls + // obey the flags given explicitly in the NewArena() call, even if those + // flags differ from the settings in DefaultArena(). So the call + // NewArena(kAsyncSignalSafe, DefaultArena()) is itself async-signal-safe, + // as well as generatating an arena that provides async-signal-safe + // Alloc/Free. + }; + static Arena *NewArena(int32_t flags, Arena *meta_data_arena); + + // Destroys an arena allocated by NewArena and returns true, + // provided no allocated blocks remain in the arena. + // If allocated blocks remain in the arena, does nothing and + // returns false. + // It is illegal to attempt to destroy the DefaultArena(). + static bool DeleteArena(Arena *arena); + + // The default arena that always exists. 
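+  // It has kCallMallocHook set (see the enum above) and may never be
+  // passed to DeleteArena().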
+ static Arena *DefaultArena(); + + private: + LowLevelAlloc(); // no instances +}; + +} // namespace base_internal +} // namespace absl +#endif // ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_ diff --git a/absl/base/internal/low_level_alloc_test.cc b/absl/base/internal/low_level_alloc_test.cc new file mode 100644 index 000000000000..5f60c3d9dfb9 --- /dev/null +++ b/absl/base/internal/low_level_alloc_test.cc @@ -0,0 +1,203 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/low_level_alloc.h" + +#include <stdio.h> +#include <stdlib.h> +#include <thread> // NOLINT(build/c++11) +#include <unordered_map> + +#include "absl/base/internal/malloc_hook.h" + +namespace absl { +namespace base_internal { +namespace { + +// This test doesn't use gtest since it needs to test that everything +// works before main(). +#define TEST_ASSERT(x) \ + if (!(x)) { \ + printf("TEST_ASSERT(%s) FAILED ON LINE %d\n", #x, __LINE__); \ + abort(); \ + } + +// a block of memory obtained from the allocator +struct BlockDesc { + char *ptr; // pointer to memory + int len; // number of bytes + int fill; // filled with data starting with this +}; + +// Check that the pattern placed in the block d +// by RandomizeBlockDesc is still there. +static void CheckBlockDesc(const BlockDesc &d) { + for (int i = 0; i != d.len; i++) { + TEST_ASSERT((d.ptr[i] & 0xff) == ((d.fill + i) & 0xff)); + } +} + +// Fill the block "*d" with a pattern +// starting with a random byte. +static void RandomizeBlockDesc(BlockDesc *d) { + d->fill = rand() & 0xff; + for (int i = 0; i != d->len; i++) { + d->ptr[i] = (d->fill + i) & 0xff; + } +} + +// Use to indicate to the malloc hooks that +// this calls is from LowLevelAlloc. +static bool using_low_level_alloc = false; + +// n times, toss a coin, and based on the outcome +// either allocate a new block or deallocate an old block. +// New blocks are placed in a std::unordered_map with a random key +// and initialized with RandomizeBlockDesc(). +// If keys conflict, the older block is freed. +// Old blocks are always checked with CheckBlockDesc() +// before being freed. At the end of the run, +// all remaining allocated blocks are freed. +// If use_new_arena is true, use a fresh arena, and then delete it. +// If call_malloc_hook is true and user_arena is true, +// allocations and deallocations are reported via the MallocHook +// interface. +static void Test(bool use_new_arena, bool call_malloc_hook, int n) { + typedef std::unordered_map<int, BlockDesc> AllocMap; + AllocMap allocated; + AllocMap::iterator it; + BlockDesc block_desc; + int rnd; + LowLevelAlloc::Arena *arena = 0; + if (use_new_arena) { + int32_t flags = call_malloc_hook ? 
LowLevelAlloc::kCallMallocHook : 0; + arena = LowLevelAlloc::NewArena(flags, LowLevelAlloc::DefaultArena()); + } + for (int i = 0; i != n; i++) { + if (i != 0 && i % 10000 == 0) { + printf("."); + fflush(stdout); + } + + switch (rand() & 1) { // toss a coin + case 0: // coin came up heads: add a block + using_low_level_alloc = true; + block_desc.len = rand() & 0x3fff; + block_desc.ptr = + reinterpret_cast<char *>( + arena == 0 + ? LowLevelAlloc::Alloc(block_desc.len) + : LowLevelAlloc::AllocWithArena(block_desc.len, arena)); + using_low_level_alloc = false; + RandomizeBlockDesc(&block_desc); + rnd = rand(); + it = allocated.find(rnd); + if (it != allocated.end()) { + CheckBlockDesc(it->second); + using_low_level_alloc = true; + LowLevelAlloc::Free(it->second.ptr); + using_low_level_alloc = false; + it->second = block_desc; + } else { + allocated[rnd] = block_desc; + } + break; + case 1: // coin came up tails: remove a block + it = allocated.begin(); + if (it != allocated.end()) { + CheckBlockDesc(it->second); + using_low_level_alloc = true; + LowLevelAlloc::Free(it->second.ptr); + using_low_level_alloc = false; + allocated.erase(it); + } + break; + } + } + // remove all remaining blocks + while ((it = allocated.begin()) != allocated.end()) { + CheckBlockDesc(it->second); + using_low_level_alloc = true; + LowLevelAlloc::Free(it->second.ptr); + using_low_level_alloc = false; + allocated.erase(it); + } + if (use_new_arena) { + TEST_ASSERT(LowLevelAlloc::DeleteArena(arena)); + } +} + +// used for counting allocates and frees +static int32_t allocates; +static int32_t frees; + +// ignore uses of the allocator not triggered by our test +static std::thread::id* test_tid; + +// called on each alloc if kCallMallocHook specified +static void AllocHook(const void *p, size_t size) { + if (using_low_level_alloc) { + if (*test_tid == std::this_thread::get_id()) { + allocates++; + } + } +} + +// called on each free if kCallMallocHook specified +static void FreeHook(const void *p) { + if (using_low_level_alloc) { + if (*test_tid == std::this_thread::get_id()) { + frees++; + } + } +} + +// LowLevelAlloc is designed to be safe to call before main(). +static struct BeforeMain { + BeforeMain() { + test_tid = new std::thread::id(std::this_thread::get_id()); + TEST_ASSERT(MallocHook::AddNewHook(&AllocHook)); + TEST_ASSERT(MallocHook::AddDeleteHook(&FreeHook)); + TEST_ASSERT(allocates == 0); + TEST_ASSERT(frees == 0); + Test(false, false, 50000); + TEST_ASSERT(allocates != 0); // default arena calls hooks + TEST_ASSERT(frees != 0); + for (int i = 0; i != 16; i++) { + bool call_hooks = ((i & 1) == 1); + allocates = 0; + frees = 0; + Test(true, call_hooks, 15000); + if (call_hooks) { + TEST_ASSERT(allocates > 5000); // arena calls hooks + TEST_ASSERT(frees > 5000); + } else { + TEST_ASSERT(allocates == 0); // arena doesn't call hooks + TEST_ASSERT(frees == 0); + } + } + TEST_ASSERT(MallocHook::RemoveNewHook(&AllocHook)); + TEST_ASSERT(MallocHook::RemoveDeleteHook(&FreeHook)); + } +} before_main; + +} // namespace +} // namespace base_internal +} // namespace absl + +int main(int argc, char *argv[]) { + // The actual test runs in the global constructor of `before_main`. + printf("PASS\n"); + return 0; +} diff --git a/absl/base/internal/low_level_scheduling.h b/absl/base/internal/low_level_scheduling.h new file mode 100644 index 000000000000..b9501076ec57 --- /dev/null +++ b/absl/base/internal/low_level_scheduling.h @@ -0,0 +1,104 @@ +// Copyright 2017 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Core interfaces and definitions used by by low-level //base interfaces such +// as SpinLock. + +#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_ +#define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_ + +#include "absl/base/internal/scheduling_mode.h" +#include "absl/base/macros.h" + +// The following two declarations exist so SchedulingGuard may friend them with +// the appropriate language linkage. These callbacks allow libc internals, such +// as function level statics, to schedule cooperatively when locking. +extern "C" bool __google_disable_rescheduling(void); +extern "C" void __google_enable_rescheduling(bool disable_result); + +namespace absl { +namespace base_internal { + +class SpinLock; // To allow use of SchedulingGuard. +class SchedulingHelper; // To allow use of SchedulingGuard. + +// SchedulingGuard +// Provides guard semantics that may be used to disable cooperative rescheduling +// of the calling thread within specific program blocks. This is used to +// protect resources (e.g. low-level SpinLocks or Domain code) that cooperative +// scheduling depends on. +// +// Domain implementations capable of rescheduling in reaction to involuntary +// kernel thread actions (e.g blocking due to a pagefault or syscall) must +// guarantee that an annotated thread is not allowed to (cooperatively) +// reschedule until the annotated region is complete. +// +// It is an error to attempt to use a cooperatively scheduled resource (e.g. +// Mutex) within a rescheduling-disabled region. +// +// All methods are async-signal safe. +class SchedulingGuard { + public: + // Returns true iff the calling thread may be cooperatively rescheduled. + static bool ReschedulingIsAllowed(); + + private: + // Disable cooperative rescheduling of the calling thread. It may still + // initiate scheduling operations (e.g. wake-ups), however, it may not itself + // reschedule. Nestable. The returned result is opaque, clients should not + // attempt to interpret it. + // REQUIRES: Result must be passed to a pairing EnableScheduling(). + static bool DisableRescheduling(); + + // Marks the end of a rescheduling disabled region, previously started by + // DisableRescheduling(). + // REQUIRES: Pairs with innermost call (and result) of DisableRescheduling(). + static void EnableRescheduling(bool disable_result); + + // A scoped helper for {Disable, Enable}Rescheduling(). + // REQUIRES: destructor must run in same thread as constructor. + struct ScopedDisable { + ScopedDisable() { disabled = SchedulingGuard::DisableRescheduling(); } + ~ScopedDisable() { SchedulingGuard::EnableRescheduling(disabled); } + + bool disabled; + }; + + // Access to SchedulingGuard is explicitly white-listed. 
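+  // Within those friends, the RAII form is the typical usage, e.g.:
+  //   {
+  //     SchedulingGuard::ScopedDisable disable;  // rescheduling off
+  //     ... touch the protected resource ...
+  //   }  // rescheduling restored by the destructor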
+ friend class SchedulingHelper; + friend class SpinLock; + + SchedulingGuard(const SchedulingGuard&) = delete; + SchedulingGuard& operator=(const SchedulingGuard&) = delete; +}; + +//------------------------------------------------------------------------------ +// End of public interfaces. +//------------------------------------------------------------------------------ +inline bool SchedulingGuard::ReschedulingIsAllowed() { + return false; +} + +inline bool SchedulingGuard::DisableRescheduling() { + return false; +} + +inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) { + return; +} + + +} // namespace base_internal +} // namespace absl +#endif // ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_ diff --git a/absl/base/internal/malloc_extension.cc b/absl/base/internal/malloc_extension.cc new file mode 100644 index 000000000000..daf4bc32e768 --- /dev/null +++ b/absl/base/internal/malloc_extension.cc @@ -0,0 +1,197 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/malloc_extension.h" + +#include <assert.h> +#include <stdint.h> +#include <stdio.h> +#include <string.h> +#include <atomic> +#include <string> + +#include "absl/base/dynamic_annotations.h" +#include "absl/base/internal/malloc_extension_c.h" +#include "absl/base/port.h" + +namespace absl { +namespace base_internal { + +// SysAllocator implementation +SysAllocator::~SysAllocator() {} +void SysAllocator::GetStats(char* buffer, int) { buffer[0] = 0; } + +// Default implementation -- does nothing +MallocExtension::~MallocExtension() { } +bool MallocExtension::VerifyAllMemory() { return true; } +bool MallocExtension::VerifyNewMemory(const void*) { return true; } +bool MallocExtension::VerifyArrayNewMemory(const void*) { return true; } +bool MallocExtension::VerifyMallocMemory(const void*) { return true; } + +bool MallocExtension::GetNumericProperty(const char*, size_t*) { + return false; +} + +bool MallocExtension::SetNumericProperty(const char*, size_t) { + return false; +} + +void MallocExtension::GetStats(char* buffer, int length) { + assert(length > 0); + static_cast<void>(length); + buffer[0] = '\0'; +} + +bool MallocExtension::MallocMemoryStats(int* blocks, size_t* total, + int histogram[kMallocHistogramSize]) { + *blocks = 0; + *total = 0; + memset(histogram, 0, sizeof(*histogram) * kMallocHistogramSize); + return true; +} + +void MallocExtension::MarkThreadIdle() { + // Default implementation does nothing +} + +void MallocExtension::MarkThreadBusy() { + // Default implementation does nothing +} + +SysAllocator* MallocExtension::GetSystemAllocator() { + return nullptr; +} + +void MallocExtension::SetSystemAllocator(SysAllocator*) { + // Default implementation does nothing +} + +void MallocExtension::ReleaseToSystem(size_t) { + // Default implementation does nothing +} + +void MallocExtension::ReleaseFreeMemory() { + ReleaseToSystem(static_cast<size_t>(-1)); // SIZE_T_MAX +} + +void MallocExtension::SetMemoryReleaseRate(double) { + 
// Default implementation does nothing +} + +double MallocExtension::GetMemoryReleaseRate() { + return -1.0; +} + +size_t MallocExtension::GetEstimatedAllocatedSize(size_t size) { + return size; +} + +size_t MallocExtension::GetAllocatedSize(const void* p) { + assert(GetOwnership(p) != kNotOwned); + static_cast<void>(p); + return 0; +} + +MallocExtension::Ownership MallocExtension::GetOwnership(const void*) { + return kUnknownOwnership; +} + +void MallocExtension::GetProperties(MallocExtension::StatLevel, + std::map<std::string, Property>* result) { + result->clear(); +} + +size_t MallocExtension::ReleaseCPUMemory(int) { + return 0; +} + +// The current malloc extension object. + +std::atomic<MallocExtension*> MallocExtension::current_instance_; + +MallocExtension* MallocExtension::InitModule() { + MallocExtension* ext = new MallocExtension; + current_instance_.store(ext, std::memory_order_release); + return ext; +} + +void MallocExtension::Register(MallocExtension* implementation) { + InitModuleOnce(); + // When running under valgrind, our custom malloc is replaced with + // valgrind's one and malloc extensions will not work. (Note: + // callers should be responsible for checking that they are the + // malloc that is really being run, before calling Register. This + // is just here as an extra sanity check.) + // Under compiler-based ThreadSanitizer RunningOnValgrind() returns true, + // but we still want to use malloc extensions. +#ifndef THREAD_SANITIZER + if (RunningOnValgrind()) { + return; + } +#endif // #ifndef THREAD_SANITIZER + current_instance_.store(implementation, std::memory_order_release); +} +void MallocExtension::GetHeapSample(MallocExtensionWriter*) {} + +void MallocExtension::GetHeapGrowthStacks(MallocExtensionWriter*) {} + +void MallocExtension::GetFragmentationProfile(MallocExtensionWriter*) {} + +} // namespace base_internal +} // namespace absl + +// These are C shims that work on the current instance. + +#define C_SHIM(fn, retval, paramlist, arglist) \ + extern "C" retval MallocExtension_##fn paramlist { \ + return absl::base_internal::MallocExtension::instance()->fn arglist; \ + } + +C_SHIM(VerifyAllMemory, int, (void), ()); +C_SHIM(VerifyNewMemory, int, (const void* p), (p)); +C_SHIM(VerifyArrayNewMemory, int, (const void* p), (p)); +C_SHIM(VerifyMallocMemory, int, (const void* p), (p)); +C_SHIM( + MallocMemoryStats, int, + (int* blocks, size_t* total, + int histogram[absl::base_internal::MallocExtension::kMallocHistogramSize]), + (blocks, total, histogram)); + +C_SHIM(GetStats, void, + (char* buffer, int buffer_length), (buffer, buffer_length)); +C_SHIM(GetNumericProperty, int, + (const char* property, size_t* value), (property, value)); +C_SHIM(SetNumericProperty, int, + (const char* property, size_t value), (property, value)); + +C_SHIM(MarkThreadIdle, void, (void), ()); +C_SHIM(MarkThreadBusy, void, (void), ()); +C_SHIM(ReleaseFreeMemory, void, (void), ()); +C_SHIM(ReleaseToSystem, void, (size_t num_bytes), (num_bytes)); +C_SHIM(GetEstimatedAllocatedSize, size_t, (size_t size), (size)); +C_SHIM(GetAllocatedSize, size_t, (const void* p), (p)); + +// Can't use the shim here because of the need to translate the enums. +extern "C" +MallocExtension_Ownership MallocExtension_GetOwnership(const void* p) { + return static_cast<MallocExtension_Ownership>( + absl::base_internal::MallocExtension::instance()->GetOwnership(p)); +} + +// Default implementation just returns size. 
The expectation is that +// the linked-in malloc implementation might provide an override of +// this weak function with a better implementation. +ABSL_ATTRIBUTE_WEAK ABSL_ATTRIBUTE_NOINLINE size_t nallocx(size_t size, int) { + return size; +} diff --git a/absl/base/internal/malloc_extension.h b/absl/base/internal/malloc_extension.h new file mode 100644 index 000000000000..dee4116b207c --- /dev/null +++ b/absl/base/internal/malloc_extension.h @@ -0,0 +1,424 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Extra extensions exported by some malloc implementations. These +// extensions are accessed through a virtual base class so an +// application can link against a malloc that does not implement these +// extensions, and it will get default versions that do nothing. +// +// NOTE FOR C USERS: If you wish to use this functionality from within +// a C program, see malloc_extension_c.h. + +#ifndef ABSL_BASE_INTERNAL_MALLOC_EXTENSION_H_ +#define ABSL_BASE_INTERNAL_MALLOC_EXTENSION_H_ + +#include <atomic> +#include <map> +#include <memory> +#include <vector> + +#include <stddef.h> +#include <stdint.h> +#include <string> +#include "absl/base/macros.h" +#include "absl/base/port.h" +namespace absl { +namespace base_internal { + +class MallocExtensionWriter; + +// Interface to a pluggable system allocator. +class SysAllocator { + public: + SysAllocator() { + } + virtual ~SysAllocator(); + + // Allocates "size"-byte of memory from system aligned with "alignment". + // Returns null if failed. Otherwise, the returned pointer p up to and + // including (p + actual_size -1) have been allocated. + virtual void* Alloc(size_t size, size_t *actual_size, size_t alignment) = 0; + + // Get a human-readable description of the current state of the + // allocator. The state is stored as a null-terminated std::string in + // a prefix of buffer. + virtual void GetStats(char* buffer, int length); +}; + +// The default implementations of the following routines do nothing. +// All implementations should be thread-safe; the current ones +// (DebugMallocImplementation and TCMallocImplementation) are. +class MallocExtension { + public: + virtual ~MallocExtension(); + + // Verifies that all blocks are valid. Returns true if all are; dumps + // core otherwise. A no-op except in debug mode. Even in debug mode, + // they may not do any checking except with certain malloc + // implementations. Thread-safe. + virtual bool VerifyAllMemory(); + + // Verifies that p was returned by new, has not been deleted, and is + // valid. Returns true if p is good; dumps core otherwise. A no-op + // except in debug mode. Even in debug mode, may not do any checking + // except with certain malloc implementations. Thread-safe. + virtual bool VerifyNewMemory(const void* p); + + // Verifies that p was returned by new[], has not been deleted, and is + // valid. Returns true if p is good; dumps core otherwise. A no-op + // except in debug mode. 
Even in debug mode, may not do any checking + // except with certain malloc implementations. Thread-safe. + virtual bool VerifyArrayNewMemory(const void* p); + + // Verifies that p was returned by malloc, has not been freed, and is + // valid. Returns true if p is good; dumps core otherwise. A no-op + // except in debug mode. Even in debug mode, may not do any checking + // except with certain malloc implementations. Thread-safe. + virtual bool VerifyMallocMemory(const void* p); + + // If statistics collection is enabled, sets *blocks to be the number of + // currently allocated blocks, sets *total to be the total size allocated + // over all blocks, sets histogram[n] to be the number of blocks with + // size between 2^n-1 and 2^(n+1), and returns true. Returns false, and + // does not change *blocks, *total, or *histogram, if statistics + // collection is disabled. + // + // Note that these statistics reflect memory allocated by new, new[], + // malloc(), and realloc(), but not mmap(). They may be larger (if not + // all pages have been written to) or smaller (if pages have been + // allocated by mmap()) than the total RSS size. They will always be + // smaller than the total virtual memory size. + static constexpr int kMallocHistogramSize = 64; + virtual bool MallocMemoryStats(int* blocks, size_t* total, + int histogram[kMallocHistogramSize]); + + // Get a human readable description of the current state of the malloc + // data structures. The state is stored as a null-terminated std::string + // in a prefix of "buffer[0,buffer_length-1]". + // REQUIRES: buffer_length > 0. + virtual void GetStats(char* buffer, int buffer_length); + + // Outputs to "writer" a sample of live objects and the stack traces + // that allocated these objects. The output can be passed to pprof. + virtual void GetHeapSample(MallocExtensionWriter* writer); + + // Outputs to "writer" the stack traces that caused growth in the + // address space size. The output can be passed to "pprof". + virtual void GetHeapGrowthStacks(MallocExtensionWriter* writer); + + // Outputs to "writer" a fragmentation profile. The output can be + // passed to "pprof". In particular, the result is a list of + // <n,total,stacktrace> tuples that says that "total" bytes in "n" + // objects are currently unusable because of fragmentation caused by + // an allocation with the specified "stacktrace". + virtual void GetFragmentationProfile(MallocExtensionWriter* writer); + + // ------------------------------------------------------------------- + // Control operations for getting and setting malloc implementation + // specific parameters. Some currently useful properties: + // + // generic + // ------- + // "generic.current_allocated_bytes" + // Number of bytes currently allocated by application + // This property is not writable. + // + // "generic.heap_size" + // Number of bytes in the heap == + // current_allocated_bytes + + // fragmentation + + // freed memory regions + // This property is not writable. + // + // tcmalloc + // -------- + // "tcmalloc.max_total_thread_cache_bytes" + // Upper limit on total number of bytes stored across all + // per-thread caches. Default: 16MB. + // + // "tcmalloc.current_total_thread_cache_bytes" + // Number of bytes used across all thread caches. + // This property is not writable. + // + // "tcmalloc.pageheap_free_bytes" + // Number of bytes in free, mapped pages in page heap. These + // bytes can be used to fulfill allocation requests. 
They + // always count towards virtual memory usage, and unless the + // underlying memory is swapped out by the OS, they also count + // towards physical memory usage. This property is not writable. + // + // "tcmalloc.pageheap_unmapped_bytes" + // Number of bytes in free, unmapped pages in page heap. + // These are bytes that have been released back to the OS, + // possibly by one of the MallocExtension "Release" calls. + // They can be used to fulfill allocation requests, but + // typically incur a page fault. They always count towards + // virtual memory usage, and depending on the OS, typically + // do not count towards physical memory usage. This property + // is not writable. + // + // "tcmalloc.per_cpu_caches_active" + // Whether tcmalloc is using per-CPU caches (1 or 0 respectively). + // This property is not writable. + // ------------------------------------------------------------------- + + // Get the named "property"'s value. Returns true if the property + // is known. Returns false if the property is not a valid property + // name for the current malloc implementation. + // REQUIRES: property != null; value != null + virtual bool GetNumericProperty(const char* property, size_t* value); + + // Set the named "property"'s value. Returns true if the property + // is known and writable. Returns false if the property is not a + // valid property name for the current malloc implementation, or + // is not writable. + // REQUIRES: property != null + virtual bool SetNumericProperty(const char* property, size_t value); + + // Mark the current thread as "idle". This routine may optionally + // be called by threads as a hint to the malloc implementation that + // any thread-specific resources should be released. Note: this may + // be an expensive routine, so it should not be called too often. + // + // Also, if the code that calls this routine will go to sleep for + // a while, it should take care to not allocate anything between + // the call to this routine and the beginning of the sleep. + // + // Most malloc implementations ignore this routine. + virtual void MarkThreadIdle(); + + // Mark the current thread as "busy". This routine should be + // called after MarkThreadIdle() if the thread will now do more + // work. If this method is not called, performance may suffer. + // + // Most malloc implementations ignore this routine. + virtual void MarkThreadBusy(); + + // Attempt to free any resources associated with cpu <cpu> (in the sense + // of only being usable from that CPU.) Returns the number of bytes + // previously assigned to "cpu" that were freed. Safe to call from + // any processor, not just <cpu>. + // + // Most malloc implementations ignore this routine (known exceptions: + // tcmalloc with --tcmalloc_per_cpu_caches=true.) + virtual size_t ReleaseCPUMemory(int cpu); + + // Gets the system allocator used by the malloc extension instance. Returns + // null for malloc implementations that do not support pluggable system + // allocators. + virtual SysAllocator* GetSystemAllocator(); + + // Sets the system allocator to the specified. + // + // Users could register their own system allocators for malloc implementation + // that supports pluggable system allocators, such as TCMalloc, by doing: + // alloc = new MyOwnSysAllocator(); + // MallocExtension::instance()->SetSystemAllocator(alloc); + // It's up to users whether to fall back (recommended) to the default + // system allocator (use GetSystemAllocator() above) or not. The caller is + // responsible to any necessary locking. 
+ // See tcmalloc/system-alloc.h for the interface and + // tcmalloc/memfs_malloc.cc for the examples. + // + // It's a no-op for malloc implementations that do not support pluggable + // system allocators. + virtual void SetSystemAllocator(SysAllocator *a); + + // Try to release num_bytes of free memory back to the operating + // system for reuse. Use this extension with caution -- to get this + // memory back may require faulting pages back in by the OS, and + // that may be slow. (Currently only implemented in tcmalloc.) + virtual void ReleaseToSystem(size_t num_bytes); + + // Same as ReleaseToSystem() but release as much memory as possible. + virtual void ReleaseFreeMemory(); + + // Sets the rate at which we release unused memory to the system. + // Zero means we never release memory back to the system. Increase + // this flag to return memory faster; decrease it to return memory + // slower. Reasonable rates are in the range [0,10]. (Currently + // only implemented in tcmalloc). + virtual void SetMemoryReleaseRate(double rate); + + // Gets the release rate. Returns a value < 0 if unknown. + virtual double GetMemoryReleaseRate(); + + // Returns the estimated number of bytes that will be allocated for + // a request of "size" bytes. This is an estimate: an allocation of + // SIZE bytes may reserve more bytes, but will never reserve less. + // (Currently only implemented in tcmalloc, other implementations + // always return SIZE.) + // This is equivalent to malloc_good_size() in OS X. + virtual size_t GetEstimatedAllocatedSize(size_t size); + + // Returns the actual number N of bytes reserved by tcmalloc for the + // pointer p. This number may be equal to or greater than the + // number of bytes requested when p was allocated. + // + // This routine is just useful for statistics collection. The + // client must *not* read or write from the extra bytes that are + // indicated by this call. + // + // Example, suppose the client gets memory by calling + // p = malloc(10) + // and GetAllocatedSize(p) returns 16. The client must only use the + // first 10 bytes p[0..9], and not attempt to read or write p[10..15]. + // + // p must have been allocated by this malloc implementation, must + // not be an interior pointer -- that is, must be exactly the + // pointer returned to by malloc() et al., not some offset from that + // -- and should not have been freed yet. p may be null. + // (Currently only implemented in tcmalloc; other implementations + // will return 0.) + virtual size_t GetAllocatedSize(const void* p); + + // Returns kOwned if this malloc implementation allocated the memory + // pointed to by p, or kNotOwned if some other malloc implementation + // allocated it or p is null. May also return kUnknownOwnership if + // the malloc implementation does not keep track of ownership. + // REQUIRES: p must be a value returned from a previous call to + // malloc(), calloc(), realloc(), memalign(), posix_memalign(), + // valloc(), pvalloc(), new, or new[], and must refer to memory that + // is currently allocated (so, for instance, you should not pass in + // a pointer after having called free() on it). + enum Ownership { + // NOTE: Enum values MUST be kept in sync with the version in + // malloc_extension_c.h + kUnknownOwnership = 0, + kOwned, + kNotOwned + }; + virtual Ownership GetOwnership(const void* p); + + // The current malloc implementation. Always non-null. 
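+  // Typical read-only use:
+  //   size_t heap_size = 0;
+  //   if (MallocExtension::instance()->GetNumericProperty(
+  //           "generic.heap_size", &heap_size)) {
+  //     // property is supported by the linked-in malloc
+  //   }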
+ static MallocExtension* instance() { + InitModuleOnce(); + return current_instance_.load(std::memory_order_acquire); + } + + // Change the malloc implementation. Typically called by the + // malloc implementation during initialization. + static void Register(MallocExtension* implementation); + + // Type used by GetProperties. See comment on GetProperties. + struct Property { + size_t value; + // Stores breakdown of the property value bucketed by object size. + struct Bucket { + size_t min_object_size; + size_t max_object_size; + size_t size; + }; + // Empty unless detailed info was asked for and this type has buckets + std::vector<Bucket> buckets; + }; + + // Type used by GetProperties. See comment on GetProperties. + enum StatLevel { kSummary, kDetailed }; + + // Stores in *result detailed statistics about the malloc + // implementation. *result will be a map keyed by the name of + // the statistic. Each statistic has at least a "value" field. + // + // Some statistics may also contain an array of buckets if + // level==kDetailed and the "value" can be subdivided + // into different buckets for different object sizes. If + // such detailed statistics are not available, Property::buckets + // will be empty. Otherwise Property::buckets will contain + // potentially many entries. For each bucket b, b.value + // will count the value contributed by objects in the range + // [b.min_object_size, b.max_object_size]. + // + // Common across malloc implementations: + // generic.bytes_in_use_by_app -- Bytes currently in use by application + // generic.physical_memory_used -- Overall (including malloc internals) + // generic.virtual_memory_used -- Overall (including malloc internals) + // + // Tcmalloc specific properties + // tcmalloc.cpu_free -- Bytes in per-cpu free-lists + // tcmalloc.thread_cache_free -- Bytes in per-thread free-lists + // tcmalloc.transfer_cache -- Bytes in cross-thread transfer caches + // tcmalloc.central_cache_free -- Bytes in central cache + // tcmalloc.page_heap_free -- Bytes in page heap + // tcmalloc.page_heap_unmapped -- Bytes in page heap (no backing phys. mem) + // tcmalloc.metadata_bytes -- Used by internal data structures + // tcmalloc.thread_cache_count -- Number of thread caches in use + // + // Debug allocator + // debug.free_queue -- Recently freed objects + virtual void GetProperties(StatLevel level, + std::map<std::string, Property>* result); + private: + static MallocExtension* InitModule(); + + static void InitModuleOnce() { + // Pointer stored here so heap leak checker will consider the default + // instance reachable, even if current_instance_ is later overridden by + // MallocExtension::Register(). + ABSL_ATTRIBUTE_UNUSED static MallocExtension* default_instance = + InitModule(); + } + + static std::atomic<MallocExtension*> current_instance_; +}; + +// Base class than can handle output generated by GetHeapSample() and +// GetHeapGrowthStacks(). Use the available subclass or roll your +// own. Useful if you want explicit control over the type of output +// buffer used (e.g. IOBuffer, Cord, etc.) +class MallocExtensionWriter { + public: + virtual ~MallocExtensionWriter() {} + virtual void Write(const char* buf, int len) = 0; + protected: + MallocExtensionWriter() {} + MallocExtensionWriter(const MallocExtensionWriter&) = delete; + MallocExtensionWriter& operator=(const MallocExtensionWriter&) = delete; +}; + +// A subclass that writes to the std::string "out". NOTE: The generated +// data is *appended* to "*out". 
I.e., the old contents of "*out" are +// preserved. +class StringMallocExtensionWriter : public MallocExtensionWriter { + public: + explicit StringMallocExtensionWriter(std::string* out) : out_(out) {} + virtual void Write(const char* buf, int len) { + out_->append(buf, len); + } + + private: + std::string* const out_; + StringMallocExtensionWriter(const StringMallocExtensionWriter&) = delete; + StringMallocExtensionWriter& operator=(const StringMallocExtensionWriter&) = + delete; +}; + +} // namespace base_internal +} // namespace absl + +// The nallocx function allocates no memory, but it performs the same size +// computation as the malloc function, and returns the real size of the +// allocation that would result from the equivalent malloc function call. +// Default weak implementation returns size unchanged, but tcmalloc overrides it +// and returns rounded up size. See the following link for details: +// http://www.unix.com/man-page/freebsd/3/nallocx/ +extern "C" size_t nallocx(size_t size, int flags); + +#ifndef MALLOCX_LG_ALIGN +#define MALLOCX_LG_ALIGN(la) (la) +#endif + +#endif // ABSL_BASE_INTERNAL_MALLOC_EXTENSION_H_ diff --git a/absl/base/internal/malloc_extension_c.h b/absl/base/internal/malloc_extension_c.h new file mode 100644 index 000000000000..5afc84a43584 --- /dev/null +++ b/absl/base/internal/malloc_extension_c.h @@ -0,0 +1,75 @@ +/* + * Copyright 2017 The Abseil Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + + * C shims for the C++ malloc_extension.h. See malloc_extension.h for + * details. Note these C shims always work on + * MallocExtension::instance(); it is not possible to have more than + * one MallocExtension object in C applications. + */ + +#ifndef ABSL_BASE_INTERNAL_MALLOC_EXTENSION_C_H_ +#define ABSL_BASE_INTERNAL_MALLOC_EXTENSION_C_H_ + +#include <stddef.h> +#include <sys/types.h> + +#ifdef __cplusplus +extern "C" { +#endif + +#define kMallocExtensionHistogramSize 64 + +int MallocExtension_VerifyAllMemory(void); +int MallocExtension_VerifyNewMemory(const void* p); +int MallocExtension_VerifyArrayNewMemory(const void* p); +int MallocExtension_VerifyMallocMemory(const void* p); +int MallocExtension_MallocMemoryStats(int* blocks, size_t* total, + int histogram[kMallocExtensionHistogramSize]); + +void MallocExtension_GetStats(char* buffer, int buffer_length); + +/* TODO(csilvers): write a C version of these routines, that perhaps + * takes a function ptr and a void *. 
+ */ +/* void MallocExtension_GetHeapSample(MallocExtensionWriter* result); */ +/* void MallocExtension_GetHeapGrowthStacks(MallocExtensionWriter* result); */ + +int MallocExtension_GetNumericProperty(const char* property, size_t* value); +int MallocExtension_SetNumericProperty(const char* property, size_t value); +void MallocExtension_MarkThreadIdle(void); +void MallocExtension_MarkThreadBusy(void); +void MallocExtension_ReleaseToSystem(size_t num_bytes); +void MallocExtension_ReleaseFreeMemory(void); +size_t MallocExtension_GetEstimatedAllocatedSize(size_t size); +size_t MallocExtension_GetAllocatedSize(const void* p); + +/* + * NOTE: These enum values MUST be kept in sync with the version in + * malloc_extension.h + */ +typedef enum { + MallocExtension_kUnknownOwnership = 0, + MallocExtension_kOwned, + MallocExtension_kNotOwned +} MallocExtension_Ownership; + +MallocExtension_Ownership MallocExtension_GetOwnership(const void* p); + + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif /* ABSL_BASE_INTERNAL_MALLOC_EXTENSION_C_H_ */ diff --git a/absl/base/internal/malloc_extension_test.cc b/absl/base/internal/malloc_extension_test.cc new file mode 100644 index 000000000000..246875a030ef --- /dev/null +++ b/absl/base/internal/malloc_extension_test.cc @@ -0,0 +1,102 @@ +/* + * Copyright 2017 The Abseil Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <algorithm> +#include <cstdlib> + +#include "gtest/gtest.h" +#include "absl/base/internal/malloc_extension.h" +#include "absl/base/internal/malloc_extension_c.h" + +namespace absl { +namespace base_internal { +namespace { + +TEST(MallocExtension, MallocExtension) { + void* a = malloc(1000); + + size_t cxx_bytes_used, c_bytes_used; + if (!MallocExtension::instance()->GetNumericProperty( + "generic.current_allocated_bytes", &cxx_bytes_used)) { + EXPECT_TRUE(ABSL_MALLOC_EXTENSION_TEST_ALLOW_MISSING_EXTENSION); + } else { + ASSERT_TRUE(MallocExtension::instance()->GetNumericProperty( + "generic.current_allocated_bytes", &cxx_bytes_used)); + ASSERT_TRUE(MallocExtension_GetNumericProperty( + "generic.current_allocated_bytes", &c_bytes_used)); +#ifndef MEMORY_SANITIZER + EXPECT_GT(cxx_bytes_used, 1000); + EXPECT_GT(c_bytes_used, 1000); +#endif + + EXPECT_TRUE(MallocExtension::instance()->VerifyAllMemory()); + EXPECT_TRUE(MallocExtension_VerifyAllMemory()); + + EXPECT_EQ(MallocExtension::kOwned, + MallocExtension::instance()->GetOwnership(a)); + // TODO(csilvers): this relies on undocumented behavior that + // GetOwnership works on stack-allocated variables. Use a better test. + EXPECT_EQ(MallocExtension::kNotOwned, + MallocExtension::instance()->GetOwnership(&cxx_bytes_used)); + EXPECT_EQ(MallocExtension::kNotOwned, + MallocExtension::instance()->GetOwnership(nullptr)); + EXPECT_GE(MallocExtension::instance()->GetAllocatedSize(a), 1000); + // This is just a sanity check. 
If we allocated too much, tcmalloc is + // broken + EXPECT_LE(MallocExtension::instance()->GetAllocatedSize(a), 5000); + EXPECT_GE(MallocExtension::instance()->GetEstimatedAllocatedSize(1000), + 1000); + for (int i = 0; i < 10; ++i) { + void* p = malloc(i); + EXPECT_GE(MallocExtension::instance()->GetAllocatedSize(p), + MallocExtension::instance()->GetEstimatedAllocatedSize(i)); + free(p); + } + + // Check the c-shim version too. + EXPECT_EQ(MallocExtension_kOwned, MallocExtension_GetOwnership(a)); + EXPECT_EQ(MallocExtension_kNotOwned, + MallocExtension_GetOwnership(&cxx_bytes_used)); + EXPECT_EQ(MallocExtension_kNotOwned, MallocExtension_GetOwnership(nullptr)); + EXPECT_GE(MallocExtension_GetAllocatedSize(a), 1000); + EXPECT_LE(MallocExtension_GetAllocatedSize(a), 5000); + EXPECT_GE(MallocExtension_GetEstimatedAllocatedSize(1000), 1000); + } + + free(a); +} + +// Verify that the .cc file and .h file have the same enum values. +TEST(GetOwnership, EnumValuesEqualForCAndCXX) { + EXPECT_EQ(static_cast<int>(MallocExtension::kUnknownOwnership), + static_cast<int>(MallocExtension_kUnknownOwnership)); + EXPECT_EQ(static_cast<int>(MallocExtension::kOwned), + static_cast<int>(MallocExtension_kOwned)); + EXPECT_EQ(static_cast<int>(MallocExtension::kNotOwned), + static_cast<int>(MallocExtension_kNotOwned)); +} + +TEST(nallocx, SaneBehavior) { + for (size_t size = 0; size < 64 * 1024; ++size) { + size_t alloc_size = nallocx(size, 0); + EXPECT_LE(size, alloc_size) << "size is " << size; + EXPECT_LE(alloc_size, std::max(size + 100, 2 * size)) << "size is " << size; + } +} + +} // namespace +} // namespace base_internal +} // namespace absl diff --git a/absl/base/internal/malloc_hook.cc b/absl/base/internal/malloc_hook.cc new file mode 100644 index 000000000000..d5d227a5f881 --- /dev/null +++ b/absl/base/internal/malloc_hook.cc @@ -0,0 +1,611 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/config.h" + +#if ABSL_HAVE_MMAP +// Disable the glibc prototype of mremap(), as older versions of the +// system headers define this function with only four arguments, +// whereas newer versions allow an optional fifth argument: +#define mremap glibc_mremap +#include <sys/mman.h> +#undef mremap +#endif + +#include <cstddef> +#include <cstdint> +#include <algorithm> + +#include "absl/base/call_once.h" +#include "absl/base/casts.h" +#include "absl/base/internal/malloc_hook.h" +#include "absl/base/internal/malloc_hook_invoke.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/internal/spinlock.h" + +// __THROW is defined in glibc systems. It means, counter-intuitively, +// "This function will never throw an exception." It's an optional +// optimization tool, but we may need to use it to match glibc prototypes. 
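Stepping back to the nallocx() contract exercised by the SaneBehavior test above (size <= nallocx(size, 0), with the weak default returning size unchanged): callers can use it to learn how many bytes a malloc(size) call would really reserve. A hedged sketch with a hypothetical helper:

#include <cstddef>

// Declared in absl/base/internal/malloc_extension.h; the weak default
// returns size unchanged, while tcmalloc returns the rounded-up size.
extern "C" size_t nallocx(size_t size, int flags);

// Number of bytes a malloc(requested) call would actually reserve,
// never reported as less than requested (illustrative helper only).
size_t UsableAllocationSize(size_t requested) {
  size_t actual = nallocx(requested, 0);
  return actual < requested ? requested : actual;
}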
+#ifndef __THROW // I guess we're not on a glibc system +# define __THROW // __THROW is just an optimization, so ok to make it "" +#endif + +namespace absl { +namespace base_internal { +namespace { + +void RemoveInitialHooksAndCallInitializers(); // below. + +absl::once_flag once; + +// These hooks are installed in MallocHook as the only initial hooks. The first +// hook that is called will run RemoveInitialHooksAndCallInitializers (see the +// definition below) and then redispatch to any malloc hooks installed by +// RemoveInitialHooksAndCallInitializers. +// +// Note(llib): there is a possibility of a race in the event that there are +// multiple threads running before the first allocation. This is pretty +// difficult to achieve, but if it is then multiple threads may concurrently do +// allocations. The first caller will call +// RemoveInitialHooksAndCallInitializers via one of the initial hooks. A +// concurrent allocation may, depending on timing either: +// * still have its initial malloc hook installed, run that and block on waiting +// for the first caller to finish its call to +// RemoveInitialHooksAndCallInitializers, and proceed normally. +// * occur some time during the RemoveInitialHooksAndCallInitializers call, at +// which point there could be no initial hooks and the subsequent hooks that +// are about to be set up by RemoveInitialHooksAndCallInitializers haven't +// been installed yet. I think the worst we can get is that some allocations +// will not get reported to some hooks set by the initializers called from +// RemoveInitialHooksAndCallInitializers. + +void InitialNewHook(const void* ptr, size_t size) { + absl::call_once(once, RemoveInitialHooksAndCallInitializers); + MallocHook::InvokeNewHook(ptr, size); +} + +void InitialPreMMapHook(const void* start, + size_t size, + int protection, + int flags, + int fd, + off_t offset) { + absl::call_once(once, RemoveInitialHooksAndCallInitializers); + MallocHook::InvokePreMmapHook(start, size, protection, flags, fd, offset); +} + +void InitialPreSbrkHook(ptrdiff_t increment) { + absl::call_once(once, RemoveInitialHooksAndCallInitializers); + MallocHook::InvokePreSbrkHook(increment); +} + +// This function is called at most once by one of the above initial malloc +// hooks. It removes all initial hooks and initializes all other clients that +// want to get control at the very first memory allocation. The initializers +// may assume that the initial malloc hooks have been removed. The initializers +// may set up malloc hooks and allocate memory. +void RemoveInitialHooksAndCallInitializers() { + ABSL_RAW_CHECK(MallocHook::RemoveNewHook(&InitialNewHook), ""); + ABSL_RAW_CHECK(MallocHook::RemovePreMmapHook(&InitialPreMMapHook), ""); + ABSL_RAW_CHECK(MallocHook::RemovePreSbrkHook(&InitialPreSbrkHook), ""); +} + +} // namespace +} // namespace base_internal +} // namespace absl + +namespace absl { +namespace base_internal { + +// This lock is shared between all implementations of HookList::Add & Remove. +// The potential for contention is very small. This needs to be a SpinLock and +// not a Mutex since it's possible for Mutex locking to allocate memory (e.g., +// per-thread allocation in debug builds), which could cause infinite recursion. 
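The initial-hook arrangement described above funnels the very first allocation (or mmap/sbrk) through absl::call_once, so exactly one thread runs the one-time setup while any concurrent early callers wait for it. A minimal sketch of that bootstrap shape, with hypothetical names standing in for the real hooks and for RemoveInitialHooksAndCallInitializers():

#include <cstddef>

#include "absl/base/call_once.h"

namespace {

absl::once_flag bootstrap_once;

// Runs exactly once: removes the bootstrap hooks and installs the real ones
// (hypothetical stand-in for the setup function above).
void RunOneTimeSetup() { /* ... */ }

// Shape of an initial hook: bootstrap first, then redispatch.
void BootstrapNewHook(const void* /*ptr*/, std::size_t /*size*/) {
  absl::call_once(bootstrap_once, RunOneTimeSetup);
  // ... redispatch to whatever RunOneTimeSetup() installed ...
}

}  // namespace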
+static absl::base_internal::SpinLock hooklist_spinlock( + absl::base_internal::kLinkerInitialized); + +template <typename T> +bool HookList<T>::Add(T value_as_t) { + if (value_as_t == T()) { + return false; + } + absl::base_internal::SpinLockHolder l(&hooklist_spinlock); + // Find the first slot in data that is 0. + int index = 0; + while ((index < kHookListMaxValues) && + (priv_data[index].load(std::memory_order_relaxed) != 0)) { + ++index; + } + if (index == kHookListMaxValues) { + return false; + } + int prev_num_hooks = priv_end.load(std::memory_order_acquire); + priv_data[index].store(reinterpret_cast<intptr_t>(value_as_t), + std::memory_order_release); + if (prev_num_hooks <= index) { + priv_end.store(index + 1, std::memory_order_release); + } + return true; +} + +template <typename T> +bool HookList<T>::Remove(T value_as_t) { + if (value_as_t == T()) { + return false; + } + absl::base_internal::SpinLockHolder l(&hooklist_spinlock); + int hooks_end = priv_end.load(std::memory_order_acquire); + int index = 0; + while (index < hooks_end && + value_as_t != reinterpret_cast<T>( + priv_data[index].load(std::memory_order_acquire))) { + ++index; + } + if (index == hooks_end) { + return false; + } + priv_data[index].store(0, std::memory_order_release); + if (hooks_end == index + 1) { + // Adjust hooks_end down to the lowest possible value. + hooks_end = index; + while ((hooks_end > 0) && + (priv_data[hooks_end - 1].load(std::memory_order_acquire) == 0)) { + --hooks_end; + } + priv_end.store(hooks_end, std::memory_order_release); + } + return true; +} + +template <typename T> +int HookList<T>::Traverse(T* output_array, int n) const { + int hooks_end = priv_end.load(std::memory_order_acquire); + int actual_hooks_end = 0; + for (int i = 0; i < hooks_end && n > 0; ++i) { + T data = reinterpret_cast<T>(priv_data[i].load(std::memory_order_acquire)); + if (data != T()) { + *output_array++ = data; + ++actual_hooks_end; + --n; + } + } + return actual_hooks_end; +} + +// Initialize a HookList (optionally with the given initial_value in index 0). +#define INIT_HOOK_LIST { {0}, {{}} } +#define INIT_HOOK_LIST_WITH_VALUE(initial_value) \ + { {1}, { {reinterpret_cast<intptr_t>(initial_value)} } } + +// Explicit instantiation for malloc_hook_test.cc. This ensures all the methods +// are instantiated. +template struct HookList<MallocHook::NewHook>; + +HookList<MallocHook::NewHook> new_hooks_ = + INIT_HOOK_LIST_WITH_VALUE(&InitialNewHook); +HookList<MallocHook::DeleteHook> delete_hooks_ = INIT_HOOK_LIST; +HookList<MallocHook::SampledNewHook> sampled_new_hooks_ = INIT_HOOK_LIST; +HookList<MallocHook::SampledDeleteHook> sampled_delete_hooks_ = INIT_HOOK_LIST; +HookList<MallocHook::PreMmapHook> premmap_hooks_ = + INIT_HOOK_LIST_WITH_VALUE(&InitialPreMMapHook); +HookList<MallocHook::MmapHook> mmap_hooks_ = INIT_HOOK_LIST; +HookList<MallocHook::MunmapHook> munmap_hooks_ = INIT_HOOK_LIST; +HookList<MallocHook::MremapHook> mremap_hooks_ = INIT_HOOK_LIST; +HookList<MallocHook::PreSbrkHook> presbrk_hooks_ = + INIT_HOOK_LIST_WITH_VALUE(InitialPreSbrkHook); +HookList<MallocHook::SbrkHook> sbrk_hooks_ = INIT_HOOK_LIST; + +// These lists contain either 0 or 1 hooks. 
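HookList<T>::Add() and Remove() above back the public registration API; from a client's point of view the pattern is register, observe, unregister, mirroring the usage in low_level_alloc_test.cc earlier in this commit. A sketch with a hypothetical hook:

#include <cstddef>

#include "absl/base/internal/malloc_hook.h"

// Invoked for every reported allocation while registered (illustration only).
void CountingNewHook(const void* /*ptr*/, size_t /*size*/) {
  // e.g. bump a counter
}

void ObserveAllocations() {
  // Add()/Remove() report success; the backing HookList holds at most
  // kHookListMaxValues entries, so registration can fail when it is full.
  if (absl::base_internal::MallocHook::AddNewHook(&CountingNewHook)) {
    // ... run the code under observation ...
    absl::base_internal::MallocHook::RemoveNewHook(&CountingNewHook);
  }
}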
+HookList<MallocHook::MmapReplacement> mmap_replacement_ = INIT_HOOK_LIST; +HookList<MallocHook::MunmapReplacement> munmap_replacement_ = INIT_HOOK_LIST; + +#undef INIT_HOOK_LIST_WITH_VALUE +#undef INIT_HOOK_LIST + +} // namespace base_internal +} // namespace absl + +// These are available as C bindings as well as C++, hence their +// definition outside the MallocHook class. +extern "C" +int MallocHook_AddNewHook(MallocHook_NewHook hook) { + return absl::base_internal::new_hooks_.Add(hook); +} + +extern "C" +int MallocHook_RemoveNewHook(MallocHook_NewHook hook) { + return absl::base_internal::new_hooks_.Remove(hook); +} + +extern "C" +int MallocHook_AddDeleteHook(MallocHook_DeleteHook hook) { + return absl::base_internal::delete_hooks_.Add(hook); +} + +extern "C" +int MallocHook_RemoveDeleteHook(MallocHook_DeleteHook hook) { + return absl::base_internal::delete_hooks_.Remove(hook); +} + +extern "C" int MallocHook_AddSampledNewHook(MallocHook_SampledNewHook hook) { + return absl::base_internal::sampled_new_hooks_.Add(hook); +} + +extern "C" int MallocHook_RemoveSampledNewHook(MallocHook_SampledNewHook hook) { + return absl::base_internal::sampled_new_hooks_.Remove(hook); +} + +extern "C" int MallocHook_AddSampledDeleteHook( + MallocHook_SampledDeleteHook hook) { + return absl::base_internal::sampled_delete_hooks_.Add(hook); +} + +extern "C" int MallocHook_RemoveSampledDeleteHook( + MallocHook_SampledDeleteHook hook) { + return absl::base_internal::sampled_delete_hooks_.Remove(hook); +} + +extern "C" +int MallocHook_AddPreMmapHook(MallocHook_PreMmapHook hook) { + return absl::base_internal::premmap_hooks_.Add(hook); +} + +extern "C" +int MallocHook_RemovePreMmapHook(MallocHook_PreMmapHook hook) { + return absl::base_internal::premmap_hooks_.Remove(hook); +} + +extern "C" +int MallocHook_SetMmapReplacement(MallocHook_MmapReplacement hook) { + // NOTE this is a best effort CHECK. Concurrent sets could succeed since + // this test is outside of the Add spin lock. + ABSL_RAW_CHECK(absl::base_internal::mmap_replacement_.empty(), + "Only one MMapReplacement is allowed."); + return absl::base_internal::mmap_replacement_.Add(hook); +} + +extern "C" +int MallocHook_RemoveMmapReplacement(MallocHook_MmapReplacement hook) { + return absl::base_internal::mmap_replacement_.Remove(hook); +} + +extern "C" +int MallocHook_AddMmapHook(MallocHook_MmapHook hook) { + return absl::base_internal::mmap_hooks_.Add(hook); +} + +extern "C" +int MallocHook_RemoveMmapHook(MallocHook_MmapHook hook) { + return absl::base_internal::mmap_hooks_.Remove(hook); +} + +extern "C" +int MallocHook_AddMunmapHook(MallocHook_MunmapHook hook) { + return absl::base_internal::munmap_hooks_.Add(hook); +} + +extern "C" +int MallocHook_RemoveMunmapHook(MallocHook_MunmapHook hook) { + return absl::base_internal::munmap_hooks_.Remove(hook); +} + +extern "C" +int MallocHook_SetMunmapReplacement(MallocHook_MunmapReplacement hook) { + // NOTE this is a best effort CHECK. Concurrent sets could succeed since + // this test is outside of the Add spin lock. 
+ ABSL_RAW_CHECK(absl::base_internal::munmap_replacement_.empty(), + "Only one MunmapReplacement is allowed."); + return absl::base_internal::munmap_replacement_.Add(hook); +} + +extern "C" +int MallocHook_RemoveMunmapReplacement(MallocHook_MunmapReplacement hook) { + return absl::base_internal::munmap_replacement_.Remove(hook); +} + +extern "C" +int MallocHook_AddMremapHook(MallocHook_MremapHook hook) { + return absl::base_internal::mremap_hooks_.Add(hook); +} + +extern "C" +int MallocHook_RemoveMremapHook(MallocHook_MremapHook hook) { + return absl::base_internal::mremap_hooks_.Remove(hook); +} + +extern "C" +int MallocHook_AddPreSbrkHook(MallocHook_PreSbrkHook hook) { + return absl::base_internal::presbrk_hooks_.Add(hook); +} + +extern "C" +int MallocHook_RemovePreSbrkHook(MallocHook_PreSbrkHook hook) { + return absl::base_internal::presbrk_hooks_.Remove(hook); +} + +extern "C" +int MallocHook_AddSbrkHook(MallocHook_SbrkHook hook) { + return absl::base_internal::sbrk_hooks_.Add(hook); +} + +extern "C" +int MallocHook_RemoveSbrkHook(MallocHook_SbrkHook hook) { + return absl::base_internal::sbrk_hooks_.Remove(hook); +} + +namespace absl { +namespace base_internal { + +// Note: embedding the function calls inside the traversal of HookList would be +// very confusing, as it is legal for a hook to remove itself and add other +// hooks. Doing traversal first, and then calling the hooks ensures we only +// call the hooks registered at the start. +#define INVOKE_HOOKS(HookType, hook_list, args) \ + do { \ + HookType hooks[kHookListMaxValues]; \ + int num_hooks = hook_list.Traverse(hooks, kHookListMaxValues); \ + for (int i = 0; i < num_hooks; ++i) { \ + (*hooks[i]) args; \ + } \ + } while (0) + +// There should only be one replacement. Return the result of the first +// one, or false if there is none. 
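To make the dispatch pattern concrete, this is roughly what a use of INVOKE_HOOKS(NewHook, new_hooks_, (ptr, size)) expands to, hand-expanded here for illustration with the do { } while (0) wrapper omitted:

// Snapshot the registered hooks first, then call the snapshot, so hooks
// added or removed by a running hook do not affect the current invocation.
NewHook hooks[kHookListMaxValues];
int num_hooks = new_hooks_.Traverse(hooks, kHookListMaxValues);
for (int i = 0; i < num_hooks; ++i) {
  (*hooks[i])(ptr, size);
}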
+#define INVOKE_REPLACEMENT(HookType, hook_list, args) \ + do { \ + HookType hooks[kHookListMaxValues]; \ + int num_hooks = hook_list.Traverse(hooks, kHookListMaxValues); \ + return (num_hooks > 0 && (*hooks[0])args); \ + } while (0) + +void MallocHook::InvokeNewHookSlow(const void* ptr, size_t size) { + INVOKE_HOOKS(NewHook, new_hooks_, (ptr, size)); +} + +void MallocHook::InvokeDeleteHookSlow(const void* ptr) { + INVOKE_HOOKS(DeleteHook, delete_hooks_, (ptr)); +} + +void MallocHook::InvokeSampledNewHookSlow(const SampledAlloc* sampled_alloc) { + INVOKE_HOOKS(SampledNewHook, sampled_new_hooks_, (sampled_alloc)); +} + +void MallocHook::InvokeSampledDeleteHookSlow(AllocHandle handle) { + INVOKE_HOOKS(SampledDeleteHook, sampled_delete_hooks_, (handle)); +} + +void MallocHook::InvokePreMmapHookSlow(const void* start, + size_t size, + int protection, + int flags, + int fd, + off_t offset) { + INVOKE_HOOKS(PreMmapHook, premmap_hooks_, (start, size, protection, flags, fd, + offset)); +} + +void MallocHook::InvokeMmapHookSlow(const void* result, + const void* start, + size_t size, + int protection, + int flags, + int fd, + off_t offset) { + INVOKE_HOOKS(MmapHook, mmap_hooks_, (result, start, size, protection, flags, + fd, offset)); +} + +bool MallocHook::InvokeMmapReplacementSlow(const void* start, + size_t size, + int protection, + int flags, + int fd, + off_t offset, + void** result) { + INVOKE_REPLACEMENT(MmapReplacement, mmap_replacement_, + (start, size, protection, flags, fd, offset, result)); +} + +void MallocHook::InvokeMunmapHookSlow(const void* start, size_t size) { + INVOKE_HOOKS(MunmapHook, munmap_hooks_, (start, size)); +} + +bool MallocHook::InvokeMunmapReplacementSlow(const void* start, + size_t size, + int* result) { + INVOKE_REPLACEMENT(MunmapReplacement, munmap_replacement_, + (start, size, result)); +} + +void MallocHook::InvokeMremapHookSlow(const void* result, + const void* old_addr, + size_t old_size, + size_t new_size, + int flags, + const void* new_addr) { + INVOKE_HOOKS(MremapHook, mremap_hooks_, (result, old_addr, old_size, new_size, + flags, new_addr)); +} + +void MallocHook::InvokePreSbrkHookSlow(ptrdiff_t increment) { + INVOKE_HOOKS(PreSbrkHook, presbrk_hooks_, (increment)); +} + +void MallocHook::InvokeSbrkHookSlow(const void* result, ptrdiff_t increment) { + INVOKE_HOOKS(SbrkHook, sbrk_hooks_, (result, increment)); +} + +#undef INVOKE_HOOKS +#undef INVOKE_REPLACEMENT + +} // namespace base_internal +} // namespace absl + +ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(google_malloc); +ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(google_malloc); +// actual functions are in debugallocation.cc or tcmalloc.cc +ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(malloc_hook); +ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(malloc_hook); +// actual functions are in this file, malloc_hook.cc, and low_level_alloc.cc +ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(blink_malloc); +ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(blink_malloc); +// actual functions are in third_party/blink_headless/.../{PartitionAlloc, +// FastMalloc}.cpp. + +#define ADDR_IN_ATTRIBUTE_SECTION(addr, name) \ + (reinterpret_cast<uintptr_t>(ABSL_ATTRIBUTE_SECTION_START(name)) <= \ + reinterpret_cast<uintptr_t>(addr) && \ + reinterpret_cast<uintptr_t>(addr) < \ + reinterpret_cast<uintptr_t>(ABSL_ATTRIBUTE_SECTION_STOP(name))) + +// Return true iff 'caller' is a return address within a function +// that calls one of our hooks via MallocHook:Invoke*. +// A helper for GetCallerStackTrace. 
+static inline bool InHookCaller(const void* caller) { + return ADDR_IN_ATTRIBUTE_SECTION(caller, google_malloc) || + ADDR_IN_ATTRIBUTE_SECTION(caller, malloc_hook) || + ADDR_IN_ATTRIBUTE_SECTION(caller, blink_malloc); + // We can use one section for everything except tcmalloc_or_debug + // due to its special linkage mode, which prevents merging of the sections. +} + +#undef ADDR_IN_ATTRIBUTE_SECTION + +static absl::once_flag in_hook_caller_once; + +static void InitializeInHookCaller() { + ABSL_INIT_ATTRIBUTE_SECTION_VARS(google_malloc); + if (ABSL_ATTRIBUTE_SECTION_START(google_malloc) == + ABSL_ATTRIBUTE_SECTION_STOP(google_malloc)) { + ABSL_RAW_LOG(ERROR, + "google_malloc section is missing, " + "thus InHookCaller is broken!"); + } + ABSL_INIT_ATTRIBUTE_SECTION_VARS(malloc_hook); + if (ABSL_ATTRIBUTE_SECTION_START(malloc_hook) == + ABSL_ATTRIBUTE_SECTION_STOP(malloc_hook)) { + ABSL_RAW_LOG(ERROR, + "malloc_hook section is missing, " + "thus InHookCaller is broken!"); + } + ABSL_INIT_ATTRIBUTE_SECTION_VARS(blink_malloc); + // The blink_malloc section is only expected to be present in binaries + // linking against the blink rendering engine in third_party/blink_headless. +} + +// We can improve behavior/compactness of this function +// if we pass a generic test function (with a generic arg) +// into the implementations for get_stack_trace_fn instead of the skip_count. +extern "C" int MallocHook_GetCallerStackTrace( + void** result, int max_depth, int skip_count, + MallocHook_GetStackTraceFn get_stack_trace_fn) { + if (!ABSL_HAVE_ATTRIBUTE_SECTION) { + // Fall back to get_stack_trace_fn and good old but fragile frame skip + // counts. + // Note: this path is inaccurate when a hook is not called directly by an + // allocation function but is daisy-chained through another hook, + // search for MallocHook::(Get|Set|Invoke)* to find such cases. +#ifdef NDEBUG + return get_stack_trace_fn(result, max_depth, skip_count); +#else + return get_stack_trace_fn(result, max_depth, skip_count + 1); +#endif + // due to -foptimize-sibling-calls in opt mode + // there's no need for extra frame skip here then + } + absl::call_once(in_hook_caller_once, InitializeInHookCaller); + // MallocHook caller determination via InHookCaller works, use it: + static const int kMaxSkip = 32 + 6 + 3; + // Constant tuned to do just one get_stack_trace_fn call below in practice + // and not get many frames that we don't actually need: + // currently max passed max_depth is 32, + // max passed/needed skip_count is 6 + // and 3 is to account for some hook daisy chaining. + static const int kStackSize = kMaxSkip + 1; + void* stack[kStackSize]; + int depth = + get_stack_trace_fn(stack, kStackSize, 1); // skip this function frame + if (depth == 0) + // silently propagate cases when get_stack_trace_fn does not work + return 0; + for (int i = depth - 1; i >= 0; --i) { // stack[0] is our immediate caller + if (InHookCaller(stack[i])) { + i += 1; // skip hook caller frame + depth -= i; // correct depth + if (depth > max_depth) depth = max_depth; + std::copy(stack + i, stack + i + depth, result); + if (depth < max_depth && depth + i == kStackSize) { + // get frames for the missing depth + depth += get_stack_trace_fn(result + depth, max_depth - depth, + 1 + kStackSize); + } + return depth; + } + } + ABSL_RAW_LOG(WARNING, + "Hooked allocator frame not found, returning empty trace"); + // If this happens try increasing kMaxSkip + // or else something must be wrong with InHookCaller, + // e.g. 
for every section used in InHookCaller + // all functions in that section must be inside the same library. + return 0; +} + +// On systems where we know how, we override mmap/munmap/mremap/sbrk +// to provide support for calling the related hooks (in addition, +// of course, to doing what these functions normally do). + +// The ABSL_MALLOC_HOOK_MMAP_DISABLE macro disables mmap/munmap interceptors. +// Dynamic tools that intercept mmap/munmap can't be linked together with +// malloc_hook interceptors. We disable the malloc_hook interceptors for the +// widely-used dynamic tools, i.e. ThreadSanitizer and MemorySanitizer, but +// still allow users to disable this in special cases that can't be easily +// detected during compilation, via -DABSL_MALLOC_HOOK_MMAP_DISABLE or #define +// ABSL_MALLOC_HOOK_MMAP_DISABLE. +// TODO(b/62370839): Remove MALLOC_HOOK_MMAP_DISABLE in CROSSTOOL for tsan and +// msan config; Replace MALLOC_HOOK_MMAP_DISABLE with +// ABSL_MALLOC_HOOK_MMAP_DISABLE for other special cases. +#if !defined(THREAD_SANITIZER) && !defined(MEMORY_SANITIZER) && \ + !defined(ABSL_MALLOC_HOOK_MMAP_DISABLE) && defined(__linux__) +#include "absl/base/internal/malloc_hook_mmap_linux.inc" + +#elif ABSL_HAVE_MMAP + +namespace absl { +namespace base_internal { + +// static +void* MallocHook::UnhookedMMap(void* start, size_t size, int protection, + int flags, int fd, off_t offset) { + void* result; + if (!MallocHook::InvokeMmapReplacement( + start, size, protection, flags, fd, offset, &result)) { + result = mmap(start, size, protection, flags, fd, offset); + } + return result; +} + +// static +int MallocHook::UnhookedMUnmap(void* start, size_t size) { + int result; + if (!MallocHook::InvokeMunmapReplacement(start, size, &result)) { + result = munmap(start, size); + } + return result; +} + +} // namespace base_internal +} // namespace absl + +#endif diff --git a/absl/base/internal/malloc_hook.h b/absl/base/internal/malloc_hook.h new file mode 100644 index 000000000000..ed5cf2e65dd6 --- /dev/null +++ b/absl/base/internal/malloc_hook.h @@ -0,0 +1,333 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Some of our malloc implementations can invoke the following hooks whenever +// memory is allocated or deallocated. MallocHook is thread-safe, and things +// you do before calling AddFooHook(MyHook) are visible to any resulting calls +// to MyHook. Hooks must be thread-safe. If you write: +// +// CHECK(MallocHook::AddNewHook(&MyNewHook)); +// +// MyNewHook will be invoked in subsequent calls in the current thread, but +// there are no guarantees on when it might be invoked in other threads. +// +// There are a limited number of slots available for each hook type. Add*Hook +// will return false if there are no slots available. Remove*Hook will return +// false if the given hook was not already installed. +// +// The order in which individual hooks are called in Invoke*Hook is undefined. 
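As a concrete (invented) instance of the usage pattern described above, a hook that tallies allocated bytes; MyNewHook and total_new_bytes are illustrative names, not part of this commit:

#include <atomic>
#include <cstddef>

#include "absl/base/internal/malloc_hook.h"

namespace {

std::atomic<size_t> total_new_bytes{0};

// Hooks must be thread-safe and should not allocate.
void MyNewHook(const void* ptr, size_t size) {
  if (ptr != nullptr) {
    total_new_bytes.fetch_add(size, std::memory_order_relaxed);
  }
}

}  // namespace

bool InstallMyNewHook() {
  // Returns false if every slot for this hook type is already taken.
  return absl::base_internal::MallocHook::AddNewHook(&MyNewHook);
}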
+// +// It is safe for a hook to remove itself within Invoke*Hook and add other +// hooks. Any hooks added inside a hook invocation (for the same hook type) +// will not be invoked for the current invocation. +// +// One important user of these hooks is the heap profiler. +// +// CAVEAT: If you add new MallocHook::Invoke* calls then those calls must be +// directly in the code of the (de)allocation function that is provided to the +// user and that function must have an ABSL_ATTRIBUTE_SECTION(malloc_hook) +// attribute. +// +// Note: the Invoke*Hook() functions are defined in malloc_hook-inl.h. If you +// need to invoke a hook (which you shouldn't unless you're part of tcmalloc), +// be sure to #include malloc_hook-inl.h in addition to malloc_hook.h. +// +// NOTE FOR C USERS: If you want to use malloc_hook functionality from +// a C program, #include malloc_hook_c.h instead of this file. +// +// IWYU pragma: private, include "base/malloc_hook.h" + +#ifndef ABSL_BASE_INTERNAL_MALLOC_HOOK_H_ +#define ABSL_BASE_INTERNAL_MALLOC_HOOK_H_ + +#include <sys/types.h> +#include <cstddef> + +#include "absl/base/config.h" +#include "absl/base/internal/malloc_hook_c.h" +#include "absl/base/port.h" + +namespace absl { +namespace base_internal { + +// Note: malloc_hook_c.h defines MallocHook_*Hook and +// MallocHook_{Add,Remove}*Hook. The version of these inside the MallocHook +// class are defined in terms of the malloc_hook_c version. See malloc_hook_c.h +// for details of these types/functions. + +class MallocHook { + public: + // The NewHook is invoked whenever an object is being allocated. + // Object pointer and size are passed in. + // It may be passed null pointer if the allocator returned null. + typedef MallocHook_NewHook NewHook; + inline static bool AddNewHook(NewHook hook) { + return MallocHook_AddNewHook(hook); + } + inline static bool RemoveNewHook(NewHook hook) { + return MallocHook_RemoveNewHook(hook); + } + inline static void InvokeNewHook(const void* ptr, size_t size); + + // The DeleteHook is invoked whenever an object is being deallocated. + // Object pointer is passed in. + // It may be passed null pointer if the caller is trying to delete null. + typedef MallocHook_DeleteHook DeleteHook; + inline static bool AddDeleteHook(DeleteHook hook) { + return MallocHook_AddDeleteHook(hook); + } + inline static bool RemoveDeleteHook(DeleteHook hook) { + return MallocHook_RemoveDeleteHook(hook); + } + inline static void InvokeDeleteHook(const void* ptr); + + // The SampledNewHook is invoked for some subset of object allocations + // according to the sampling policy of an allocator such as tcmalloc. + // SampledAlloc has the following fields: + // * AllocHandle handle: to be set to an effectively unique value (in this + // process) by allocator. + // * size_t allocated_size: space actually used by allocator to host + // the object. + // * int stack_depth and const void* stack: invocation stack for + // the allocation. + // The allocator invoking the hook should record the handle value and later + // call InvokeSampledDeleteHook() with that value. 
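A sketch of a matching pair of sampled-allocation hooks. The names are invented, and ABSL_RAW_LOG (from raw_logging.h, added later in this commit) is used because hooks should avoid allocating:

#include "absl/base/internal/malloc_hook.h"
#include "absl/base/internal/raw_logging.h"

namespace {

void MySampledNewHook(
    const absl::base_internal::MallocHook::SampledAlloc* sampled_alloc) {
  ABSL_RAW_LOG(INFO, "sampled alloc: handle=%lld size=%zu depth=%d",
               static_cast<long long>(sampled_alloc->handle),
               sampled_alloc->allocated_size, sampled_alloc->stack_depth);
}

void MySampledDeleteHook(absl::base_internal::MallocHook::AllocHandle handle) {
  ABSL_RAW_LOG(INFO, "sampled free: handle=%lld",
               static_cast<long long>(handle));
}

}  // namespace

bool InstallSampledHooks() {
  using absl::base_internal::MallocHook;
  return MallocHook::AddSampledNewHook(&MySampledNewHook) &&
         MallocHook::AddSampledDeleteHook(&MySampledDeleteHook);
}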
+ typedef MallocHook_SampledNewHook SampledNewHook; + typedef MallocHook_SampledAlloc SampledAlloc; + inline static bool AddSampledNewHook(SampledNewHook hook) { + return MallocHook_AddSampledNewHook(hook); + } + inline static bool RemoveSampledNewHook(SampledNewHook hook) { + return MallocHook_RemoveSampledNewHook(hook); + } + inline static void InvokeSampledNewHook(const SampledAlloc* sampled_alloc); + + // The SampledDeleteHook is invoked whenever an object previously chosen + // by an allocator for sampling is being deallocated. + // The handle identifying the object --as previously chosen by + // InvokeSampledNewHook()-- is passed in. + typedef MallocHook_SampledDeleteHook SampledDeleteHook; + typedef MallocHook_AllocHandle AllocHandle; + inline static bool AddSampledDeleteHook(SampledDeleteHook hook) { + return MallocHook_AddSampledDeleteHook(hook); + } + inline static bool RemoveSampledDeleteHook(SampledDeleteHook hook) { + return MallocHook_RemoveSampledDeleteHook(hook); + } + inline static void InvokeSampledDeleteHook(AllocHandle handle); + + // The PreMmapHook is invoked with mmap's or mmap64's arguments just + // before the mmap/mmap64 call is actually made. Such a hook may be useful + // in memory limited contexts, to catch allocations that will exceed + // a memory limit, and take outside actions to increase that limit. + typedef MallocHook_PreMmapHook PreMmapHook; + inline static bool AddPreMmapHook(PreMmapHook hook) { + return MallocHook_AddPreMmapHook(hook); + } + inline static bool RemovePreMmapHook(PreMmapHook hook) { + return MallocHook_RemovePreMmapHook(hook); + } + inline static void InvokePreMmapHook(const void* start, + size_t size, + int protection, + int flags, + int fd, + off_t offset); + + // The MmapReplacement is invoked with mmap's arguments and place to put the + // result into after the PreMmapHook but before the mmap/mmap64 call is + // actually made. + // The MmapReplacement should return true if it handled the call, or false + // if it is still necessary to call mmap/mmap64. + // This should be used only by experts, and users must be be + // extremely careful to avoid recursive calls to mmap. The replacement + // should be async signal safe. + // Only one MmapReplacement is supported. After setting an MmapReplacement + // you must call RemoveMmapReplacement before calling SetMmapReplacement + // again. + typedef MallocHook_MmapReplacement MmapReplacement; + inline static bool SetMmapReplacement(MmapReplacement hook) { + return MallocHook_SetMmapReplacement(hook); + } + inline static bool RemoveMmapReplacement(MmapReplacement hook) { + return MallocHook_RemoveMmapReplacement(hook); + } + inline static bool InvokeMmapReplacement(const void* start, + size_t size, + int protection, + int flags, + int fd, + off_t offset, + void** result); + + + // The MmapHook is invoked with mmap's return value and arguments whenever + // a region of memory has been just mapped. + // It may be passed MAP_FAILED if the mmap failed. + typedef MallocHook_MmapHook MmapHook; + inline static bool AddMmapHook(MmapHook hook) { + return MallocHook_AddMmapHook(hook); + } + inline static bool RemoveMmapHook(MmapHook hook) { + return MallocHook_RemoveMmapHook(hook); + } + inline static void InvokeMmapHook(const void* result, + const void* start, + size_t size, + int protection, + int flags, + int fd, + off_t offset); + + // The MunmapReplacement is invoked with munmap's arguments and place to put + // the result into just before the munmap call is actually made. 
+ // The MunmapReplacement should return true if it handled the call, or false + // if it is still necessary to call munmap. + // This should be used only by experts. The replacement should be + // async signal safe. + // Only one MunmapReplacement is supported. After setting an + // MunmapReplacement you must call RemoveMunmapReplacement before + // calling SetMunmapReplacement again. + typedef MallocHook_MunmapReplacement MunmapReplacement; + inline static bool SetMunmapReplacement(MunmapReplacement hook) { + return MallocHook_SetMunmapReplacement(hook); + } + inline static bool RemoveMunmapReplacement(MunmapReplacement hook) { + return MallocHook_RemoveMunmapReplacement(hook); + } + inline static bool InvokeMunmapReplacement(const void* start, + size_t size, + int* result); + + // The MunmapHook is invoked with munmap's arguments just before the munmap + // call is actually made. + // TODO(maxim): Rename this to PreMunmapHook for consistency with PreMmapHook + // and PreSbrkHook. + typedef MallocHook_MunmapHook MunmapHook; + inline static bool AddMunmapHook(MunmapHook hook) { + return MallocHook_AddMunmapHook(hook); + } + inline static bool RemoveMunmapHook(MunmapHook hook) { + return MallocHook_RemoveMunmapHook(hook); + } + inline static void InvokeMunmapHook(const void* start, size_t size); + + // The MremapHook is invoked with mremap's return value and arguments + // whenever a region of memory has been just remapped. + typedef MallocHook_MremapHook MremapHook; + inline static bool AddMremapHook(MremapHook hook) { + return MallocHook_AddMremapHook(hook); + } + inline static bool RemoveMremapHook(MremapHook hook) { + return MallocHook_RemoveMremapHook(hook); + } + inline static void InvokeMremapHook(const void* result, + const void* old_addr, + size_t old_size, + size_t new_size, + int flags, + const void* new_addr); + + // The PreSbrkHook is invoked with sbrk's argument just before sbrk is called + // -- except when the increment is 0. This is because sbrk(0) is often called + // to get the top of the memory stack, and is not actually a + // memory-allocation call. It may be useful in memory-limited contexts, + // to catch allocations that will exceed the limit and take outside + // actions to increase such a limit. + typedef MallocHook_PreSbrkHook PreSbrkHook; + inline static bool AddPreSbrkHook(PreSbrkHook hook) { + return MallocHook_AddPreSbrkHook(hook); + } + inline static bool RemovePreSbrkHook(PreSbrkHook hook) { + return MallocHook_RemovePreSbrkHook(hook); + } + inline static void InvokePreSbrkHook(ptrdiff_t increment); + + // The SbrkHook is invoked with sbrk's result and argument whenever sbrk + // has just executed -- except when the increment is 0. + // This is because sbrk(0) is often called to get the top of the memory stack, + // and is not actually a memory-allocation call. + typedef MallocHook_SbrkHook SbrkHook; + inline static bool AddSbrkHook(SbrkHook hook) { + return MallocHook_AddSbrkHook(hook); + } + inline static bool RemoveSbrkHook(SbrkHook hook) { + return MallocHook_RemoveSbrkHook(hook); + } + inline static void InvokeSbrkHook(const void* result, ptrdiff_t increment); + + // Pointer to a absl::GetStackTrace implementation, following the API in + // base/stacktrace.h. + using GetStackTraceFn = int (*)(void**, int, int); + + // Get the current stack trace. Try to skip all routines up to and + // including the caller of MallocHook::Invoke*. 
+ // Use "skip_count" (similarly to absl::GetStackTrace from stacktrace.h) + // as a hint about how many routines to skip if better information + // is not available. + // Stack trace is filled into *result up to the size of max_depth. + // The actual number of stack frames filled is returned. + inline static int GetCallerStackTrace(void** result, int max_depth, + int skip_count, + GetStackTraceFn get_stack_trace_fn) { + return MallocHook_GetCallerStackTrace(result, max_depth, skip_count, + get_stack_trace_fn); + } + +#if ABSL_HAVE_MMAP + // Unhooked versions of mmap() and munmap(). These should be used + // only by experts, since they bypass heapchecking, etc. + // Note: These do not run hooks, but they still use the MmapReplacement + // and MunmapReplacement. + static void* UnhookedMMap(void* start, size_t size, int protection, int flags, + int fd, off_t offset); + static int UnhookedMUnmap(void* start, size_t size); +#endif + + private: + // Slow path versions of Invoke*Hook. + static void InvokeNewHookSlow(const void* ptr, + size_t size) ABSL_ATTRIBUTE_COLD; + static void InvokeDeleteHookSlow(const void* ptr) ABSL_ATTRIBUTE_COLD; + static void InvokeSampledNewHookSlow(const SampledAlloc* sampled_alloc) + ABSL_ATTRIBUTE_COLD; + static void InvokeSampledDeleteHookSlow(AllocHandle handle) + ABSL_ATTRIBUTE_COLD; + static void InvokePreMmapHookSlow(const void* start, size_t size, + int protection, int flags, int fd, + off_t offset) ABSL_ATTRIBUTE_COLD; + static void InvokeMmapHookSlow(const void* result, const void* start, + size_t size, int protection, int flags, int fd, + off_t offset) ABSL_ATTRIBUTE_COLD; + static bool InvokeMmapReplacementSlow(const void* start, size_t size, + int protection, int flags, int fd, + off_t offset, + void** result) ABSL_ATTRIBUTE_COLD; + static void InvokeMunmapHookSlow(const void* ptr, + size_t size) ABSL_ATTRIBUTE_COLD; + static bool InvokeMunmapReplacementSlow(const void* ptr, size_t size, + int* result) ABSL_ATTRIBUTE_COLD; + static void InvokeMremapHookSlow(const void* result, const void* old_addr, + size_t old_size, size_t new_size, int flags, + const void* new_addr) ABSL_ATTRIBUTE_COLD; + static void InvokePreSbrkHookSlow(ptrdiff_t increment) ABSL_ATTRIBUTE_COLD; + static void InvokeSbrkHookSlow(const void* result, + ptrdiff_t increment) ABSL_ATTRIBUTE_COLD; +}; + +} // namespace base_internal +} // namespace absl +#endif // ABSL_BASE_INTERNAL_MALLOC_HOOK_H_ diff --git a/absl/base/internal/malloc_hook_c.h b/absl/base/internal/malloc_hook_c.h new file mode 100644 index 000000000000..03b84ca627d2 --- /dev/null +++ b/absl/base/internal/malloc_hook_c.h @@ -0,0 +1,131 @@ +/* + * Copyright 2017 The Abseil Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * C shims for the C++ malloc_hook.h. See malloc_hook.h for details + * on how to use these. 
+ */ +#ifndef ABSL_BASE_INTERNAL_MALLOC_HOOK_C_H_ +#define ABSL_BASE_INTERNAL_MALLOC_HOOK_C_H_ + +#include <stddef.h> +#include <stdint.h> +#include <sys/types.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* Get the current stack trace. Try to skip all routines up to and + * including the caller of MallocHook::Invoke*. + * Use "skip_count" (similarly to absl::GetStackTrace from stacktrace.h) + * as a hint about how many routines to skip if better information + * is not available. + */ +typedef int (*MallocHook_GetStackTraceFn)(void**, int, int); +int MallocHook_GetCallerStackTrace(void** result, int max_depth, int skip_count, + MallocHook_GetStackTraceFn fn); + +/* All the MallocHook_{Add,Remove}*Hook functions below return 1 on success + * and 0 on failure. + */ + +typedef void (*MallocHook_NewHook)(const void* ptr, size_t size); +int MallocHook_AddNewHook(MallocHook_NewHook hook); +int MallocHook_RemoveNewHook(MallocHook_NewHook hook); + +typedef void (*MallocHook_DeleteHook)(const void* ptr); +int MallocHook_AddDeleteHook(MallocHook_DeleteHook hook); +int MallocHook_RemoveDeleteHook(MallocHook_DeleteHook hook); + +typedef int64_t MallocHook_AllocHandle; +typedef struct { + /* See malloc_hook.h for documentation for this struct. */ + MallocHook_AllocHandle handle; + size_t allocated_size; + int stack_depth; + const void* stack; +} MallocHook_SampledAlloc; +typedef void (*MallocHook_SampledNewHook)( + const MallocHook_SampledAlloc* sampled_alloc); +int MallocHook_AddSampledNewHook(MallocHook_SampledNewHook hook); +int MallocHook_RemoveSampledNewHook(MallocHook_SampledNewHook hook); + +typedef void (*MallocHook_SampledDeleteHook)(MallocHook_AllocHandle handle); +int MallocHook_AddSampledDeleteHook(MallocHook_SampledDeleteHook hook); +int MallocHook_RemoveSampledDeleteHook(MallocHook_SampledDeleteHook hook); + +typedef void (*MallocHook_PreMmapHook)(const void *start, + size_t size, + int protection, + int flags, + int fd, + off_t offset); +int MallocHook_AddPreMmapHook(MallocHook_PreMmapHook hook); +int MallocHook_RemovePreMmapHook(MallocHook_PreMmapHook hook); + +typedef void (*MallocHook_MmapHook)(const void* result, + const void* start, + size_t size, + int protection, + int flags, + int fd, + off_t offset); +int MallocHook_AddMmapHook(MallocHook_MmapHook hook); +int MallocHook_RemoveMmapHook(MallocHook_MmapHook hook); + +typedef int (*MallocHook_MmapReplacement)(const void* start, + size_t size, + int protection, + int flags, + int fd, + off_t offset, + void** result); +int MallocHook_SetMmapReplacement(MallocHook_MmapReplacement hook); +int MallocHook_RemoveMmapReplacement(MallocHook_MmapReplacement hook); + +typedef void (*MallocHook_MunmapHook)(const void* start, size_t size); +int MallocHook_AddMunmapHook(MallocHook_MunmapHook hook); +int MallocHook_RemoveMunmapHook(MallocHook_MunmapHook hook); + +typedef int (*MallocHook_MunmapReplacement)(const void* start, + size_t size, + int* result); +int MallocHook_SetMunmapReplacement(MallocHook_MunmapReplacement hook); +int MallocHook_RemoveMunmapReplacement(MallocHook_MunmapReplacement hook); + +typedef void (*MallocHook_MremapHook)(const void* result, + const void* old_addr, + size_t old_size, + size_t new_size, + int flags, + const void* new_addr); +int MallocHook_AddMremapHook(MallocHook_MremapHook hook); +int MallocHook_RemoveMremapHook(MallocHook_MremapHook hook); + +typedef void (*MallocHook_PreSbrkHook)(ptrdiff_t increment); +int MallocHook_AddPreSbrkHook(MallocHook_PreSbrkHook hook); +int 
MallocHook_RemovePreSbrkHook(MallocHook_PreSbrkHook hook); + +typedef void (*MallocHook_SbrkHook)(const void* result, ptrdiff_t increment); +int MallocHook_AddSbrkHook(MallocHook_SbrkHook hook); +int MallocHook_RemoveSbrkHook(MallocHook_SbrkHook hook); + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + +#endif /* ABSL_BASE_INTERNAL_MALLOC_HOOK_C_H_ */ diff --git a/absl/base/internal/malloc_hook_invoke.h b/absl/base/internal/malloc_hook_invoke.h new file mode 100644 index 000000000000..c08220cbcf36 --- /dev/null +++ b/absl/base/internal/malloc_hook_invoke.h @@ -0,0 +1,198 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +/// + +// This has the implementation details of malloc_hook that are needed +// to use malloc-hook inside the tcmalloc system. It does not hold +// any of the client-facing calls that are used to add new hooks. +// +// IWYU pragma: private, include "base/malloc_hook-inl.h" + +#ifndef ABSL_BASE_INTERNAL_MALLOC_HOOK_INVOKE_H_ +#define ABSL_BASE_INTERNAL_MALLOC_HOOK_INVOKE_H_ + +#include <sys/types.h> +#include <atomic> +#include <cstddef> + +#include "absl/base/internal/malloc_hook.h" + +namespace absl { +namespace base_internal { + +// Maximum of 7 hooks means that HookList is 8 words. +static constexpr int kHookListMaxValues = 7; + +// HookList: a class that provides synchronized insertions and removals and +// lockless traversal. Most of the implementation is in malloc_hook.cc. +template <typename T> +struct HookList { + static_assert(sizeof(T) <= sizeof(intptr_t), "T_should_fit_in_intptr_t"); + + // Adds value to the list. Note that duplicates are allowed. Thread-safe and + // blocking (acquires hooklist_spinlock). Returns true on success; false + // otherwise (failures include invalid value and no space left). + bool Add(T value); + + // Removes the first entry matching value from the list. Thread-safe and + // blocking (acquires hooklist_spinlock). Returns true on success; false + // otherwise (failures include invalid value and no value found). + bool Remove(T value); + + // Store up to n values of the list in output_array, and return the number of + // elements stored. Thread-safe and non-blocking. This is fast (one memory + // access) if the list is empty. + int Traverse(T* output_array, int n) const; + + // Fast inline implementation for fast path of Invoke*Hook. + bool empty() const { + // empty() is only used as an optimization to determine if we should call + // Traverse which has proper acquire loads. Memory reordering around a + // call to empty will either lead to an unnecessary Traverse call, or will + // miss invoking hooks, neither of which is a problem. + return priv_end.load(std::memory_order_relaxed) == 0; + } + + // This internal data is not private so that the class is an aggregate and can + // be initialized by the linker. Don't access this directly. Use the + // INIT_HOOK_LIST macro in malloc_hook.cc. + + // One more than the index of the last valid element in priv_data. 
During + // 'Remove' this may be past the last valid element in priv_data, but + // subsequent values will be 0. + std::atomic<int> priv_end; + std::atomic<intptr_t> priv_data[kHookListMaxValues]; +}; + +extern template struct HookList<MallocHook::NewHook>; + +extern HookList<MallocHook::NewHook> new_hooks_; +extern HookList<MallocHook::DeleteHook> delete_hooks_; +extern HookList<MallocHook::SampledNewHook> sampled_new_hooks_; +extern HookList<MallocHook::SampledDeleteHook> sampled_delete_hooks_; +extern HookList<MallocHook::PreMmapHook> premmap_hooks_; +extern HookList<MallocHook::MmapHook> mmap_hooks_; +extern HookList<MallocHook::MmapReplacement> mmap_replacement_; +extern HookList<MallocHook::MunmapHook> munmap_hooks_; +extern HookList<MallocHook::MunmapReplacement> munmap_replacement_; +extern HookList<MallocHook::MremapHook> mremap_hooks_; +extern HookList<MallocHook::PreSbrkHook> presbrk_hooks_; +extern HookList<MallocHook::SbrkHook> sbrk_hooks_; + +inline void MallocHook::InvokeNewHook(const void* ptr, size_t size) { + if (!absl::base_internal::new_hooks_.empty()) { + InvokeNewHookSlow(ptr, size); + } +} + +inline void MallocHook::InvokeDeleteHook(const void* ptr) { + if (!absl::base_internal::delete_hooks_.empty()) { + InvokeDeleteHookSlow(ptr); + } +} + +inline void MallocHook::InvokeSampledNewHook( + const SampledAlloc* sampled_alloc) { + if (!absl::base_internal::sampled_new_hooks_.empty()) { + InvokeSampledNewHookSlow(sampled_alloc); + } +} + +inline void MallocHook::InvokeSampledDeleteHook(AllocHandle handle) { + if (!absl::base_internal::sampled_delete_hooks_.empty()) { + InvokeSampledDeleteHookSlow(handle); + } +} + +inline void MallocHook::InvokePreMmapHook(const void* start, + size_t size, + int protection, + int flags, + int fd, + off_t offset) { + if (!absl::base_internal::premmap_hooks_.empty()) { + InvokePreMmapHookSlow(start, size, protection, flags, fd, offset); + } +} + +inline void MallocHook::InvokeMmapHook(const void* result, + const void* start, + size_t size, + int protection, + int flags, + int fd, + off_t offset) { + if (!absl::base_internal::mmap_hooks_.empty()) { + InvokeMmapHookSlow(result, start, size, protection, flags, fd, offset); + } +} + +inline bool MallocHook::InvokeMmapReplacement(const void* start, + size_t size, + int protection, + int flags, + int fd, + off_t offset, + void** result) { + if (!absl::base_internal::mmap_replacement_.empty()) { + return InvokeMmapReplacementSlow(start, size, + protection, flags, + fd, offset, + result); + } + return false; +} + +inline void MallocHook::InvokeMunmapHook(const void* start, size_t size) { + if (!absl::base_internal::munmap_hooks_.empty()) { + InvokeMunmapHookSlow(start, size); + } +} + +inline bool MallocHook::InvokeMunmapReplacement( + const void* start, size_t size, int* result) { + if (!absl::base_internal::mmap_replacement_.empty()) { + return InvokeMunmapReplacementSlow(start, size, result); + } + return false; +} + +inline void MallocHook::InvokeMremapHook(const void* result, + const void* old_addr, + size_t old_size, + size_t new_size, + int flags, + const void* new_addr) { + if (!absl::base_internal::mremap_hooks_.empty()) { + InvokeMremapHookSlow(result, old_addr, old_size, new_size, flags, new_addr); + } +} + +inline void MallocHook::InvokePreSbrkHook(ptrdiff_t increment) { + if (!absl::base_internal::presbrk_hooks_.empty() && increment != 0) { + InvokePreSbrkHookSlow(increment); + } +} + +inline void MallocHook::InvokeSbrkHook(const void* result, + ptrdiff_t increment) { + if 
(!absl::base_internal::sbrk_hooks_.empty() && increment != 0) { + InvokeSbrkHookSlow(result, increment); + } +} + +} // namespace base_internal +} // namespace absl +#endif // ABSL_BASE_INTERNAL_MALLOC_HOOK_INVOKE_H_ diff --git a/absl/base/internal/malloc_hook_mmap_linux.inc b/absl/base/internal/malloc_hook_mmap_linux.inc new file mode 100644 index 000000000000..059ded57626d --- /dev/null +++ b/absl/base/internal/malloc_hook_mmap_linux.inc @@ -0,0 +1,236 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// We define mmap() and mmap64(), which somewhat reimplements libc's mmap +// syscall stubs. Unfortunately libc only exports the stubs via weak symbols +// (which we're overriding with our mmap64() and mmap() wrappers) so we can't +// just call through to them. + +#ifndef __linux__ +# error Should only be including malloc_hook_mmap_linux.h on linux systems. +#endif + +#include <sys/mman.h> +#include <sys/types.h> +#ifdef __BIONIC__ +#include <sys/syscall.h> +#else +#include <syscall.h> +#endif + +#include <linux/unistd.h> +#include <unistd.h> +#include <cerrno> +#include <cstdarg> +#include <cstdint> + +#ifdef __mips__ +// Include definitions of the ABI currently in use. +#ifdef __BIONIC__ +// Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the +// definitions we need. +#include <asm/sgidefs.h> +#else +#include <sgidefs.h> +#endif // __BIONIC__ +#endif // __mips__ + +// SYS_mmap, SYS_munmap, and SYS_mremap are not defined in Android. +#ifdef __BIONIC__ +extern "C" void *__mmap2(void *, size_t, int, int, int, long); +#if defined(__NR_mmap) && !defined(SYS_mmap) +#define SYS_mmap __NR_mmap +#endif +#ifndef SYS_munmap +#define SYS_munmap __NR_munmap +#endif +#ifndef SYS_mremap +#define SYS_mremap __NR_mremap +#endif +#endif // __BIONIC__ + +// Platform specific logic extracted from +// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h +static inline void* do_mmap64(void* start, size_t length, int prot, + int flags, int fd, off64_t offset) __THROW { +#if defined(__i386__) || \ + defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \ + (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \ + (defined(__PPC__) && !defined(__PPC64__)) || \ + (defined(__s390__) && !defined(__s390x__)) + // On these architectures, implement mmap with mmap2. + static int pagesize = 0; + if (pagesize == 0) { + pagesize = getpagesize(); + } + if (offset < 0 || offset % pagesize != 0) { + errno = EINVAL; + return MAP_FAILED; + } +#ifdef __BIONIC__ + // SYS_mmap2 has problems on Android API level <= 16. + // Workaround by invoking __mmap2() instead. + return __mmap2(start, length, prot, flags, fd, offset / pagesize); +#else + return reinterpret_cast<void*>( + syscall(SYS_mmap2, start, length, prot, flags, fd, + static_cast<off_t>(offset / pagesize))); +#endif +#elif defined(__s390x__) + // On s390x, mmap() arguments are passed in memory. 
+  uint32_t buf[6] = {
+      reinterpret_cast<uint32_t>(start), static_cast<uint32_t>(length),
+      static_cast<uint32_t>(prot), static_cast<uint32_t>(flags),
+      static_cast<uint32_t>(fd), static_cast<uint32_t>(offset)};
+  return reinterpret_cast<void*>(syscall(SYS_mmap, buf));
+#elif defined(__x86_64__)
+  // The x32 ABI has 32 bit longs, but the syscall interface is 64 bit.
+  // We need to explicitly cast to an unsigned 64 bit type to avoid implicit
+  // sign extension. We can't cast pointers directly because those are
+  // 32 bits, and gcc will dump ugly warnings about casting from a pointer
+  // to an integer of a different size. We also need to make sure __off64_t
+  // isn't truncated to 32-bits under x32.
+  #define MMAP_SYSCALL_ARG(x) ((uint64_t)(uintptr_t)(x))
+  return reinterpret_cast<void*>(
+      syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length),
+              MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags),
+              MMAP_SYSCALL_ARG(fd), static_cast<uint64_t>(offset)));
+  #undef MMAP_SYSCALL_ARG
+#else  // Remaining 64-bit architectures.
+  static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit");
+  return reinterpret_cast<void*>(
+      syscall(SYS_mmap, start, length, prot, flags, fd, offset));
+#endif
+}
+
+// We use do_mmap64 abstraction to put MallocHook::InvokeMmapHook
+// calls right into mmap and mmap64, so that the stack frames in the caller's
+// stack are at the same offsets for all the calls of memory allocating
+// functions.
+
+// Put all callers of MallocHook::Invoke* in this module into
+// malloc_hook section,
+// so that MallocHook::GetCallerStackTrace can function accurately:
+
+// Make sure mmap doesn't get #define'd away by <sys/mman.h>
+# undef mmap
+
+extern "C" {
+ABSL_ATTRIBUTE_SECTION(malloc_hook)
+void* mmap64(void* start, size_t length, int prot, int flags, int fd,
+             off64_t offset) __THROW;
+ABSL_ATTRIBUTE_SECTION(malloc_hook)
+void* mmap(void* start, size_t length, int prot, int flags, int fd,
+           off_t offset) __THROW;
+ABSL_ATTRIBUTE_SECTION(malloc_hook)
+int munmap(void* start, size_t length) __THROW;
+ABSL_ATTRIBUTE_SECTION(malloc_hook)
+void* mremap(void* old_addr, size_t old_size, size_t new_size, int flags,
+             ...)
__THROW; +ABSL_ATTRIBUTE_SECTION(malloc_hook) void* sbrk(ptrdiff_t increment) __THROW; +} + +extern "C" void* mmap64(void *start, size_t length, int prot, int flags, + int fd, off64_t offset) __THROW { + absl::base_internal::MallocHook::InvokePreMmapHook(start, length, prot, flags, + fd, offset); + void *result; + if (!absl::base_internal::MallocHook::InvokeMmapReplacement( + start, length, prot, flags, fd, offset, &result)) { + result = do_mmap64(start, length, prot, flags, fd, offset); + } + absl::base_internal::MallocHook::InvokeMmapHook(result, start, length, prot, + flags, fd, offset); + return result; +} + +# if !defined(__USE_FILE_OFFSET64) || !defined(__REDIRECT_NTH) + +extern "C" void* mmap(void *start, size_t length, int prot, int flags, + int fd, off_t offset) __THROW { + absl::base_internal::MallocHook::InvokePreMmapHook(start, length, prot, flags, + fd, offset); + void *result; + if (!absl::base_internal::MallocHook::InvokeMmapReplacement( + start, length, prot, flags, fd, offset, &result)) { + result = do_mmap64(start, length, prot, flags, fd, + static_cast<size_t>(offset)); // avoid sign extension + } + absl::base_internal::MallocHook::InvokeMmapHook(result, start, length, prot, + flags, fd, offset); + return result; +} + +# endif // !defined(__USE_FILE_OFFSET64) || !defined(__REDIRECT_NTH) + +extern "C" int munmap(void* start, size_t length) __THROW { + absl::base_internal::MallocHook::InvokeMunmapHook(start, length); + int result; + if (!absl::base_internal::MallocHook::InvokeMunmapReplacement(start, length, + &result)) { + result = syscall(SYS_munmap, start, length); + } + return result; +} + +extern "C" void* mremap(void* old_addr, size_t old_size, size_t new_size, + int flags, ...) __THROW { + va_list ap; + va_start(ap, flags); + void *new_address = va_arg(ap, void *); + va_end(ap); + void* result = reinterpret_cast<void*>( + syscall(SYS_mremap, old_addr, old_size, new_size, flags, new_address)); + absl::base_internal::MallocHook::InvokeMremapHook( + result, old_addr, old_size, new_size, flags, new_address); + return result; +} + +// sbrk cannot be intercepted on Android as there is no mechanism to +// invoke the original sbrk (since there is no __sbrk as with glibc). +#if !defined(__BIONIC__) +// libc's version: +extern "C" void* __sbrk(ptrdiff_t increment); + +extern "C" void* sbrk(ptrdiff_t increment) __THROW { + absl::base_internal::MallocHook::InvokePreSbrkHook(increment); + void *result = __sbrk(increment); + absl::base_internal::MallocHook::InvokeSbrkHook(result, increment); + return result; +} +#endif // !defined(__BIONIC__) + +namespace absl { +namespace base_internal { + +/*static*/void* MallocHook::UnhookedMMap(void *start, size_t length, int prot, + int flags, int fd, off_t offset) { + void* result; + if (!MallocHook::InvokeMmapReplacement( + start, length, prot, flags, fd, offset, &result)) { + result = do_mmap64(start, length, prot, flags, fd, offset); + } + return result; +} + +/*static*/int MallocHook::UnhookedMUnmap(void *start, size_t length) { + int result; + if (!MallocHook::InvokeMunmapReplacement(start, length, &result)) { + result = syscall(SYS_munmap, start, length); + } + return result; +} + +} // namespace base_internal +} // namespace absl diff --git a/absl/base/internal/per_thread_tls.h b/absl/base/internal/per_thread_tls.h new file mode 100644 index 000000000000..7397451031b9 --- /dev/null +++ b/absl/base/internal/per_thread_tls.h @@ -0,0 +1,48 @@ +// Copyright 2017 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_ +#define ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_ + +// This header defines two macros: +// If the platform supports thread-local storage: +// ABSL_PER_THREAD_TLS_KEYWORD is the C keyword needed to declare a +// thread-local variable ABSL_PER_THREAD_TLS is 1 +// +// Otherwise: +// ABSL_PER_THREAD_TLS_KEYWORD is empty +// ABSL_PER_THREAD_TLS is 0 +// +// Microsoft C supports thread-local storage. +// GCC supports it if the appropriate version of glibc is available, +// which the programme can indicate by defining ABSL_HAVE_TLS + +#include "absl/base/port.h" // For ABSL_HAVE_TLS + +#if defined(ABSL_PER_THREAD_TLS) +#error ABSL_PER_THREAD_TLS cannot be directly set +#elif defined(ABSL_PER_THREAD_TLS_KEYWORD) +#error ABSL_PER_THREAD_TLS_KEYWORD cannot be directly set +#elif defined(ABSL_HAVE_TLS) +#define ABSL_PER_THREAD_TLS_KEYWORD __thread +#define ABSL_PER_THREAD_TLS 1 +#elif defined(_MSC_VER) +#define ABSL_PER_THREAD_TLS_KEYWORD __declspec(thread) +#define ABSL_PER_THREAD_TLS 1 +#else +#define ABSL_PER_THREAD_TLS_KEYWORD +#define ABSL_PER_THREAD_TLS 0 +#endif + +#endif // ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_ diff --git a/absl/base/internal/raw_logging.cc b/absl/base/internal/raw_logging.cc new file mode 100644 index 000000000000..d197d444a6f4 --- /dev/null +++ b/absl/base/internal/raw_logging.cc @@ -0,0 +1,225 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include <atomic> +#include <cassert> +#include <cstdarg> +#include <cstdio> +#include <cstdlib> +#include <cstring> + +#include "absl/base/config.h" +#include "absl/base/internal/atomic_hook.h" +#include "absl/base/internal/log_severity.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/port.h" + +// We know how to perform low-level writes to stderr in POSIX and Windows. For +// these platforms, we define the token ABSL_LOW_LEVEL_WRITE_SUPPORTED. +// Much of raw_logging.cc becomes a no-op when we can't output messages, +// although a FATAL ABSL_RAW_LOG message will still abort the process. + +// ABSL_HAVE_POSIX_WRITE is defined when the platform provides posix write() +// (as from unistd.h) +// +// This preprocessor token is also defined in raw_io.cc. If you need to copy +// this, consider moving both to config.h instead. 
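Circling back to per_thread_tls.h above, a minimal consumption sketch of the macro pair; the counter and the functions are invented for illustration:

#include "absl/base/internal/per_thread_tls.h"

#if ABSL_PER_THREAD_TLS
// Each thread gets its own counter.
static ABSL_PER_THREAD_TLS_KEYWORD int allocation_depth = 0;
#else
// No compiler-supported TLS; a real client would fall back to something like
// pthread_getspecific(). A plain global keeps this sketch short.
static int allocation_depth = 0;
#endif

void EnterAllocator() { ++allocation_depth; }
void LeaveAllocator() { --allocation_depth; }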
+#if defined(__linux__) || defined(__APPLE__) || defined(__Fuchsia__) || \ + defined(__GENCLAVE__) +#include <unistd.h> +#define ABSL_HAVE_POSIX_WRITE 1 +#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1 +#else +#undef ABSL_HAVE_POSIX_WRITE +#endif + +// ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall +// syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len); +// for low level operations that want to avoid libc. +#if defined(__linux__) && !defined(__ANDROID__) +#include <sys/syscall.h> +#define ABSL_HAVE_SYSCALL_WRITE 1 +#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1 +#else +#undef ABSL_HAVE_SYSCALL_WRITE +#endif + +#ifdef _WIN32 +#include <io.h> +#define ABSL_HAVE_RAW_IO 1 +#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1 +#else +#undef ABSL_HAVE_RAW_IO +#endif + +// TODO(gfalcon): We want raw-logging to work on as many platforms as possible. +// Explicitly #error out when not ABSL_LOW_LEVEL_WRITE_SUPPORTED, except for a +// whitelisted set of platforms for which we expect not to be able to raw log. + +ABSL_CONST_INIT static absl::base_internal::AtomicHook< + absl::raw_logging_internal::LogPrefixHook> log_prefix_hook; +ABSL_CONST_INIT static absl::base_internal::AtomicHook< + absl::raw_logging_internal::AbortHook> abort_hook; + +#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED +static const char kTruncated[] = " ... (message truncated)\n"; + +// sprintf the format to the buffer, adjusting *buf and *size to reflect the +// consumed bytes, and return whether the message fit without truncation. If +// truncation occurred, if possible leave room in the buffer for the message +// kTruncated[]. +inline static bool VADoRawLog(char** buf, int* size, + const char* format, va_list ap) { + int n = vsnprintf(*buf, *size, format, ap); + bool result = true; + if (n < 0 || n > *size) { + result = false; + if (static_cast<size_t>(*size) > sizeof(kTruncated)) { + n = *size - sizeof(kTruncated); // room for truncation message + } else { + n = 0; // no room for truncation message + } + } + *size -= n; + *buf += n; + return result; +} +#endif // ABSL_LOW_LEVEL_WRITE_SUPPORTED + +static constexpr int kLogBufSize = 3000; + +namespace absl { +namespace raw_logging_internal { +void SafeWriteToStderr(const char *s, size_t len); +} // namespace raw_logging_internal +} // namespace absl + +namespace { + +// CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths +// that invoke malloc() and getenv() that might acquire some locks. +// If this becomes a problem we should reimplement a subset of vsnprintf +// that does not need locks and malloc. +// E.g. google3/third_party/clearsilver/core/util/snprintf.c +// looks like such a reimplementation. + +// Helper for RawLog below. +// *DoRawLog writes to *buf of *size and move them past the written portion. +// It returns true iff there was no overflow or error. +bool DoRawLog(char** buf, int* size, const char* format, ...) + ABSL_PRINTF_ATTRIBUTE(3, 4); +bool DoRawLog(char** buf, int* size, const char* format, ...) 
{ + va_list ap; + va_start(ap, format); + int n = vsnprintf(*buf, *size, format, ap); + va_end(ap); + if (n < 0 || n > *size) return false; + *size -= n; + *buf += n; + return true; +} + +void RawLogVA(absl::LogSeverity severity, const char* file, int line, + const char* format, va_list ap) { + char buffer[kLogBufSize]; + char* buf = buffer; + int size = sizeof(buffer); +#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED + bool enabled = true; +#else + bool enabled = false; +#endif + +#ifdef ABSL_MIN_LOG_LEVEL + if (static_cast<int>(severity) < ABSL_MIN_LOG_LEVEL && + severity < absl::LogSeverity::kFatal) { + enabled = false; + } +#endif + + auto log_prefix_hook_ptr = log_prefix_hook.Load(); + if (log_prefix_hook_ptr) { + enabled = log_prefix_hook_ptr(severity, file, line, &buf, &size); + } else { + if (enabled) { + DoRawLog(&buf, &size, "[%s : %d] RAW: ", file, line); + } + } + const char* const prefix_end = buf; + +#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED + if (enabled) { + bool no_chop = VADoRawLog(&buf, &size, format, ap); + if (no_chop) { + DoRawLog(&buf, &size, "\n"); + } else { + DoRawLog(&buf, &size, "%s", kTruncated); + } + absl::raw_logging_internal::SafeWriteToStderr(buffer, strlen(buffer)); + } +#else + static_cast<void>(format); + static_cast<void>(ap); +#endif + + // Abort the process after logging a FATAL message, even if the output itself + // was suppressed. + if (severity == absl::LogSeverity::kFatal) { + abort_hook(file, line, buffer, prefix_end, buffer + kLogBufSize); + abort(); + } +} + +} // namespace + +namespace absl { +namespace raw_logging_internal { + +// Writes the provided buffer directly to stderr, in a safe, low-level manner. +// +// In POSIX this means calling write(), which is async-signal safe and does +// not malloc. If the platform supports the SYS_write syscall, we invoke that +// directly to side-step any libc interception. +void SafeWriteToStderr(const char *s, size_t len) { +#if defined(ABSL_HAVE_SYSCALL_WRITE) + syscall(SYS_write, STDERR_FILENO, s, len); +#elif defined(ABSL_HAVE_POSIX_WRITE) + write(STDERR_FILENO, s, len); +#elif defined(ABSL_HAVE_RAW_IO) + _write(/* stderr */ 2, s, len); +#else + // stderr logging unsupported on this platform + (void) s; + (void) len; +#endif +} + +void RawLog(absl::LogSeverity severity, const char* file, int line, + const char* format, ...) { + va_list ap; + va_start(ap, format); + RawLogVA(severity, file, line, format, ap); + va_end(ap); +} + +bool RawLoggingFullySupported() { +#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED + return true; +#else // !ABSL_LOW_LEVEL_WRITE_SUPPORTED + return false; +#endif // !ABSL_LOW_LEVEL_WRITE_SUPPORTED +} + +} // namespace raw_logging_internal +} // namespace absl diff --git a/absl/base/internal/raw_logging.h b/absl/base/internal/raw_logging.h new file mode 100644 index 000000000000..47cf4373db0b --- /dev/null +++ b/absl/base/internal/raw_logging.h @@ -0,0 +1,129 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
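To illustrate the buffer-advancing protocol shared by DoRawLog and VADoRawLog in raw_logging.cc above (an in-file sketch, not an exported API):

char buffer[kLogBufSize];  // kLogBufSize is 3000 above
char* buf = buffer;
int size = sizeof(buffer);

// Each successful call appends to the buffer and advances buf/size past the
// written bytes; a false return means the remaining space was too small.
DoRawLog(&buf, &size, "[%s : %d] RAW: ", "file.cc", 123);
DoRawLog(&buf, &size, "Failed foo with %i: %s\n", 22, "bad_file");

// buffer now holds the whole line and can be handed to
// SafeWriteToStderr(buffer, strlen(buffer)).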
+// +// Thread-safe logging routines that do not allocate any memory or +// acquire any locks, and can therefore be used by low-level memory +// allocation, synchronization, and signal-handling code. + +#ifndef ABSL_BASE_INTERNAL_RAW_LOGGING_H_ +#define ABSL_BASE_INTERNAL_RAW_LOGGING_H_ + +#include "absl/base/internal/log_severity.h" +#include "absl/base/macros.h" +#include "absl/base/port.h" + +// This is similar to LOG(severity) << format..., but +// * it is to be used ONLY by low-level modules that can't use normal LOG() +// * it is designed to be a low-level logger that does not allocate any +// memory and does not need any locks, hence: +// * it logs straight and ONLY to STDERR w/o buffering +// * it uses an explicit printf-format and arguments list +// * it will silently chop off really long message strings +// Usage example: +// ABSL_RAW_LOG(ERROR, "Failed foo with %i: %s", status, error); +// This will print an almost standard log line like this to stderr only: +// E0821 211317 file.cc:123] RAW: Failed foo with 22: bad_file +#define ABSL_RAW_LOG(severity, ...) \ + do { \ + constexpr const char* absl_raw_logging_internal_basename = \ + ::absl::raw_logging_internal::Basename(__FILE__, \ + sizeof(__FILE__) - 1); \ + ::absl::raw_logging_internal::RawLog(ABSL_RAW_LOGGING_INTERNAL_##severity, \ + absl_raw_logging_internal_basename, \ + __LINE__, __VA_ARGS__); \ + } while (0) + +// Similar to CHECK(condition) << message, but for low-level modules: +// we use only ABSL_RAW_LOG that does not allocate memory. +// We do not want to provide args list here to encourage this usage: +// if (!cond) ABSL_RAW_LOG(FATAL, "foo ...", hard_to_compute_args); +// so that the args are not computed when not needed. +#define ABSL_RAW_CHECK(condition, message) \ + do { \ + if (ABSL_PREDICT_FALSE(!(condition))) { \ + ABSL_RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \ + } \ + } while (0) + +#define ABSL_RAW_LOGGING_INTERNAL_INFO ::absl::LogSeverity::kInfo +#define ABSL_RAW_LOGGING_INTERNAL_WARNING ::absl::LogSeverity::kWarning +#define ABSL_RAW_LOGGING_INTERNAL_ERROR ::absl::LogSeverity::kError +#define ABSL_RAW_LOGGING_INTERNAL_FATAL ::absl::LogSeverity::kFatal +#define ABSL_RAW_LOGGING_INTERNAL_LEVEL(severity) \ + ::absl::NormalizeLogSeverity(severity) + +namespace absl { +namespace raw_logging_internal { + +// Helper function to implement ABSL_RAW_LOG +// Logs format... at "severity" level, reporting it +// as called from file:line. +// This does not allocate memory or acquire locks. +void RawLog(absl::LogSeverity severity, const char* file, int line, + const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5); + +// compile-time function to get the "base" filename, that is, the part of +// a filename after the last "/" or "\" path separator. The search starts at +// the end of the std::string; the second parameter is the length of the std::string. +constexpr const char* Basename(const char* fname, int offset) { + return offset == 0 || fname[offset - 1] == '/' || fname[offset - 1] == '\\' + ? fname + offset + : Basename(fname, offset - 1); +} + +// For testing only. +// Returns true if raw logging is fully supported. When it is not +// fully supported, no messages will be emitted, but a log at FATAL +// severity will cause an abort. +// +// TODO(gfalcon): Come up with a better name for this method. +bool RawLoggingFullySupported(); + +// Function type for a raw_logging customization hook for suppressing messages +// by severity, and for writing custom prefixes on non-suppressed messages. 
+// +// The installed hook is called for every raw log invocation. The message will +// be logged to stderr only if the hook returns true. FATAL errors will cause +// the process to abort, even if writing to stderr is suppressed. The hook is +// also provided with an output buffer, where it can write a custom log message +// prefix. +// +// The raw_logging system does not allocate memory or grab locks. User-provided +// hooks must avoid these operations, and must not throw exceptions. +// +// 'severity' is the severity level of the message being written. +// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro +// was located. +// 'buffer' and 'buf_size' are pointers to the buffer and buffer size. If the +// hook writes a prefix, it must increment *buffer and decrement *buf_size +// accordingly. +using LogPrefixHook = bool (*)(absl::LogSeverity severity, const char* file, + int line, char** buffer, int* buf_size); + +// Function type for a raw_logging customization hook called to abort a process +// when a FATAL message is logged. If the provided AbortHook() returns, the +// logging system will call abort(). +// +// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro +// was located. +// The null-terminated logged message lives in the buffer between 'buf_start' +// and 'buf_end'. 'prefix_end' points to the first non-prefix character of the +// buffer (as written by the LogPrefixHook.) +using AbortHook = void (*)(const char* file, int line, const char* buf_start, + const char* prefix_end, const char* buf_end); + +} // namespace raw_logging_internal +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_RAW_LOGGING_H_ diff --git a/absl/base/internal/scheduling_mode.h b/absl/base/internal/scheduling_mode.h new file mode 100644 index 000000000000..b7560f30d793 --- /dev/null +++ b/absl/base/internal/scheduling_mode.h @@ -0,0 +1,54 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Core interfaces and definitions used by by low-level //base interfaces such +// as SpinLock. + +#ifndef ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_ +#define ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_ + +namespace absl { +namespace base_internal { + +// Used to describe how a thread may be scheduled. Typically associated with +// the declaration of a resource supporting synchronized access. +// +// SCHEDULE_COOPERATIVE_AND_KERNEL: +// Specifies that when waiting, a cooperative thread (e.g. a Fiber) may +// reschedule (using base::scheduling semantics); allowing other cooperative +// threads to proceed. +// +// SCHEDULE_KERNEL_ONLY: (Also described as "non-cooperative") +// Specifies that no cooperative scheduling semantics may be used, even if the +// current thread is itself cooperatively scheduled. This means that +// cooperative threads will NOT allow other cooperative threads to execute in +// their place while waiting for a resource of this type. Host operating system +// semantics (e.g. a futex) may still be used. 
+// +// When optional, clients should strongly prefer SCHEDULE_COOPERATIVE_AND_KERNEL +// by default. SCHEDULE_KERNEL_ONLY should only be used for resources on which +// base::scheduling (e.g. the implementation of a Scheduler) may depend. +// +// NOTE: Cooperative resources may not be nested below non-cooperative ones. +// This means that it is invalid to to acquire a SCHEDULE_COOPERATIVE_AND_KERNEL +// resource if a SCHEDULE_KERNEL_ONLY resource is already held. +enum SchedulingMode { + SCHEDULE_KERNEL_ONLY = 0, // Allow scheduling only the host OS. + SCHEDULE_COOPERATIVE_AND_KERNEL, // Also allow cooperative scheduling. +}; + +} // namespace base_internal +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_ diff --git a/absl/base/internal/spinlock.cc b/absl/base/internal/spinlock.cc new file mode 100644 index 000000000000..6257bfcec6f8 --- /dev/null +++ b/absl/base/internal/spinlock.cc @@ -0,0 +1,243 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/spinlock.h" + +#include <algorithm> +#include <atomic> + +#include "absl/base/casts.h" +#include "absl/base/internal/atomic_hook.h" +#include "absl/base/internal/cycleclock.h" +#include "absl/base/internal/spinlock_wait.h" +#include "absl/base/internal/sysinfo.h" /* For NumCPUs() */ + +// Description of lock-word: +// 31..00: [............................3][2][1][0] +// +// [0]: kSpinLockHeld +// [1]: kSpinLockCooperative +// [2]: kSpinLockDisabledScheduling +// [31..3]: ONLY kSpinLockSleeper OR +// Wait time in cycles >> PROFILE_TIMESTAMP_SHIFT +// +// Detailed descriptions: +// +// Bit [0]: The lock is considered held iff kSpinLockHeld is set. +// +// Bit [1]: Eligible waiters (e.g. Fibers) may co-operatively reschedule when +// contended iff kSpinLockCooperative is set. +// +// Bit [2]: This bit is exclusive from bit [1]. It is used only by a +// non-cooperative lock. When set, indicates that scheduling was +// successfully disabled when the lock was acquired. May be unset, +// even if non-cooperative, if a ThreadIdentity did not yet exist at +// time of acquisition. +// +// Bit [3]: If this is the only upper bit ([31..3]) set then this lock was +// acquired without contention, however, at least one waiter exists. +// +// Otherwise, bits [31..3] represent the time spent by the current lock +// holder to acquire the lock. There may be outstanding waiter(s). + +namespace absl { +namespace base_internal { + +static int adaptive_spin_count = 0; + +namespace { +struct SpinLock_InitHelper { + SpinLock_InitHelper() { + // On multi-cpu machines, spin for longer before yielding + // the processor or sleeping. Reduces idle time significantly. + if (base_internal::NumCPUs() > 1) { + adaptive_spin_count = 1000; + } + } +}; + +// Hook into global constructor execution: +// We do not do adaptive spinning before that, +// but nothing lock-intensive should be going on at that time. 
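Stepping back to the lock-word layout documented at the top of this file: bits [31..3] hold the time a contended acquisition spent waiting, stored in units of 2^PROFILE_TIMESTAMP_SHIFT (128) cycles, and the EncodeWaitCycles()/DecodeWaitCycles() routines further down implement the clamping and shifting. The following is a standalone sketch of the same arithmetic, with the constants copied from this file and from spinlock.h; the Encode()/Decode() names are illustrative, not the real API.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <limits>

// Constants mirroring spinlock.cc/spinlock.h: the low 3 bits are flag bits,
// the remaining 29 bits store the wait time divided by 2^7 cycles.
constexpr int kProfileTimestampShift = 7;
constexpr int kLockwordReservedShift = 3;
constexpr uint32_t kSpinLockSleeper = 8;

uint32_t Encode(int64_t wait_start, int64_t wait_end) {
  const int64_t kMaxWaitTime =
      std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
  int64_t scaled = (wait_end - wait_start) >> kProfileTimestampShift;
  uint32_t clamped = static_cast<uint32_t>(
      std::min(scaled, kMaxWaitTime) << kLockwordReservedShift);
  // Avoid colliding with the "uncontended, but a waiter exists" sentinel.
  return clamped == kSpinLockSleeper
             ? kSpinLockSleeper + (1u << kLockwordReservedShift)
             : clamped;
}

uint64_t Decode(uint32_t lock_value) {
  const uint32_t kWaitTimeMask = ~uint32_t{7};  // everything above the flag bits
  return static_cast<uint64_t>(lock_value & kWaitTimeMask)
         << (kProfileTimestampShift - kLockwordReservedShift);
}

int main() {
  // One billion cycles of waiting survives the round trip to within 2^7 cycles.
  uint32_t w = Encode(0, 1000000000);
  printf("stored=0x%08x decoded=%llu cycles\n", w,
         static_cast<unsigned long long>(Decode(w)));
}

The special case matters: a stored value equal to kSpinLockSleeper alone means "acquired without contention, but at least one waiter exists", so the encoder bumps any real wait time that happens to collide with that sentinel.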
+static SpinLock_InitHelper init_helper; + +ABSL_CONST_INIT static base_internal::AtomicHook<void (*)(const void *lock, + int64_t wait_cycles)> + submit_profile_data; + +} // namespace + +void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock, + int64_t wait_cycles)) { + submit_profile_data.Store(fn); +} + +static inline bool IsCooperative( + base_internal::SchedulingMode scheduling_mode) { + return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL; +} + +// Uncommon constructors. +SpinLock::SpinLock(base_internal::SchedulingMode mode) + : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) { + ABSL_TSAN_MUTEX_CREATE(this, 0); +} + +SpinLock::SpinLock(base_internal::LinkerInitialized, + base_internal::SchedulingMode mode) { + ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_linker_init); + if (IsCooperative(mode)) { + InitLinkerInitializedAndCooperative(); + } + // Otherwise, lockword_ is already initialized. +} + +// Static (linker initialized) spinlocks always start life as functional +// non-cooperative locks. When their static constructor does run, it will call +// this initializer to augment the lockword with the cooperative bit. By +// actually taking the lock when we do this we avoid the need for an atomic +// operation in the regular unlock path. +// +// SlowLock() must be careful to re-test for this bit so that any outstanding +// waiters may be upgraded to cooperative status. +void SpinLock::InitLinkerInitializedAndCooperative() { + Lock(); + lockword_.fetch_or(kSpinLockCooperative, std::memory_order_relaxed); + Unlock(); +} + +// Monitor the lock to see if its value changes within some time period +// (adaptive_spin_count loop iterations). A timestamp indicating +// when the thread initially started waiting for the lock is passed in via +// the initial_wait_timestamp value. The total wait time in cycles for the +// lock is returned in the wait_cycles parameter. The last value read +// from the lock is returned from the method. +uint32_t SpinLock::SpinLoop(int64_t initial_wait_timestamp, + uint32_t *wait_cycles) { + int c = adaptive_spin_count; + uint32_t lock_value; + do { + lock_value = lockword_.load(std::memory_order_relaxed); + } while ((lock_value & kSpinLockHeld) != 0 && --c > 0); + uint32_t spin_loop_wait_cycles = + EncodeWaitCycles(initial_wait_timestamp, CycleClock::Now()); + *wait_cycles = spin_loop_wait_cycles; + + return TryLockInternal(lock_value, spin_loop_wait_cycles); +} + +void SpinLock::SlowLock() { + // The lock was not obtained initially, so this thread needs to wait for + // it. Record the current timestamp in the local variable wait_start_time + // so the total wait time can be stored in the lockword once this thread + // obtains the lock. + int64_t wait_start_time = CycleClock::Now(); + uint32_t wait_cycles; + uint32_t lock_value = SpinLoop(wait_start_time, &wait_cycles); + + int lock_wait_call_count = 0; + while ((lock_value & kSpinLockHeld) != 0) { + // If the lock is currently held, but not marked as having a sleeper, mark + // it as having a sleeper. + if ((lock_value & kWaitTimeMask) == 0) { + // Here, just "mark" that the thread is going to sleep. Don't store the + // lock wait time in the lock as that will cause the current lock + // owner to think it experienced contention. + if (lockword_.compare_exchange_strong( + lock_value, lock_value | kSpinLockSleeper, + std::memory_order_acquire, std::memory_order_relaxed)) { + // Successfully transitioned to kSpinLockSleeper. 
Pass + // kSpinLockSleeper to the SpinLockWait routine to properly indicate + // the last lock_value observed. + lock_value |= kSpinLockSleeper; + } else if ((lock_value & kSpinLockHeld) == 0) { + // Lock is free again, so try and acquire it before sleeping. The + // new lock state will be the number of cycles this thread waited if + // this thread obtains the lock. + lock_value = TryLockInternal(lock_value, wait_cycles); + continue; // Skip the delay at the end of the loop. + } + } + + base_internal::SchedulingMode scheduling_mode; + if ((lock_value & kSpinLockCooperative) != 0) { + scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL; + } else { + scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY; + } + // SpinLockDelay() calls into fiber scheduler, we need to see + // synchronization there to avoid false positives. + ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0); + // Wait for an OS specific delay. + base_internal::SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count, + scheduling_mode); + ABSL_TSAN_MUTEX_POST_DIVERT(this, 0); + // Spin again after returning from the wait routine to give this thread + // some chance of obtaining the lock. + lock_value = SpinLoop(wait_start_time, &wait_cycles); + } +} + +void SpinLock::SlowUnlock(uint32_t lock_value) { + base_internal::SpinLockWake(&lockword_, + false); // wake waiter if necessary + + // If our acquisition was contended, collect contentionz profile info. We + // reserve a unitary wait time to represent that a waiter exists without our + // own acquisition having been contended. + if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) { + const uint64_t wait_cycles = DecodeWaitCycles(lock_value); + ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0); + submit_profile_data(this, wait_cycles); + ABSL_TSAN_MUTEX_POST_DIVERT(this, 0); + } +} + +// We use the upper 29 bits of the lock word to store the time spent waiting to +// acquire this lock. This is reported by contentionz profiling. Since the +// lower bits of the cycle counter wrap very quickly on high-frequency +// processors we divide to reduce the granularity to 2^PROFILE_TIMESTAMP_SHIFT +// sized units. On a 4Ghz machine this will lose track of wait times greater +// than (2^29/4 Ghz)*128 =~ 17.2 seconds. Such waits should be extremely rare. +enum { PROFILE_TIMESTAMP_SHIFT = 7 }; +enum { LOCKWORD_RESERVED_SHIFT = 3 }; // We currently reserve the lower 3 bits. + +uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time, + int64_t wait_end_time) { + static const int64_t kMaxWaitTime = + std::numeric_limits<uint32_t>::max() >> LOCKWORD_RESERVED_SHIFT; + int64_t scaled_wait_time = + (wait_end_time - wait_start_time) >> PROFILE_TIMESTAMP_SHIFT; + + // Return a representation of the time spent waiting that can be stored in + // the lock word's upper bits. bit_cast is required as Atomic32 is signed. + const uint32_t clamped = static_cast<uint32_t>( + std::min(scaled_wait_time, kMaxWaitTime) << LOCKWORD_RESERVED_SHIFT); + + // bump up value if necessary to avoid returning kSpinLockSleeper. + const uint32_t after_spinlock_sleeper = + kSpinLockSleeper + (1 << LOCKWORD_RESERVED_SHIFT); + return clamped == kSpinLockSleeper ? after_spinlock_sleeper : clamped; +} + +uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) { + // Cast to uint32_t first to ensure bits [63:32] are cleared. 
+ const uint64_t scaled_wait_time = + static_cast<uint32_t>(lock_value & kWaitTimeMask); + return scaled_wait_time + << (PROFILE_TIMESTAMP_SHIFT - LOCKWORD_RESERVED_SHIFT); +} + +} // namespace base_internal +} // namespace absl diff --git a/absl/base/internal/spinlock.h b/absl/base/internal/spinlock.h new file mode 100644 index 000000000000..fa64ba65bbf2 --- /dev/null +++ b/absl/base/internal/spinlock.h @@ -0,0 +1,227 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Most users requiring mutual exclusion should use Mutex. +// SpinLock is provided for use in three situations: +// - for use in code that Mutex itself depends on +// - to get a faster fast-path release under low contention (without an +// atomic read-modify-write) In return, SpinLock has worse behaviour under +// contention, which is why Mutex is preferred in most situations. +// - for async signal safety (see below) + +// SpinLock is async signal safe. If a spinlock is used within a signal +// handler, all code that acquires the lock must ensure that the signal cannot +// arrive while they are holding the lock. Typically, this is done by blocking +// the signal. + +#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_ +#define ABSL_BASE_INTERNAL_SPINLOCK_H_ + +#include <atomic> + +#include "absl/base/dynamic_annotations.h" +#include "absl/base/internal/low_level_scheduling.h" +#include "absl/base/internal/tsan_mutex_interface.h" +#include "absl/base/port.h" +#include "absl/base/thread_annotations.h" + +namespace absl { +namespace base_internal { + +class LOCKABLE SpinLock { + public: + SpinLock() : lockword_(kSpinLockCooperative) { + ABSL_TSAN_MUTEX_CREATE(this, 0); + } + + // Special constructor for use with static SpinLock objects. E.g., + // + // static SpinLock lock(base_internal::kLinkerInitialized); + // + // When intialized using this constructor, we depend on the fact + // that the linker has already initialized the memory appropriately. + // A SpinLock constructed like this can be freely used from global + // initializers without worrying about the order in which global + // initializers run. + explicit SpinLock(base_internal::LinkerInitialized) { + // Does nothing; lockword_ is already initialized + ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_linker_init); + } + + // Constructors that allow non-cooperative spinlocks to be created for use + // inside thread schedulers. Normal clients should not use these. + explicit SpinLock(base_internal::SchedulingMode mode); + SpinLock(base_internal::LinkerInitialized, + base_internal::SchedulingMode mode); + + ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, 0); } + + // Acquire this SpinLock. + inline void Lock() EXCLUSIVE_LOCK_FUNCTION() { + ABSL_TSAN_MUTEX_PRE_LOCK(this, 0); + if (!TryLockImpl()) { + SlowLock(); + } + ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0); + } + + // Try to acquire this SpinLock without blocking and return true if the + // acquisition was successful. If the lock was not acquired, false is + // returned. 
If this SpinLock is free at the time of the call, TryLock + // will return true with high probability. + inline bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) { + ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock); + bool res = TryLockImpl(); + ABSL_TSAN_MUTEX_POST_LOCK( + this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed), + 0); + return res; + } + + // Release this SpinLock, which must be held by the calling thread. + inline void Unlock() UNLOCK_FUNCTION() { + ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0); + uint32_t lock_value = lockword_.load(std::memory_order_relaxed); + lockword_.store(lock_value & kSpinLockCooperative, + std::memory_order_release); + + if ((lock_value & kSpinLockDisabledScheduling) != 0) { + base_internal::SchedulingGuard::EnableRescheduling(true); + } + if ((lock_value & kWaitTimeMask) != 0) { + // Collect contentionz profile info, and speed the wakeup of any waiter. + // The wait_cycles value indicates how long this thread spent waiting + // for the lock. + SlowUnlock(lock_value); + } + ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0); + } + + // Determine if the lock is held. When the lock is held by the invoking + // thread, true will always be returned. Intended to be used as + // CHECK(lock.IsHeld()). + inline bool IsHeld() const { + return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0; + } + + protected: + // These should not be exported except for testing. + + // Store number of cycles between wait_start_time and wait_end_time in a + // lock value. + static uint32_t EncodeWaitCycles(int64_t wait_start_time, + int64_t wait_end_time); + + // Extract number of wait cycles in a lock value. + static uint64_t DecodeWaitCycles(uint32_t lock_value); + + // Provide access to protected method above. Use for testing only. + friend struct SpinLockTest; + + private: + // lockword_ is used to store the following: + // + // bit[0] encodes whether a lock is being held. + // bit[1] encodes whether a lock uses cooperative scheduling. + // bit[2] encodes whether a lock disables scheduling. + // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int. + enum { kSpinLockHeld = 1 }; + enum { kSpinLockCooperative = 2 }; + enum { kSpinLockDisabledScheduling = 4 }; + enum { kSpinLockSleeper = 8 }; + enum { kWaitTimeMask = // Includes kSpinLockSleeper. + ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling) }; + + uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles); + void InitLinkerInitializedAndCooperative(); + void SlowLock() ABSL_ATTRIBUTE_COLD; + void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD; + uint32_t SpinLoop(int64_t initial_wait_timestamp, uint32_t* wait_cycles); + + inline bool TryLockImpl() { + uint32_t lock_value = lockword_.load(std::memory_order_relaxed); + return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0; + } + + std::atomic<uint32_t> lockword_; + + SpinLock(const SpinLock&) = delete; + SpinLock& operator=(const SpinLock&) = delete; +}; + +// Corresponding locker object that arranges to acquire a spinlock for +// the duration of a C++ scope. +class SCOPED_LOCKABLE SpinLockHolder { + public: + inline explicit SpinLockHolder(SpinLock* l) EXCLUSIVE_LOCK_FUNCTION(l) + : lock_(l) { + l->Lock(); + } + inline ~SpinLockHolder() UNLOCK_FUNCTION() { lock_->Unlock(); } + + SpinLockHolder(const SpinLockHolder&) = delete; + SpinLockHolder& operator=(const SpinLockHolder&) = delete; + + private: + SpinLock* lock_; +}; + +// Register a hook for profiling support. 
+// +// The function pointer registered here will be called whenever a spinlock is +// contended. The callback is given an opaque handle to the contended spinlock +// and the number of wait cycles. This is thread-safe, but only a single +// profiler can be registered. It is an error to call this function multiple +// times with different arguments. +void RegisterSpinLockProfiler(void (*fn)(const void* lock, + int64_t wait_cycles)); + +//------------------------------------------------------------------------------ +// Public interface ends here. +//------------------------------------------------------------------------------ + +// If (result & kSpinLockHeld) == 0, then *this was successfully locked. +// Otherwise, returns last observed value for lockword_. +inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value, + uint32_t wait_cycles) { + if ((lock_value & kSpinLockHeld) != 0) { + return lock_value; + } + + uint32_t sched_disabled_bit = 0; + if ((lock_value & kSpinLockCooperative) == 0) { + // For non-cooperative locks we must make sure we mark ourselves as + // non-reschedulable before we attempt to CompareAndSwap. + if (base_internal::SchedulingGuard::DisableRescheduling()) { + sched_disabled_bit = kSpinLockDisabledScheduling; + } + } + + if (lockword_.compare_exchange_strong( + lock_value, + kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit, + std::memory_order_acquire, std::memory_order_relaxed)) { + } else { + base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit); + } + + return lock_value; +} + +} // namespace base_internal +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_SPINLOCK_H_ diff --git a/absl/base/internal/spinlock_posix.inc b/absl/base/internal/spinlock_posix.inc new file mode 100644 index 000000000000..0098c1c76016 --- /dev/null +++ b/absl/base/internal/spinlock_posix.inc @@ -0,0 +1,46 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is a Posix-specific part of spinlock_wait.cc + +#include <sched.h> +#include <atomic> +#include <ctime> +#include <cerrno> + +#include "absl/base/internal/scheduling_mode.h" +#include "absl/base/port.h" + +extern "C" { + +ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay( + std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop, + absl::base_internal::SchedulingMode /* mode */) { + int save_errno = errno; + if (loop == 0) { + } else if (loop == 1) { + sched_yield(); + } else { + struct timespec tm; + tm.tv_sec = 0; + tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop); + nanosleep(&tm, nullptr); + } + errno = save_errno; +} + +ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake( + std::atomic<uint32_t>* /* lock_word */, bool /* all */) {} + +} // extern "C" diff --git a/absl/base/internal/spinlock_wait.cc b/absl/base/internal/spinlock_wait.cc new file mode 100644 index 000000000000..0fd36286629d --- /dev/null +++ b/absl/base/internal/spinlock_wait.cc @@ -0,0 +1,77 @@ +// Copyright 2017 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The OS-specific header included below must provide two calls: +// base::subtle::SpinLockDelay() and base::subtle::SpinLockWake(). +// See spinlock_wait.h for the specs. + +#include <atomic> +#include <cstdint> + +#include "absl/base/internal/spinlock_wait.h" + +#if defined(_WIN32) +#include "absl/base/internal/spinlock_win32.inc" +#else +#include "absl/base/internal/spinlock_posix.inc" +#endif + +namespace absl { +namespace base_internal { + +// See spinlock_wait.h for spec. +uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n, + const SpinLockWaitTransition trans[], + base_internal::SchedulingMode scheduling_mode) { + for (int loop = 0; ; loop++) { + uint32_t v = w->load(std::memory_order_acquire); + int i; + for (i = 0; i != n && v != trans[i].from; i++) { + } + if (i == n) { + SpinLockDelay(w, v, loop, scheduling_mode); // no matching transition + } else if (trans[i].to == v || // null transition + w->compare_exchange_strong(v, trans[i].to, + std::memory_order_acquire, + std::memory_order_relaxed)) { + if (trans[i].done) return v; + } + } +} + +static std::atomic<uint64_t> delay_rand; + +// Return a suggested delay in nanoseconds for iteration number "loop" +int SpinLockSuggestedDelayNS(int loop) { + // Weak pseudo-random number generator to get some spread between threads + // when many are spinning. + uint64_t r = delay_rand.load(std::memory_order_relaxed); + r = 0x5deece66dLL * r + 0xb; // numbers from nrand48() + delay_rand.store(r, std::memory_order_relaxed); + + r <<= 16; // 48-bit random number now in top 48-bits. + if (loop < 0 || loop > 32) { // limit loop to 0..32 + loop = 32; + } + // loop>>3 cannot exceed 4 because loop cannot exceed 32. + // Select top 20..24 bits of lower 48 bits, + // giving approximately 0ms to 16ms. + // Mean is exponential in loop for first 32 iterations, then 8ms. + // The futex path multiplies this by 16, since we expect explicit wakeups + // almost always on that path. + return r >> (44 - (loop >> 3)); +} + +} // namespace base_internal +} // namespace absl diff --git a/absl/base/internal/spinlock_wait.h b/absl/base/internal/spinlock_wait.h new file mode 100644 index 000000000000..5432c1ce2094 --- /dev/null +++ b/absl/base/internal/spinlock_wait.h @@ -0,0 +1,94 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
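To make the backoff in SpinLockSuggestedDelayNS() above concrete: the function steps a 48-bit linear-congruential generator, shifts the state into the top of a 64-bit word, and keeps more of the high bits the longer the caller has been spinning, so the suggested sleep roughly doubles every eight iterations until it saturates near 16 ms. A standalone copy of that arithmetic, with the state kept in a plain variable rather than the atomic used above:

#include <cstdint>
#include <cstdio>

static uint64_t delay_rand_state = 1;  // the real code uses a std::atomic

static int SuggestedDelayNS(int loop) {
  uint64_t r = delay_rand_state;
  r = 0x5deece66dULL * r + 0xb;  // nrand48()-style LCG step
  delay_rand_state = r;
  r <<= 16;                      // 48-bit state now occupies the top bits
  if (loop < 0 || loop > 32) loop = 32;
  // The shift shrinks by 1 for every 8 iterations, so the typical delay
  // roughly doubles every 8 iterations, topping out near 16 ms.
  return static_cast<int>(r >> (44 - (loop >> 3)));
}

int main() {
  const int loops[] = {2, 8, 16, 24, 32};
  for (int loop : loops) {
    printf("loop=%2d -> ~%d ns\n", loop, SuggestedDelayNS(loop));
  }
}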
+ +#ifndef ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_ +#define ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_ + +// Operations to make atomic transitions on a word, and to allow +// waiting for those transitions to become possible. + +// This file is used internally in spinlock.cc and once.cc, and a few other +// places listing in //base:spinlock_wait_users. If you need to use it outside +// of //base, please request permission to be added to that list. + +#include <atomic> + +#include "absl/base/internal/scheduling_mode.h" + +namespace absl { +namespace base_internal { + +// SpinLockWait() waits until it can perform one of several transitions from +// "from" to "to". It returns when it performs a transition where done==true. +struct SpinLockWaitTransition { + uint32_t from; + uint32_t to; + bool done; +}; + +// Wait until *w can transition from trans[i].from to trans[i].to for some i +// satisfying 0<=i<n && trans[i].done, atomically make the transition, +// then return the old value of *w. Make any other atomic transitions +// where !trans[i].done, but continue waiting. +uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n, + const SpinLockWaitTransition trans[], + SchedulingMode scheduling_mode); + +// If possible, wake some thread that has called SpinLockDelay(w, ...). If +// "all" is true, wake all such threads. This call is a hint, and on some +// systems it may be a no-op; threads calling SpinLockDelay() will always wake +// eventually even if SpinLockWake() is never called. +void SpinLockWake(std::atomic<uint32_t> *w, bool all); + +// Wait for an appropriate spin delay on iteration "loop" of a +// spin loop on location *w, whose previously observed value was "value". +// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick, +// or may wait for a delay that can be truncated by a call to SpinLockWake(w). +// In all cases, it must return in bounded time even if SpinLockWake() is not +// called. +void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop, + base_internal::SchedulingMode scheduling_mode); + +// Helper used by AbslInternalSpinLockDelay. +// Returns a suggested delay in nanoseconds for iteration number "loop". +int SpinLockSuggestedDelayNS(int loop); + +} // namespace base_internal +} // namespace absl + +// In some build configurations we pass --detect-odr-violations to the +// gold linker. This causes it to flag weak symbol overrides as ODR +// violations. Because ODR only applies to C++ and not C, +// --detect-odr-violations ignores symbols not mangled with C++ names. +// By changing our extension points to be extern "C", we dodge this +// check. +extern "C" { +void AbslInternalSpinLockWake(std::atomic<uint32_t> *w, bool all); +void AbslInternalSpinLockDelay( + std::atomic<uint32_t> *w, uint32_t value, int loop, + absl::base_internal::SchedulingMode scheduling_mode); +} + +inline void absl::base_internal::SpinLockWake(std::atomic<uint32_t> *w, + bool all) { + AbslInternalSpinLockWake(w, all); +} + +inline void absl::base_internal::SpinLockDelay( + std::atomic<uint32_t> *w, uint32_t value, int loop, + base_internal::SchedulingMode scheduling_mode) { + AbslInternalSpinLockDelay(w, value, loop, scheduling_mode); +} + +#endif // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_ diff --git a/absl/base/internal/spinlock_win32.inc b/absl/base/internal/spinlock_win32.inc new file mode 100644 index 000000000000..32c8fc0bb51d --- /dev/null +++ b/absl/base/internal/spinlock_win32.inc @@ -0,0 +1,37 @@ +// Copyright 2017 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is a Win32-specific part of spinlock_wait.cc + +#include <windows.h> +#include <atomic> +#include "absl/base/internal/scheduling_mode.h" + +extern "C" { + +void AbslInternalSpinLockDelay(std::atomic<uint32_t>* /* lock_word */, + uint32_t /* value */, int loop, + absl::base_internal::SchedulingMode /* mode */) { + if (loop == 0) { + } else if (loop == 1) { + Sleep(0); + } else { + Sleep(absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000); + } +} + +void AbslInternalSpinLockWake(std::atomic<uint32_t>* /* lock_word */, + bool /* all */) {} + +} // extern "C" diff --git a/absl/base/internal/sysinfo.cc b/absl/base/internal/sysinfo.cc new file mode 100644 index 000000000000..11863eab20ea --- /dev/null +++ b/absl/base/internal/sysinfo.cc @@ -0,0 +1,370 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/sysinfo.h" + +#ifdef _WIN32 +#include <shlwapi.h> +#include <windows.h> +#else +#include <fcntl.h> +#include <pthread.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <unistd.h> +#endif + +#ifdef __linux__ +#include <sys/syscall.h> +#endif + +#ifdef __APPLE__ +#include <sys/sysctl.h> +#endif + +#include <string.h> +#include <cassert> +#include <cstdint> +#include <cstdio> +#include <cstdlib> +#include <ctime> +#include <limits> +#include <thread> // NOLINT(build/c++11) +#include <utility> +#include <vector> + +#include "absl/base/call_once.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/internal/spinlock.h" +#include "absl/base/internal/unscaledcycleclock.h" +#include "absl/base/thread_annotations.h" + +namespace absl { +namespace base_internal { + +static once_flag init_system_info_once; +static int num_cpus = 0; +static double nominal_cpu_frequency = 1.0; // 0.0 might be dangerous. + +static int GetNumCPUs() { +#if defined(__myriad2__) || defined(__GENCLAVE__) + // TODO(b/28296132): Calling std::thread::hardware_concurrency() induces a + // link error on myriad2 builds. + // TODO(b/62709537): Support std::thread::hardware_concurrency() in gEnclalve. 
+ return 1; +#else + // Other possibilities: + // - Read /sys/devices/system/cpu/online and use cpumask_parse() + // - sysconf(_SC_NPROCESSORS_ONLN) + return std::thread::hardware_concurrency(); +#endif +} + +#if defined(_WIN32) + +static double GetNominalCPUFrequency() { + DWORD data; + DWORD data_size = sizeof(data); + #pragma comment(lib, "shlwapi.lib") // For SHGetValue(). + if (SUCCEEDED( + SHGetValueA(HKEY_LOCAL_MACHINE, + "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", + "~MHz", nullptr, &data, &data_size))) { + return data * 1e6; // Value is MHz. + } + return 1.0; +} + +#elif defined(CTL_HW) && defined(HW_CPU_FREQ) + +static double GetNominalCPUFrequency() { + unsigned freq; + size_t size = sizeof(freq); + int mib[2] = {CTL_HW, HW_CPU_FREQ}; + if (sysctl(mib, 2, &freq, &size, nullptr, 0) == 0) { + return static_cast<double>(freq); + } + return 1.0; +} + +#else + +// Helper function for reading a long from a file. Returns true if successful +// and the memory location pointed to by value is set to the value read. +static bool ReadLongFromFile(const char *file, long *value) { + bool ret = false; + int fd = open(file, O_RDONLY); + if (fd != -1) { + char line[1024]; + char *err; + memset(line, '\0', sizeof(line)); + int len = read(fd, line, sizeof(line) - 1); + if (len <= 0) { + ret = false; + } else { + const long temp_value = strtol(line, &err, 10); + if (line[0] != '\0' && (*err == '\n' || *err == '\0')) { + *value = temp_value; + ret = true; + } + } + close(fd); + } + return ret; +} + +#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY) + +// Reads a monotonic time source and returns a value in +// nanoseconds. The returned value uses an arbitrary epoch, not the +// Unix epoch. +static int64_t ReadMonotonicClockNanos() { + struct timespec t; +#ifdef CLOCK_MONOTONIC_RAW + int rc = clock_gettime(CLOCK_MONOTONIC_RAW, &t); +#else + int rc = clock_gettime(CLOCK_MONOTONIC, &t); +#endif + if (rc != 0) { + perror("clock_gettime() failed"); + abort(); + } + return int64_t{t.tv_sec} * 1000000000 + t.tv_nsec; +} + +class UnscaledCycleClockWrapperForInitializeFrequency { + public: + static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); } +}; + +struct TimeTscPair { + int64_t time; // From ReadMonotonicClockNanos(). + int64_t tsc; // From UnscaledCycleClock::Now(). +}; + +// Returns a pair of values (monotonic kernel time, TSC ticks) that +// approximately correspond to each other. This is accomplished by +// doing several reads and picking the reading with the lowest +// latency. This approach is used to minimize the probability that +// our thread was preempted between clock reads. +static TimeTscPair GetTimeTscPair() { + int64_t best_latency = std::numeric_limits<int64_t>::max(); + TimeTscPair best; + for (int i = 0; i < 10; ++i) { + int64_t t0 = ReadMonotonicClockNanos(); + int64_t tsc = UnscaledCycleClockWrapperForInitializeFrequency::Now(); + int64_t t1 = ReadMonotonicClockNanos(); + int64_t latency = t1 - t0; + if (latency < best_latency) { + best_latency = latency; + best.time = t0; + best.tsc = tsc; + } + } + return best; +} + +// Measures and returns the TSC frequency by taking a pair of +// measurements approximately `sleep_nanoseconds` apart. 
+static double MeasureTscFrequencyWithSleep(int sleep_nanoseconds) { + auto t0 = GetTimeTscPair(); + struct timespec ts; + ts.tv_sec = 0; + ts.tv_nsec = sleep_nanoseconds; + while (nanosleep(&ts, &ts) != 0 && errno == EINTR) {} + auto t1 = GetTimeTscPair(); + double elapsed_ticks = t1.tsc - t0.tsc; + double elapsed_time = (t1.time - t0.time) * 1e-9; + return elapsed_ticks / elapsed_time; +} + +// Measures and returns the TSC frequency by calling +// MeasureTscFrequencyWithSleep(), doubling the sleep interval until the +// frequency measurement stabilizes. +static double MeasureTscFrequency() { + double last_measurement = -1.0; + int sleep_nanoseconds = 1000000; // 1 millisecond. + for (int i = 0; i < 8; ++i) { + double measurement = MeasureTscFrequencyWithSleep(sleep_nanoseconds); + if (measurement * 0.99 < last_measurement && + last_measurement < measurement * 1.01) { + // Use the current measurement if it is within 1% of the + // previous measurement. + return measurement; + } + last_measurement = measurement; + sleep_nanoseconds *= 2; + } + return last_measurement; +} + +#endif // ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY + +static double GetNominalCPUFrequency() { + long freq = 0; + + // Google's production kernel has a patch to export the TSC + // frequency through sysfs. If the kernel is exporting the TSC + // frequency use that. There are issues where cpuinfo_max_freq + // cannot be relied on because the BIOS may be exporting an invalid + // p-state (on x86) or p-states may be used to put the processor in + // a new mode (turbo mode). Essentially, those frequencies cannot + // always be relied upon. The same reasons apply to /proc/cpuinfo as + // well. + if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)) { + return freq * 1e3; // Value is kHz. + } + +#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY) + // On these platforms, the TSC frequency is the nominal CPU + // frequency. But without having the kernel export it directly + // though /sys/devices/system/cpu/cpu0/tsc_freq_khz, there is no + // other way to reliably get the TSC frequency, so we have to + // measure it ourselves. Some CPUs abuse cpuinfo_max_freq by + // exporting "fake" frequencies for implementing new features. For + // example, Intel's turbo mode is enabled by exposing a p-state + // value with a higher frequency than that of the real TSC + // rate. Because of this, we prefer to measure the TSC rate + // ourselves on i386 and x86-64. + return MeasureTscFrequency(); +#else + + // If CPU scaling is in effect, we want to use the *maximum* + // frequency, not whatever CPU speed some random processor happens + // to be using now. + if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", + &freq)) { + return freq * 1e3; // Value is kHz. + } + + return 1.0; +#endif // !ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY +} + +#endif + +// InitializeSystemInfo() may be called before main() and before +// malloc is properly initialized, therefore this must not allocate +// memory. 
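Before moving on: the loop in MeasureTscFrequency() above is a generic pattern worth isolating. It repeats a noisy measurement with a doubling observation window until two successive results agree within 1%, and otherwise settles for the last value. A compact standalone sketch of just that pattern; MeasureUntilStable() and FakeMeasure() are illustrative stand-ins, not part of this file, and the real measurement compares TSC ticks against CLOCK_MONOTONIC_RAW.

#include <cstdio>

// Repeat a noisy measurement with a doubling observation window until two
// successive results agree within 1%, as MeasureTscFrequency() does; fall
// back to the last measurement if convergence is never reached.
static double MeasureUntilStable(double (*measure)(int window_ns)) {
  double last = -1.0;
  int window_ns = 1000000;  // start with a 1 ms window
  for (int i = 0; i < 8; ++i) {
    double m = measure(window_ns);
    if (m * 0.99 < last && last < m * 1.01) return m;  // within 1% of previous
    last = m;
    window_ns *= 2;
  }
  return last;
}

// Toy measurement whose noise shrinks as the window grows; stands in for the
// real ticks-per-second measurement.
static double FakeMeasure(int window_ns) {
  return 3.0e9 + 1.0e12 / window_ns;
}

int main() {
  printf("converged on %.4f GHz\n", MeasureUntilStable(&FakeMeasure) / 1e9);
}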
+static void InitializeSystemInfo() { + num_cpus = GetNumCPUs(); + nominal_cpu_frequency = GetNominalCPUFrequency(); +} + +int NumCPUs() { + base_internal::LowLevelCallOnce(&init_system_info_once, InitializeSystemInfo); + return num_cpus; +} + +double NominalCPUFrequency() { + base_internal::LowLevelCallOnce(&init_system_info_once, InitializeSystemInfo); + return nominal_cpu_frequency; +} + +#if defined(_WIN32) + +pid_t GetTID() { + return GetCurrentThreadId(); +} + +#elif defined(__linux__) + +#ifndef SYS_gettid +#define SYS_gettid __NR_gettid +#endif + +pid_t GetTID() { + return syscall(SYS_gettid); +} + +#else + +// Fallback implementation of GetTID using pthread_getspecific. +static once_flag tid_once; +static pthread_key_t tid_key; +static absl::base_internal::SpinLock tid_lock( + absl::base_internal::kLinkerInitialized); + +// We set a bit per thread in this array to indicate that an ID is in +// use. ID 0 is unused because it is the default value returned by +// pthread_getspecific(). +static std::vector<uint32_t>* tid_array GUARDED_BY(tid_lock) = nullptr; +static constexpr int kBitsPerWord = 32; // tid_array is uint32_t. + +// Returns the TID to tid_array. +static void FreeTID(void *v) { + intptr_t tid = reinterpret_cast<intptr_t>(v); + int word = tid / kBitsPerWord; + uint32_t mask = ~(1u << (tid % kBitsPerWord)); + absl::base_internal::SpinLockHolder lock(&tid_lock); + assert(0 <= word && static_cast<size_t>(word) < tid_array->size()); + (*tid_array)[word] &= mask; +} + +static void InitGetTID() { + if (pthread_key_create(&tid_key, FreeTID) != 0) { + // The logging system calls GetTID() so it can't be used here. + perror("pthread_key_create failed"); + abort(); + } + + // Initialize tid_array. + absl::base_internal::SpinLockHolder lock(&tid_lock); + tid_array = new std::vector<uint32_t>(1); + (*tid_array)[0] = 1; // ID 0 is never-allocated. +} + +// Return a per-thread small integer ID from pthread's thread-specific data. +pid_t GetTID() { + absl::call_once(tid_once, InitGetTID); + + intptr_t tid = reinterpret_cast<intptr_t>(pthread_getspecific(tid_key)); + if (tid != 0) { + return tid; + } + + int bit; // tid_array[word] = 1u << bit; + size_t word; + { + // Search for the first unused ID. + absl::base_internal::SpinLockHolder lock(&tid_lock); + // First search for a word in the array that is not all ones. + word = 0; + while (word < tid_array->size() && ~(*tid_array)[word] == 0) { + ++word; + } + if (word == tid_array->size()) { + tid_array->push_back(0); // No space left, add kBitsPerWord more IDs. + } + // Search for a zero bit in the word. + bit = 0; + while (bit < kBitsPerWord && (((*tid_array)[word] >> bit) & 1) != 0) { + ++bit; + } + tid = (word * kBitsPerWord) + bit; + (*tid_array)[word] |= 1u << bit; // Mark the TID as allocated. + } + + if (pthread_setspecific(tid_key, reinterpret_cast<void *>(tid)) != 0) { + perror("pthread_setspecific failed"); + abort(); + } + + return static_cast<pid_t>(tid); +} + +#endif + +} // namespace base_internal +} // namespace absl diff --git a/absl/base/internal/sysinfo.h b/absl/base/internal/sysinfo.h new file mode 100644 index 000000000000..f21de14325c7 --- /dev/null +++ b/absl/base/internal/sysinfo.h @@ -0,0 +1,64 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file includes routines to find out characteristics +// of the machine a program is running on. It is undoubtedly +// system-dependent. + +// Functions listed here that accept a pid_t as an argument act on the +// current process if the pid_t argument is 0 +// All functions here are thread-hostile due to file caching unless +// commented otherwise. + +#ifndef ABSL_BASE_INTERNAL_SYSINFO_H_ +#define ABSL_BASE_INTERNAL_SYSINFO_H_ + +#ifndef _WIN32 +#include <sys/types.h> +#else +#include <intsafe.h> +#endif + +#include "absl/base/port.h" + +namespace absl { +namespace base_internal { + +// Nominal core processor cycles per second of each processor. This is _not_ +// necessarily the frequency of the CycleClock counter (see cycleclock.h) +// Thread-safe. +double NominalCPUFrequency(); + +// Number of logical processors (hyperthreads) in system. See +// //base/cpuid/cpuid.h for more CPU-related info. Thread-safe. +int NumCPUs(); + +// Return the thread id of the current thread, as told by the system. +// No two currently-live threads implemented by the OS shall have the same ID. +// Thread ids of exited threads may be reused. Multiple user-level threads +// may have the same thread ID if multiplexed on the same OS thread. +// +// On Linux, you may send a signal to the resulting ID with kill(). However, +// it is recommended for portability that you use pthread_kill() instead. +#ifdef _WIN32 +// On Windows, process id and thread id are of the same type according to +// the return types of GetProcessId() and GetThreadId() are both DWORD. +using pid_t = DWORD; +#endif +pid_t GetTID(); + +} // namespace base_internal +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_SYSINFO_H_ diff --git a/absl/base/internal/sysinfo_test.cc b/absl/base/internal/sysinfo_test.cc new file mode 100644 index 000000000000..4c7d66b7f9c3 --- /dev/null +++ b/absl/base/internal/sysinfo_test.cc @@ -0,0 +1,99 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
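The header above boils down to three thread-safe accessors. A minimal usage sketch, assuming the internal sysinfo target is visible to your build; these functions live in an internal namespace and are not part of the public Abseil API:

#include <cstdio>

#include "absl/base/internal/sysinfo.h"

// Print what the three accessors declared in sysinfo.h return. Shown only to
// illustrate the API; ordinary code should not depend on internal headers.
int main() {
  printf("logical CPUs:      %d\n", absl::base_internal::NumCPUs());
  printf("nominal frequency: %.0f Hz\n",
         absl::base_internal::NominalCPUFrequency());
  printf("current thread id: %ld\n",
         static_cast<long>(absl::base_internal::GetTID()));
}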
+ +#include "absl/base/internal/sysinfo.h" + +#ifndef _WIN32 +#include <sys/types.h> +#include <unistd.h> +#endif + +#include <thread> // NOLINT(build/c++11) +#include <unordered_set> +#include <vector> + +#include "gtest/gtest.h" +#include "absl/synchronization/barrier.h" +#include "absl/synchronization/mutex.h" + +namespace absl { +namespace base_internal { +namespace { + +TEST(SysinfoTest, NumCPUs) { + EXPECT_NE(NumCPUs(), 0) + << "NumCPUs() should not have the default value of 0"; +} + +TEST(SysinfoTest, NominalCPUFrequency) { +#if !(defined(__aarch64__) && defined(__linux__)) + EXPECT_GE(NominalCPUFrequency(), 1000.0) + << "NominalCPUFrequency() did not return a reasonable value"; +#else + // TODO(b/37919252): Aarch64 cannot read the CPU frequency from sysfs, so we + // get back 1.0. Fix once the value is available. + EXPECT_EQ(NominalCPUFrequency(), 1.0) + << "CPU frequency detection was fixed! Please update unittest and " + "b/37919252"; +#endif +} + +TEST(SysinfoTest, GetTID) { + EXPECT_EQ(GetTID(), GetTID()); // Basic compile and equality test. +#ifdef __native_client__ + // Native Client has a race condition bug that leads to memory + // exaustion when repeatedly creating and joining threads. + // https://bugs.chromium.org/p/nativeclient/issues/detail?id=1027 + return; +#endif + // Test that TIDs are unique to each thread. + // Uses a few loops to exercise implementations that reallocate IDs. + for (int i = 0; i < 32; ++i) { + constexpr int kNumThreads = 64; + Barrier all_threads_done(kNumThreads); + std::vector<std::thread> threads; + + Mutex mutex; + std::unordered_set<pid_t> tids; + + for (int j = 0; j < kNumThreads; ++j) { + threads.push_back(std::thread([&]() { + pid_t id = GetTID(); + { + MutexLock lock(&mutex); + ASSERT_TRUE(tids.find(id) == tids.end()); + tids.insert(id); + } + // We can't simply join the threads here. The threads need to + // be alive otherwise the TID might have been reallocated to + // another live thread. + all_threads_done.Block(); + })); + } + for (auto& thread : threads) { + thread.join(); + } + } +} + +#ifdef __linux__ +TEST(SysinfoTest, LinuxGetTID) { + // On Linux, for the main thread, GetTID()==getpid() is guaranteed by the API. + EXPECT_EQ(GetTID(), getpid()); +} +#endif + +} // namespace +} // namespace base_internal +} // namespace absl diff --git a/absl/base/internal/thread_identity.cc b/absl/base/internal/thread_identity.cc new file mode 100644 index 000000000000..ee96a5883293 --- /dev/null +++ b/absl/base/internal/thread_identity.cc @@ -0,0 +1,126 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
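The GetTID() test above exercises the fallback path in sysinfo.cc, which hands out small integer IDs from a bitmap: one bit per live ID, 32 IDs per uint32_t word, with ID 0 reserved because pthread_getspecific() returns 0 when no value has been set. The word/bit bookkeeping mirrored by FreeTID() and the allocation loop is easy to get wrong, so here it is isolated in a standalone sketch; AllocateId()/ReleaseId() are hypothetical names, and the real code holds a SpinLock around the bitmap.

#include <cassert>
#include <cstdint>
#include <vector>

constexpr int kBitsPerWord = 32;  // the bitmap is a vector of uint32_t words

// Find the lowest clear bit, set it, and return its index as the new ID.
intptr_t AllocateId(std::vector<uint32_t>* bitmap) {
  size_t word = 0;
  while (word < bitmap->size() && ~(*bitmap)[word] == 0) ++word;
  if (word == bitmap->size()) bitmap->push_back(0);  // grow by 32 more IDs
  int bit = 0;
  while (((*bitmap)[word] >> bit) & 1) ++bit;
  (*bitmap)[word] |= 1u << bit;
  return static_cast<intptr_t>(word) * kBitsPerWord + bit;
}

// Clear the bit for a released ID, mirroring what FreeTID() does.
void ReleaseId(std::vector<uint32_t>* bitmap, intptr_t id) {
  size_t word = id / kBitsPerWord;
  assert(word < bitmap->size());
  (*bitmap)[word] &= ~(1u << (id % kBitsPerWord));
}

int main() {
  std::vector<uint32_t> bitmap(1, 1u);  // bit 0 pre-set: ID 0 is never handed out
  intptr_t a = AllocateId(&bitmap);     // 1
  intptr_t b = AllocateId(&bitmap);     // 2
  ReleaseId(&bitmap, a);
  intptr_t c = AllocateId(&bitmap);     // the lowest free ID (1) is reused
  assert(c == a);
  (void)b;
  (void)c;
}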
+ +#include "absl/base/internal/thread_identity.h" + +#ifndef _WIN32 +#include <pthread.h> +#include <signal.h> +#endif + +#include <atomic> +#include <cassert> +#include <memory> + +#include "absl/base/call_once.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/internal/spinlock.h" + +namespace absl { +namespace base_internal { + +#if ABSL_THREAD_IDENTITY_MODE != ABSL_THREAD_IDENTITY_MODE_USE_CPP11 +namespace { +// Used to co-ordinate one-time creation of our pthread_key +absl::once_flag init_thread_identity_key_once; +pthread_key_t thread_identity_pthread_key; +std::atomic<bool> pthread_key_initialized(false); + +void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) { + pthread_key_create(&thread_identity_pthread_key, reclaimer); + pthread_key_initialized.store(true, std::memory_order_release); +} +} // namespace +#endif + +#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \ + ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11 +// The actual TLS storage for a thread's currently associated ThreadIdentity. +// This is referenced by inline accessors in the header. +// "protected" visibility ensures that if multiple copies of //base exist in a +// process (via dlopen() or similar), references to +// thread_identity_ptr from each copy of the code will refer to +// *different* instances of this ptr. See extensive discussion of this choice +// in cl/90634708 +// TODO(ahh): hard deprecate multiple copies of //base; remove this. +#ifdef __GNUC__ +__attribute__((visibility("protected"))) +#endif // __GNUC__ + ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr; +#endif // TLS or CPP11 + +void SetCurrentThreadIdentity( + ThreadIdentity* identity, ThreadIdentityReclaimerFunction reclaimer) { + assert(CurrentThreadIdentityIfPresent() == nullptr); + // Associate our destructor. + // NOTE: This call to pthread_setspecific is currently the only immovable + // barrier to CurrentThreadIdentity() always being async signal safe. +#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC + // NOTE: Not async-safe. But can be open-coded. + absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey, + reclaimer); + // b/18366710: + // We must mask signals around the call to setspecific as with current glibc, + // a concurrent getspecific (needed for GetCurrentThreadIdentityIfPresent()) + // may zero our value. + // + // While not officially async-signal safe, getspecific within a signal handler + // is otherwise OK. + sigset_t all_signals; + sigset_t curr_signals; + sigfillset(&all_signals); + pthread_sigmask(SIG_SETMASK, &all_signals, &curr_signals); + pthread_setspecific(thread_identity_pthread_key, + reinterpret_cast<void*>(identity)); + pthread_sigmask(SIG_SETMASK, &curr_signals, nullptr); +#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS + // NOTE: Not async-safe. But can be open-coded. 
+ absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey, + reclaimer); + pthread_setspecific(thread_identity_pthread_key, + reinterpret_cast<void*>(identity)); + thread_identity_ptr = identity; +#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11 + thread_local std::unique_ptr<ThreadIdentity, ThreadIdentityReclaimerFunction> + holder(identity, reclaimer); + thread_identity_ptr = identity; +#else +#error Unimplemented ABSL_THREAD_IDENTITY_MODE +#endif +} + +void ClearCurrentThreadIdentity() { +#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \ + ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11 + thread_identity_ptr = nullptr; +#elif ABSL_THREAD_IDENTITY_MODE == \ + ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC + // pthread_setspecific expected to clear value on destruction + assert(CurrentThreadIdentityIfPresent() == nullptr); +#endif +} + +#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC +ThreadIdentity* CurrentThreadIdentityIfPresent() { + bool initialized = pthread_key_initialized.load(std::memory_order_acquire); + if (!initialized) { + return nullptr; + } + return reinterpret_cast<ThreadIdentity*>( + pthread_getspecific(thread_identity_pthread_key)); +} +#endif + +} // namespace base_internal +} // namespace absl diff --git a/absl/base/internal/thread_identity.h b/absl/base/internal/thread_identity.h new file mode 100644 index 000000000000..914d5da7ef40 --- /dev/null +++ b/absl/base/internal/thread_identity.h @@ -0,0 +1,240 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Each active thread has an ThreadIdentity that may represent the thread in +// various level interfaces. ThreadIdentity objects are never deallocated. +// When a thread terminates, its ThreadIdentity object may be reused for a +// thread created later. + +#ifndef ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_ +#define ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_ + +#ifndef _WIN32 +#include <pthread.h> +// Defines __GOOGLE_GRTE_VERSION__ (via glibc-specific features.h) when +// supported. +#include <unistd.h> +#endif + +#include <atomic> +#include <cstdint> + +#include "absl/base/internal/per_thread_tls.h" + +namespace absl { + +struct SynchLocksHeld; +struct SynchWaitParams; + +namespace base_internal { + +class SpinLock; +struct ThreadIdentity; + +// Used by the implementation of base::Mutex and base::CondVar. +struct PerThreadSynch { + // The internal representation of base::Mutex and base::CondVar rely + // on the alignment of PerThreadSynch. Both store the address of the + // PerThreadSynch in the high-order bits of their internal state, + // which means the low kLowZeroBits of the address of PerThreadSynch + // must be zero. + static constexpr int kLowZeroBits = 8; + static constexpr int kAlignment = 1 << kLowZeroBits; + + // Returns the associated ThreadIdentity. 
+ // This can be implemented as a cast because we guarantee + // PerThreadSynch is the first element of ThreadIdentity. + ThreadIdentity* thread_identity() { + return reinterpret_cast<ThreadIdentity*>(this); + } + + PerThreadSynch *next; // Circular waiter queue; initialized to 0. + PerThreadSynch *skip; // If non-zero, all entries in Mutex queue + // upto and including "skip" have same + // condition as this, and will be woken later + bool may_skip; // if false while on mutex queue, a mutex unlocker + // is using this PerThreadSynch as a terminator. Its + // skip field must not be filled in because the loop + // might then skip over the terminator. + + // The wait parameters of the current wait. waitp is null if the + // thread is not waiting. Transitions from null to non-null must + // occur before the enqueue commit point (state = kQueued in + // Enqueue() and CondVarEnqueue()). Transitions from non-null to + // null must occur after the wait is finished (state = kAvailable in + // Mutex::Block() and CondVar::WaitCommon()). This field may be + // changed only by the thread that describes this PerThreadSynch. A + // special case is Fer(), which calls Enqueue() on another thread, + // but with an identical SynchWaitParams pointer, thus leaving the + // pointer unchanged. + SynchWaitParams *waitp; + + bool suppress_fatal_errors; // If true, try to proceed even in the face of + // broken invariants. This is used within fatal + // signal handlers to improve the chances of + // debug logging information being output + // successfully. + + intptr_t readers; // Number of readers in mutex. + int priority; // Priority of thread (updated every so often). + + // When priority will next be read (cycles). + int64_t next_priority_read_cycles; + + // State values: + // kAvailable: This PerThreadSynch is available. + // kQueued: This PerThreadSynch is unavailable, it's currently queued on a + // Mutex or CondVar waistlist. + // + // Transitions from kQueued to kAvailable require a release + // barrier. This is needed as a waiter may use "state" to + // independently observe that it's no longer queued. + // + // Transitions from kAvailable to kQueued require no barrier, they + // are externally ordered by the Mutex. + enum State { + kAvailable, + kQueued + }; + std::atomic<State> state; + + bool maybe_unlocking; // Valid at head of Mutex waiter queue; + // true if UnlockSlow could be searching + // for a waiter to wake. Used for an optimization + // in Enqueue(). true is always a valid value. + // Can be reset to false when the unlocker or any + // writer releases the lock, or a reader fully releases + // the lock. It may not be set to false by a reader + // that decrements the count to non-zero. + // protected by mutex spinlock + + bool wake; // This thread is to be woken from a Mutex. + + // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the + // waiter is waiting on the mutex as part of a CV Wait or Mutex Await. + // + // The value of "x->cond_waiter" is meaningless if "x" is not on a + // Mutex waiter list. + bool cond_waiter; + + // Locks held; used during deadlock detection. + // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity(). + SynchLocksHeld *all_locks; +}; + +struct ThreadIdentity { + // Must be the first member. The Mutex implementation requires that + // the PerThreadSynch object associated with each thread is + // PerThreadSynch::kAlignment aligned. We provide this alignment on + // ThreadIdentity itself. 
+ PerThreadSynch per_thread_synch; + + // Private: Reserved for absl::synchronization_internal::Waiter. + struct WaiterState { + char data[128]; + } waiter_state; + + // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter(). + std::atomic<int>* blocked_count_ptr; + + // The following variables are mostly read/written just by the + // thread itself. The only exception is that these are read by + // a ticker thread as a hint. + std::atomic<int> ticker; // Tick counter, incremented once per second. + std::atomic<int> wait_start; // Ticker value when thread started waiting. + std::atomic<bool> is_idle; // Has thread become idle yet? + + ThreadIdentity* next; +}; + +// Returns the ThreadIdentity object representing the calling thread; guaranteed +// to be unique for its lifetime. The returned object will remain valid for the +// program's lifetime; although it may be re-assigned to a subsequent thread. +// If one does not exist, return nullptr instead. +// +// Does not malloc(*), and is async-signal safe. +// [*] Technically pthread_setspecific() does malloc on first use; however this +// is handled internally within tcmalloc's initialization already. +// +// New ThreadIdentity objects can be constructed and associated with a thread +// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h. +ThreadIdentity* CurrentThreadIdentityIfPresent(); + +using ThreadIdentityReclaimerFunction = void (*)(void*); + +// Sets the current thread identity to the given value. 'reclaimer' is a +// pointer to the global function for cleaning up instances on thread +// destruction. +void SetCurrentThreadIdentity(ThreadIdentity* identity, + ThreadIdentityReclaimerFunction reclaimer); + +// Removes the currently associated ThreadIdentity from the running thread. +// This must be called from inside the ThreadIdentityReclaimerFunction, and only +// from that function. +void ClearCurrentThreadIdentity(); + +// May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE=<mode +// index> +#ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC +#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be direcly set +#else +#define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0 +#endif + +#ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS +#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be direcly set +#else +#define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1 +#endif + +#ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11 +#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be direcly set +#else +#define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2 +#endif + +#ifdef ABSL_THREAD_IDENTITY_MODE +#error ABSL_THREAD_IDENTITY_MODE cannot be direcly set +#elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE) +#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE +#elif defined(_WIN32) +#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11 +#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \ + (__GOOGLE_GRTE_VERSION__ >= 20140228L) +// Support for async-safe TLS was specifically added in GRTEv4. It's not +// present in the upstream eglibc. +// Note: Current default for production systems. 
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS +#else +#define ABSL_THREAD_IDENTITY_MODE \ + ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC +#endif + +#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \ + ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11 + +extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr; + +inline ThreadIdentity* CurrentThreadIdentityIfPresent() { + return thread_identity_ptr; +} + +#elif ABSL_THREAD_IDENTITY_MODE != \ + ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC +#error Unknown ABSL_THREAD_IDENTITY_MODE +#endif + +} // namespace base_internal +} // namespace absl +#endif // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_ diff --git a/absl/base/internal/thread_identity_test.cc b/absl/base/internal/thread_identity_test.cc new file mode 100644 index 000000000000..a2b053d96ada --- /dev/null +++ b/absl/base/internal/thread_identity_test.cc @@ -0,0 +1,124 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/thread_identity.h" + +#include <thread> // NOLINT(build/c++11) +#include <vector> + +#include "gtest/gtest.h" +#include "absl/base/internal/spinlock.h" +#include "absl/synchronization/internal/per_thread_sem.h" +#include "absl/synchronization/mutex.h" + +namespace absl { +namespace base_internal { +namespace { + +// protects num_identities_reused +static absl::base_internal::SpinLock map_lock( + absl::base_internal::kLinkerInitialized); +static int num_identities_reused; + +static const void* const kCheckNoIdentity = reinterpret_cast<void*>(1); + +static void TestThreadIdentityCurrent(const void* assert_no_identity) { + ThreadIdentity* identity; + + // We have to test this conditionally, because if the test framework relies + // on Abseil, then some previous action may have already allocated an + // identity. + if (assert_no_identity == kCheckNoIdentity) { + identity = CurrentThreadIdentityIfPresent(); + EXPECT_TRUE(identity == nullptr); + } + + identity = synchronization_internal::GetOrCreateCurrentThreadIdentity(); + EXPECT_TRUE(identity != nullptr); + ThreadIdentity* identity_no_init; + identity_no_init = CurrentThreadIdentityIfPresent(); + EXPECT_TRUE(identity == identity_no_init); + + // Check that per_thread_synch is correctly aligned. + EXPECT_EQ(0, reinterpret_cast<intptr_t>(&identity->per_thread_synch) % + PerThreadSynch::kAlignment); + EXPECT_EQ(identity, identity->per_thread_synch.thread_identity()); + + absl::base_internal::SpinLockHolder l(&map_lock); + num_identities_reused++; +} + +TEST(ThreadIdentityTest, BasicIdentityWorks) { + // This tests for the main() thread. + TestThreadIdentityCurrent(nullptr); +} + +TEST(ThreadIdentityTest, BasicIdentityWorksThreaded) { + // Now try the same basic test with multiple threads being created and + // destroyed. This makes sure that: + // - New threads are created without a ThreadIdentity. + // - We re-allocate ThreadIdentity objects from the free-list. 
+ // - If a thread implementation chooses to recycle threads, that + // correct re-initialization occurs. + static const int kNumLoops = 3; + static const int kNumThreads = 400; + for (int iter = 0; iter < kNumLoops; iter++) { + std::vector<std::thread> threads; + for (int i = 0; i < kNumThreads; ++i) { + threads.push_back( + std::thread(TestThreadIdentityCurrent, kCheckNoIdentity)); + } + for (auto& thread : threads) { + thread.join(); + } + } + + // We should have recycled ThreadIdentity objects above; while (external) + // library threads allocating their own identities may preclude some + // reuse, we should have sufficient repetitions to exclude this. + EXPECT_LT(kNumThreads, num_identities_reused); +} + +TEST(ThreadIdentityTest, ReusedThreadIdentityMutexTest) { + // This test repeatly creates and joins a series of threads, each of + // which acquires and releases shared Mutex locks. This verifies + // Mutex operations work correctly under a reused + // ThreadIdentity. Note that the most likely failure mode of this + // test is a crash or deadlock. + static const int kNumLoops = 10; + static const int kNumThreads = 12; + static const int kNumMutexes = 3; + static const int kNumLockLoops = 5; + + Mutex mutexes[kNumMutexes]; + for (int iter = 0; iter < kNumLoops; ++iter) { + std::vector<std::thread> threads; + for (int thread = 0; thread < kNumThreads; ++thread) { + threads.push_back(std::thread([&]() { + for (int l = 0; l < kNumLockLoops; ++l) { + for (int m = 0; m < kNumMutexes; ++m) { + MutexLock lock(&mutexes[m]); + } + } + })); + } + for (auto& thread : threads) { + thread.join(); + } + } +} + +} // namespace +} // namespace base_internal +} // namespace absl diff --git a/absl/base/internal/throw_delegate.cc b/absl/base/internal/throw_delegate.cc new file mode 100644 index 000000000000..46dc573cfa84 --- /dev/null +++ b/absl/base/internal/throw_delegate.cc @@ -0,0 +1,106 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
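throw_delegate.cc, which begins here, exists so that header-only code can report errors without spelling `throw` at the call site; callers stay buildable with -fno-exceptions because the raise (or abort) happens out of line. A minimal sketch of the intended call pattern follows; the container helper below is hypothetical and not part of this commit.

#include <cstddef>
#include <vector>

#include "absl/base/internal/throw_delegate.h"

// Hypothetical bounds-checked accessor: rather than `throw std::out_of_range`,
// it defers to the out-of-line delegate, so this header compiles the same way
// whether or not the including target enables exceptions.
template <typename T>
const T& CheckedAt(const std::vector<T>& v, std::size_t i) {
  if (i >= v.size()) {
    absl::base_internal::ThrowStdOutOfRange("CheckedAt: index out of range");
  }
  return v[i];
}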
+ +#include "absl/base/internal/throw_delegate.h" + +#include <cstdlib> +#include <functional> +#include <new> +#include <stdexcept> +#include "absl/base/config.h" +#include "absl/base/internal/raw_logging.h" + +namespace absl { +namespace base_internal { + +namespace { +template <typename T> +[[noreturn]] void Throw(const T& error) { +#ifdef ABSL_HAVE_EXCEPTIONS + throw error; +#else + ABSL_RAW_LOG(ERROR, "%s", error.what()); + abort(); +#endif +} +} // namespace + +void ThrowStdLogicError(const std::string& what_arg) { + Throw(std::logic_error(what_arg)); +} +void ThrowStdLogicError(const char* what_arg) { + Throw(std::logic_error(what_arg)); +} +void ThrowStdInvalidArgument(const std::string& what_arg) { + Throw(std::invalid_argument(what_arg)); +} +void ThrowStdInvalidArgument(const char* what_arg) { + Throw(std::invalid_argument(what_arg)); +} + +void ThrowStdDomainError(const std::string& what_arg) { + Throw(std::domain_error(what_arg)); +} +void ThrowStdDomainError(const char* what_arg) { + Throw(std::domain_error(what_arg)); +} + +void ThrowStdLengthError(const std::string& what_arg) { + Throw(std::length_error(what_arg)); +} +void ThrowStdLengthError(const char* what_arg) { + Throw(std::length_error(what_arg)); +} + +void ThrowStdOutOfRange(const std::string& what_arg) { + Throw(std::out_of_range(what_arg)); +} +void ThrowStdOutOfRange(const char* what_arg) { + Throw(std::out_of_range(what_arg)); +} + +void ThrowStdRuntimeError(const std::string& what_arg) { + Throw(std::runtime_error(what_arg)); +} +void ThrowStdRuntimeError(const char* what_arg) { + Throw(std::runtime_error(what_arg)); +} + +void ThrowStdRangeError(const std::string& what_arg) { + Throw(std::range_error(what_arg)); +} +void ThrowStdRangeError(const char* what_arg) { + Throw(std::range_error(what_arg)); +} + +void ThrowStdOverflowError(const std::string& what_arg) { + Throw(std::overflow_error(what_arg)); +} +void ThrowStdOverflowError(const char* what_arg) { + Throw(std::overflow_error(what_arg)); +} + +void ThrowStdUnderflowError(const std::string& what_arg) { + Throw(std::underflow_error(what_arg)); +} +void ThrowStdUnderflowError(const char* what_arg) { + Throw(std::underflow_error(what_arg)); +} + +void ThrowStdBadFunctionCall() { Throw(std::bad_function_call()); } + +void ThrowStdBadAlloc() { Throw(std::bad_alloc()); } + +} // namespace base_internal +} // namespace absl diff --git a/absl/base/internal/throw_delegate.h b/absl/base/internal/throw_delegate.h new file mode 100644 index 000000000000..70e2d7709e5b --- /dev/null +++ b/absl/base/internal/throw_delegate.h @@ -0,0 +1,71 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ +#define ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ + +#include <string> + +namespace absl { +namespace base_internal { + +// Helper functions that allow throwing exceptions consistently from anywhere. 
+// The main use case is for header-based libraries (eg templates), as they will +// be built by many different targets with their own compiler options. +// In particular, this will allow a safe way to throw exceptions even if the +// caller is compiled with -fno-exceptions. This is intended for implementing +// things like map<>::at(), which the standard documents as throwing an +// exception on error. +// +// Using other techniques like #if tricks could lead to ODR violations. +// +// You shouldn't use it unless you're writing code that you know will be built +// both with and without exceptions and you need to conform to an interface +// that uses exceptions. + +[[noreturn]] void ThrowStdLogicError(const std::string& what_arg); +[[noreturn]] void ThrowStdLogicError(const char* what_arg); +[[noreturn]] void ThrowStdInvalidArgument(const std::string& what_arg); +[[noreturn]] void ThrowStdInvalidArgument(const char* what_arg); +[[noreturn]] void ThrowStdDomainError(const std::string& what_arg); +[[noreturn]] void ThrowStdDomainError(const char* what_arg); +[[noreturn]] void ThrowStdLengthError(const std::string& what_arg); +[[noreturn]] void ThrowStdLengthError(const char* what_arg); +[[noreturn]] void ThrowStdOutOfRange(const std::string& what_arg); +[[noreturn]] void ThrowStdOutOfRange(const char* what_arg); +[[noreturn]] void ThrowStdRuntimeError(const std::string& what_arg); +[[noreturn]] void ThrowStdRuntimeError(const char* what_arg); +[[noreturn]] void ThrowStdRangeError(const std::string& what_arg); +[[noreturn]] void ThrowStdRangeError(const char* what_arg); +[[noreturn]] void ThrowStdOverflowError(const std::string& what_arg); +[[noreturn]] void ThrowStdOverflowError(const char* what_arg); +[[noreturn]] void ThrowStdUnderflowError(const std::string& what_arg); +[[noreturn]] void ThrowStdUnderflowError(const char* what_arg); + +[[noreturn]] void ThrowStdBadFunctionCall(); +[[noreturn]] void ThrowStdBadAlloc(); + +// ThrowStdBadArrayNewLength() cannot be consistently supported because +// std::bad_array_new_length is missing in libstdc++ until 4.9.0. +// https://gcc.gnu.org/onlinedocs/gcc-4.8.3/libstdc++/api/a01379_source.html +// https://gcc.gnu.org/onlinedocs/gcc-4.9.0/libstdc++/api/a01327_source.html +// libcxx (as of 3.2) and msvc (as of 2015) both have it. +// [[noreturn]] void ThrowStdBadArrayNewLength(); + +} // namespace base_internal +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ diff --git a/absl/base/internal/tsan_mutex_interface.h b/absl/base/internal/tsan_mutex_interface.h new file mode 100644 index 000000000000..a1303e67c8f5 --- /dev/null +++ b/absl/base/internal/tsan_mutex_interface.h @@ -0,0 +1,51 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is intended solely for spinlock.h. +// It provides ThreadSanitizer annotations for custom mutexes. +// See <sanitizer/tsan_interface.h> for meaning of these annotations. 
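To make the macros defined below concrete: a lock implementation brackets its operations with the pre/post annotations so ThreadSanitizer can model the synchronization even though no standard mutex is involved. The toy spin lock below is an illustration only (the real consumer in this commit is spinlock.h); the argument shapes follow <sanitizer/tsan_interface.h>, where the second parameter is a flags word and the post-lock callback also takes a recursion count.

#include <atomic>

#include "absl/base/internal/tsan_mutex_interface.h"

// Illustrative toy lock, not part of this commit. Under ThreadSanitizer the
// macros expand to the __tsan_mutex_* callbacks; otherwise they expand to
// nothing and this is an ordinary test-and-set spin lock.
class ToySpinLock {
 public:
  ToySpinLock() { ABSL_TSAN_MUTEX_CREATE(this, 0); }
  ~ToySpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, 0); }

  void Lock() {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
    while (locked_.exchange(true, std::memory_order_acquire)) {
      // Spin until the current holder releases the lock.
    }
    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
  }

  void Unlock() {
    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
    locked_.store(false, std::memory_order_release);
    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
  }

 private:
  std::atomic<bool> locked_{false};
};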
+ +#ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_ +#define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_ + +#ifdef THREAD_SANITIZER +#include <sanitizer/tsan_interface.h> + +#define ABSL_TSAN_MUTEX_CREATE __tsan_mutex_create +#define ABSL_TSAN_MUTEX_DESTROY __tsan_mutex_destroy +#define ABSL_TSAN_MUTEX_PRE_LOCK __tsan_mutex_pre_lock +#define ABSL_TSAN_MUTEX_POST_LOCK __tsan_mutex_post_lock +#define ABSL_TSAN_MUTEX_PRE_UNLOCK __tsan_mutex_pre_unlock +#define ABSL_TSAN_MUTEX_POST_UNLOCK __tsan_mutex_post_unlock +#define ABSL_TSAN_MUTEX_PRE_SIGNAL __tsan_mutex_pre_signal +#define ABSL_TSAN_MUTEX_POST_SIGNAL __tsan_mutex_post_signal +#define ABSL_TSAN_MUTEX_PRE_DIVERT __tsan_mutex_pre_divert +#define ABSL_TSAN_MUTEX_POST_DIVERT __tsan_mutex_post_divert + +#else + +#define ABSL_TSAN_MUTEX_CREATE(...) +#define ABSL_TSAN_MUTEX_DESTROY(...) +#define ABSL_TSAN_MUTEX_PRE_LOCK(...) +#define ABSL_TSAN_MUTEX_POST_LOCK(...) +#define ABSL_TSAN_MUTEX_PRE_UNLOCK(...) +#define ABSL_TSAN_MUTEX_POST_UNLOCK(...) +#define ABSL_TSAN_MUTEX_PRE_SIGNAL(...) +#define ABSL_TSAN_MUTEX_POST_SIGNAL(...) +#define ABSL_TSAN_MUTEX_PRE_DIVERT(...) +#define ABSL_TSAN_MUTEX_POST_DIVERT(...) + +#endif + +#endif // ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_ diff --git a/absl/base/internal/unaligned_access.h b/absl/base/internal/unaligned_access.h new file mode 100644 index 000000000000..ea30829b0e18 --- /dev/null +++ b/absl/base/internal/unaligned_access.h @@ -0,0 +1,256 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_ +#define ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_ + +#include <string.h> +#include <cstdint> + +#include "absl/base/attributes.h" + +// unaligned APIs + +// Portable handling of unaligned loads, stores, and copies. +// On some platforms, like ARM, the copy functions can be more efficient +// then a load and a store. +// +// It is possible to implement all of these these using constant-length memcpy +// calls, which is portable and will usually be inlined into simple loads and +// stores if the architecture supports it. However, such inlining usually +// happens in a pass that's quite late in compilation, which means the resulting +// loads and stores cannot participate in many other optimizations, leading to +// overall worse code. + +// The unaligned API is C++ only. The declarations use C++ features +// (namespaces, inline) which are absent or incompatible in C. +#if defined(__cplusplus) + +#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) ||\ + defined(MEMORY_SANITIZER) +// Consider we have an unaligned load/store of 4 bytes from address 0x...05. +// AddressSanitizer will treat it as a 3-byte access to the range 05:07 and +// will miss a bug if 08 is the first unaddressable byte. +// ThreadSanitizer will also treat this as a 3-byte access to 05:07 and will +// miss a race between this access and some other accesses to 08. 
+// MemorySanitizer will correctly propagate the shadow on unaligned stores +// and correctly report bugs on unaligned loads, but it may not properly +// update and report the origin of the uninitialized memory. +// For all three tools, replacing an unaligned access with a tool-specific +// callback solves the problem. + +// Make sure uint16_t/uint32_t/uint64_t are defined. +#include <stdint.h> + +extern "C" { +uint16_t __sanitizer_unaligned_load16(const void *p); +uint32_t __sanitizer_unaligned_load32(const void *p); +uint64_t __sanitizer_unaligned_load64(const void *p); +void __sanitizer_unaligned_store16(void *p, uint16_t v); +void __sanitizer_unaligned_store32(void *p, uint32_t v); +void __sanitizer_unaligned_store64(void *p, uint64_t v); +} // extern "C" + +namespace absl { + +inline uint16_t UnalignedLoad16(const void *p) { + return __sanitizer_unaligned_load16(p); +} + +inline uint32_t UnalignedLoad32(const void *p) { + return __sanitizer_unaligned_load32(p); +} + +inline uint64_t UnalignedLoad64(const void *p) { + return __sanitizer_unaligned_load64(p); +} + +inline void UnalignedStore16(void *p, uint16_t v) { + __sanitizer_unaligned_store16(p, v); +} + +inline void UnalignedStore32(void *p, uint32_t v) { + __sanitizer_unaligned_store32(p, v); +} + +inline void UnalignedStore64(void *p, uint64_t v) { + __sanitizer_unaligned_store64(p, v); +} + +} // namespace absl + +#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) (absl::UnalignedLoad16(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) (absl::UnalignedLoad32(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) (absl::UnalignedLoad64(_p)) + +#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \ + (absl::UnalignedStore16(_p, _val)) +#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \ + (absl::UnalignedStore32(_p, _val)) +#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ + (absl::UnalignedStore64(_p, _val)) + +#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386) || \ + defined(_M_IX86) || defined(__ppc__) || defined(__PPC__) || \ + defined(__ppc64__) || defined(__PPC64__) + +// x86 and x86-64 can perform unaligned loads/stores directly; +// modern PowerPC hardware can also do unaligned integer loads and stores; +// but note: the FPU still sends unaligned loads and stores to a trap handler! + +#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \ + (*reinterpret_cast<const uint16_t *>(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \ + (*reinterpret_cast<const uint32_t *>(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \ + (*reinterpret_cast<const uint64_t *>(_p)) + +#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \ + (*reinterpret_cast<uint16_t *>(_p) = (_val)) +#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \ + (*reinterpret_cast<uint32_t *>(_p) = (_val)) +#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ + (*reinterpret_cast<uint64_t *>(_p) = (_val)) + +#elif defined(__arm__) && \ + !defined(__ARM_ARCH_5__) && \ + !defined(__ARM_ARCH_5T__) && \ + !defined(__ARM_ARCH_5TE__) && \ + !defined(__ARM_ARCH_5TEJ__) && \ + !defined(__ARM_ARCH_6__) && \ + !defined(__ARM_ARCH_6J__) && \ + !defined(__ARM_ARCH_6K__) && \ + !defined(__ARM_ARCH_6Z__) && \ + !defined(__ARM_ARCH_6ZK__) && \ + !defined(__ARM_ARCH_6T2__) + + +// ARMv7 and newer support native unaligned accesses, but only of 16-bit +// and 32-bit values (not 64-bit); older versions either raise a fatal signal, +// do an unaligned read and rotate the words around a bit, or do the reads very +// slowly (trip through kernel mode). 
There's no simple #define that says just +// “ARMv7 or higher”, so we have to filter away all ARMv5 and ARMv6 +// sub-architectures. Newer gcc (>= 4.6) set an __ARM_FEATURE_ALIGNED #define, +// so in time, maybe we can move on to that. +// +// This is a mess, but there's not much we can do about it. +// +// To further complicate matters, only LDR instructions (single reads) are +// allowed to be unaligned, not LDRD (two reads) or LDM (many reads). Unless we +// explicitly tell the compiler that these accesses can be unaligned, it can and +// will combine accesses. On armcc, the way to signal this is done by accessing +// through the type (uint32_t __packed *), but GCC has no such attribute +// (it ignores __attribute__((packed)) on individual variables). However, +// we can tell it that a _struct_ is unaligned, which has the same effect, +// so we do that. + +namespace absl { +namespace internal { + +struct Unaligned16Struct { + uint16_t value; + uint8_t dummy; // To make the size non-power-of-two. +} ABSL_ATTRIBUTE_PACKED; + +struct Unaligned32Struct { + uint32_t value; + uint8_t dummy; // To make the size non-power-of-two. +} ABSL_ATTRIBUTE_PACKED; + +} // namespace internal +} // namespace absl + +#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \ + ((reinterpret_cast<const ::absl::internal::Unaligned16Struct *>(_p))->value) +#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \ + ((reinterpret_cast<const ::absl::internal::Unaligned32Struct *>(_p))->value) + +#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \ + ((reinterpret_cast< ::absl::internal::Unaligned16Struct *>(_p))->value = \ + (_val)) +#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \ + ((reinterpret_cast< ::absl::internal::Unaligned32Struct *>(_p))->value = \ + (_val)) + +namespace absl { + +inline uint64_t UnalignedLoad64(const void *p) { + uint64_t t; + memcpy(&t, p, sizeof t); + return t; +} + +inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); } + +} // namespace absl + +#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) (absl::UnalignedLoad64(_p)) +#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ + (absl::UnalignedStore64(_p, _val)) + +#else + +// ABSL_INTERNAL_NEED_ALIGNED_LOADS is defined when the underlying platform +// doesn't support unaligned access. +#define ABSL_INTERNAL_NEED_ALIGNED_LOADS + +// These functions are provided for architectures that don't support +// unaligned loads and stores. 
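Whichever branch above is selected, callers see the same ABSL_INTERNAL_UNALIGNED_* macro surface. A small usage illustration (the helper is hypothetical; note that these macros load in native byte order, so any endianness conversion is the caller's job):

#include <cstddef>
#include <cstdint>

#include "absl/base/internal/unaligned_access.h"

// Hypothetical helper: read a 32-bit length field that may sit at any byte
// offset inside a packed wire-format buffer, without tripping alignment rules.
inline uint32_t ReadPackedLength(const char* buffer, std::size_t offset) {
  return ABSL_INTERNAL_UNALIGNED_LOAD32(buffer + offset);
}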
+ +namespace absl { + +inline uint16_t UnalignedLoad16(const void *p) { + uint16_t t; + memcpy(&t, p, sizeof t); + return t; +} + +inline uint32_t UnalignedLoad32(const void *p) { + uint32_t t; + memcpy(&t, p, sizeof t); + return t; +} + +inline uint64_t UnalignedLoad64(const void *p) { + uint64_t t; + memcpy(&t, p, sizeof t); + return t; +} + +inline void UnalignedStore16(void *p, uint16_t v) { memcpy(p, &v, sizeof v); } + +inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); } + +inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); } + +} // namespace absl + +#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) (absl::UnalignedLoad16(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) (absl::UnalignedLoad32(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) (absl::UnalignedLoad64(_p)) + +#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \ + (absl::UnalignedStore16(_p, _val)) +#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \ + (absl::UnalignedStore32(_p, _val)) +#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ + (absl::UnalignedStore64(_p, _val)) + +#endif + +#endif // defined(__cplusplus), end of unaligned API + +#endif // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_ diff --git a/absl/base/internal/unscaledcycleclock.cc b/absl/base/internal/unscaledcycleclock.cc new file mode 100644 index 000000000000..a12d68bd10af --- /dev/null +++ b/absl/base/internal/unscaledcycleclock.cc @@ -0,0 +1,101 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/unscaledcycleclock.h" + +#if ABSL_USE_UNSCALED_CYCLECLOCK + +#if defined(_WIN32) +#include <intrin.h> +#endif + +#if defined(__powerpc__) || defined(__ppc__) +#include <sys/platform/ppc.h> +#endif + +#include "absl/base/internal/sysinfo.h" + +namespace absl { +namespace base_internal { + +#if defined(__i386__) + +int64_t UnscaledCycleClock::Now() { + int64_t ret; + __asm__ volatile("rdtsc" : "=A"(ret)); + return ret; +} + +double UnscaledCycleClock::Frequency() { + return base_internal::NominalCPUFrequency(); +} + +#elif defined(__x86_64__) + +int64_t UnscaledCycleClock::Now() { + uint64_t low, high; + __asm__ volatile("rdtsc" : "=a"(low), "=d"(high)); + return (high << 32) | low; +} + +double UnscaledCycleClock::Frequency() { + return base_internal::NominalCPUFrequency(); +} + +#elif defined(__powerpc__) || defined(__ppc__) + +int64_t UnscaledCycleClock::Now() { + return __ppc_get_timebase(); +} + +double UnscaledCycleClock::Frequency() { + return __ppc_get_timebase_freq(); +} + +#elif defined(__aarch64__) + +// System timer of ARMv8 runs at a different frequency than the CPU's. +// The frequency is fixed, typically in the range 1-50MHz. It can be +// read at CNTFRQ special register. We assume the OS has set up +// the virtual timer properly. 
+int64_t UnscaledCycleClock::Now() { + int64_t virtual_timer_value; + asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value)); + return virtual_timer_value; +} + +double UnscaledCycleClock::Frequency() { + uint64_t aarch64_timer_frequency; + asm volatile("mrs %0, cntfrq_el0" : "=r"(aarch64_timer_frequency)); + return aarch64_timer_frequency; +} + +#elif defined(_M_IX86) || defined(_M_X64) + +#pragma intrinsic(__rdtsc) + +int64_t UnscaledCycleClock::Now() { + return __rdtsc(); +} + +double UnscaledCycleClock::Frequency() { + return base_internal::NominalCPUFrequency(); +} + +#endif + +} // namespace base_internal +} // namespace absl + +#endif // ABSL_USE_UNSCALED_CYCLECLOCK diff --git a/absl/base/internal/unscaledcycleclock.h b/absl/base/internal/unscaledcycleclock.h new file mode 100644 index 000000000000..ddf6a5e5a79e --- /dev/null +++ b/absl/base/internal/unscaledcycleclock.h @@ -0,0 +1,118 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// UnscaledCycleClock +// An UnscaledCycleClock yields the value and frequency of a cycle counter +// that increments at a rate that is approximately constant. +// This class is for internal / whitelisted use only, you should consider +// using CycleClock instead. +// +// Notes: +// The cycle counter frequency is not necessarily the core clock frequency. +// That is, CycleCounter cycles are not necessarily "CPU cycles". +// +// An arbitrary offset may have been added to the counter at power on. +// +// On some platforms, the rate and offset of the counter may differ +// slightly when read from different CPUs of a multiprocessor. Usually, +// we try to ensure that the operating system adjusts values periodically +// so that values agree approximately. If you need stronger guarantees, +// consider using alternate interfaces. +// +// The CPU is not required to maintain the ordering of a cycle counter read +// with respect to surrounding instructions. + +#ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_ +#define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_ + +#include <cstdint> + +#if defined(__APPLE__) +#include <TargetConditionals.h> +#endif + +#include "absl/base/port.h" + +// The following platforms have an implementation of a hardware counter. +#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \ + defined(__powerpc__) || defined(__ppc__) || \ + defined(_M_IX86) || defined(_M_X64) +#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1 +#else +#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0 +#endif + +// The following platforms often disable access to the hardware +// counter (through a sandbox) even if the underlying hardware has a +// usable counter. The CycleTimer interface also requires a *scaled* +// CycleClock that runs at atleast 1 MHz. We've found some Android +// ARM64 devices where this is not the case, so we disable it by +// default on Android ARM64. 
+#if defined(__native_client__) || TARGET_OS_IPHONE || \
+    (defined(__ANDROID__) && defined(__aarch64__))
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0
+#else
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 1
+#endif
+
+// UnscaledCycleClock is an optional internal feature.
+// Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence.
+// Can be overridden at compile-time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1
+#if !defined(ABSL_USE_UNSCALED_CYCLECLOCK)
+#define ABSL_USE_UNSCALED_CYCLECLOCK               \
+  (ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \
+   ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT)
+#endif
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+// This macro can be used to test if UnscaledCycleClock::Frequency()
+// is NominalCPUFrequency() on a particular platform.
+#if (defined(__i386__) || defined(__x86_64__) || \
+     defined(_M_IX86) || defined(_M_X64))
+#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+#endif
+namespace absl {
+namespace time_internal {
+class UnscaledCycleClockWrapperForGetCurrentTime;
+}  // namespace time_internal
+
+namespace base_internal {
+class CycleClock;
+class UnscaledCycleClockWrapperForInitializeFrequency;
+class UnscaledCycleClock {
+ private:
+  UnscaledCycleClock() = delete;
+
+  // Return the value of a cycle counter that counts at a rate that is
+  // approximately constant.
+  static int64_t Now();
+
+  // Return how much UnscaledCycleClock::Now() increases per second.
+  // This is not necessarily the core CPU clock frequency.
+  // It may be the nominal value reported by the kernel, rather than a
+  // measured value.
+  static double Frequency();
+
+  // Whitelisted friends.
+  friend class base_internal::CycleClock;
+  friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime;
+  friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;
+};
+
+}  // namespace base_internal
+}  // namespace absl
+#endif  // ABSL_USE_UNSCALED_CYCLECLOCK
+
+#endif  // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
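A final note on intended use: the unscaled counter is consumed only through the whitelisted friends, which scale it by Frequency() to obtain real time. The conversion itself is a single division, as in the sketch below (a hypothetical free function, not an API in this commit):

#include <cstdint>

// Hypothetical sketch of what a scaled wrapper such as CycleClock has to do:
// turn a raw counter delta into seconds by dividing by the counter frequency.
inline double CyclesToSeconds(int64_t start_cycles, int64_t end_cycles,
                              double frequency_hz) {
  return static_cast<double>(end_cycles - start_cycles) / frequency_hz;
}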