Diffstat (limited to 'absl/base/internal')
-rw-r--r--  absl/base/internal/atomic_hook.h            104
-rw-r--r--  absl/base/internal/low_level_scheduling.h      4
-rw-r--r--  absl/base/internal/malloc_hook.cc             26
-rw-r--r--  absl/base/internal/raw_logging.cc              8
-rw-r--r--  absl/base/internal/scheduling_mode.h           4
-rw-r--r--  absl/base/internal/spinlock_wait.h             4
-rw-r--r--  absl/base/internal/sysinfo.cc                  5
-rw-r--r--  absl/base/internal/sysinfo.h                   3
-rw-r--r--  absl/base/internal/sysinfo_test.cc             5
-rw-r--r--  absl/base/internal/thread_identity.cc         11
10 files changed, 91 insertions, 83 deletions
diff --git a/absl/base/internal/atomic_hook.h b/absl/base/internal/atomic_hook.h
index 1f9a8102f7ec..47d4013928a0 100644
--- a/absl/base/internal/atomic_hook.h
+++ b/absl/base/internal/atomic_hook.h
@@ -18,28 +18,12 @@
 
 #include <atomic>
 #include <cassert>
+#include <cstdint>
 #include <utility>
 
 namespace absl {
 namespace base_internal {
 
-// In current versions of MSVC (as of July 2017), a std::atomic<T> where T is a
-// pointer to function cannot be constant-initialized with an address constant
-// expression.  That is, the following code does not compile:
-//   void NoOp() {}
-//   constexpr std::atomic<void(*)()> ptr(NoOp);
-//
-// This is the only compiler we support that seems to have this issue.  We
-// conditionalize on MSVC here to use a fallback implementation.  But we
-// should revisit this occasionally.  If MSVC fixes this compiler bug, we
-// can then change this to be conditionalized on the value on _MSC_FULL_VER
-// instead.
-#ifdef _MSC_FULL_VER
-#define ABSL_HAVE_FUNCTION_ADDRESS_CONSTANT_EXPRESSION 0
-#else
-#define ABSL_HAVE_FUNCTION_ADDRESS_CONSTANT_EXPRESSION 1
-#endif
-
 template <typename T>
 class AtomicHook;
 
@@ -55,7 +39,7 @@ class AtomicHook<ReturnType (*)(Args...)> {
  public:
   using FnPtr = ReturnType (*)(Args...);
 
-  constexpr AtomicHook() : hook_(DummyFunction) {}
+  constexpr AtomicHook() : hook_(kInitialValue) {}
 
   // Stores the provided function pointer as the value for this hook.
   //
@@ -64,28 +48,16 @@ class AtomicHook<ReturnType (*)(Args...)> {
   // as a memory_order_release operation, and read accesses are implemented as
   // memory_order_acquire.
   void Store(FnPtr fn) {
-    assert(fn);
-    FnPtr expected = DummyFunction;
-    hook_.compare_exchange_strong(expected, fn, std::memory_order_acq_rel,
-                                  std::memory_order_acquire);
-    // If the compare and exchange failed, make sure that's because hook_ was
-    // already set to `fn` by an earlier call.  Any other state reflects an API
-    // violation (calling Store() multiple times with different values).
-    //
-    // Avoid ABSL_RAW_CHECK, since raw logging depends on AtomicHook.
-    assert(expected == DummyFunction || expected == fn);
+    bool success = DoStore(fn);
+    static_cast<void>(success);
+    assert(success);
   }
 
   // Invokes the registered callback.  If no callback has yet been registered, a
   // default-constructed object of the appropriate type is returned instead.
   template <typename... CallArgs>
   ReturnType operator()(CallArgs&&... args) const {
-    FnPtr hook = hook_.load(std::memory_order_acquire);
-    if (ABSL_HAVE_FUNCTION_ADDRESS_CONSTANT_EXPRESSION || hook) {
-      return hook(std::forward<CallArgs>(args)...);
-    } else {
-      return ReturnType();
-    }
+    return DoLoad()(std::forward<CallArgs>(args)...);
   }
 
   // Returns the registered callback, or nullptr if none has been registered.
@@ -98,23 +70,79 @@ class AtomicHook<ReturnType (*)(Args...)> {
   // Load()() unless you must conditionalize behavior on whether a hook was
   // registered.
   FnPtr Load() const {
-    FnPtr ptr = hook_.load(std::memory_order_acquire);
+    FnPtr ptr = DoLoad();
     return (ptr == DummyFunction) ? nullptr : ptr;
   }
 
  private:
-#if ABSL_HAVE_FUNCTION_ADDRESS_CONSTANT_EXPRESSION
   static ReturnType DummyFunction(Args...) {
     return ReturnType();
   }
+
+  // Current versions of MSVC (as of September 2017) have a broken
+  // implementation of std::atomic<T*>:  Its constructor attempts to do the
+  // equivalent of a reinterpret_cast in a constexpr context, which is not
+  // allowed.
+  //
+  // This causes an issue when building with LLVM under Windows.  To avoid this,
+  // we use a less-efficient, intptr_t-based implementation on Windows.
+
+#ifdef _MSC_FULL_VER
+#define ABSL_HAVE_WORKING_ATOMIC_POINTER 0
 #else
-  static constexpr FnPtr DummyFunction = nullptr;
+#define ABSL_HAVE_WORKING_ATOMIC_POINTER 1
 #endif
 
+#if ABSL_HAVE_WORKING_ATOMIC_POINTER
+  static constexpr FnPtr kInitialValue = &DummyFunction;
+
+  // Return the stored value, or DummyFunction if no value has been stored.
+  FnPtr DoLoad() const { return hook_.load(std::memory_order_acquire); }
+
+  // Store the given value.  Returns false if a different value was already
+  // stored to this object.
+  bool DoStore(FnPtr fn) {
+    assert(fn);
+    FnPtr expected = DummyFunction;
+    hook_.compare_exchange_strong(expected, fn, std::memory_order_acq_rel,
+                                  std::memory_order_acquire);
+    const bool store_succeeded = (expected == DummyFunction);
+    const bool same_value_already_stored = (expected == fn);
+    return store_succeeded || same_value_already_stored;
+  }
+
   std::atomic<FnPtr> hook_;
+#else  // !ABSL_HAVE_WORKING_ATOMIC_POINTER
+  // Use a sentinel value unlikely to be the address of an actual function.
+  static constexpr intptr_t kInitialValue = 0;
+
+  static_assert(sizeof(intptr_t) >= sizeof(FnPtr),
+                "intptr_t can't contain a function pointer");
+
+  FnPtr DoLoad() const {
+    const intptr_t value = hook_.load(std::memory_order_acquire);
+    if (value == 0) {
+      return DummyFunction;
+    }
+    return reinterpret_cast<FnPtr>(value);
+  }
+
+  bool DoStore(FnPtr fn) {
+    assert(fn);
+    const auto value = reinterpret_cast<intptr_t>(fn);
+    intptr_t expected = 0;
+    hook_.compare_exchange_strong(expected, value, std::memory_order_acq_rel,
+                                  std::memory_order_acquire);
+    const bool store_succeeded = (expected == 0);
+    const bool same_value_already_stored = (expected == value);
+    return store_succeeded || same_value_already_stored;
+  }
+
+  std::atomic<intptr_t> hook_;
+#endif
 };
 
-#undef ABSL_HAVE_FUNCTION_ADDRESS_CONSTANT_EXPRESSION
+#undef ABSL_HAVE_WORKING_ATOMIC_POINTER
 
 }  // namespace base_internal
 }  // namespace absl
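
For context, a minimal usage sketch of the AtomicHook API touched above. The hook
name and callback signature here are hypothetical and not part of this diff; the
sketch only exercises the Store()/operator() contract shown in the hunks:

  // A hook is constant-initialized at namespace scope, registered at most once
  // via Store(), and invoked through operator(), which falls back to a
  // default-constructed ReturnType until a callback is stored.
  #include "absl/base/internal/atomic_hook.h"

  // Hypothetical hook: a logging callback taking a message string.
  static absl::base_internal::AtomicHook<void (*)(const char*)> my_hook;

  static void MyCallback(const char* msg) { /* handle msg */ }

  void RegisterCallback() {
    // Store() must only ever see one distinct value; a second call with a
    // different function would trip the assert in Store().
    my_hook.Store(MyCallback);
  }

  void Emit(const char* msg) {
    my_hook(msg);  // Calls MyCallback once registered.
  }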
diff --git a/absl/base/internal/low_level_scheduling.h b/absl/base/internal/low_level_scheduling.h
index a01d1c03272b..e716f2b49fa1 100644
--- a/absl/base/internal/low_level_scheduling.h
+++ b/absl/base/internal/low_level_scheduling.h
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //
-// Core interfaces and definitions used by by low-level //base interfaces such
-// as SpinLock.
+// Core interfaces and definitions used by low-level interfaces such as
+// SpinLock.
 
 #ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
 #define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
diff --git a/absl/base/internal/malloc_hook.cc b/absl/base/internal/malloc_hook.cc
index 4f5a0befe2f0..7165d4cbd02a 100644
--- a/absl/base/internal/malloc_hook.cc
+++ b/absl/base/internal/malloc_hook.cc
@@ -453,16 +453,13 @@ void MallocHook::InvokeSbrkHookSlow(const void* result, ptrdiff_t increment) {
 }  // namespace base_internal
 }  // namespace absl
 
-ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(google_malloc);
-ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(google_malloc);
-// actual functions are in debugallocation.cc or tcmalloc.cc
 ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(malloc_hook);
 ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(malloc_hook);
 // actual functions are in this file, malloc_hook.cc, and low_level_alloc.cc
+ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(google_malloc);
+ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(google_malloc);
 ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(blink_malloc);
 ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(blink_malloc);
-// actual functions are in third_party/blink_headless/.../{PartitionAlloc,
-// FastMalloc}.cpp.
 
 #define ADDR_IN_ATTRIBUTE_SECTION(addr, name)                         \
   (reinterpret_cast<uintptr_t>(ABSL_ATTRIBUTE_SECTION_START(name)) <= \
@@ -486,13 +483,6 @@ static inline bool InHookCaller(const void* caller) {
 static absl::once_flag in_hook_caller_once;
 
 static void InitializeInHookCaller() {
-  ABSL_INIT_ATTRIBUTE_SECTION_VARS(google_malloc);
-  if (ABSL_ATTRIBUTE_SECTION_START(google_malloc) ==
-      ABSL_ATTRIBUTE_SECTION_STOP(google_malloc)) {
-    ABSL_RAW_LOG(ERROR,
-                 "google_malloc section is missing, "
-                 "thus InHookCaller is broken!");
-  }
   ABSL_INIT_ATTRIBUTE_SECTION_VARS(malloc_hook);
   if (ABSL_ATTRIBUTE_SECTION_START(malloc_hook) ==
       ABSL_ATTRIBUTE_SECTION_STOP(malloc_hook)) {
@@ -500,9 +490,14 @@ static void InitializeInHookCaller() {
                  "malloc_hook section is missing, "
                  "thus InHookCaller is broken!");
   }
+  ABSL_INIT_ATTRIBUTE_SECTION_VARS(google_malloc);
+  if (ABSL_ATTRIBUTE_SECTION_START(google_malloc) ==
+      ABSL_ATTRIBUTE_SECTION_STOP(google_malloc)) {
+    ABSL_RAW_LOG(ERROR,
+                 "google_malloc section is missing, "
+                 "thus InHookCaller is broken!");
+  }
   ABSL_INIT_ATTRIBUTE_SECTION_VARS(blink_malloc);
-  // The blink_malloc section is only expected to be present in binaries
-  // linking against the blink rendering engine in third_party/blink_headless.
 }
 
 // We can improve behavior/compactness of this function
@@ -574,7 +569,8 @@ extern "C" int MallocHook_GetCallerStackTrace(
 // still allow users to disable this in special cases that can't be easily
 // detected during compilation, via -DABSL_MALLOC_HOOK_MMAP_DISABLE or #define
 // ABSL_MALLOC_HOOK_MMAP_DISABLE.
-// TODO(b/62370839): Remove MALLOC_HOOK_MMAP_DISABLE in CROSSTOOL for tsan and
+//
+// TODO(absl-team): Remove MALLOC_HOOK_MMAP_DISABLE in CROSSTOOL for tsan and
 // msan config; Replace MALLOC_HOOK_MMAP_DISABLE with
 // ABSL_MALLOC_HOOK_MMAP_DISABLE for other special cases.
 #if !defined(THREAD_SANITIZER) && !defined(MEMORY_SANITIZER) && \
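
The InHookCaller check referenced above relies on linker-provided section
boundary symbols. The ADDR_IN_ATTRIBUTE_SECTION macro body is only partially
visible in this diff, so the following is an illustrative reconstruction of the
idea rather than the exact macro:

  // Illustrative only: an address belongs to a section when it falls between
  // the section's start and stop symbols emitted by the linker.
  #include <cstdint>

  static bool AddrInSection(const void* addr, const void* start, const void* stop) {
    const uintptr_t a = reinterpret_cast<uintptr_t>(addr);
    return reinterpret_cast<uintptr_t>(start) <= a &&
           a < reinterpret_cast<uintptr_t>(stop);
  }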
diff --git a/absl/base/internal/raw_logging.cc b/absl/base/internal/raw_logging.cc
index c0890614b1be..1b849abfceae 100644
--- a/absl/base/internal/raw_logging.cc
+++ b/absl/base/internal/raw_logging.cc
@@ -34,10 +34,10 @@
 //
 // This preprocessor token is also defined in raw_io.cc.  If you need to copy
 // this, consider moving both to config.h instead.
-#if defined(__linux__) || defined(__APPLE__) || defined(__Fuchsia__) || \
-    defined(__GENCLAVE__)
+#if defined(__linux__) || defined(__APPLE__) || defined(__Fuchsia__)
 #include <unistd.h>
 
+
 #define ABSL_HAVE_POSIX_WRITE 1
 #define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
 #else
@@ -110,10 +110,6 @@ namespace {
 
 // CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths
 // that invoke malloc() and getenv() that might acquire some locks.
-// If this becomes a problem we should reimplement a subset of vsnprintf
-// that does not need locks and malloc.
-// E.g. google3/third_party/clearsilver/core/util/snprintf.c
-// looks like such a reimplementation.
 
 // Helper for RawLog below.
 // *DoRawLog writes to *buf of *size and move them past the written portion.
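
A rough sketch of the *DoRawLog helper pattern described in the comment above.
The real implementation is not part of this hunk; the name and the exact
truncation handling here are illustrative:

  #include <cstdarg>
  #include <cstdio>

  // Appends formatted output to *buf (capacity *size) and advances both past
  // the written portion, clamping on truncation or error.
  static void DoRawLogSketch(char** buf, int* size, const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    int n = std::vsnprintf(*buf, *size, format, ap);
    va_end(ap);
    if (n < 0 || n > *size) n = *size;  // error or truncated: consume the rest
    *buf += n;
    *size -= n;
  }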
diff --git a/absl/base/internal/scheduling_mode.h b/absl/base/internal/scheduling_mode.h
index b7560f30d793..1b6497ad8757 100644
--- a/absl/base/internal/scheduling_mode.h
+++ b/absl/base/internal/scheduling_mode.h
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //
-// Core interfaces and definitions used by by low-level //base interfaces such
-// as SpinLock.
+// Core interfaces and definitions used by low-level interfaces such as
+// SpinLock.
 
 #ifndef ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
 #define ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
diff --git a/absl/base/internal/spinlock_wait.h b/absl/base/internal/spinlock_wait.h
index 6734cc9ef05f..5f658211cc56 100644
--- a/absl/base/internal/spinlock_wait.h
+++ b/absl/base/internal/spinlock_wait.h
@@ -18,10 +18,6 @@
 // Operations to make atomic transitions on a word, and to allow
 // waiting for those transitions to become possible.
 
-// This file is used internally in spinlock.cc and once.cc, and a few other
-// places listing in //base:spinlock_wait_users.  If you need to use it outside
-// of //base, please request permission to be added to that list.
-
 #include <stdint.h>
 #include <atomic>
 
diff --git a/absl/base/internal/sysinfo.cc b/absl/base/internal/sysinfo.cc
index 6a4c00641b1f..9e0140fadb1f 100644
--- a/absl/base/internal/sysinfo.cc
+++ b/absl/base/internal/sysinfo.cc
@@ -57,10 +57,7 @@ static int num_cpus = 0;
 static double nominal_cpu_frequency = 1.0;  // 0.0 might be dangerous.
 
 static int GetNumCPUs() {
-#if defined(__myriad2__) || defined(__GENCLAVE__)
-  // TODO(b/28296132): Calling std::thread::hardware_concurrency() induces a
-  // link error on myriad2 builds.
-  // TODO(b/62709537): Support std::thread::hardware_concurrency() in gEnclalve.
+#if defined(__myriad2__)
   return 1;
 #else
   // Other possibilities:
diff --git a/absl/base/internal/sysinfo.h b/absl/base/internal/sysinfo.h
index f21de14325c7..5bd1c500bd54 100644
--- a/absl/base/internal/sysinfo.h
+++ b/absl/base/internal/sysinfo.h
@@ -40,8 +40,7 @@ namespace base_internal {
 // Thread-safe.
 double NominalCPUFrequency();
 
-// Number of logical processors (hyperthreads) in system. See
-// //base/cpuid/cpuid.h for more CPU-related info.  Thread-safe.
+// Number of logical processors (hyperthreads) in system. Thread-safe.
 int NumCPUs();
 
 // Return the thread id of the current thread, as told by the system.
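
A small usage sketch of the accessors documented above, purely illustrative and
not part of this change; the sizing policy shown is hypothetical:

  #include "absl/base/internal/sysinfo.h"

  // Both accessors are documented as thread-safe.
  int PickThreadPoolSize() {
    // Hypothetical policy: one worker per logical processor.
    return absl::base_internal::NumCPUs();
  }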
diff --git a/absl/base/internal/sysinfo_test.cc b/absl/base/internal/sysinfo_test.cc
index 4c7d66b7f9c3..e0d9aab9bc2d 100644
--- a/absl/base/internal/sysinfo_test.cc
+++ b/absl/base/internal/sysinfo_test.cc
@@ -41,11 +41,10 @@ TEST(SysinfoTest, NominalCPUFrequency) {
   EXPECT_GE(NominalCPUFrequency(), 1000.0)
       << "NominalCPUFrequency() did not return a reasonable value";
 #else
-  // TODO(b/37919252): Aarch64 cannot read the CPU frequency from sysfs, so we
+  // TODO(absl-team): Aarch64 cannot read the CPU frequency from sysfs, so we
   // get back 1.0. Fix once the value is available.
   EXPECT_EQ(NominalCPUFrequency(), 1.0)
-      << "CPU frequency detection was fixed! Please update unittest and "
-         "b/37919252";
+      << "CPU frequency detection was fixed! Please update unittest.";
 #endif
 }
 
diff --git a/absl/base/internal/thread_identity.cc b/absl/base/internal/thread_identity.cc
index ee96a5883293..678e8568d742 100644
--- a/absl/base/internal/thread_identity.cc
+++ b/absl/base/internal/thread_identity.cc
@@ -48,12 +48,10 @@ void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) {
     ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
 // The actual TLS storage for a thread's currently associated ThreadIdentity.
 // This is referenced by inline accessors in the header.
-// "protected" visibility ensures that if multiple copies of //base exist in a
-// process (via dlopen() or similar), references to
-// thread_identity_ptr from each copy of the code will refer to
-// *different* instances of this ptr. See extensive discussion of this choice
-// in cl/90634708
-// TODO(ahh): hard deprecate multiple copies of //base; remove this.
+// "protected" visibility ensures that if multiple instances of Abseil code
+// exist within a process (via dlopen() or similar), references to
+// thread_identity_ptr from each instance of the code will refer to
+// *different* instances of this ptr.
 #ifdef __GNUC__
 __attribute__((visibility("protected")))
 #endif  // __GNUC__
@@ -70,7 +68,6 @@ void SetCurrentThreadIdentity(
   // NOTE: Not async-safe.  But can be open-coded.
   absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
                   reclaimer);
-  // b/18366710:
   // We must mask signals around the call to setspecific as with current glibc,
   // a concurrent getspecific (needed for GetCurrentThreadIdentityIfPresent())
   // may zero our value.
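
A sketch of the mask-signals-around-setspecific pattern that the comment above
describes. The key and identity variable names are illustrative; the file's
actual variables are not shown in this hunk:

  #include <pthread.h>
  #include <signal.h>

  // Block all signals, install the value, then restore the previous mask, so a
  // signal handler cannot observe (and zero) the slot while setspecific is
  // mid-update.
  void SetKeyWithSignalsMasked(pthread_key_t key, void* identity) {
    sigset_t all_signals, old_signals;
    sigfillset(&all_signals);
    pthread_sigmask(SIG_SETMASK, &all_signals, &old_signals);
    pthread_setspecific(key, identity);
    pthread_sigmask(SIG_SETMASK, &old_signals, nullptr);
  }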