author    misterg <misterg@google.com>    2017-09-19 20:54 -0400
committer misterg <misterg@google.com>    2017-09-19 20:54 -0400
commit    c2e754829628d1e9b7a16b3389cfdace76950fdf (patch)
tree      5a7f056f44e27c30e10025113b644f0b3b5801fc /absl/base/internal/thread_identity.h
Initial Commit
Diffstat (limited to 'absl/base/internal/thread_identity.h')
-rw-r--r--  absl/base/internal/thread_identity.h  240
1 file changed, 240 insertions(+), 0 deletions(-)
diff --git a/absl/base/internal/thread_identity.h b/absl/base/internal/thread_identity.h
new file mode 100644
index 000000000000..914d5da7ef40
--- /dev/null
+++ b/absl/base/internal/thread_identity.h
@@ -0,0 +1,240 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Each active thread has a ThreadIdentity that may represent the thread in
+// interfaces at various levels.  ThreadIdentity objects are never deallocated.
+// When a thread terminates, its ThreadIdentity object may be reused for a
+// thread created later.
+
+#ifndef ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+#define ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+
+#ifndef _WIN32
+#include <pthread.h>
+// Defines __GOOGLE_GRTE_VERSION__ (via glibc-specific features.h) when
+// supported.
+#include <unistd.h>
+#endif
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/internal/per_thread_tls.h"
+
+namespace absl {
+
+struct SynchLocksHeld;
+struct SynchWaitParams;
+
+namespace base_internal {
+
+class SpinLock;
+struct ThreadIdentity;
+
+// Used by the implementation of base::Mutex and base::CondVar.
+struct PerThreadSynch {
+  // The internal representation of base::Mutex and base::CondVar rely
+  // on the alignment of PerThreadSynch. Both store the address of the
+  // PerThreadSynch in the high-order bits of their internal state,
+// which means the low kLowZeroBits of the address of PerThreadSynch
+// must be zero (see the packing sketch after this struct).
+  static constexpr int kLowZeroBits = 8;
+  static constexpr int kAlignment = 1 << kLowZeroBits;
+
+  // Returns the associated ThreadIdentity.
+  // This can be implemented as a cast because we guarantee
+  // PerThreadSynch is the first element of ThreadIdentity.
+  ThreadIdentity* thread_identity() {
+    return reinterpret_cast<ThreadIdentity*>(this);
+  }
+
+  PerThreadSynch *next;  // Circular waiter queue; initialized to 0.
+  PerThreadSynch *skip;  // If non-zero, all entries in the Mutex queue
+                         // up to and including "skip" have the same
+                         // condition as this, and will be woken later.
+  bool may_skip;         // if false while on mutex queue, a mutex unlocker
+                         // is using this PerThreadSynch as a terminator.  Its
+                         // skip field must not be filled in because the loop
+                         // might then skip over the terminator.
+
+  // The wait parameters of the current wait.  waitp is null if the
+  // thread is not waiting. Transitions from null to non-null must
+  // occur before the enqueue commit point (state = kQueued in
+  // Enqueue() and CondVarEnqueue()). Transitions from non-null to
+  // null must occur after the wait is finished (state = kAvailable in
+  // Mutex::Block() and CondVar::WaitCommon()). This field may be
+  // changed only by the thread that describes this PerThreadSynch.  A
+  // special case is Fer(), which calls Enqueue() on another thread,
+  // but with an identical SynchWaitParams pointer, thus leaving the
+  // pointer unchanged.
+  SynchWaitParams *waitp;
+
+  bool suppress_fatal_errors;  // If true, try to proceed even in the face of
+                               // broken invariants.  This is used within fatal
+                               // signal handlers to improve the chances of
+                               // debug logging information being output
+                               // successfully.
+
+  intptr_t readers;     // Number of readers in mutex.
+  int priority;         // Priority of thread (updated every so often).
+
+  // When priority will next be read (cycles).
+  int64_t next_priority_read_cycles;
+
+  // State values:
+  //   kAvailable: This PerThreadSynch is available.
+  //   kQueued: This PerThreadSynch is unavailable; it's currently queued on a
+  //            Mutex or CondVar waiter list.
+  //
+  // Transitions from kQueued to kAvailable require a release
+  // barrier. This is needed as a waiter may use "state" to
+  // independently observe that it's no longer queued.
+  //
+  // Transitions from kAvailable to kQueued require no barrier, they
+  // are externally ordered by the Mutex.
+  enum State {
+    kAvailable,
+    kQueued
+  };
+  std::atomic<State> state;
+
+  bool maybe_unlocking;  // Valid at head of Mutex waiter queue;
+                         // true if UnlockSlow could be searching
+                         // for a waiter to wake.  Used for an optimization
+                         // in Enqueue().  true is always a valid value.
+                         // Can be reset to false when the unlocker or any
+                         // writer releases the lock, or a reader fully releases
+                         // the lock.  It may not be set to false by a reader
+                         // that decrements the count to non-zero.
+                         // Protected by the Mutex spinlock.
+
+  bool wake;  // This thread is to be woken from a Mutex.
+
+  // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
+  // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
+  //
+  // The value of "x->cond_waiter" is meaningless if "x" is not on a
+  // Mutex waiter list.
+  bool cond_waiter;
+
+  // Locks held; used during deadlock detection.
+  // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
+  SynchLocksHeld *all_locks;
+};
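
The pointer-packing idea behind kLowZeroBits is easier to see with a concrete sketch. The helpers below are illustrative only, not the actual Mutex state encoding; they simply show that a kAlignment-aligned address leaves its low kLowZeroBits bits free to carry flags.

// Illustrative only -- not part of this header and not Mutex's real encoding.
constexpr intptr_t kWaiterFlagMask =
    (intptr_t{1} << PerThreadSynch::kLowZeroBits) - 1;  // low 8 bits

inline intptr_t PackWaiterState(PerThreadSynch* waiter, intptr_t flags) {
  // The waiter address occupies the high-order bits; the flags fit in the low
  // bits, which the kAlignment guarantee keeps at zero in the address itself.
  return reinterpret_cast<intptr_t>(waiter) | (flags & kWaiterFlagMask);
}

inline PerThreadSynch* UnpackWaiter(intptr_t state) {
  return reinterpret_cast<PerThreadSynch*>(state & ~kWaiterFlagMask);
}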
+
+struct ThreadIdentity {
+  // Must be the first member.  The Mutex implementation requires that
+  // the PerThreadSynch object associated with each thread is
+  // PerThreadSynch::kAlignment aligned.  We provide this alignment on
+  // ThreadIdentity itself (see the layout sketch after this struct).
+  PerThreadSynch per_thread_synch;
+
+  // Private: Reserved for absl::synchronization_internal::Waiter.
+  struct WaiterState {
+    char data[128];
+  } waiter_state;
+
+  // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
+  std::atomic<int>* blocked_count_ptr;
+
+  // The following variables are mostly read/written just by the
+  // thread itself.  The only exception is that these are read by
+  // a ticker thread as a hint.
+  std::atomic<int> ticker;      // Tick counter, incremented once per second.
+  std::atomic<int> wait_start;  // Ticker value when thread started waiting.
+  std::atomic<bool> is_idle;    // Has thread become idle yet?
+
+  ThreadIdentity* next;
+};
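
The layout contract stated above can be spelled out directly. The check and helper below are illustrative only and assume, as the comments say, that per_thread_synch is the first member and that the object is allocated with PerThreadSynch::kAlignment alignment.

// Illustrative only -- not part of this header.
static_assert(PerThreadSynch::kAlignment == 256,
              "kLowZeroBits == 8 implies 256-byte alignment");

inline bool IdentityRoundTripsSketch(ThreadIdentity* identity) {
  PerThreadSynch* synch = &identity->per_thread_synch;  // same address as identity
  return synch->thread_identity() == identity;          // the cast recovers it
}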
+
+// Returns the ThreadIdentity object representing the calling thread, which is
+// guaranteed to be unique for the thread's lifetime.  The returned object
+// remains valid for the program's lifetime, although it may be re-assigned to
+// a subsequent thread.  If no identity exists yet, returns nullptr instead.
+//
+// Does not malloc(*), and is async-signal safe.
+// [*] Technically pthread_setspecific() does malloc on first use; however,
+// this is already handled internally within tcmalloc's initialization.
+//
+// New ThreadIdentity objects can be constructed and associated with a thread
+// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
+ThreadIdentity* CurrentThreadIdentityIfPresent();
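
A hedged usage sketch (the helper name is hypothetical): a thread that has never touched the synchronization machinery may have no identity yet, so callers must handle a null result.

// Hypothetical caller, illustrative only.
inline int BlockedCountOrZero() {
  ThreadIdentity* id = CurrentThreadIdentityIfPresent();
  if (id == nullptr) return 0;        // No identity exists for this thread yet.
  std::atomic<int>* counter = id->blocked_count_ptr;
  if (counter == nullptr) return 0;   // No blocked counter installed.
  return counter->load(std::memory_order_relaxed);
}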
+
+using ThreadIdentityReclaimerFunction = void (*)(void*);
+
+// Sets the current thread identity to the given value.  'reclaimer' is a
+// pointer to the global function for cleaning up instances on thread
+// destruction.
+void SetCurrentThreadIdentity(ThreadIdentity* identity,
+                              ThreadIdentityReclaimerFunction reclaimer);
+
+// Removes the currently associated ThreadIdentity from the running thread.
+// This must be called from inside the ThreadIdentityReclaimerFunction, and only
+// from that function.
+void ClearCurrentThreadIdentity();
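
To illustrate the contract of the two functions above (the names below are invented; the real wiring lives in the synchronization internals, e.g. per-thread-sem): the reclaimer receives the identity as a void*, must be the one to call ClearCurrentThreadIdentity(), and may recycle the object but never frees it.

// Hypothetical, illustrative wiring only.
inline void ExampleReclaimer(void* v) {
  ThreadIdentity* identity = static_cast<ThreadIdentity*>(v);
  ClearCurrentThreadIdentity();  // Legal only from inside the reclaimer.
  // Recycle `identity` (e.g. push it onto a freelist); ThreadIdentity objects
  // are never deallocated, only reused by later threads.
  (void)identity;
}

inline void ExampleAttach(ThreadIdentity* identity) {
  SetCurrentThreadIdentity(identity, &ExampleReclaimer);
}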
+
+// The thread-identity mode may be chosen at compile time via:
+//   -DABSL_FORCE_THREAD_IDENTITY_MODE=<mode index>
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS
+#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE
+#error ABSL_THREAD_IDENTITY_MODE cannot be directly set
+#elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE
+#elif defined(_WIN32)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
+    (__GOOGLE_GRTE_VERSION__ >= 20140228L)
+// Support for async-safe TLS was specifically added in GRTEv4.  It's not
+// present in the upstream eglibc.
+// Note:  Current default for production systems.
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS
+#else
+#define ABSL_THREAD_IDENTITY_MODE \
+  ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#endif
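
As a usage note, forcing a mode from the build line uses the documented flag; for example, -DABSL_FORCE_THREAD_IDENTITY_MODE=2 pins the C++11 mode (index 2 above). The check below is illustrative only.

// Illustrative only: when a mode is forced, the #elif chain above takes the
// forced value verbatim.
#if defined(ABSL_FORCE_THREAD_IDENTITY_MODE)
static_assert(ABSL_THREAD_IDENTITY_MODE == ABSL_FORCE_THREAD_IDENTITY_MODE,
              "a forced mode is adopted unchanged");
#endif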
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+
+extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr;
+
+inline ThreadIdentity* CurrentThreadIdentityIfPresent() {
+  return thread_identity_ptr;
+}
+
+#elif ABSL_THREAD_IDENTITY_MODE != \
+    ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#error Unknown ABSL_THREAD_IDENTITY_MODE
+#endif
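
There is no inline definition here for the POSIX_SETSPECIFIC mode; that lookup lives in the implementation file. As a rough sketch only (the key name is hypothetical and this is not the actual Abseil implementation), such a lookup could be built on pthread_getspecific:

#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
// Hypothetical process-wide key, created once with pthread_key_create().
extern pthread_key_t thread_identity_key;

inline ThreadIdentity* CurrentThreadIdentityIfPresentSketch() {
  // Returns null until SetCurrentThreadIdentity() has stored the pointer for
  // this thread (e.g. via pthread_setspecific).
  return static_cast<ThreadIdentity*>(
      pthread_getspecific(thread_identity_key));
}
#endif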
+
+}  // namespace base_internal
+}  // namespace absl
+#endif  // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_