Diffstat (limited to 'absl/debugging')
-rw-r--r--  absl/debugging/BUILD.bazel                               | 170
-rw-r--r--  absl/debugging/internal/address_is_readable.cc           | 134
-rw-r--r--  absl/debugging/internal/address_is_readable.h            |  29
-rw-r--r--  absl/debugging/internal/elf_mem_image.cc                 | 397
-rw-r--r--  absl/debugging/internal/elf_mem_image.h                  | 125
-rw-r--r--  absl/debugging/internal/stacktrace_aarch64-inl.inc       | 181
-rw-r--r--  absl/debugging/internal/stacktrace_arm-inl.inc           | 115
-rw-r--r--  absl/debugging/internal/stacktrace_config.h              |  76
-rw-r--r--  absl/debugging/internal/stacktrace_generic-inl.inc       |  51
-rw-r--r--  absl/debugging/internal/stacktrace_libunwind-inl.inc     | 128
-rw-r--r--  absl/debugging/internal/stacktrace_powerpc-inl.inc       | 234
-rw-r--r--  absl/debugging/internal/stacktrace_unimplemented-inl.inc |  14
-rw-r--r--  absl/debugging/internal/stacktrace_win32-inl.inc         |  75
-rw-r--r--  absl/debugging/internal/stacktrace_x86-inl.inc           | 327
-rw-r--r--  absl/debugging/internal/vdso_support.cc                  | 177
-rw-r--r--  absl/debugging/internal/vdso_support.h                   | 155
-rw-r--r--  absl/debugging/leak_check.cc                             |  35
-rw-r--r--  absl/debugging/leak_check.h                              | 111
-rw-r--r--  absl/debugging/leak_check_disable.cc                     |  20
-rw-r--r--  absl/debugging/leak_check_fail_test.cc                   |  41
-rw-r--r--  absl/debugging/leak_check_test.cc                        |  41
-rw-r--r--  absl/debugging/stacktrace.cc                             | 133
-rw-r--r--  absl/debugging/stacktrace.h                              | 160
23 files changed, 2929 insertions(+), 0 deletions(-)
diff --git a/absl/debugging/BUILD.bazel b/absl/debugging/BUILD.bazel
new file mode 100644
index 000000000000..84acf9f99e1f
--- /dev/null
+++ b/absl/debugging/BUILD.bazel
@@ -0,0 +1,170 @@
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+load(
+    "//absl:copts.bzl",
+    "ABSL_DEFAULT_COPTS",
+)
+
+package(
+    default_visibility = ["//visibility:public"],
+)
+
+licenses(["notice"])  # Apache 2.0
+
+cc_library(
+    name = "stacktrace",
+    srcs = [
+        "stacktrace.cc",
+    ],
+    hdrs = ["stacktrace.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    deps = [
+        ":debugging_internal",
+        "//absl/base",
+        "//absl/base:core_headers",
+    ],
+)
+
+cc_library(
+    name = "debugging_internal",
+    srcs = [
+        "internal/address_is_readable.cc",
+        "internal/elf_mem_image.cc",
+        "internal/vdso_support.cc",
+    ],
+    hdrs = [
+        "internal/address_is_readable.h",
+        "internal/elf_mem_image.h",
+        "internal/stacktrace_aarch64-inl.inc",
+        "internal/stacktrace_arm-inl.inc",
+        "internal/stacktrace_config.h",
+        "internal/stacktrace_generic-inl.inc",
+        "internal/stacktrace_libunwind-inl.inc",
+        "internal/stacktrace_powerpc-inl.inc",
+        "internal/stacktrace_unimplemented-inl.inc",
+        "internal/stacktrace_win32-inl.inc",
+        "internal/stacktrace_x86-inl.inc",
+        "internal/vdso_support.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    deps = [
+        "//absl/base",
+        "//absl/base:dynamic_annotations",
+        "//absl/base:core_headers",
+    ],
+)
+
+cc_library(
+    name = "leak_check",
+    srcs = select({
+        # The leak checking interface depends on weak function
+        # declarations that may not necessarily have definitions.
+        # Windows doesn't support this, and iOS requires
+        # guaranteed definitions for weak symbols.
+        "//absl:ios": [],
+        "//absl:windows": [],
+        "//conditions:default": [
+            "leak_check.cc",
+        ],
+    }),
+    hdrs = select({
+        "//absl:ios": [],
+        "//absl:windows": [],
+        "//conditions:default": ["leak_check.h"],
+    }),
+    deps = ["//absl/base:core_headers"],
+)
+
+# Adding a dependency on leak_check_disable will disable
+# sanitizer leak checking (asan/lsan) in a test without
+# the need to mess around with build features.
+cc_library(
+    name = "leak_check_disable",
+    srcs = ["leak_check_disable.cc"],
+    linkstatic = 1,
+    alwayslink = 1,
+)
+
+# These targets exist for use in tests only, explicitly configuring the
+# LEAK_SANITIZER macro. They must be linked with -fsanitize=leak for lsan.
+ABSL_LSAN_LINKOPTS = select({
+    "//absl:llvm_compiler": ["-fsanitize=leak"],
+    "//conditions:default": [],
+})
+
+cc_library(
+    name = "leak_check_api_enabled_for_testing",
+    testonly = 1,
+    srcs = ["leak_check.cc"],
+    hdrs = ["leak_check.h"],
+    copts = select({
+        "//absl:llvm_compiler": ["-DLEAK_SANITIZER"],
+        "//conditions:default": [],
+    }),
+    visibility = ["//visibility:private"],
+)
+
+cc_library(
+    name = "leak_check_api_disabled_for_testing",
+    testonly = 1,
+    srcs = ["leak_check.cc"],
+    hdrs = ["leak_check.h"],
+    copts = ["-ULEAK_SANITIZER"],
+    visibility = ["//visibility:private"],
+)
+
+cc_test(
+    name = "leak_check_test",
+    srcs = ["leak_check_test.cc"],
+    copts = select({
+        "//absl:llvm_compiler": ["-DABSL_EXPECT_LEAK_SANITIZER"],
+        "//conditions:default": [],
+    }),
+    linkopts = ABSL_LSAN_LINKOPTS,
+    deps = [
+        ":leak_check_api_enabled_for_testing",
+        "//absl/base",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "leak_check_no_lsan_test",
+    srcs = ["leak_check_test.cc"],
+    copts = ["-UABSL_EXPECT_LEAK_SANITIZER"],
+    deps = [
+        ":leak_check_api_disabled_for_testing",
+        "//absl/base",  # for raw_logging
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+# Test that leak checking is skipped when lsan is enabled but
+# ":leak_check_disable" is linked in.
+#
+# This test should fail in the absence of a dependency on ":leak_check_disable".
+cc_test(
+    name = "disabled_leak_check_test",
+    srcs = ["leak_check_fail_test.cc"],
+    linkopts = ABSL_LSAN_LINKOPTS,
+    deps = [
+        ":leak_check_api_enabled_for_testing",
+        ":leak_check_disable",
+        "//absl/base",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
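
The leak-check targets above compile the same sources with and without the
LEAK_SANITIZER macro. As a rough usage sketch (assuming the absl::IgnoreLeak
and absl::HaveLeakSanitizer declarations from leak_check.h, whose body is not
shown in this section), a test that deliberately leaks but suppresses the
report might look like:

#include <iostream>

#include "absl/debugging/leak_check.h"

int main() {
  char* buffer = new char[64];  // deliberately never deleted
  absl::IgnoreLeak(buffer);     // tell LSan not to report this allocation
  std::cout << "LSan available: " << absl::HaveLeakSanitizer() << "\n";
  return 0;
}
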
diff --git a/absl/debugging/internal/address_is_readable.cc b/absl/debugging/internal/address_is_readable.cc
new file mode 100644
index 000000000000..037ea54c3385
--- /dev/null
+++ b/absl/debugging/internal/address_is_readable.cc
@@ -0,0 +1,134 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// absl::debug_internal::AddressIsReadable() probes an address to see whether
+// it is readable, without faulting.
+
+#include "absl/debugging/internal/address_is_readable.h"
+
+#if !defined(__linux__) || defined(__ANDROID__)
+
+namespace absl {
+namespace debug_internal {
+
+// On platforms other than Linux, just return true.
+bool AddressIsReadable(const void* /* addr */) { return true; }
+
+}  // namespace debug_internal
+}  // namespace absl
+
+#else
+
+#include <fcntl.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include <atomic>
+#include <cerrno>
+#include <cstdint>
+
+#include "absl/base/internal/raw_logging.h"
+
+namespace absl {
+namespace debug_internal {
+
+// Pack a pid and two file descriptors into a 64-bit word,
+// using 16, 24, and 24 bits for each respectively.
+static uint64_t Pack(uint64_t pid, uint64_t read_fd, uint64_t write_fd) {
+  ABSL_RAW_CHECK((read_fd >> 24) == 0 && (write_fd >> 24) == 0,
+                 "fd out of range");
+  return (pid << 48) | ((read_fd & 0xffffff) << 24) | (write_fd & 0xffffff);
+}
+
+// Unpack x into a pid and two file descriptors, where x was created with
+// Pack().
+static void Unpack(uint64_t x, int *pid, int *read_fd, int *write_fd) {
+  *pid = x >> 48;
+  *read_fd = (x >> 24) & 0xffffff;
+  *write_fd = x & 0xffffff;
+}
+
+// Returns whether the byte at *addr is readable, without faulting.
+// Saves and restores errno.  Returns true on systems where the check
+// is unimplemented.
+// pid_and_fds is namespace-scoped for correct zero-initialization.
+static std::atomic<uint64_t> pid_and_fds;  // initially 0, an invalid pid.
+bool AddressIsReadable(const void *addr) {
+  int save_errno = errno;
+  // We test whether a byte is readable by using write().  Normally, this would
+  // be done via a cached file descriptor to /dev/null, but linux fails to
+  // check whether the byte is readable when the destination is /dev/null, so
+  // we use a cached pipe.  We store the pid of the process that created the
+  // pipe to handle the case where a process forks, and the child closes all
+  // the file descriptors and then calls this routine.  This is not perfect:
+  // the child could use the routine, then close all file descriptors and then
+  // use this routine again.  But the likely use of this routine is when
+  // crashing, to test the validity of pages when dumping the stack.  Beware
+  // that we may leak file descriptors, but we're unlikely to leak many.
+  int bytes_written;
+  int current_pid = getpid() & 0xffff;   // we use only the low order 16 bits
+  do {  // until we do not get EBADF trying to use file descriptors
+    int pid;
+    int read_fd;
+    int write_fd;
+    uint64_t local_pid_and_fds = pid_and_fds.load(std::memory_order_relaxed);
+    Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd);
+    while (current_pid != pid) {
+      int p[2];
+      // new pipe
+      if (pipe(p) != 0) {
+        ABSL_RAW_LOG(FATAL, "Failed to create pipe, errno=%d", errno);
+      }
+      fcntl(p[0], F_SETFD, FD_CLOEXEC);
+      fcntl(p[1], F_SETFD, FD_CLOEXEC);
+      uint64_t new_pid_and_fds = Pack(current_pid, p[0], p[1]);
+      if (pid_and_fds.compare_exchange_strong(
+              local_pid_and_fds, new_pid_and_fds, std::memory_order_relaxed,
+              std::memory_order_relaxed)) {
+        local_pid_and_fds = new_pid_and_fds;  // fds exposed to other threads
+      } else {  // fds not exposed to other threads; we can close them.
+        close(p[0]);
+        close(p[1]);
+        local_pid_and_fds = pid_and_fds.load(std::memory_order_relaxed);
+      }
+      Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd);
+    }
+    errno = 0;
+    // Use syscall(SYS_write, ...) instead of write() to prevent ASAN
+    // and other checkers from complaining about accesses to arbitrary
+    // memory.
+    do {
+      bytes_written = syscall(SYS_write, write_fd, addr, 1);
+    } while (bytes_written == -1 && errno == EINTR);
+    if (bytes_written == 1) {   // remove the byte from the pipe
+      char c;
+      while (read(read_fd, &c, 1) == -1 && errno == EINTR) {
+      }
+    }
+    if (errno == EBADF) {  // Descriptors invalid.
+      // If pid_and_fds contains the problematic file descriptors we just used,
+      // this call will forget them, and the loop will try again.
+      pid_and_fds.compare_exchange_strong(local_pid_and_fds, 0,
+                                          std::memory_order_relaxed,
+                                          std::memory_order_relaxed);
+    }
+  } while (errno == EBADF);
+  errno = save_errno;
+  return bytes_written == 1;
+}
+
+}  // namespace debug_internal
+}  // namespace absl
+
+#endif
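
The 16/24/24-bit packing above is easiest to see with concrete numbers. A
standalone restatement, illustrative only and independent of the file above:

#include <cassert>
#include <cstdint>

int main() {
  // pid in bits 48..63, read_fd in bits 24..47, write_fd in bits 0..23.
  uint64_t packed = (uint64_t{0x1234} << 48) | (uint64_t{5} << 24) | 6;
  assert((packed >> 48) == 0x1234);          // pid
  assert(((packed >> 24) & 0xffffff) == 5);  // read_fd
  assert((packed & 0xffffff) == 6);          // write_fd
  return 0;
}
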
diff --git a/absl/debugging/internal/address_is_readable.h b/absl/debugging/internal/address_is_readable.h
new file mode 100644
index 000000000000..a8b32923550c
--- /dev/null
+++ b/absl/debugging/internal/address_is_readable.h
@@ -0,0 +1,29 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_
+#define ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_
+
+namespace absl {
+namespace debug_internal {
+
+// Returns whether the byte at *addr is readable, without faulting.
+// Saves and restores errno.
+bool AddressIsReadable(const void *addr);
+
+}  // namespace debug_internal
+}  // namespace absl
+
+#endif  // ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_
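
A hedged usage sketch of this interface. On non-Linux builds the stub in
address_is_readable.cc returns true unconditionally, so the second probe only
reports false on Linux:

#include <cstdio>

#include "absl/debugging/internal/address_is_readable.h"

int main() {
  int local = 0;
  // A live stack address should be readable; the null page normally is not.
  std::printf("stack: %d\n", absl::debug_internal::AddressIsReadable(&local));
  std::printf("null:  %d\n", absl::debug_internal::AddressIsReadable(nullptr));
  return 0;
}
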
diff --git a/absl/debugging/internal/elf_mem_image.cc b/absl/debugging/internal/elf_mem_image.cc
new file mode 100644
index 000000000000..f6c6bc07d511
--- /dev/null
+++ b/absl/debugging/internal/elf_mem_image.cc
@@ -0,0 +1,397 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Allow dynamic symbol lookup in an in-memory Elf image.
+//
+
+#include "absl/debugging/internal/elf_mem_image.h"
+
+#ifdef ABSL_HAVE_ELF_MEM_IMAGE  // defined in elf_mem_image.h
+
+#include <string.h>
+#include <cassert>
+#include <cstddef>
+#include "absl/base/internal/raw_logging.h"
+
+// From binutils/include/elf/common.h (this doesn't appear to be documented
+// anywhere else).
+//
+//   /* This flag appears in a Versym structure.  It means that the symbol
+//      is hidden, and is only visible with an explicit version number.
+//      This is a GNU extension.  */
+//   #define VERSYM_HIDDEN           0x8000
+//
+//   /* This is the mask for the rest of the Versym information.  */
+//   #define VERSYM_VERSION          0x7fff
+
+#define VERSYM_VERSION 0x7fff
+
+namespace absl {
+namespace debug_internal {
+
+namespace {
+
+#if __WORDSIZE == 32
+const int kElfClass = ELFCLASS32;
+int ElfBind(const ElfW(Sym) *symbol) { return ELF32_ST_BIND(symbol->st_info); }
+int ElfType(const ElfW(Sym) *symbol) { return ELF32_ST_TYPE(symbol->st_info); }
+#elif __WORDSIZE == 64
+const int kElfClass = ELFCLASS64;
+int ElfBind(const ElfW(Sym) *symbol) { return ELF64_ST_BIND(symbol->st_info); }
+int ElfType(const ElfW(Sym) *symbol) { return ELF64_ST_TYPE(symbol->st_info); }
+#else
+const int kElfClass = -1;
+int ElfBind(const ElfW(Sym) *) {
+  ABSL_RAW_LOG(FATAL, "Unexpected word size");
+  return 0;
+}
+int ElfType(const ElfW(Sym) *) {
+  ABSL_RAW_LOG(FATAL, "Unexpected word size");
+  return 0;
+}
+#endif
+
+// Extracts an element from one of the ELF tables and casts it to the desired
+// type. This is just pointer arithmetic and a glorified cast.
+// Callers are responsible for bounds checking.
+template <typename T>
+const T *GetTableElement(const ElfW(Ehdr) * ehdr, ElfW(Off) table_offset,
+                         ElfW(Word) element_size, size_t index) {
+  return reinterpret_cast<const T*>(reinterpret_cast<const char *>(ehdr)
+                                    + table_offset
+                                    + index * element_size);
+}
+
+}  // namespace
+
+const void *const ElfMemImage::kInvalidBase =
+    reinterpret_cast<const void *>(~0L);
+
+ElfMemImage::ElfMemImage(const void *base) {
+  ABSL_RAW_CHECK(base != kInvalidBase, "bad pointer");
+  Init(base);
+}
+
+int ElfMemImage::GetNumSymbols() const {
+  if (!hash_) {
+    return 0;
+  }
+  // See http://www.caldera.com/developers/gabi/latest/ch5.dynamic.html#hash
+  return hash_[1];
+}
+
+const ElfW(Sym) *ElfMemImage::GetDynsym(int index) const {
+  ABSL_RAW_CHECK(index < GetNumSymbols(), "index out of range");
+  return dynsym_ + index;
+}
+
+const ElfW(Versym) *ElfMemImage::GetVersym(int index) const {
+  ABSL_RAW_CHECK(index < GetNumSymbols(), "index out of range");
+  return versym_ + index;
+}
+
+const ElfW(Phdr) *ElfMemImage::GetPhdr(int index) const {
+  ABSL_RAW_CHECK(index < ehdr_->e_phnum, "index out of range");
+  return GetTableElement<ElfW(Phdr)>(ehdr_,
+                                     ehdr_->e_phoff,
+                                     ehdr_->e_phentsize,
+                                     index);
+}
+
+const char *ElfMemImage::GetDynstr(ElfW(Word) offset) const {
+  ABSL_RAW_CHECK(offset < strsize_, "offset out of range");
+  return dynstr_ + offset;
+}
+
+const void *ElfMemImage::GetSymAddr(const ElfW(Sym) *sym) const {
+  if (sym->st_shndx == SHN_UNDEF || sym->st_shndx >= SHN_LORESERVE) {
+    // Symbol corresponds to "special" (e.g. SHN_ABS) section.
+    return reinterpret_cast<const void *>(sym->st_value);
+  }
+  ABSL_RAW_CHECK(link_base_ < sym->st_value, "symbol out of range");
+  return GetTableElement<char>(ehdr_, 0, 1, sym->st_value) - link_base_;
+}
+
+const ElfW(Verdef) *ElfMemImage::GetVerdef(int index) const {
+  ABSL_RAW_CHECK(0 <= index && static_cast<size_t>(index) <= verdefnum_,
+                 "index out of range");
+  const ElfW(Verdef) *version_definition = verdef_;
+  while (version_definition->vd_ndx < index && version_definition->vd_next) {
+    const char *const version_definition_as_char =
+        reinterpret_cast<const char *>(version_definition);
+    version_definition =
+        reinterpret_cast<const ElfW(Verdef) *>(version_definition_as_char +
+                                               version_definition->vd_next);
+  }
+  return version_definition->vd_ndx == index ? version_definition : nullptr;
+}
+
+const ElfW(Verdaux) *ElfMemImage::GetVerdefAux(
+    const ElfW(Verdef) *verdef) const {
+  return reinterpret_cast<const ElfW(Verdaux) *>(verdef+1);
+}
+
+const char *ElfMemImage::GetVerstr(ElfW(Word) offset) const {
+  ABSL_RAW_CHECK(offset < strsize_, "offset out of range");
+  return dynstr_ + offset;
+}
+
+void ElfMemImage::Init(const void *base) {
+  ehdr_      = nullptr;
+  dynsym_    = nullptr;
+  dynstr_    = nullptr;
+  versym_    = nullptr;
+  verdef_    = nullptr;
+  hash_      = nullptr;
+  strsize_   = 0;
+  verdefnum_ = 0;
+  link_base_ = ~0L;  // Sentinel: PT_LOAD .p_vaddr can't possibly be this.
+  if (!base) {
+    return;
+  }
+  const intptr_t base_as_uintptr_t = reinterpret_cast<uintptr_t>(base);
+  // Fake VDSO has low bit set.
+  const bool fake_vdso = ((base_as_uintptr_t & 1) != 0);
+  base = reinterpret_cast<const void *>(base_as_uintptr_t & ~1);
+  const char *const base_as_char = reinterpret_cast<const char *>(base);
+  if (base_as_char[EI_MAG0] != ELFMAG0 || base_as_char[EI_MAG1] != ELFMAG1 ||
+      base_as_char[EI_MAG2] != ELFMAG2 || base_as_char[EI_MAG3] != ELFMAG3) {
+    assert(false);
+    return;
+  }
+  int elf_class = base_as_char[EI_CLASS];
+  if (elf_class != kElfClass) {
+    assert(false);
+    return;
+  }
+  switch (base_as_char[EI_DATA]) {
+    case ELFDATA2LSB: {
+      if (__LITTLE_ENDIAN != __BYTE_ORDER) {
+        assert(false);
+        return;
+      }
+      break;
+    }
+    case ELFDATA2MSB: {
+      if (__BIG_ENDIAN != __BYTE_ORDER) {
+        assert(false);
+        return;
+      }
+      break;
+    }
+    default: {
+      assert(false);
+      return;
+    }
+  }
+
+  ehdr_ = reinterpret_cast<const ElfW(Ehdr) *>(base);
+  const ElfW(Phdr) *dynamic_program_header = nullptr;
+  for (int i = 0; i < ehdr_->e_phnum; ++i) {
+    const ElfW(Phdr) *const program_header = GetPhdr(i);
+    switch (program_header->p_type) {
+      case PT_LOAD:
+        if (!~link_base_) {
+          link_base_ = program_header->p_vaddr;
+        }
+        break;
+      case PT_DYNAMIC:
+        dynamic_program_header = program_header;
+        break;
+    }
+  }
+  if (!~link_base_ || !dynamic_program_header) {
+    assert(false);
+    // Mark this image as not present. Can not recur infinitely.
+    Init(nullptr);
+    return;
+  }
+  ptrdiff_t relocation =
+      base_as_char - reinterpret_cast<const char *>(link_base_);
+  ElfW(Dyn) *dynamic_entry =
+      reinterpret_cast<ElfW(Dyn) *>(dynamic_program_header->p_vaddr +
+                                    relocation);
+  for (; dynamic_entry->d_tag != DT_NULL; ++dynamic_entry) {
+    ElfW(Xword) value = dynamic_entry->d_un.d_val;
+    if (fake_vdso) {
+      // A complication: in the real VDSO, dynamic entries are not relocated
+      // (it wasn't loaded by a dynamic loader). But when testing with a
+      // "fake" dlopen()ed vdso library, the loader relocates some (but
+      // not all!) of them before we get here.
+      if (dynamic_entry->d_tag == DT_VERDEF) {
+        // The only dynamic entry (of the ones we care about) libc-2.3.6
+        // loader doesn't relocate.
+        value += relocation;
+      }
+    } else {
+      // Real VDSO. Everything needs to be relocated.
+      value += relocation;
+    }
+    switch (dynamic_entry->d_tag) {
+      case DT_HASH:
+        hash_ = reinterpret_cast<ElfW(Word) *>(value);
+        break;
+      case DT_SYMTAB:
+        dynsym_ = reinterpret_cast<ElfW(Sym) *>(value);
+        break;
+      case DT_STRTAB:
+        dynstr_ = reinterpret_cast<const char *>(value);
+        break;
+      case DT_VERSYM:
+        versym_ = reinterpret_cast<ElfW(Versym) *>(value);
+        break;
+      case DT_VERDEF:
+        verdef_ = reinterpret_cast<ElfW(Verdef) *>(value);
+        break;
+      case DT_VERDEFNUM:
+        verdefnum_ = dynamic_entry->d_un.d_val;
+        break;
+      case DT_STRSZ:
+        strsize_ = dynamic_entry->d_un.d_val;
+        break;
+      default:
+        // Unrecognized entries explicitly ignored.
+        break;
+    }
+  }
+  if (!hash_ || !dynsym_ || !dynstr_ || !versym_ ||
+      !verdef_ || !verdefnum_ || !strsize_) {
+    assert(false);  // invalid VDSO
+    // Mark this image as not present. Can not recur infinitely.
+    Init(nullptr);
+    return;
+  }
+}
+
+bool ElfMemImage::LookupSymbol(const char *name,
+                               const char *version,
+                               int type,
+                               SymbolInfo *info_out) const {
+  for (const SymbolInfo& info : *this) {
+    if (strcmp(info.name, name) == 0 && strcmp(info.version, version) == 0 &&
+        ElfType(info.symbol) == type) {
+      if (info_out) {
+        *info_out = info;
+      }
+      return true;
+    }
+  }
+  return false;
+}
+
+bool ElfMemImage::LookupSymbolByAddress(const void *address,
+                                        SymbolInfo *info_out) const {
+  for (const SymbolInfo& info : *this) {
+    const char *const symbol_start =
+        reinterpret_cast<const char *>(info.address);
+    const char *const symbol_end = symbol_start + info.symbol->st_size;
+    if (symbol_start <= address && address < symbol_end) {
+      if (info_out) {
+        // Client wants to know details for that symbol (the usual case).
+        if (ElfBind(info.symbol) == STB_GLOBAL) {
+          // Strong symbol; just return it.
+          *info_out = info;
+          return true;
+        } else {
+          // Weak or local. Record it, but keep looking for a strong one.
+          *info_out = info;
+        }
+      } else {
+        // Client only cares if there is an overlapping symbol.
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+ElfMemImage::SymbolIterator::SymbolIterator(const void *const image, int index)
+    : index_(index), image_(image) {
+}
+
+const ElfMemImage::SymbolInfo *ElfMemImage::SymbolIterator::operator->() const {
+  return &info_;
+}
+
+const ElfMemImage::SymbolInfo& ElfMemImage::SymbolIterator::operator*() const {
+  return info_;
+}
+
+bool ElfMemImage::SymbolIterator::operator==(const SymbolIterator &rhs) const {
+  return this->image_ == rhs.image_ && this->index_ == rhs.index_;
+}
+
+bool ElfMemImage::SymbolIterator::operator!=(const SymbolIterator &rhs) const {
+  return !(*this == rhs);
+}
+
+ElfMemImage::SymbolIterator &ElfMemImage::SymbolIterator::operator++() {
+  this->Update(1);
+  return *this;
+}
+
+ElfMemImage::SymbolIterator ElfMemImage::begin() const {
+  SymbolIterator it(this, 0);
+  it.Update(0);
+  return it;
+}
+
+ElfMemImage::SymbolIterator ElfMemImage::end() const {
+  return SymbolIterator(this, GetNumSymbols());
+}
+
+void ElfMemImage::SymbolIterator::Update(int increment) {
+  const ElfMemImage *image = reinterpret_cast<const ElfMemImage *>(image_);
+  ABSL_RAW_CHECK(image->IsPresent() || increment == 0, "");
+  if (!image->IsPresent()) {
+    return;
+  }
+  index_ += increment;
+  if (index_ >= image->GetNumSymbols()) {
+    index_ = image->GetNumSymbols();
+    return;
+  }
+  const ElfW(Sym)    *symbol = image->GetDynsym(index_);
+  const ElfW(Versym) *version_symbol = image->GetVersym(index_);
+  ABSL_RAW_CHECK(symbol && version_symbol, "");
+  const char *const symbol_name = image->GetDynstr(symbol->st_name);
+  const ElfW(Versym) version_index = version_symbol[0] & VERSYM_VERSION;
+  const ElfW(Verdef) *version_definition = nullptr;
+  const char *version_name = "";
+  if (symbol->st_shndx == SHN_UNDEF) {
+    // Undefined symbols reference DT_VERNEED, not DT_VERDEF, and
+    // version_index could well be greater than verdefnum_, so calling
+    // GetVerdef(version_index) may trigger assertion.
+  } else {
+    version_definition = image->GetVerdef(version_index);
+  }
+  if (version_definition) {
+    // I am expecting 1 or 2 auxiliary entries: 1 for the version itself,
+    // optional 2nd if the version has a parent.
+    ABSL_RAW_CHECK(
+        version_definition->vd_cnt == 1 || version_definition->vd_cnt == 2,
+        "wrong number of entries");
+    const ElfW(Verdaux) *version_aux = image->GetVerdefAux(version_definition);
+    version_name = image->GetVerstr(version_aux->vda_name);
+  }
+  info_.name    = symbol_name;
+  info_.version = version_name;
+  info_.address = image->GetSymAddr(symbol);
+  info_.symbol  = symbol;
+}
+
+}  // namespace debug_internal
+}  // namespace absl
+
+#endif  // ABSL_HAVE_ELF_MEM_IMAGE
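
A sketch of driving this class against the vDSO on a Linux/glibc host
(getauxval requires glibc 2.16+; vdso_support.cc, included in this change but
not shown in this section, wraps exactly this kind of lookup):

#include <sys/auxv.h>  // getauxval, AT_SYSINFO_EHDR

#include <cstdio>

#include "absl/debugging/internal/elf_mem_image.h"

int main() {
  // The kernel maps the vDSO into every process; its base address is
  // published in the auxiliary vector.
  const void* vdso_base =
      reinterpret_cast<const void*>(getauxval(AT_SYSINFO_EHDR));
  absl::debug_internal::ElfMemImage image(vdso_base);
  if (!image.IsPresent()) return 1;
  for (const auto& symbol : image) {
    std::printf("%s@%s -> %p\n", symbol.name, symbol.version, symbol.address);
  }
  return 0;
}
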
diff --git a/absl/debugging/internal/elf_mem_image.h b/absl/debugging/internal/elf_mem_image.h
new file mode 100644
index 000000000000..7f3dbb971bd6
--- /dev/null
+++ b/absl/debugging/internal/elf_mem_image.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2017 The Abseil Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Allow dynamic symbol lookup for in-memory Elf images.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_ELF_MEM_IMAGE_H_
+#define ABSL_DEBUGGING_INTERNAL_ELF_MEM_IMAGE_H_
+
+// Including this will define the __GLIBC__ macro if glibc is being
+// used.
+#include <climits>
+
+// Maybe one day we can rewrite this file not to require the ELF
+// symbol extensions in glibc, but for now we need them.
+#ifdef ABSL_HAVE_ELF_MEM_IMAGE
+#error ABSL_HAVE_ELF_MEM_IMAGE cannot be directly set
+#endif
+
+#if defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) && \
+    !defined(__asmjs__)
+#define ABSL_HAVE_ELF_MEM_IMAGE 1
+#endif
+
+#if ABSL_HAVE_ELF_MEM_IMAGE
+
+#include <link.h>  // for ElfW
+
+namespace absl {
+namespace debug_internal {
+
+// An in-memory ELF image (may not exist on disk).
+class ElfMemImage {
+ public:
+  // Sentinel: there could never be an elf image at this address.
+  static const void *const kInvalidBase;
+
+  // Information about a single vdso symbol.
+  // All pointers are into .dynsym, .dynstr, or .text of the VDSO.
+  // Do not free() them or modify through them.
+  struct SymbolInfo {
+    const char      *name;      // E.g. "__vdso_getcpu"
+    const char      *version;   // E.g. "LINUX_2.6", could be ""
+                                // for unversioned symbol.
+    const void      *address;   // Relocated symbol address.
+    const ElfW(Sym) *symbol;    // Symbol in the dynamic symbol table.
+  };
+
+  // Supports iteration over all dynamic symbols.
+  class SymbolIterator {
+   public:
+    friend class ElfMemImage;
+    const SymbolInfo *operator->() const;
+    const SymbolInfo &operator*() const;
+    SymbolIterator& operator++();
+    bool operator!=(const SymbolIterator &rhs) const;
+    bool operator==(const SymbolIterator &rhs) const;
+   private:
+    SymbolIterator(const void *const image, int index);
+    void Update(int incr);
+    SymbolInfo info_;
+    int index_;
+    const void *const image_;
+  };
+
+
+  explicit ElfMemImage(const void *base);
+  void                 Init(const void *base);
+  bool                 IsPresent() const { return ehdr_ != nullptr; }
+  const ElfW(Phdr)*    GetPhdr(int index) const;
+  const ElfW(Sym)*     GetDynsym(int index) const;
+  const ElfW(Versym)*  GetVersym(int index) const;
+  const ElfW(Verdef)*  GetVerdef(int index) const;
+  const ElfW(Verdaux)* GetVerdefAux(const ElfW(Verdef) *verdef) const;
+  const char*          GetDynstr(ElfW(Word) offset) const;
+  const void*          GetSymAddr(const ElfW(Sym) *sym) const;
+  const char*          GetVerstr(ElfW(Word) offset) const;
+  int                  GetNumSymbols() const;
+
+  SymbolIterator begin() const;
+  SymbolIterator end() const;
+
+  // Look up versioned dynamic symbol in the image.
+  // Returns false if image is not present, or doesn't contain given
+  // symbol/version/type combination.
+  // If info_out is non-null, additional details are filled in.
+  bool LookupSymbol(const char *name, const char *version,
+                    int symbol_type, SymbolInfo *info_out) const;
+
+  // Find info about symbol (if any) which overlaps given address.
+  // Returns true if symbol was found; false if image isn't present
+  // or doesn't have a symbol overlapping given address.
+  // If info_out is non-null, additional details are filled in.
+  bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;
+
+ private:
+  const ElfW(Ehdr) *ehdr_;
+  const ElfW(Sym) *dynsym_;
+  const ElfW(Versym) *versym_;
+  const ElfW(Verdef) *verdef_;
+  const ElfW(Word) *hash_;
+  const char *dynstr_;
+  size_t strsize_;
+  size_t verdefnum_;
+  ElfW(Addr) link_base_;     // Link-time base (p_vaddr of first PT_LOAD).
+};
+
+}  // namespace debug_internal
+}  // namespace absl
+
+#endif  // ABSL_HAVE_ELF_MEM_IMAGE
+
+#endif  // ABSL_DEBUGGING_INTERNAL_ELF_MEM_IMAGE_H_
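
A minimal sketch of the lookup API declared above. The symbol and version
names here are illustrative only; actual vDSO contents vary by kernel and
architecture:

#include <link.h>  // ElfW, and STT_FUNC via <elf.h>

#include "absl/debugging/internal/elf_mem_image.h"

bool FindVdsoGetCpu(const absl::debug_internal::ElfMemImage& image,
                    const void** out_address) {
  absl::debug_internal::ElfMemImage::SymbolInfo info;
  if (image.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
    *out_address = info.address;
    return true;
  }
  return false;  // image absent, or no such symbol/version/type
}
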
diff --git a/absl/debugging/internal/stacktrace_aarch64-inl.inc b/absl/debugging/internal/stacktrace_aarch64-inl.inc
new file mode 100644
index 000000000000..c125ea29064f
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_aarch64-inl.inc
@@ -0,0 +1,181 @@
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
+
+// Generate stack tracer for aarch64
+
+#if defined(__linux__)
+#include <sys/mman.h>
+#include <ucontext.h>
+#include <unistd.h>
+#endif
+
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <iostream>
+
+#include "absl/debugging/internal/address_is_readable.h"
+#include "absl/debugging/internal/vdso_support.h"  // a no-op on non-elf or non-glibc systems
+#include "absl/debugging/stacktrace.h"
+
+static const uintptr_t kUnknownFrameSize = 0;
+
+#if defined(__linux__)
+// Returns the address of the VDSO __kernel_rt_sigreturn function, if present.
+static const unsigned char* GetKernelRtSigreturnAddress() {
+  constexpr uintptr_t kImpossibleAddress = 1;
+  static std::atomic<uintptr_t> memoized{kImpossibleAddress};
+  uintptr_t address = memoized.load(std::memory_order_relaxed);
+  if (address != kImpossibleAddress) {
+    return reinterpret_cast<const unsigned char*>(address);
+  }
+
+  address = reinterpret_cast<uintptr_t>(nullptr);
+
+#ifdef ABSL_HAVE_VDSO_SUPPORT
+  absl::debug_internal::VDSOSupport vdso;
+  if (vdso.IsPresent()) {
+    absl::debug_internal::VDSOSupport::SymbolInfo symbol_info;
+    if (!vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.6.39", STT_FUNC,
+                           &symbol_info) ||
+        symbol_info.address == nullptr) {
+      // Unexpected: VDSO is present, yet the expected symbol is missing
+      // or null.
+      assert(false && "VDSO is present, but doesn't have expected symbol");
+    } else {
+      if (reinterpret_cast<uintptr_t>(symbol_info.address) !=
+          kImpossibleAddress) {
+        address = reinterpret_cast<uintptr_t>(symbol_info.address);
+      } else {
+        assert(false && "VDSO returned invalid address");
+      }
+    }
+  }
+#endif
+
+  memoized.store(address, std::memory_order_relaxed);
+  return reinterpret_cast<const unsigned char*>(address);
+}
+#endif  // __linux__
+
+// Compute the size of a stack frame in [low..high).  We assume that
+// low < high.  Returns the size, or kUnknownFrameSize if low >= high.
+template<typename T>
+static inline uintptr_t ComputeStackFrameSize(const T* low,
+                                              const T* high) {
+  const char* low_char_ptr = reinterpret_cast<const char *>(low);
+  const char* high_char_ptr = reinterpret_cast<const char *>(high);
+  return low < high ? high_char_ptr - low_char_ptr : kUnknownFrameSize;
+}
+
+// Given a pointer to a stack frame, locate and return the calling
+// stackframe, or return null if no stackframe can be found. Perform sanity
+// checks (the strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template<bool STRICT_UNWINDING, bool WITH_CONTEXT>
+static void **NextStackFrame(void **old_frame_pointer, const void *uc) {
+  void **new_frame_pointer = reinterpret_cast<void**>(*old_frame_pointer);
+  bool check_frame_size = true;
+
+#if defined(__linux__)
+  if (WITH_CONTEXT && uc != nullptr) {
+    // Check to see if next frame's return address is __kernel_rt_sigreturn.
+    if (old_frame_pointer[1] == GetKernelRtSigreturnAddress()) {
+      const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
+      // old_frame_pointer[0] is not suitable for unwinding; look at the
+      // ucontext to discover the frame pointer before the signal.
+      void **const pre_signal_frame_pointer =
+          reinterpret_cast<void **>(ucv->uc_mcontext.regs[29]);
+
+      // Check that alleged frame pointer is actually readable. This is to
+      // prevent "double fault" in case we hit the first fault due to e.g.
+      // stack corruption.
+      if (!absl::debug_internal::AddressIsReadable(
+              pre_signal_frame_pointer))
+        return nullptr;
+
+      // Alleged frame pointer is readable, use it for further unwinding.
+      new_frame_pointer = pre_signal_frame_pointer;
+
+      // Skip the frame size check if we return from a signal. We may be
+      // using an alternate stack for signals.
+      check_frame_size = false;
+    }
+  }
+#endif
+
+  // aarch64 ABI requires stack pointer to be 16-byte-aligned.
+  if ((reinterpret_cast<uintptr_t>(new_frame_pointer) & 15) != 0)
+    return nullptr;
+
+  // Check frame size.  In strict mode, we assume frames to be under
+  // 100,000 bytes.  In non-strict mode, we relax the limit to 1MB.
+  if (check_frame_size) {
+    const uintptr_t max_size = STRICT_UNWINDING ? 100000 : 1000000;
+    const uintptr_t frame_size =
+        ComputeStackFrameSize(old_frame_pointer, new_frame_pointer);
+    if (frame_size == kUnknownFrameSize || frame_size > max_size)
+      return nullptr;
+  }
+
+  return new_frame_pointer;
+}
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+                      const void *ucp, int *min_dropped_frames) {
+#ifdef __GNUC__
+  void **frame_pointer = reinterpret_cast<void**>(__builtin_frame_address(0));
+#else
+# error reading the stack pointer is not yet supported on this platform.
+#endif
+
+  skip_count++;    // Skip the frame for this function.
+  int n = 0;
+
+  // The frame pointer points to the low address of a frame.  The first
+  // 64-bit word of a frame points to the next frame up the call chain,
+  // which normally is just after the high address of the current frame.
+  // The second word of a frame contains the return address to the caller.
+  // To find a pc value associated with the current frame, we need to go
+  // down a level in the call chain.  So we remember the return address of
+  // the last frame seen.  This does not work for the first stack frame,
+  // which belongs to UnwindImpl(), but we skip that frame anyway.
+  void* prev_return_address = nullptr;
+
+  while (frame_pointer && n < max_depth) {
+    // The absl::GetStackFrames routine is called when we are in some
+    // informational context (the failure signal handler for example).
+    // Use the non-strict unwinding rules to produce a stack trace
+    // that is as complete as possible (even if it contains a few bogus
+    // entries in some rare cases).
+    void **next_frame_pointer =
+        NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
+
+    if (skip_count > 0) {
+      skip_count--;
+    } else {
+      result[n] = prev_return_address;
+      if (IS_STACK_FRAMES) {
+        sizes[n] = ComputeStackFrameSize(frame_pointer, next_frame_pointer);
+      }
+      n++;
+    }
+    prev_return_address = frame_pointer[1];
+    frame_pointer = next_frame_pointer;
+  }
+  if (min_dropped_frames != nullptr) {
+    // Implementation detail: we clamp the maximum number of frames we are
+    // willing to count, so as not to spend too much time in the loop below.
+    const int kMaxUnwind = 200;
+    int j = 0;
+    for (; frame_pointer != nullptr && j < kMaxUnwind; j++) {
+      frame_pointer =
+          NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
+    }
+    *min_dropped_frames = j;
+  }
+  return n;
+}
+
+#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
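
All of the *-inl.inc unwinders are consumed through the public entry points in
stacktrace.h (part of this change, body not shown here). A typical call,
assuming the absl::GetStackTrace(result, max_depth, skip_count) signature:

#include <cstdio>

#include "absl/debugging/stacktrace.h"

void LogCurrentStack() {
  void* pcs[32];
  // Skip one frame so LogCurrentStack itself is not reported.
  int depth = absl::GetStackTrace(pcs, 32, /* skip_count = */ 1);
  for (int i = 0; i < depth; ++i) {
    std::printf("  %p\n", pcs[i]);
  }
}
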
diff --git a/absl/debugging/internal/stacktrace_arm-inl.inc b/absl/debugging/internal/stacktrace_arm-inl.inc
new file mode 100644
index 000000000000..566b6d34189b
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_arm-inl.inc
@@ -0,0 +1,115 @@
+// Copyright 2011 and onwards Google Inc.
+// All rights reserved.
+//
+// Author: Doug Kwan
+// This is inspired by Craig Silverstein's PowerPC stacktrace code.
+//
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_ARM_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_ARM_INL_H_
+
+#include <cstdint>
+
+#include "absl/debugging/stacktrace.h"
+
+// WARNING:
+// This only works if all your code is in either ARM or THUMB mode.  With
+// interworking, the frame pointer of the caller can either be in r11 (ARM
+// mode) or r7 (THUMB mode).  A callee only saves the frame pointer of its
+// mode in a fixed location on its stack frame.  If the caller is a different
+// mode, there is no easy way to find the frame pointer.  It can either be
+// still in the designated register or saved on stack along with other callee
+// saved registers.
+
+// Given a pointer to a stack frame, locate and return the calling
+// stackframe, or return nullptr if no stackframe can be found. Perform sanity
+// checks (the strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template<bool STRICT_UNWINDING>
+static void **NextStackFrame(void **old_sp) {
+  void **new_sp = (void**) old_sp[-1];
+
+  // Check that the transition from frame pointer old_sp to frame
+  // pointer new_sp isn't clearly bogus
+  if (STRICT_UNWINDING) {
+    // With the stack growing downwards, an older stack frame must be
+    // at a greater address than the current one.
+    if (new_sp <= old_sp) return nullptr;
+    // Assume stack frames larger than 100,000 bytes are bogus.
+    if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return nullptr;
+  } else {
+    // In the non-strict mode, allow discontiguous stack frames.
+    // (alternate-signal-stacks for example).
+    if (new_sp == old_sp) return nullptr;
+    // And allow frames up to about 1MB.
+    if ((new_sp > old_sp)
+        && ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return nullptr;
+  }
+  if ((uintptr_t)new_sp & (sizeof(void *) - 1)) return nullptr;
+  return new_sp;
+}
+
+// This ensures that absl::GetStackTrace sets up the Link Register properly.
+#ifdef __GNUC__
+void StacktraceArmDummyFunction() __attribute__((noinline));
+void StacktraceArmDummyFunction() { __asm__ volatile(""); }
+#else
+# error StacktraceArmDummyFunction() needs to be ported to this platform.
+#endif
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+                      const void * /* ucp */, int *min_dropped_frames) {
+#ifdef __GNUC__
+  void **sp = reinterpret_cast<void**>(__builtin_frame_address(0));
+#else
+# error reading the stack pointer is not yet supported on this platform.
+#endif
+
+  // On ARM, the return address is stored in the link register (r14).
+  // This is not saved on the stack frame of a leaf function.  To
+  // simplify code that reads return addresses, we call a dummy
+  // function so that the return address of this function is also
+  // stored in the stack frame.  This works at least for gcc.
+  StacktraceArmDummyFunction();
+
+  int n = 0;
+  while (sp && n < max_depth) {
+    // The absl::GetStackFrames routine is called when we are in some
+    // informational context (the failure signal handler for example).
+    // Use the non-strict unwinding rules to produce a stack trace
+    // that is as complete as possible (even if it contains a few bogus
+    // entries in some rare cases).
+    void **next_sp = NextStackFrame<!IS_STACK_FRAMES>(sp);
+
+    if (skip_count > 0) {
+      skip_count--;
+    } else {
+      result[n] = *sp;
+
+      if (IS_STACK_FRAMES) {
+        if (next_sp > sp) {
+          sizes[n] = (uintptr_t)next_sp - (uintptr_t)sp;
+        } else {
+          // A frame-size of 0 is used to indicate unknown frame size.
+          sizes[n] = 0;
+        }
+      }
+      n++;
+    }
+    sp = next_sp;
+  }
+  if (min_dropped_frames != nullptr) {
+    // Implementation detail: we clamp the maximum number of frames we are
+    // willing to count, so as not to spend too much time in the loop below.
+    const int kMaxUnwind = 200;
+    int j = 0;
+    for (; sp != nullptr && j < kMaxUnwind; j++) {
+      sp = NextStackFrame<!IS_STACK_FRAMES>(sp);
+    }
+    *min_dropped_frames = j;
+  }
+  return n;
+}
+
+#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_ARM_INL_H_
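
When frame sizes are wanted as well (the IS_STACK_FRAMES path above), the
corresponding public entry point is absl::GetStackFrames. A hedged sketch,
assuming its (result, sizes, max_depth, skip_count) signature:

#include <cstdio>

#include "absl/debugging/stacktrace.h"

void LogStackWithSizes() {
  void* pcs[32];
  int sizes[32];
  int depth = absl::GetStackFrames(pcs, sizes, 32, /* skip_count = */ 1);
  for (int i = 0; i < depth; ++i) {
    // sizes[i] == 0 means the frame size could not be determined.
    std::printf("  %p (frame %d bytes)\n", pcs[i], sizes[i]);
  }
}
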
diff --git a/absl/debugging/internal/stacktrace_config.h b/absl/debugging/internal/stacktrace_config.h
new file mode 100644
index 000000000000..c0df5bb067f1
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_config.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2017 The Abseil Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Defines ABSL_STACKTRACE_INL_HEADER to the *-inl.h containing the
+ * actual unwinder implementation.
+ * This header is "private" to stacktrace.cc.
+ * DO NOT include it into any other files.
+ */
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_
+
+// First, test platforms which only support a stub.
+#if ABSL_STACKTRACE_INL_HEADER
+#error ABSL_STACKTRACE_INL_HEADER cannot be directly set
+#elif defined(__native_client__) || defined(__APPLE__) || \
+    defined(__ANDROID__) || defined(__myriad2__) || defined(__asmjs__) || \
+    defined(__Fuchsia__) || defined(__GENCLAVE__) || \
+    defined(GOOGLE_UNSUPPORTED_OS_HERCULES)
+#define ABSL_STACKTRACE_INL_HEADER \
+    "absl/debugging/internal/stacktrace_unimplemented-inl.inc"
+
+// Next, test for Mips and Windows.
+// TODO(marmstrong): http://b/21334018: Mips case, remove the check for
+// ABSL_STACKTRACE_INL_HEADER.
+#elif defined(__mips__) && !defined(ABSL_STACKTRACE_INL_HEADER)
+#define ABSL_STACKTRACE_INL_HEADER \
+    "absl/debugging/internal/stacktrace_unimplemented-inl.inc"
+#elif defined(_WIN32)  // windows
+#define ABSL_STACKTRACE_INL_HEADER \
+    "absl/debugging/internal/stacktrace_win32-inl.inc"
+
+// Finally, test NO_FRAME_POINTER.
+#elif !defined(NO_FRAME_POINTER)
+# if defined(__i386__) || defined(__x86_64__)
+#define ABSL_STACKTRACE_INL_HEADER \
+    "absl/debugging/internal/stacktrace_x86-inl.inc"
+# elif defined(__ppc__) || defined(__PPC__)
+#define ABSL_STACKTRACE_INL_HEADER \
+    "absl/debugging/internal/stacktrace_powerpc-inl.inc"
+# elif defined(__aarch64__)
+#define ABSL_STACKTRACE_INL_HEADER \
+    "absl/debugging/internal/stacktrace_aarch64-inl.inc"
+# elif defined(__arm__)
+#define ABSL_STACKTRACE_INL_HEADER \
+    "absl/debugging/internal/stacktrace_arm-inl.inc"
+# endif
+#else  // defined(NO_FRAME_POINTER)
+# if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
+#define ABSL_STACKTRACE_INL_HEADER \
+    "absl/debugging/internal/stacktrace_unimplemented-inl.inc"
+# elif defined(__ppc__) || defined(__PPC__)
+//  Use glibc's backtrace.
+#define ABSL_STACKTRACE_INL_HEADER \
+    "absl/debugging/internal/stacktrace_generic-inl.inc"
+# elif defined(__arm__)
+#   error stacktrace without frame pointer is not supported on ARM
+# endif
+#endif  // NO_FRAME_POINTER
+
+#if !defined(ABSL_STACKTRACE_INL_HEADER)
+#error Not supported yet
+#endif
+
+#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_
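
This header only selects a file name; a sketch of how stacktrace.cc presumably
consumes the macro (stacktrace.cc is part of this change but not shown in this
section):

#include "absl/debugging/internal/stacktrace_config.h"

#if defined(ABSL_STACKTRACE_INL_HEADER)
// Textually include the platform-specific UnwindImpl() chosen above.
#include ABSL_STACKTRACE_INL_HEADER
#else
#error No stack-tracer implementation selected for this platform
#endif
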
diff --git a/absl/debugging/internal/stacktrace_generic-inl.inc b/absl/debugging/internal/stacktrace_generic-inl.inc
new file mode 100644
index 000000000000..de4881e36067
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_generic-inl.inc
@@ -0,0 +1,51 @@
+// Copyright 2000 - 2007 Google Inc.
+// All rights reserved.
+//
+// Author: Sanjay Ghemawat
+//
+// Portable implementation - just use glibc
+//
+// Note:  The glibc implementation may cause a call to malloc.
+// This can cause a deadlock in HeapProfiler.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_
+
+#include <execinfo.h>
+#include <cstring>
+
+#include "absl/debugging/stacktrace.h"
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+                      const void *ucp, int *min_dropped_frames) {
+  static const int kStackLength = 64;
+  void * stack[kStackLength];
+  int size;
+
+  size = backtrace(stack, kStackLength);
+  skip_count++;  // we want to skip the current frame as well
+  int result_count = size - skip_count;
+  if (result_count < 0)
+    result_count = 0;
+  if (result_count > max_depth)
+    result_count = max_depth;
+  for (int i = 0; i < result_count; i++)
+    result[i] = stack[i + skip_count];
+
+  if (IS_STACK_FRAMES) {
+    // No implementation for finding out the stack frame sizes yet.
+    memset(sizes, 0, sizeof(*sizes) * result_count);
+  }
+  if (min_dropped_frames != nullptr) {
+    if (size - skip_count - max_depth > 0) {
+      *min_dropped_frames = size - skip_count - max_depth;
+    } else {
+      *min_dropped_frames = 0;
+    }
+  }
+
+  return result_count;
+}
+
+#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_
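
A worked example of the bookkeeping above, with hypothetical numbers:

// backtrace() returns size = 64; the caller passed skip_count = 2, which
// becomes 3 after the increment for the UnwindImpl frame; with
// max_depth = 32:
//   result_count        = min(64 - 3, 32) = 32
//   *min_dropped_frames = 64 - 3 - 32     = 29
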
diff --git a/absl/debugging/internal/stacktrace_libunwind-inl.inc b/absl/debugging/internal/stacktrace_libunwind-inl.inc
new file mode 100644
index 000000000000..e9c2d26a5fe4
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_libunwind-inl.inc
@@ -0,0 +1,128 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_LIBUNWIND_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_LIBUNWIND_INL_H_
+
+// We only need local unwinder.
+#define UNW_LOCAL_ONLY
+
+extern "C" {
+#include "third_party/libunwind/include/libunwind.h"
+}
+#include "absl/debugging/stacktrace.h"
+
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/raw_logging.h"
+
+// Sometimes, we can try to get a stack trace from within a stack
+// trace, because we don't block signals inside libunwind (which would be too
+// expensive: the two extra system calls per stack trace do matter here).
+// That can cause a self-deadlock (as in http://b/5722312).
+// Protect against such reentrant call by failing to get a stack trace.
+//
+// We use __thread because this code is extremely low level -- it is
+// called while collecting stack traces from within malloc and mmap, and
+// thus cannot call anything which might call malloc or mmap itself.
+// In particular, using PerThread or STATIC_THREAD_LOCAL_POD
+// here will cause infinite recursion for at least dbg/piii builds with
+// crosstool-v12.
+static __thread int recursive;
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+                      const void *, int *min_dropped_frames) {
+  if (recursive) {
+    return 0;
+  }
+  ++recursive;
+
+  int n = 0;
+  if (IS_STACK_FRAMES) {
+    void *ip;
+    unw_cursor_t cursor;
+    unw_context_t uc;
+    unw_word_t sp = 0, next_sp = 0;
+
+    unw_getcontext(&uc);
+    ABSL_RAW_CHECK(unw_init_local(&cursor, &uc) >= 0, "unw_init_local failed");
+    skip_count++;         // Do not include current frame
+
+    while (skip_count--) {
+      if (unw_step(&cursor) <= 0) {
+        goto out;
+      }
+      if (unw_get_reg(&cursor, UNW_REG_SP, &next_sp)) {
+        goto out;
+      }
+    }
+
+    while (n < max_depth) {
+      if (unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *) &ip) < 0) {
+        break;
+      }
+      sizes[n] = 0;
+      result[n++] = ip;
+      if (unw_step(&cursor) <= 0) {
+        break;
+      }
+      sp = next_sp;
+      if (unw_get_reg(&cursor, UNW_REG_SP, &next_sp) < 0) {
+        break;
+      }
+      sizes[n - 1] = next_sp - sp;
+    }
+    if (min_dropped_frames != nullptr) {
+      // Implementation detail: we clamp the maximum number of frames we are
+      // willing to count, so as not to spend too much time in the loop below.
+      const int kMaxUnwind = 200;
+      int j = 0;
+      for (; j < kMaxUnwind; j++) {
+        if (unw_step(&cursor) < 0) {
+          break;
+        }
+      }
+      *min_dropped_frames = j;
+    }
+  } else {
+    skip_count++;  // Do not include current frame.
+    void **result_all = reinterpret_cast<void**>(
+        alloca(sizeof(void*) * (max_depth + skip_count)));
+    int rc = unw_backtrace(result_all, max_depth + skip_count);
+
+    if (rc > 0) {
+      // Tell MSan that result_all has been initialized. b/34965936.
+      ANNOTATE_MEMORY_IS_INITIALIZED(result_all, rc * sizeof(void*));
+    }
+
+    if (rc > skip_count) {
+      memcpy(result, &result_all[skip_count],
+             sizeof(void*) * (rc - skip_count));
+      n = rc - skip_count;
+    } else {
+      n = 0;
+    }
+
+    if (min_dropped_frames != nullptr) {
+      // Not implemented.
+      *min_dropped_frames = 0;
+    }
+  }
+
+ out:
+  --recursive;
+  return n;
+}
+
+#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_LIBUNWIND_INL_H_
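
The reentrancy guard used above, distilled to a standalone sketch (the
libunwind calls are elided; only the guard pattern is shown):

static __thread int recursive;  // per-thread, zero-initialized

int GuardedUnwind() {
  if (recursive) return 0;  // refuse a reentrant stack walk
  ++recursive;
  int n = 0;
  // ... collect frames with libunwind here ...
  --recursive;
  return n;
}
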
diff --git a/absl/debugging/internal/stacktrace_powerpc-inl.inc b/absl/debugging/internal/stacktrace_powerpc-inl.inc
new file mode 100644
index 000000000000..0628b285f6c1
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_powerpc-inl.inc
@@ -0,0 +1,234 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Produce stack trace.  I'm guessing (hoping!) the code is much like
+// for x86.  For apple machines, at least, it seems to be; see
+//    http://developer.apple.com/documentation/mac/runtimehtml/RTArch-59.html
+//    http://www.linux-foundation.org/spec/ELF/ppc64/PPC-elf64abi-1.9.html#STACK
+// Linux has similar code: http://patchwork.ozlabs.org/linuxppc/patch?id=8882
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_
+
+#if defined(__linux__)
+#include <asm/ptrace.h>   // for PT_NIP.
+#include <ucontext.h>     // for ucontext_t
+#endif
+
+#include <unistd.h>
+#include <cassert>
+#include <cstdint>
+#include <cstdio>
+
+#include "absl/base/port.h"
+#include "absl/debugging/stacktrace.h"
+#include "absl/debugging/internal/address_is_readable.h"
+#include "absl/debugging/internal/vdso_support.h"  // a no-op on non-elf or non-glibc systems
+
+// Given a stack pointer, return the saved link register value.
+// Note that this is the link register for a callee.
+static inline void *StacktracePowerPCGetLR(void **sp) {
+  // PowerPC has 3 main ABIs, which say where in the stack the
+  // Link Register is.  For DARWIN and AIX (used by Apple and
+  // Linux ppc64), it's in sp[2].  For SYSV (used by Linux ppc),
+  // it's in sp[1].
+#if defined(_CALL_AIX) || defined(_CALL_DARWIN)
+  return *(sp+2);
+#elif defined(_CALL_SYSV)
+  return *(sp+1);
+#elif defined(__APPLE__) || (defined(__linux__) && defined(__PPC64__))
+  // This check is in case the compiler doesn't define _CALL_AIX/etc.
+  return *(sp+2);
+#elif defined(__linux)
+  // This check is in case the compiler doesn't define _CALL_SYSV.
+  return *(sp+1);
+#else
+#error Need to specify the PPC ABI for your architecture.
+#endif
+}
+
+// Given a pointer to a stack frame, locate and return the calling
+// stackframe, or return null if no stackframe can be found. Perform sanity
+// checks (the strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template<bool STRICT_UNWINDING, bool IS_WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
+static void **NextStackFrame(void **old_sp, const void *uc) {
+  void **new_sp = (void **) *old_sp;
+  enum { kStackAlignment = 16 };
+
+  // Check that the transition from frame pointer old_sp to frame
+  // pointer new_sp isn't clearly bogus
+  if (STRICT_UNWINDING) {
+    // With the stack growing downwards, an older stack frame must be
+    // at a greater address than the current one.
+    if (new_sp <= old_sp) return nullptr;
+    // Assume stack frames larger than 100,000 bytes are bogus.
+    if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return nullptr;
+  } else {
+    // In the non-strict mode, allow discontiguous stack frames.
+    // (alternate-signal-stacks for example).
+    if (new_sp == old_sp) return nullptr;
+    // And allow frames up to about 1MB.
+    if ((new_sp > old_sp)
+        && ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return nullptr;
+  }
+  if ((uintptr_t)new_sp % kStackAlignment != 0) return nullptr;
+
+#if defined(__linux__)
+  enum StackTraceKernelSymbolStatus {
+      kNotInitialized = 0, kAddressValid, kAddressInvalid };
+
+  if (IS_WITH_CONTEXT && uc != nullptr) {
+    static StackTraceKernelSymbolStatus kernel_symbol_status =
+        kNotInitialized;  // Sentinel: not computed yet.
+    // Initialize with sentinel value: __kernel_sigtramp_rt64 cannot
+    // possibly be there.
+    static const unsigned char *kernel_sigtramp_rt64_address = nullptr;
+    if (kernel_symbol_status == kNotInitialized) {
+      absl::debug_internal::VDSOSupport vdso;
+      if (vdso.IsPresent()) {
+        absl::debug_internal::VDSOSupport::SymbolInfo
+            sigtramp_rt64_symbol_info;
+        if (!vdso.LookupSymbol(
+                "__kernel_sigtramp_rt64", "LINUX_2.6.15",
+                absl::debug_internal::VDSOSupport::kVDSOSymbolType,
+                &sigtramp_rt64_symbol_info) ||
+            sigtramp_rt64_symbol_info.address == nullptr) {
+          // Unexpected: VDSO is present, yet the expected symbol is missing
+          // or null.
+          assert(false && "VDSO is present, but doesn't have expected symbol");
+          kernel_symbol_status = kAddressInvalid;
+        } else {
+          kernel_sigtramp_rt64_address =
+              reinterpret_cast<const unsigned char *>(
+                  sigtramp_rt64_symbol_info.address);
+          kernel_symbol_status = kAddressValid;
+        }
+      } else {
+        kernel_symbol_status = kAddressInvalid;
+      }
+    }
+
+    if (new_sp != nullptr &&
+        kernel_symbol_status == kAddressValid &&
+        StacktracePowerPCGetLR(new_sp) == kernel_sigtramp_rt64_address) {
+      const ucontext_t* signal_context =
+          reinterpret_cast<const ucontext_t*>(uc);
+      void **const sp_before_signal =
+          reinterpret_cast<void**>(signal_context->uc_mcontext.gp_regs[PT_R1]);
+      // Check that alleged sp before signal is nonnull and is reasonably
+      // aligned.
+      if (sp_before_signal != nullptr &&
+          ((uintptr_t)sp_before_signal % kStackAlignment) == 0) {
+        // Check that alleged stack pointer is actually readable. This is to
+        // prevent a "double fault" in case we hit the first fault due to e.g.
+        // a stack corruption.
+        if (absl::debug_internal::AddressIsReadable(sp_before_signal)) {
+          // Alleged stack pointer is readable, use it for further unwinding.
+          new_sp = sp_before_signal;
+        }
+      }
+    }
+  }
+#endif
+
+  return new_sp;
+}
+
+// This ensures that absl::GetStackTrace sets up the Link Register properly.
+void StacktracePowerPCDummyFunction() __attribute__((noinline));
+void StacktracePowerPCDummyFunction() { __asm__ volatile(""); }
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+                      const void *ucp, int *min_dropped_frames) {
+  void **sp;
+  // Apple OS X uses an old version of gnu as -- both Darwin 7.9.0 (Panther)
+  // and Darwin 8.8.1 (Tiger) use as 1.38.  This means we have to use a
+  // different asm syntax.  I don't know quite the best way to discriminate
+  // systems using the old as from the new one; I've gone with __APPLE__.
+#ifdef __APPLE__
+  __asm__ volatile ("mr %0,r1" : "=r" (sp));
+#else
+  __asm__ volatile ("mr %0,1" : "=r" (sp));
+#endif
+
+  // On PowerPC, the "Link Register" or "Link Record" (LR), is a stack
+  // entry that holds the return address of the subroutine call (what
+  // instruction we run after our function finishes).  This is the
+  // same as the stack-pointer of our parent routine, which is what we
+  // want here.  While the compiler will always(?) set up LR for
+  // subroutine calls, it may not for leaf functions (such as this one).
+  // This routine forces the compiler (at least gcc) to push it anyway.
+  StacktracePowerPCDummyFunction();
+
+  // The LR save area is used by the callee, so the top entry is bogus.
+  skip_count++;
+
+  int n = 0;
+
+  // Unlike ABIs of X86 and ARM, PowerPC ABIs say that return address (in
+  // the link register) of a function call is stored in the caller's stack
+  // frame instead of the callee's.  When we look for the return address
+  // associated with a stack frame, we need to make sure that there is a
+  // caller frame before it.  So we call NextStackFrame before entering the
+  // loop below and check next_sp instead of sp for loop termination.
+  // The outermost frame is set up by runtimes and it does not have a
+  // caller frame, so it is skipped.
+
+  // The absl::GetStackFrames routine is called when we are in some
+  // informational context (the failure signal handler for example).
+  // Use the non-strict unwinding rules to produce a stack trace
+  // that is as complete as possible (even if it contains a few
+  // bogus entries in some rare cases).
+  void **next_sp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(sp, ucp);
+
+  while (next_sp && n < max_depth) {
+    if (skip_count > 0) {
+      skip_count--;
+    } else {
+      result[n] = StacktracePowerPCGetLR(sp);
+      if (IS_STACK_FRAMES) {
+        if (next_sp > sp) {
+          sizes[n] = (uintptr_t)next_sp - (uintptr_t)sp;
+        } else {
+          // A frame-size of 0 is used to indicate unknown frame size.
+          sizes[n] = 0;
+        }
+      }
+      n++;
+    }
+
+    sp = next_sp;
+    next_sp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(sp, ucp);
+  }
+
+  if (min_dropped_frames != nullptr) {
+    // Implementation detail: we clamp the maximum number of frames we are
+    // willing to count, so as not to spend too much time in the loop below.
+    const int kMaxUnwind = 1000;
+    int j = 0;
+    for (; next_sp != nullptr && j < kMaxUnwind; j++) {
+      next_sp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(next_sp, ucp);
+    }
+    *min_dropped_frames = j;
+  }
+  return n;
+}
+
+#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_
diff --git a/absl/debugging/internal/stacktrace_unimplemented-inl.inc b/absl/debugging/internal/stacktrace_unimplemented-inl.inc
new file mode 100644
index 000000000000..a66be7797d94
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_unimplemented-inl.inc
@@ -0,0 +1,14 @@
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void** /* result */, int* /* sizes */,
+                      int /* max_depth */, int /* skip_count */,
+                      const void* /* ucp */, int *min_dropped_frames) {
+  if (min_dropped_frames != nullptr) {
+    *min_dropped_frames = 0;
+  }
+  return 0;
+}
+
+#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_
diff --git a/absl/debugging/internal/stacktrace_win32-inl.inc b/absl/debugging/internal/stacktrace_win32-inl.inc
new file mode 100644
index 000000000000..4c7f855bbd38
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_win32-inl.inc
@@ -0,0 +1,75 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Produces a stack trace for Windows.  Normally, one could use
+// stacktrace_x86-inl.h or stacktrace_x86_64-inl.h -- and indeed, that
+// should work for binaries compiled using MSVC in "debug" mode.
+// However, in "release" mode, Windows uses frame-pointer
+// optimization, which makes getting a stack trace very difficult.
+//
+// There are several approaches one can take.  One is to use Windows
+// intrinsics like StackWalk64.  These can work, but have restrictions
+// on how successful they can be.  Another attempt is to write a
+// version of stacktrace_x86-inl.h that has heuristic support for
+// dealing with FPO, similar to what WinDbg does (see
+// http://www.nynaeve.net/?p=97).  There are (non-working) examples of
+// these approaches, complete with TODOs, in stacktrace_win32-inl.h#1
+//
+// The solution we've ended up with is to call the undocumented
+// Windows function RtlCaptureStackBackTrace, which probably doesn't
+// work with FPO but at least is fast, and doesn't require a symbol
+// server.
+//
+// This code is inspired by a patch from David Vitek:
+//   http://code.google.com/p/google-perftools/issues/detail?id=83
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_
+
+#include <windows.h>    // for GetProcAddress and GetModuleHandle
+#include <cassert>
+#include <cstring>      // for memset
+
+typedef USHORT NTAPI RtlCaptureStackBackTrace_Function(
+    IN ULONG frames_to_skip,
+    IN ULONG frames_to_capture,
+    OUT PVOID *backtrace,
+    OUT PULONG backtrace_hash);
+
+// Load the function we need at static init time, where we don't have
+// to worry about someone else holding the loader's lock.
+static RtlCaptureStackBackTrace_Function* const RtlCaptureStackBackTrace_fn =
+   (RtlCaptureStackBackTrace_Function*)
+   GetProcAddress(GetModuleHandleA("ntdll.dll"), "RtlCaptureStackBackTrace");
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+                      const void *ucp, int *min_dropped_frames) {
+  int n = 0;
+  if (!RtlCaptureStackBackTrace_fn) {
+    // Can't produce a stack trace without the function.
+  } else {
+    n = (int)RtlCaptureStackBackTrace_fn(skip_count + 2, max_depth, result, 0);
+  }
+  if (IS_STACK_FRAMES) {
+    // No implementation for finding out the stack frame sizes yet.
+    memset(sizes, 0, sizeof(*sizes) * n);
+  }
+  if (min_dropped_frames != nullptr) {
+    // Not implemented.
+    *min_dropped_frames = 0;
+  }
+  return n;
+}
+
+#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_
diff --git a/absl/debugging/internal/stacktrace_x86-inl.inc b/absl/debugging/internal/stacktrace_x86-inl.inc
new file mode 100644
index 000000000000..6e1af017878e
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_x86-inl.inc
@@ -0,0 +1,327 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Produce stack trace
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_
+
+#if defined(__linux__) && (defined(__i386__) || defined(__x86_64__))
+#include <ucontext.h>  // for ucontext_t
+#endif
+
+#if !defined(_WIN32)
+#include <unistd.h>
+#endif
+
+#include <cassert>
+#include <cstdint>
+
+#include "absl/base/macros.h"
+#include "absl/base/port.h"
+#include "absl/debugging/internal/address_is_readable.h"
+#include "absl/debugging/internal/vdso_support.h"  // a no-op on non-elf or non-glibc systems
+#include "absl/debugging/stacktrace.h"
+#include "absl/base/internal/raw_logging.h"
+
+#if defined(__linux__) && defined(__i386__)
+// Count "push %reg" instructions in VDSO __kernel_vsyscall(),
+// preceeding "syscall" or "sysenter".
+// If __kernel_vsyscall uses frame pointer, answer 0.
+//
+// kMaxBytes tells how many instruction bytes of __kernel_vsyscall
+// to analyze before giving up. Up to kMaxBytes+1 bytes of
+// instructions could be accessed.
+//
+// Here are known __kernel_vsyscall instruction sequences:
+//
+// SYSENTER (linux-2.6.26/arch/x86/vdso/vdso32/sysenter.S).
+// Used on Intel.
+//  0xffffe400 <__kernel_vsyscall+0>:       push   %ecx
+//  0xffffe401 <__kernel_vsyscall+1>:       push   %edx
+//  0xffffe402 <__kernel_vsyscall+2>:       push   %ebp
+//  0xffffe403 <__kernel_vsyscall+3>:       mov    %esp,%ebp
+//  0xffffe405 <__kernel_vsyscall+5>:       sysenter
+//
+// SYSCALL (see linux-2.6.26/arch/x86/vdso/vdso32/syscall.S).
+// Used on AMD.
+//  0xffffe400 <__kernel_vsyscall+0>:       push   %ebp
+//  0xffffe401 <__kernel_vsyscall+1>:       mov    %ecx,%ebp
+//  0xffffe403 <__kernel_vsyscall+3>:       syscall
+//
+
+// The int $0x80 sequence below is not expected in practice; it is listed
+// only for completeness.
+
+// i386 (see linux-2.6.26/arch/x86/vdso/vdso32/int80.S)
+//  0xffffe400 <__kernel_vsyscall+0>:       int $0x80
+//  0xffffe401 <__kernel_vsyscall+1>:       ret
+//
+static const int kMaxBytes = 10;
+
+// We use assert()s instead of DCHECK()s -- this is too low level
+// for DCHECK().
+
+static int CountPushInstructions(const unsigned char *const addr) {
+  int result = 0;
+  for (int i = 0; i < kMaxBytes; ++i) {
+    if (addr[i] == 0x89) {
+      // "mov reg,reg"
+      if (addr[i + 1] == 0xE5) {
+        // Found "mov %esp,%ebp".
+        return 0;
+      }
+      ++i;  // Skip register encoding byte.
+    } else if (addr[i] == 0x0F &&
+               (addr[i + 1] == 0x34 || addr[i + 1] == 0x05)) {
+      // Found "sysenter" or "syscall".
+      return result;
+    } else if ((addr[i] & 0xF0) == 0x50) {
+      // Found "push %reg".
+      ++result;
+    } else if (addr[i] == 0xCD && addr[i + 1] == 0x80) {
+      // Found "int $0x80"
+      assert(result == 0);
+      return 0;
+    } else {
+      // Unexpected instruction.
+      assert(false && "unexpected instruction in __kernel_vsyscall");
+      return 0;
+    }
+  }
+  // Unexpected: didn't find SYSENTER or SYSCALL in
+  // [__kernel_vsyscall, __kernel_vsyscall + kMaxBytes) interval.
+  assert(false && "did not find SYSENTER or SYSCALL in __kernel_vsyscall");
+  return 0;
+}
+#endif
+
+// Assume stack frames larger than 100,000 bytes are bogus.
+static const int kMaxFrameBytes = 100000;
+
+// Returns the stack frame pointer from signal context, 0 if unknown.
+// vuc is a ucontext_t *.  We use void* to avoid the use
+// of ucontext_t on non-POSIX systems.
+static uintptr_t GetFP(const void *vuc) {
+#if defined(__linux__)
+  if (vuc != nullptr) {
+    auto *uc = reinterpret_cast<const ucontext_t *>(vuc);
+#if defined(__i386__)
+    const auto bp = uc->uc_mcontext.gregs[REG_EBP];
+    const auto sp = uc->uc_mcontext.gregs[REG_ESP];
+#elif defined(__x86_64__)
+    const auto bp = uc->uc_mcontext.gregs[REG_RBP];
+    const auto sp = uc->uc_mcontext.gregs[REG_RSP];
+#else
+    const uintptr_t bp = 0;
+    const uintptr_t sp = 0;
+#endif
+    // Sanity-check that the base pointer is valid.  It should be as long as
+    // SHRINK_WRAP_FRAME_POINTER is not set, but it's possible that some code in
+    // the process is compiled with --copt=-fomit-frame-pointer or
+    // --copt=-momit-leaf-frame-pointer.
+    //
+    // TODO(bcmills): -momit-leaf-frame-pointer is currently the default
+    // behavior when building with clang.  Talk to the C++ toolchain team about
+    // fixing that.
+    if (bp >= sp && bp - sp <= kMaxFrameBytes) return bp;
+
+    // If bp isn't a plausible frame pointer, return the stack pointer instead.
+    // If we're lucky, it points to the start of a stack frame; otherwise, we'll
+    // get one frame of garbage in the stack trace and fail the sanity check on
+    // the next iteration.
+    return sp;
+  }
+#endif
+  return 0;
+}
+
+// Given a pointer to a stack frame, locate and return the calling
+// stack frame, or return null if none can be found. Perform sanity
+// checks (the strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template <bool STRICT_UNWINDING, bool WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
+static void **NextStackFrame(void **old_fp, const void *uc) {
+  void **new_fp = (void **)*old_fp;
+
+#if defined(__linux__) && defined(__i386__)
+  if (WITH_CONTEXT && uc != nullptr) {
+    // How many "push %reg" instructions are there at __kernel_vsyscall?
+    // This is constant for a given kernel and processor, so compute
+    // it only once.
+    static int num_push_instructions = -1;  // Sentinel: not computed yet.
+    // Initialize with sentinel value: __kernel_rt_sigreturn cannot possibly
+    // be there.
+    static const unsigned char *kernel_rt_sigreturn_address = nullptr;
+    static const unsigned char *kernel_vsyscall_address = nullptr;
+    if (num_push_instructions == -1) {
+      absl::debug_internal::VDSOSupport vdso;
+      if (vdso.IsPresent()) {
+        absl::debug_internal::VDSOSupport::SymbolInfo
+            rt_sigreturn_symbol_info;
+        absl::debug_internal::VDSOSupport::SymbolInfo vsyscall_symbol_info;
+        if (!vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.5", STT_FUNC,
+                               &rt_sigreturn_symbol_info) ||
+            !vdso.LookupSymbol("__kernel_vsyscall", "LINUX_2.5", STT_FUNC,
+                               &vsyscall_symbol_info) ||
+            rt_sigreturn_symbol_info.address == nullptr ||
+            vsyscall_symbol_info.address == nullptr) {
+          // Unexpected: 32-bit VDSO is present, yet one of the expected
+          // symbols is missing or null.
+          assert(false && "VDSO is present, but doesn't have expected symbols");
+          num_push_instructions = 0;
+        } else {
+          kernel_rt_sigreturn_address =
+              reinterpret_cast<const unsigned char *>(
+                  rt_sigreturn_symbol_info.address);
+          kernel_vsyscall_address =
+              reinterpret_cast<const unsigned char *>(
+                  vsyscall_symbol_info.address);
+          num_push_instructions =
+              CountPushInstructions(kernel_vsyscall_address);
+        }
+      } else {
+        num_push_instructions = 0;
+      }
+    }
+    if (num_push_instructions != 0 && kernel_rt_sigreturn_address != nullptr &&
+        old_fp[1] == kernel_rt_sigreturn_address) {
+      const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
+      // This kernel does not use frame pointer in its VDSO code,
+      // and so %ebp is not suitable for unwinding.
+      void **const reg_ebp =
+          reinterpret_cast<void **>(ucv->uc_mcontext.gregs[REG_EBP]);
+      const unsigned char *const reg_eip =
+          reinterpret_cast<unsigned char *>(ucv->uc_mcontext.gregs[REG_EIP]);
+      if (new_fp == reg_ebp && kernel_vsyscall_address <= reg_eip &&
+          reg_eip - kernel_vsyscall_address < kMaxBytes) {
+        // We "stepped up" to __kernel_vsyscall, but %ebp is not usable.
+        // Restore from 'ucv' instead.
+        void **const reg_esp =
+            reinterpret_cast<void **>(ucv->uc_mcontext.gregs[REG_ESP]);
+        // Check that alleged %esp is not null and is reasonably aligned.
+        if (reg_esp &&
+            ((uintptr_t)reg_esp & (sizeof(reg_esp) - 1)) == 0) {
+          // Check that alleged %esp is actually readable. This is to prevent
+          // "double fault" in case we hit the first fault due to e.g. stack
+          // corruption.
+          void *const reg_esp2 = reg_esp[num_push_instructions - 1];
+          if (absl::debug_internal::AddressIsReadable(reg_esp2)) {
+            // Alleged %esp is readable, use it for further unwinding.
+            new_fp = reinterpret_cast<void **>(reg_esp2);
+          }
+        }
+      }
+    }
+  }
+#endif
+
+  const uintptr_t old_fp_u = reinterpret_cast<uintptr_t>(old_fp);
+  const uintptr_t new_fp_u = reinterpret_cast<uintptr_t>(new_fp);
+
+  // Check that the transition from frame pointer old_fp to frame
+  // pointer new_fp isn't clearly bogus.  Skip the checks if new_fp
+  // matches the signal context, so that we don't skip out early when
+  // using an alternate signal stack.
+  //
+  // TODO(bcmills): The GetFP call should be completely unnecessary when
+  // SHRINK_WRAP_FRAME_POINTER is set (because we should be back in the thread's
+  // stack by this point), but it is empirically still needed (e.g. when the
+  // stack includes a call to abort).  unw_get_reg returns UNW_EBADREG for some
+  // frames.  Figure out why GetValidFrameAddr and/or libunwind isn't doing what
+  // it's supposed to.
+  if (STRICT_UNWINDING &&
+      (!WITH_CONTEXT || uc == nullptr || new_fp_u != GetFP(uc))) {
+    // With the stack growing downwards, an older stack frame must be
+    // at a greater address than the current one.
+    if (new_fp_u <= old_fp_u) return nullptr;
+    if (new_fp_u - old_fp_u > kMaxFrameBytes) return nullptr;
+  } else {
+    if (new_fp == nullptr) return nullptr;  // skip AddressIsReadable() below
+    // In the non-strict mode, allow discontiguous stack frames.
+    // (alternate-signal-stacks for example).
+    if (new_fp == old_fp) return nullptr;
+  }
+
+  if (new_fp_u & (sizeof(void *) - 1)) return nullptr;
+#ifdef __i386__
+  // On 32-bit machines, the stack pointer can be very close to
+  // 0xffffffff, so we explicitly check for a pointer into the
+  // last two pages in the address space
+  if (new_fp_u >= 0xffffe000) return nullptr;
+#endif
+#if !defined(_WIN32)
+  if (!STRICT_UNWINDING) {
+    // Lax sanity checks cause a crash in 32-bit tcmalloc/crash_reason_test
+    // on AMD-based machines with VDSO-enabled kernels.
+    // Make an extra sanity check to ensure new_fp is readable.
+    // Note: NextStackFrame<false>() is only called while the program
+    //       is already on its last leg, so it's ok to be slow here.
+
+    if (!absl::debug_internal::AddressIsReadable(new_fp)) {
+      return nullptr;
+    }
+  }
+#endif
+  return new_fp;
+}
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
+ABSL_ATTRIBUTE_NOINLINE
+static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
+                      const void *ucp, int *min_dropped_frames) {
+  int n = 0;
+  void **fp = reinterpret_cast<void **>(__builtin_frame_address(0));
+
+  while (fp && n < max_depth) {
+    if (*(fp + 1) == reinterpret_cast<void *>(0)) {
+      // In 64-bit code, we often see a frame that
+      // points to itself and has a return address of 0.
+      break;
+    }
+    void **next_fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(fp, ucp);
+    if (skip_count > 0) {
+      skip_count--;
+    } else {
+      result[n] = *(fp + 1);
+      if (IS_STACK_FRAMES) {
+        if (next_fp > fp) {
+          sizes[n] = (uintptr_t)next_fp - (uintptr_t)fp;
+        } else {
+          // A frame-size of 0 is used to indicate unknown frame size.
+          sizes[n] = 0;
+        }
+      }
+      n++;
+    }
+    fp = next_fp;
+  }
+  if (min_dropped_frames != nullptr) {
+    // Implementation detail: we clamp the maximum number of frames we are
+    // willing to count, so as not to spend too much time in the loop below.
+    const int kMaxUnwind = 1000;
+    int j = 0;
+    for (; fp != nullptr && j < kMaxUnwind; j++) {
+      fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(fp, ucp);
+    }
+    *min_dropped_frames = j;
+  }
+  return n;
+}
+
+#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_
diff --git a/absl/debugging/internal/vdso_support.cc b/absl/debugging/internal/vdso_support.cc
new file mode 100644
index 000000000000..5026e1c1a9c9
--- /dev/null
+++ b/absl/debugging/internal/vdso_support.cc
@@ -0,0 +1,177 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Allow dynamic symbol lookup in the kernel VDSO page.
+//
+// VDSOSupport -- a class representing kernel VDSO (if present).
+
+#include "absl/debugging/internal/vdso_support.h"
+
+#ifdef ABSL_HAVE_VDSO_SUPPORT     // defined in vdso_support.h
+
+#include <errno.h>   // for errno and ENOSYS
+#include <fcntl.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/port.h"
+
+#ifndef AT_SYSINFO_EHDR
+#define AT_SYSINFO_EHDR 33  // for crosstoolv10
+#endif
+
+namespace absl {
+namespace debug_internal {
+
+std::atomic<const void *> VDSOSupport::vdso_base_(
+    debug_internal::ElfMemImage::kInvalidBase);
+std::atomic<VDSOSupport::GetCpuFn> VDSOSupport::getcpu_fn_(&InitAndGetCPU);
+VDSOSupport::VDSOSupport()
+    // If vdso_base_ is still set to kInvalidBase, we got here
+    // before VDSOSupport::Init has been called. Call it now.
+    : image_(vdso_base_.load(std::memory_order_relaxed) ==
+                     debug_internal::ElfMemImage::kInvalidBase
+                 ? Init()
+                 : vdso_base_.load(std::memory_order_relaxed)) {}
+
+// NOTE: we can't use GoogleOnceInit() below, because we can be
+// called by tcmalloc, and none of the *once* stuff may be functional yet.
+//
+// In addition, we hope that the VDSOSupportHelper constructor
+// causes this code to run before there are any threads, and before
+// InitGoogle() has executed any chroot or setuid calls.
+//
+// Finally, even if there is a race here, it is harmless, because
+// the operation should be idempotent.
+const void *VDSOSupport::Init() {
+  if (vdso_base_.load(std::memory_order_relaxed) ==
+      debug_internal::ElfMemImage::kInvalidBase) {
+    {
+      // Valgrind zaps AT_SYSINFO_EHDR and friends from the auxv[]
+      // on stack, and so glibc works as if VDSO was not present.
+      // But going directly to kernel via /proc/self/auxv below bypasses
+      // Valgrind zapping. So we check for Valgrind separately.
+      if (RunningOnValgrind()) {
+        vdso_base_.store(nullptr, std::memory_order_relaxed);
+        getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
+        return nullptr;
+      }
+      int fd = open("/proc/self/auxv", O_RDONLY);
+      if (fd == -1) {
+        // Kernel too old to have a VDSO.
+        vdso_base_.store(nullptr, std::memory_order_relaxed);
+        getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
+        return nullptr;
+      }
+      ElfW(auxv_t) aux;
+      while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
+        if (aux.a_type == AT_SYSINFO_EHDR) {
+          vdso_base_.store(reinterpret_cast<void *>(aux.a_un.a_val),
+                           std::memory_order_relaxed);
+          break;
+        }
+      }
+      close(fd);
+    }
+    if (vdso_base_.load(std::memory_order_relaxed) ==
+        debug_internal::ElfMemImage::kInvalidBase) {
+      // Didn't find AT_SYSINFO_EHDR in auxv[].
+      vdso_base_.store(nullptr, std::memory_order_relaxed);
+    }
+  }
+  GetCpuFn fn = &GetCPUViaSyscall;  // default if VDSO not present.
+  if (vdso_base_.load(std::memory_order_relaxed)) {
+    VDSOSupport vdso;
+    SymbolInfo info;
+    if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
+      fn = reinterpret_cast<GetCpuFn>(const_cast<void *>(info.address));
+    }
+  }
+  // Subtle: this code runs outside of any locks; prevent compiler
+  // from assigning to getcpu_fn_ more than once.
+  getcpu_fn_.store(fn, std::memory_order_relaxed);
+  return vdso_base_.load(std::memory_order_relaxed);
+}
+
+const void *VDSOSupport::SetBase(const void *base) {
+  ABSL_RAW_CHECK(base != debug_internal::ElfMemImage::kInvalidBase,
+                 "internal error");
+  const void *old_base = vdso_base_.load(std::memory_order_relaxed);
+  vdso_base_.store(base, std::memory_order_relaxed);
+  image_.Init(base);
+  // Also reset getcpu_fn_, so GetCPU could be tested with simulated VDSO.
+  getcpu_fn_.store(&InitAndGetCPU, std::memory_order_relaxed);
+  return old_base;
+}
+
+bool VDSOSupport::LookupSymbol(const char *name,
+                               const char *version,
+                               int type,
+                               SymbolInfo *info) const {
+  return image_.LookupSymbol(name, version, type, info);
+}
+
+bool VDSOSupport::LookupSymbolByAddress(const void *address,
+                                        SymbolInfo *info_out) const {
+  return image_.LookupSymbolByAddress(address, info_out);
+}
+
+// NOLINT on 'long' because this routine mimics the kernel API.
+long VDSOSupport::GetCPUViaSyscall(unsigned *cpu,  // NOLINT(runtime/int)
+                                   void *, void *) {
+#ifdef SYS_getcpu
+  return syscall(SYS_getcpu, cpu, nullptr, nullptr);
+#else
+  // x86_64 never implemented sys_getcpu(), except as a VDSO call.
+  errno = ENOSYS;
+  return -1;
+#endif
+}
+
+// Use fast __vdso_getcpu if available.
+long VDSOSupport::InitAndGetCPU(unsigned *cpu,  // NOLINT(runtime/int)
+                                void *x, void *y) {
+  Init();
+  GetCpuFn fn = getcpu_fn_.load(std::memory_order_relaxed);
+  ABSL_RAW_CHECK(fn != &InitAndGetCPU, "Init() did not set getcpu_fn_");
+  return (*fn)(cpu, x, y);
+}
+
+// This function must be very fast, and may be called from very
+// low level (e.g. tcmalloc). Hence I avoid things like
+// GoogleOnceInit() and ::operator new.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
+int GetCPU() {
+  unsigned cpu;
+  int ret_code = (*VDSOSupport::getcpu_fn_)(&cpu, nullptr, nullptr);
+  return ret_code == 0 ? cpu : ret_code;
+}
+
+// We need to make sure VDSOSupport::Init() is called before
+// InitGoogle() does any setuid or chroot calls.  If VDSOSupport
+// is used in any global constructor, this will happen, since
+// VDSOSupport's constructor calls Init.  But if not, we need to
+// ensure it here, with a global constructor of our own.  This
+// is an allowed exception to the normal rule against non-trivial
+// global constructors.
+static class VDSOInitHelper {
+ public:
+  VDSOInitHelper() { VDSOSupport::Init(); }
+} vdso_init_helper;
+
+}  // namespace debug_internal
+}  // namespace absl
+
+#endif  // ABSL_HAVE_VDSO_SUPPORT
diff --git a/absl/debugging/internal/vdso_support.h b/absl/debugging/internal/vdso_support.h
new file mode 100644
index 000000000000..a6a7a17794e3
--- /dev/null
+++ b/absl/debugging/internal/vdso_support.h
@@ -0,0 +1,155 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Allow dynamic symbol lookup in the kernel VDSO page.
+//
+// VDSO stands for "Virtual Dynamic Shared Object" -- a page of
+// executable code, which looks like a shared library, but doesn't
+// necessarily exist anywhere on disk, and which gets mmap()ed into
+// every process by kernels which support VDSO, such as 2.6.x for 32-bit
+// executables, and 2.6.24 and above for 64-bit executables.
+//
+// More details can be found here:
+// http://www.trilithium.com/johan/2005/08/linux-gate/
+//
+// VDSOSupport -- a class representing kernel VDSO (if present).
+//
+// Example usage:
+//  VDSOSupport vdso;
+//  VDSOSupport::SymbolInfo info;
+//  typedef long (*FN)(unsigned *, void *, void *);
+//  FN fn = nullptr;
+//  if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
+//     fn = reinterpret_cast<FN>(info.address);
+//  }
+
+#ifndef ABSL_DEBUGGING_INTERNAL_VDSO_SUPPORT_H_
+#define ABSL_DEBUGGING_INTERNAL_VDSO_SUPPORT_H_
+
+#include <atomic>
+
+#include "absl/debugging/internal/elf_mem_image.h"
+
+#ifdef ABSL_HAVE_ELF_MEM_IMAGE
+
+#ifdef ABSL_HAVE_VDSO_SUPPORT
+#error ABSL_HAVE_VDSO_SUPPORT cannot be directly set
+#else
+#define ABSL_HAVE_VDSO_SUPPORT 1
+#endif
+
+namespace absl {
+namespace debug_internal {
+
+// NOTE: this class may be used from within tcmalloc, and cannot
+// use any memory allocation routines.
+class VDSOSupport {
+ public:
+  VDSOSupport();
+
+  typedef ElfMemImage::SymbolInfo SymbolInfo;
+  typedef ElfMemImage::SymbolIterator SymbolIterator;
+
+  // On PowerPC64, VDSO symbols can be of type STT_FUNC or STT_NOTYPE,
+  // depending on how the kernel is built.  The kernel is normally built with
+  // STT_NOTYPE VDSO symbols.  Keep things simple by selecting the expected
+  // type with a compile-time constant.
+#ifdef __powerpc64__
+  enum { kVDSOSymbolType = STT_NOTYPE };
+#else
+  enum { kVDSOSymbolType = STT_FUNC };
+#endif
+
+  // Answers whether we have a vdso at all.
+  bool IsPresent() const { return image_.IsPresent(); }
+
+  // Allows iteration over all VDSO symbols.
+  SymbolIterator begin() const { return image_.begin(); }
+  SymbolIterator end() const { return image_.end(); }
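+  //
+  // A minimal iteration sketch (illustrative only; assumes a VDSO is
+  // present and that SymbolInfo exposes 'name' and 'version' fields, as
+  // ElfMemImage defines them):
+  //
+  //   VDSOSupport vdso;
+  //   for (auto it = vdso.begin(); it != vdso.end(); ++it) {
+  //     printf("%s@%s\n", it->name, it->version);
+  //   }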
+
+  // Look up versioned dynamic symbol in the kernel VDSO.
+  // Returns false if VDSO is not present, or doesn't contain given
+  // symbol/version/type combination.
+  // If info_out != nullptr, additional details are filled in.
+  bool LookupSymbol(const char *name, const char *version,
+                    int symbol_type, SymbolInfo *info_out) const;
+
+  // Find info about symbol (if any) which overlaps given address.
+  // Returns true if symbol was found; false if VDSO isn't present
+  // or doesn't have a symbol overlapping given address.
+  // If info_out != nullptr, additional details are filled in.
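+  //
+  // A lookup sketch (illustrative only; 'pc' is a hypothetical code
+  // address):
+  //
+  //   VDSOSupport vdso;
+  //   VDSOSupport::SymbolInfo info;
+  //   if (vdso.LookupSymbolByAddress(pc, &info)) {
+  //     // 'pc' falls inside the symbol named info.name.
+  //   }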
+  bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;
+
+  // Used only for testing. Replace real VDSO base with a mock.
+  // Returns previous value of vdso_base_. After you are done testing,
+  // you are expected to call SetBase() with the previous value, in order to
+  // reset the state to the way it was.
+  const void *SetBase(const void *s);
+
+  // Computes vdso_base_ and returns it. Should be called as early as
+  // possible: before any thread creation, chroot, or setuid.
+  static const void *Init();
+
+ private:
+  // image_ represents VDSO ELF image in memory.
+  // image_.ehdr_ == nullptr implies there is no VDSO.
+  ElfMemImage image_;
+
+  // Cached value of auxv AT_SYSINFO_EHDR, computed once.
+  // This is a tri-state:
+  //   kInvalidBase   => value hasn't been determined yet.
+  //              0   => there is no VDSO.
+  //           else   => vma of VDSO Elf{32,64}_Ehdr.
+  //
+  // When testing with mock VDSO, low bit is set.
+  // The low bit is always available because vdso_base_ is
+  // page-aligned.
+  static std::atomic<const void *> vdso_base_;
+
+  // NOLINT on 'long' because these routines mimic the kernel API.
+  // The 'cache' parameter may be used by some versions of the kernel,
+  // and should be nullptr or point to a static buffer containing at
+  // least two 'long's.
+  static long InitAndGetCPU(unsigned *cpu, void *cache,     // NOLINT 'long'.
+                            void *unused);
+  static long GetCPUViaSyscall(unsigned *cpu, void *cache,  // NOLINT 'long'.
+                               void *unused);
+  typedef long (*GetCpuFn)(unsigned *cpu, void *cache,      // NOLINT 'long'.
+                           void *unused);
+
+  // This function pointer may point to InitAndGetCPU,
+  // GetCPUViaSyscall, or __vdso_getcpu at different stages of initialization.
+  static std::atomic<GetCpuFn> getcpu_fn_;
+
+  friend int GetCPU(void);  // Needs access to getcpu_fn_.
+
+  VDSOSupport(const VDSOSupport&) = delete;
+  VDSOSupport& operator=(const VDSOSupport&) = delete;
+};
+
+// Same as sched_getcpu() on later glibc versions.
+// Return current CPU, using (fast) __vdso_getcpu@LINUX_2.6 if present,
+// otherwise use syscall(SYS_getcpu,...).
+// May return -1 with errno == ENOSYS if the kernel doesn't
+// support SYS_getcpu.
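+//
+// A usage sketch (illustrative only): the calling thread may migrate
+// between CPUs at any time, so the result is only a hint.
+//
+//   int cpu = absl::debug_internal::GetCPU();
+//   if (cpu >= 0) {
+//     // Was running on 'cpu' at the time of the call.
+//   }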
+int GetCPU();
+
+}  // namespace debug_internal
+}  // namespace absl
+
+#endif  // ABSL_HAVE_ELF_MEM_IMAGE
+
+#endif  // ABSL_DEBUGGING_INTERNAL_VDSO_SUPPORT_H_
diff --git a/absl/debugging/leak_check.cc b/absl/debugging/leak_check.cc
new file mode 100644
index 000000000000..30e7e8a85f09
--- /dev/null
+++ b/absl/debugging/leak_check.cc
@@ -0,0 +1,35 @@
+// Wrappers around lsan_interface functions.
+// When LSan is not linked in, these functions are not available;
+// therefore, Abseil code that depends on them is conditioned on the
+// definition of LEAK_SANITIZER.
+#include "absl/debugging/leak_check.h"
+
+#ifndef LEAK_SANITIZER
+
+namespace absl {
+bool HaveLeakSanitizer() { return false; }
+void DoIgnoreLeak(const void*) { }
+void RegisterLivePointers(const void*, size_t) { }
+void UnRegisterLivePointers(const void*, size_t) { }
+LeakCheckDisabler::LeakCheckDisabler() { }
+LeakCheckDisabler::~LeakCheckDisabler() { }
+}  // namespace absl
+
+#else
+
+#include <sanitizer/lsan_interface.h>
+
+namespace absl {
+bool HaveLeakSanitizer() { return true; }
+void DoIgnoreLeak(const void* ptr) { __lsan_ignore_object(ptr); }
+void RegisterLivePointers(const void* ptr, size_t size) {
+  __lsan_register_root_region(ptr, size);
+}
+void UnRegisterLivePointers(const void* ptr, size_t size) {
+  __lsan_unregister_root_region(ptr, size);
+}
+LeakCheckDisabler::LeakCheckDisabler() { __lsan_disable(); }
+LeakCheckDisabler::~LeakCheckDisabler() { __lsan_enable(); }
+}  // namespace absl
+
+#endif  // LEAK_SANITIZER
diff --git a/absl/debugging/leak_check.h b/absl/debugging/leak_check.h
new file mode 100644
index 000000000000..f67fe88a0fec
--- /dev/null
+++ b/absl/debugging/leak_check.h
@@ -0,0 +1,111 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// -----------------------------------------------------------------------------
+// File: leak_check.h
+// -----------------------------------------------------------------------------
+//
+// This package contains functions that affect leak checking behavior within
+// targets built with the LeakSanitizer (LSan), a memory leak detector that is
+// integrated within the AddressSanitizer (ASan) as an additional component, or
+// which can be used standalone. LSan and ASan are included with, or available
+// as additional components for, most compilers, such as Clang, GCC, and MSVC.
+// Note: this leak checking API is not yet supported in MSVC.
+// Leak checking is enabled by default in all ASan builds.
+//
+// See https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer
+//
+// -----------------------------------------------------------------------------
+#ifndef ABSL_DEBUGGING_LEAK_CHECK_H_
+#define ABSL_DEBUGGING_LEAK_CHECK_H_
+
+#include <cstddef>
+
+namespace absl {
+
+// HaveLeakSanitizer()
+//
+// Returns true if a leak-checking sanitizer (either ASan or standalone LSan) is
+// currently built into this target.
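+//
+// A guard sketch (illustrative only):
+//
+//   if (absl::HaveLeakSanitizer()) {
+//     // Leak checking may be active; e.g. annotate intentional leaks here.
+//   }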
+bool HaveLeakSanitizer();
+
+// DoIgnoreLeak()
+//
+// Implements `IgnoreLeak()` below. This function should usually
+// not be called directly; calling `IgnoreLeak()` is preferred.
+void DoIgnoreLeak(const void* ptr);
+
+// IgnoreLeak()
+//
+// Instructs the leak sanitizer to ignore leak warnings on the object referenced
+// by the passed pointer, as well as all heap objects transitively referenced
+// by it. The passed object pointer can point to either the beginning of the
+// object or anywhere within it.
+//
+// Example:
+//
+//   static T* obj = IgnoreLeak(new T(...));
+//
+// If the passed `ptr` does not point to an actively allocated object at the
+// time `IgnoreLeak()` is called, the call is a no-op; if it is actively
+// allocated, the object must not get deallocated later.
+//
+template <typename T>
+T* IgnoreLeak(T* ptr) {
+  DoIgnoreLeak(ptr);
+  return ptr;
+}
+
+// LeakCheckDisabler
+//
+// This helper class, which should itself be allocated on the stack, indicates
+// that any heap allocations made in the code block it covers will not be
+// reported as leaks. Leak check disabling applies within the code block and
+// within any nested function calls made from it.
+//
+// Example:
+//
+//   void Foo() {
+//     LeakCheckDisabler disabler;
+//     ... code that allocates objects whose leaks should be ignored ...
+//   }
+//
+// REQUIRES: Destructor runs in same thread as constructor
+class LeakCheckDisabler {
+ public:
+  LeakCheckDisabler();
+  LeakCheckDisabler(const LeakCheckDisabler&) = delete;
+  LeakCheckDisabler& operator=(const LeakCheckDisabler&) = delete;
+  ~LeakCheckDisabler();
+};
+
+// RegisterLivePointers()
+//
+// Registers `ptr[0,size-1]` as pointers to memory that is still actively being
+// referenced and for which leak checking should be ignored. This function is
+// useful if you store pointers in mapped memory: such ranges are known to be
+// live but would otherwise be flagged as leaked by normal analysis.
+void RegisterLivePointers(const void* ptr, size_t size);
+
+// UnRegisterLivePointers()
+//
+// Deregisters the pointers previously marked as active in
+// `RegisterLivePointers()`, enabling leak checking of those pointers.
+void UnRegisterLivePointers(const void* ptr, size_t size);
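+//
+// A paired-usage sketch (illustrative only; 'region' and 'kSize' are
+// hypothetical):
+//
+//   absl::RegisterLivePointers(region, kSize);
+//   ... store and use pointers inside [region, region + kSize) ...
+//   absl::UnRegisterLivePointers(region, kSize);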
+
+}  // namespace absl
+
+#endif  // ABSL_DEBUGGING_LEAK_CHECK_H_
diff --git a/absl/debugging/leak_check_disable.cc b/absl/debugging/leak_check_disable.cc
new file mode 100644
index 000000000000..df22c1cae91d
--- /dev/null
+++ b/absl/debugging/leak_check_disable.cc
@@ -0,0 +1,20 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Disable LeakSanitizer when this file is linked in.
+// This function overrides __lsan_is_turned_off from sanitizer/lsan_interface.h
+extern "C" int __lsan_is_turned_off();
+extern "C" int __lsan_is_turned_off() {
+  return 1;
+}
diff --git a/absl/debugging/leak_check_fail_test.cc b/absl/debugging/leak_check_fail_test.cc
new file mode 100644
index 000000000000..bf541fe84153
--- /dev/null
+++ b/absl/debugging/leak_check_fail_test.cc
@@ -0,0 +1,41 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstring>  // for strdup
+#include <memory>
+#include "gtest/gtest.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/debugging/leak_check.h"
+
+namespace {
+
+TEST(LeakCheckTest, LeakMemory) {
+  // This test is expected to cause lsan failures on program exit. Therefore the
+  // test will be run only by leak_check_test.sh, which will verify a
+  // failed exit code.
+
+  char* foo = strdup("lsan should complain about this leaked string");
+  ABSL_RAW_LOG(INFO, "Should detect leaked std::string %s", foo);
+}
+
+TEST(LeakCheckTest, LeakMemoryAfterDisablerScope) {
+  // This test is expected to cause lsan failures on program exit. Therefore the
+  // test will be run only by external_leak_check_test.sh, which will verify a
+  // failed exit code.
+  { absl::LeakCheckDisabler disabler; }
+  char* foo = strdup("lsan should also complain about this leaked string");
+  ABSL_RAW_LOG(INFO, "Re-enabled leak detection.Should detect leaked std::string %s",
+               foo);
+}
+
+}  // namespace
diff --git a/absl/debugging/leak_check_test.cc b/absl/debugging/leak_check_test.cc
new file mode 100644
index 000000000000..27ca2d1b7bf3
--- /dev/null
+++ b/absl/debugging/leak_check_test.cc
@@ -0,0 +1,41 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <memory>
+#include <string>
+#include "gtest/gtest.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/debugging/leak_check.h"
+
+namespace {
+
+TEST(LeakCheckTest, DetectLeakSanitizer) {
+#ifdef ABSL_EXPECT_LEAK_SANITIZER
+  EXPECT_TRUE(absl::HaveLeakSanitizer());
+#else
+  EXPECT_FALSE(absl::HaveLeakSanitizer());
+#endif
+}
+
+TEST(LeakCheckTest, IgnoreLeakSuppressesLeakedMemoryErrors) {
+  auto foo = absl::IgnoreLeak(new std::string("some ignored leaked string"));
+  ABSL_RAW_LOG(INFO, "Ignoring leaked std::string %s", foo->c_str());
+}
+
+TEST(LeakCheckTest, LeakCheckDisablerIgnoresLeak) {
+  absl::LeakCheckDisabler disabler;
+  auto foo = new std::string("some string leaked while checks are disabled");
+  ABSL_RAW_LOG(INFO, "Ignoring leaked string %s", foo->c_str());
+}
+
+}  // namespace
diff --git a/absl/debugging/stacktrace.cc b/absl/debugging/stacktrace.cc
new file mode 100644
index 000000000000..6c1690426381
--- /dev/null
+++ b/absl/debugging/stacktrace.cc
@@ -0,0 +1,133 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Produce stack trace.
+//
+// There are three different ways we can try to get the stack trace:
+//
+// 1) Our hand-coded stack-unwinder.  This depends on a certain stack
+//    layout, which is used by gcc (and those systems using a
+//    gcc-compatible ABI) on x86 systems, at least since gcc 2.95.
+//    It uses the frame pointer to do its work.
+//
+// 2) The libunwind library.  This is still in development, and as a
+//    separate library adds a new dependency, but doesn't need a frame
+//    pointer.  It also doesn't call malloc.
+//
+// 3) The gdb unwinder -- also the one used by the c++ exception code.
+//    It's obviously well-tested, but has a fatal flaw: it can call
+//    malloc() from the unwinder.  This is a problem because we're
+//    trying to use the unwinder to instrument malloc().
+//
+// Note: if you add a new implementation here, make sure it works
+// correctly when absl::GetStackTrace() is called with max_depth == 0.
+// Some code may do that.
+
+#include "absl/debugging/stacktrace.h"
+
+#include <atomic>
+
+#include "absl/base/port.h"
+#include "absl/debugging/internal/stacktrace_config.h"
+
+#if defined(ABSL_STACKTRACE_INL_HEADER)
+#include ABSL_STACKTRACE_INL_HEADER
+#else
+# error Cannot calculate stack trace: will need to write for your environment
+# include "absl/debugging/internal/stacktrace_x86-inl.inc"
+# include "absl/debugging/internal/stacktrace_win32-inl.inc"
+# include "absl/debugging/internal/stacktrace_unimplemented-inl.inc"
+# include "absl/debugging/internal/stacktrace_libunwind-inl.inc"
+# include "absl/debugging/internal/stacktrace_generic-inl.inc"
+# include "absl/debugging/internal/stacktrace_powerpc-inl.inc"
+# include "absl/debugging/internal/stacktrace_arm-inl.inc"
+# include "absl/debugging/internal/stacktrace_aarch64-inl.inc"
+#endif
+
+namespace absl {
+namespace {
+
+typedef int (*Unwinder)(void**, int*, int, int, const void*, int*);
+std::atomic<Unwinder> custom;
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+ABSL_ATTRIBUTE_ALWAYS_INLINE inline int Unwind(void** result, int* sizes,
+                                               int max_depth, int skip_count,
+                                               const void* uc,
+                                               int* min_dropped_frames) {
+  Unwinder f = &UnwindImpl<IS_STACK_FRAMES, IS_WITH_CONTEXT>;
+  Unwinder g = custom.load(std::memory_order_acquire);
+  if (g != nullptr) f = g;
+
+  // Add 1 to skip count for the unwinder function itself
+  int size = (*f)(result, sizes, max_depth, skip_count + 1, uc,
+                  min_dropped_frames);
+  // To disable tail call to (*f)(...)
+  ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
+  return size;
+}
+
+}  // anonymous namespace
+
+int GetStackFrames(void** result, int* sizes, int max_depth, int skip_count) {
+  return Unwind<true, false>(result, sizes, max_depth, skip_count, nullptr,
+                             nullptr);
+}
+
+int GetStackFramesWithContext(void** result, int* sizes, int max_depth,
+                              int skip_count, const void* uc,
+                              int* min_dropped_frames) {
+  return Unwind<true, true>(result, sizes, max_depth, skip_count, uc,
+                            min_dropped_frames);
+}
+
+int GetStackTrace(void** result, int max_depth, int skip_count) {
+  return Unwind<false, false>(result, nullptr, max_depth, skip_count, nullptr,
+                              nullptr);
+}
+
+int GetStackTraceWithContext(void** result, int max_depth, int skip_count,
+                             const void* uc, int* min_dropped_frames) {
+  return Unwind<false, true>(result, nullptr, max_depth, skip_count, uc,
+                             min_dropped_frames);
+}
+
+void SetStackUnwinder(Unwinder w) {
+  custom.store(w, std::memory_order_release);
+}
+
+int DefaultStackUnwinder(void** pcs, int* sizes, int depth, int skip,
+                         const void* uc, int* min_dropped_frames) {
+  skip++;  // For this function
+  Unwinder f = nullptr;
+  if (sizes == nullptr) {
+    if (uc == nullptr) {
+      f = &UnwindImpl<false, false>;
+    } else {
+      f = &UnwindImpl<false, true>;
+    }
+  } else {
+    if (uc == nullptr) {
+      f = &UnwindImpl<true, false>;
+    } else {
+      f = &UnwindImpl<true, true>;
+    }
+  }
+  volatile int x = 0;
+  int n = (*f)(pcs, sizes, depth, skip, uc, min_dropped_frames);
+  x = 1; (void) x;  // To disable tail call to (*f)(...)
+  return n;
+}
+
+}  // namespace absl
diff --git a/absl/debugging/stacktrace.h b/absl/debugging/stacktrace.h
new file mode 100644
index 000000000000..0aab5749b952
--- /dev/null
+++ b/absl/debugging/stacktrace.h
@@ -0,0 +1,160 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Routines to extract the current stack trace. These functions are
+// thread-safe and async-signal-safe.
+// Note that stack trace functionality is platform dependent and requires
+// additional support from the compiler/build system in many cases. (That is,
+// this generally only works on platforms/builds that have been specifically
+// configured to support it.)
+
+#ifndef ABSL_DEBUGGING_STACKTRACE_H_
+#define ABSL_DEBUGGING_STACKTRACE_H_
+
+namespace absl {
+
+// Skips the most recent "skip_count" stack frames (also skips the
+// frame generated for the "absl::GetStackFrames" routine itself), and then
+// records the pc values for up to the next "max_depth" frames in
+// "result", and the corresponding stack frame sizes in "sizes".
+// Returns the number of values recorded in "result"/"sizes".
+//
+// Example:
+//      main() { foo(); }
+//      foo() { bar(); }
+//      bar() {
+//        void* result[10];
+//        int sizes[10];
+//        int depth = absl::GetStackFrames(result, sizes, 10, 1);
+//      }
+//
+// The absl::GetStackFrames call will skip the frame for "bar".  It will
+// return 2 and will produce pc values that map to the following
+// procedures:
+//      result[0]       foo
+//      result[1]       main
+// (Actually, there may be a few more entries after "main" to account for
+// startup procedures.)
+// And corresponding stack frame sizes will also be recorded:
+//    sizes[0]       16
+//    sizes[1]       16
+// (Stack frame sizes of 16 above are just for illustration purposes.)
+// Stack frame sizes of 0 or less indicate that those frame sizes couldn't
+// be identified.
+//
+// This routine may return fewer stack frame entries than are
+// available. Also note that "result" and "sizes" must both be non-null.
+extern int GetStackFrames(void** result, int* sizes, int max_depth,
+                          int skip_count);
+
+// Same as above, but to be used from a signal handler. The "uc" parameter
+// should be the pointer to the ucontext_t that was passed as the 3rd parameter
+// to the sa_sigaction signal handler. It may help the unwinder to get a
+// better stack trace under certain conditions. The "uc" may safely be null.
+//
+// If min_dropped_frames is not null, stores in *min_dropped_frames a
+// lower bound on the number of dropped stack frames. The stored value is
+// guaranteed to be >= 0. The number of real stack frames is guaranteed to
+// be >= skip_count + max_depth + *min_dropped_frames.
+extern int GetStackFramesWithContext(void** result, int* sizes, int max_depth,
+                                     int skip_count, const void* uc,
+                                     int* min_dropped_frames);
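+//
+// A signal-handler sketch (illustrative only; assumes the handler was
+// installed with sigaction() and SA_SIGINFO):
+//
+//   void Handler(int signo, siginfo_t* info, void* uc) {
+//     void* frames[32];
+//     int sizes[32];
+//     int dropped;
+//     int n = absl::GetStackFramesWithContext(frames, sizes, 32, 0, uc,
+//                                             &dropped);
+//     // frames[0..n-1] now hold pc values for the interrupted thread.
+//   }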
+
+// This is similar to the absl::GetStackFrames routine, except that it
+// records only the stack trace, not the corresponding stack frame sizes.
+// Example:
+//      main() { foo(); }
+//      foo() { bar(); }
+//      bar() {
+//        void* result[10];
+//        int depth = absl::GetStackTrace(result, 10, 1);
+//      }
+//
+// This produces:
+//      result[0]       foo
+//      result[1]       main
+//           ....       ...
+//
+// "result" must not be null.
+extern int GetStackTrace(void** result, int max_depth, int skip_count);
+
+// Same as above, but to be used from a signal handler. The "uc" parameter
+// should be a pointer to the ucontext_t that was passed as the third
+// argument to an sa_sigaction signal handler. It may help the unwinder
+// produce a better stack trace under certain conditions. "uc" may safely
+// be null.
+//
+// If min_dropped_frames is not null, stores in *min_dropped_frames a
+// lower bound on the number of dropped stack frames. The stored value is
+// guaranteed to be >= 0. The number of real stack frames is guaranteed to
+// be >= skip_count + max_depth + *min_dropped_frames.
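+//
+// A sketch of calling this from a signal handler (the handler name and its
+// registration below are hypothetical, and <signal.h> is assumed):
+//
+//      void Handler(int signo, siginfo_t* info, void* uc) {
+//        void* trace[32];
+//        int depth = absl::GetStackTraceWithContext(trace, 32,
+//                                                   /*skip_count=*/0,
+//                                                   uc, nullptr);
+//        // Record trace[0..depth) using only async-signal-safe operations.
+//      }
+//
+//      struct sigaction act = {};
+//      act.sa_sigaction = Handler;
+//      act.sa_flags = SA_SIGINFO;
+//      sigaction(SIGSEGV, &act, nullptr);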
+extern int GetStackTraceWithContext(void** result, int max_depth,
+                                    int skip_count, const void* uc,
+                                    int* min_dropped_frames);
+
+// Call this to provide a custom function for unwinding stack frames
+// that will be used every time someone invokes one of the
+// GetStack{Frames,Trace}{,WithContext}() functions above.
+//
+// The arguments passed to the unwinder function will match the
+// arguments passed to absl::GetStackFramesWithContext() except that sizes
+// will be non-null iff the caller is interested in frame sizes.
+//
+// If unwinder is null, we revert to the default stack-tracing behavior.
+//
+// ****************************************************************
+// WARNINGS
+//
+// absl::SetStackUnwinder is not suitable for general-purpose use.  It is
+// provided for custom runtimes.
+// Some things to watch out for when calling absl::SetStackUnwinder:
+//
+// (a) The unwinder may be called from within signal handlers and
+// therefore must be async-signal-safe.
+//
+// (b) Even after a custom stack unwinder has been unregistered, other
+// threads may still be in the process of using that unwinder.
+// Therefore do not clean up any state that may be needed by an old
+// unwinder.
+// ****************************************************************
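+//
+// A sketch of a conforming custom unwinder ("MyUnwinder" is a hypothetical
+// name; a real implementation must be async-signal-safe):
+//
+//      int MyUnwinder(void** pcs, int* sizes, int max_depth, int skip_count,
+//                     const void* uc, int* min_dropped_frames) {
+//        // Walk the stack here, honoring skip_count and filling sizes
+//        // only when it is non-null.
+//        if (min_dropped_frames != nullptr) *min_dropped_frames = 0;
+//        return 0;  // number of pcs recorded
+//      }
+//
+//      absl::SetStackUnwinder(MyUnwinder);  // install
+//      absl::SetStackUnwinder(nullptr);     // restore default behavior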
+extern void SetStackUnwinder(int (*unwinder)(void** pcs, int* sizes,
+                                             int max_depth, int skip_count,
+                                             const void* uc,
+                                             int* min_dropped_frames));
+
+// Function that exposes built-in stack-unwinding behavior, ignoring
+// any calls to absl::SetStackUnwinder().
+//
+// pcs must NOT be null.
+//
+// sizes may be null.
+// uc may be null.
+// min_dropped_frames may be null.
+//
+// The semantics are the same as the corresponding GetStack*() function in the
+// case where absl::SetStackUnwinder() was never called.  Equivalents are:
+//
+//              |         null sizes         |       non-null sizes        |
+//              |============================|=============================|
+//      null uc | GetStackTrace()            | GetStackFrames()            |
+//  non-null uc | GetStackTraceWithContext() | GetStackFramesWithContext() |
+//              |============================|=============================|
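+//
+// For example (a sketch; per the table above, with null sizes and null uc
+// this call behaves like absl::GetStackTrace(pcs, 32, 0), regardless of any
+// unwinder installed via absl::SetStackUnwinder()):
+//
+//      void* pcs[32];
+//      int depth = absl::DefaultStackUnwinder(pcs, nullptr, 32, 0,
+//                                             nullptr, nullptr);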
+extern int DefaultStackUnwinder(void** pcs, int* sizes, int max_depth,
+                                int skip_count, const void* uc,
+                                int* min_dropped_frames);
+
+}  // namespace absl
+
+#endif  // ABSL_DEBUGGING_STACKTRACE_H_