Diffstat (limited to 'absl/base')
-rw-r--r--  absl/base/BUILD.bazel                            369
-rw-r--r--  absl/base/attributes.h                           469
-rw-r--r--  absl/base/bit_cast_test.cc                       107
-rw-r--r--  absl/base/call_once.h                            210
-rw-r--r--  absl/base/call_once_test.cc                      102
-rw-r--r--  absl/base/casts.h                                141
-rw-r--r--  absl/base/config.h                               367
-rw-r--r--  absl/base/config_test.cc                          45
-rw-r--r--  absl/base/dynamic_annotations.cc                 129
-rw-r--r--  absl/base/dynamic_annotations.h                  409
-rw-r--r--  absl/base/internal/atomic_hook.h                 122
-rw-r--r--  absl/base/internal/cycleclock.cc                  81
-rw-r--r--  absl/base/internal/cycleclock.h                   77
-rw-r--r--  absl/base/internal/endian.h                      267
-rw-r--r--  absl/base/internal/endian_test.cc                281
-rw-r--r--  absl/base/internal/exception_testing.h            24
-rw-r--r--  absl/base/internal/identity.h                     33
-rw-r--r--  absl/base/internal/invoke.h                      188
-rw-r--r--  absl/base/internal/log_severity.cc                15
-rw-r--r--  absl/base/internal/log_severity.h                 52
-rw-r--r--  absl/base/internal/low_level_alloc.cc            598
-rw-r--r--  absl/base/internal/low_level_alloc.h             120
-rw-r--r--  absl/base/internal/low_level_alloc_test.cc       203
-rw-r--r--  absl/base/internal/low_level_scheduling.h        104
-rw-r--r--  absl/base/internal/malloc_extension.cc           197
-rw-r--r--  absl/base/internal/malloc_extension.h            424
-rw-r--r--  absl/base/internal/malloc_extension_c.h           75
-rw-r--r--  absl/base/internal/malloc_extension_test.cc      102
-rw-r--r--  absl/base/internal/malloc_hook.cc                611
-rw-r--r--  absl/base/internal/malloc_hook.h                 333
-rw-r--r--  absl/base/internal/malloc_hook_c.h               131
-rw-r--r--  absl/base/internal/malloc_hook_invoke.h          198
-rw-r--r--  absl/base/internal/malloc_hook_mmap_linux.inc    236
-rw-r--r--  absl/base/internal/per_thread_tls.h               48
-rw-r--r--  absl/base/internal/raw_logging.cc                225
-rw-r--r--  absl/base/internal/raw_logging.h                 129
-rw-r--r--  absl/base/internal/scheduling_mode.h              54
-rw-r--r--  absl/base/internal/spinlock.cc                   243
-rw-r--r--  absl/base/internal/spinlock.h                    227
-rw-r--r--  absl/base/internal/spinlock_posix.inc             46
-rw-r--r--  absl/base/internal/spinlock_wait.cc               77
-rw-r--r--  absl/base/internal/spinlock_wait.h                94
-rw-r--r--  absl/base/internal/spinlock_win32.inc             37
-rw-r--r--  absl/base/internal/sysinfo.cc                    370
-rw-r--r--  absl/base/internal/sysinfo.h                      64
-rw-r--r--  absl/base/internal/sysinfo_test.cc                99
-rw-r--r--  absl/base/internal/thread_identity.cc            126
-rw-r--r--  absl/base/internal/thread_identity.h             240
-rw-r--r--  absl/base/internal/thread_identity_test.cc       124
-rw-r--r--  absl/base/internal/throw_delegate.cc             106
-rw-r--r--  absl/base/internal/throw_delegate.h               71
-rw-r--r--  absl/base/internal/tsan_mutex_interface.h         51
-rw-r--r--  absl/base/internal/unaligned_access.h            256
-rw-r--r--  absl/base/internal/unscaledcycleclock.cc         101
-rw-r--r--  absl/base/internal/unscaledcycleclock.h          118
-rw-r--r--  absl/base/invoke_test.cc                         199
-rw-r--r--  absl/base/macros.h                               201
-rw-r--r--  absl/base/optimization.h                         164
-rw-r--r--  absl/base/policy_checks.h                         99
-rw-r--r--  absl/base/port.h                                  26
-rw-r--r--  absl/base/raw_logging_test.cc                     50
-rw-r--r--  absl/base/spinlock_test_common.cc                265
-rw-r--r--  absl/base/thread_annotations.h                   247
-rw-r--r--  absl/base/throw_delegate_test.cc                  95
64 files changed, 11072 insertions(+), 0 deletions(-)
diff --git a/absl/base/BUILD.bazel b/absl/base/BUILD.bazel
new file mode 100644
index 000000000000..87a6d3e65117
--- /dev/null
+++ b/absl/base/BUILD.bazel
@@ -0,0 +1,369 @@
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+load(
+    "//absl:copts.bzl",
+    "ABSL_DEFAULT_COPTS",
+    "ABSL_TEST_COPTS",
+    "ABSL_EXCEPTIONS_FLAG",
+)
+load(
+    "//absl:test_dependencies.bzl",
+    "GUNIT_MAIN_DEPS_SELECTOR",
+    "GUNIT_MAIN_NO_LEAK_CHECK_DEPS_SELECTOR",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])  # Apache 2.0
+
+# Some header files in //base are directly exported for unusual use cases,
+# and the ABSL versions must also be exported for those users.
+
+exports_files(["thread_annotations.h"])
+
+cc_library(
+    name = "spinlock_wait",
+    srcs = [
+        "internal/spinlock_posix.inc",
+        "internal/spinlock_wait.cc",
+        "internal/spinlock_win32.inc",
+    ],
+    hdrs = [
+        "internal/scheduling_mode.h",
+        "internal/spinlock_wait.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    deps = [":core_headers"],
+)
+
+cc_library(
+    name = "config",
+    hdrs = [
+        "config.h",
+        "policy_checks.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+)
+
+cc_library(
+    name = "dynamic_annotations",
+    srcs = ["dynamic_annotations.cc"],
+    hdrs = ["dynamic_annotations.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    defines = ["__CLANG_SUPPORT_DYN_ANNOTATION__"],
+)
+
+cc_library(
+    name = "core_headers",
+    hdrs = [
+        "attributes.h",
+        "macros.h",
+        "optimization.h",
+        "port.h",
+        "thread_annotations.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    deps = [
+        ":config",
+        ":dynamic_annotations",
+    ],
+)
+
+cc_library(
+    name = "malloc_extension",
+    srcs = ["internal/malloc_extension.cc"],
+    hdrs = [
+        "internal/malloc_extension.h",
+        "internal/malloc_extension_c.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    deps = [
+        ":core_headers",
+        ":dynamic_annotations",
+    ],
+)
+
+# malloc_extension feels like it wants to be folded into this target, but
+# malloc_internal gets special build treatment to compile at -O3, so these
+# need to stay separate.
+cc_library(
+    name = "malloc_internal",
+    srcs = [
+        "internal/low_level_alloc.cc",
+        "internal/malloc_hook.cc",
+        "internal/malloc_hook_mmap_linux.inc",
+    ],
+    hdrs = [
+        "internal/low_level_alloc.h",
+        "internal/malloc_hook.h",
+        "internal/malloc_hook_c.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    textual_hdrs = [
+        "internal/malloc_hook_invoke.h",
+    ],
+    deps = [
+        ":base",
+        ":config",
+        ":core_headers",
+        ":dynamic_annotations",
+    ],
+)
+
+cc_library(
+    name = "base_internal",
+    hdrs = [
+        "internal/identity.h",
+        "internal/invoke.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+)
+
+cc_library(
+    name = "base",
+    srcs = [
+        "internal/cycleclock.cc",
+        "internal/raw_logging.cc",
+        "internal/spinlock.cc",
+        "internal/sysinfo.cc",
+        "internal/thread_identity.cc",
+        "internal/unscaledcycleclock.cc",
+    ],
+    hdrs = [
+        "call_once.h",
+        "casts.h",
+        "internal/atomic_hook.h",
+        "internal/cycleclock.h",
+        "internal/log_severity.h",
+        "internal/low_level_scheduling.h",
+        "internal/per_thread_tls.h",
+        "internal/raw_logging.h",
+        "internal/spinlock.h",
+        "internal/sysinfo.h",
+        "internal/thread_identity.h",
+        "internal/tsan_mutex_interface.h",
+        "internal/unscaledcycleclock.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    deps = [
+        ":base_internal",
+        ":config",
+        ":core_headers",
+        ":dynamic_annotations",
+        ":spinlock_wait",
+    ],
+)
+
+cc_test(
+    name = "bit_cast_test",
+    size = "small",
+    srcs = [
+        "bit_cast_test.cc",
+    ],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":base",
+        ":core_headers",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_library(
+    name = "throw_delegate",
+    srcs = ["internal/throw_delegate.cc"],
+    hdrs = ["internal/throw_delegate.h"],
+    copts = ABSL_DEFAULT_COPTS + ABSL_EXCEPTIONS_FLAG,
+    features = [
+        "-use_header_modules",  # b/33207452
+    ],
+    deps = [
+        ":base",
+        ":config",
+    ],
+)
+
+cc_test(
+    name = "throw_delegate_test",
+    srcs = ["throw_delegate_test.cc"],
+    copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG,
+    deps = [
+        ":throw_delegate",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_library(
+    name = "exception_testing",
+    testonly = 1,
+    hdrs = ["internal/exception_testing.h"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":config",
+        "@com_google_googletest//:gtest",
+    ],
+)
+
+cc_test(
+    name = "invoke_test",
+    size = "small",
+    srcs = ["invoke_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":base_internal",
+        "//absl/strings",
+        "//absl/memory",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+# Common test library made available for use in non-absl code that overrides
+# AbslInternalSpinLockDelay and AbslInternalSpinLockWake.
+cc_library(
+    name = "spinlock_test_common",
+    testonly = 1,
+    srcs = ["spinlock_test_common.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":base",
+        "//absl/synchronization",
+        "@com_google_googletest//:gtest",
+    ],
+    alwayslink = 1,
+)
+
+cc_test(
+    name = "spinlock_test",
+    size = "medium",
+    srcs = ["spinlock_test_common.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":base",
+        "//absl/synchronization",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
+    name = "endian",
+    hdrs = [
+        "internal/endian.h",
+        "internal/unaligned_access.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    deps = [
+        ":config",
+        ":core_headers",
+    ],
+)
+
+cc_test(
+    name = "endian_test",
+    srcs = ["internal/endian_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":base",
+        ":config",
+        ":endian",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_test(
+    name = "config_test",
+    srcs = ["config_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":config",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_test(
+    name = "call_once_test",
+    srcs = ["call_once_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":base",
+        ":core_headers",
+        "//absl/synchronization",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_test(
+    name = "raw_logging_test",
+    srcs = ["raw_logging_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":base",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "sysinfo_test",
+    size = "small",
+    srcs = ["internal/sysinfo_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":base",
+        "//absl/synchronization",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_test(
+    name = "low_level_alloc_test",
+    size = "small",
+    srcs = ["internal/low_level_alloc_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = select({
+        "//absl:windows": [],
+        "//conditions:default": ["-pthread"],
+    }),
+    deps = [":malloc_internal"],
+)
+
+cc_test(
+    name = "thread_identity_test",
+    size = "small",
+    srcs = ["internal/thread_identity_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = select({
+        "//absl:windows": [],
+        "//conditions:default": ["-pthread"],
+    }),
+    deps = [
+        ":base",
+        "//absl/synchronization",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_test(
+    name = "malloc_extension_system_malloc_test",
+    size = "small",
+    srcs = ["internal/malloc_extension_test.cc"],
+    copts = select({
+        "//absl:windows": [
+            "/DABSL_MALLOC_EXTENSION_TEST_ALLOW_MISSING_EXTENSION=1",
+        ],
+        "//conditions:default": [
+            "-DABSL_MALLOC_EXTENSION_TEST_ALLOW_MISSING_EXTENSION=1",
+        ],
+    }) + ABSL_TEST_COPTS,
+    features = [
+        # This test can't be run under lsan because the test requires system
+        # malloc, and lsan provides a competing malloc implementation.
+        "-leak_sanitize",
+    ],
+    deps = [
+        ":malloc_extension",
+    ] + select(GUNIT_MAIN_NO_LEAK_CHECK_DEPS_SELECTOR),
+)
diff --git a/absl/base/attributes.h b/absl/base/attributes.h
new file mode 100644
index 000000000000..5eecdabedb16
--- /dev/null
+++ b/absl/base/attributes.h
@@ -0,0 +1,469 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Various macros for C++ attributes
+// This file is used for both C and C++!
+//
+// Most macros here are exposing GCC or Clang features, and are stubbed out for
+// other compilers.
+// GCC attributes documentation:
+// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Function-Attributes.html
+// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Variable-Attributes.html
+// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Type-Attributes.html
+//
+// Most attributes in this file are already supported by GCC 4.7.
+// However, some of them are not supported in older versions of Clang.
+// Thus, we check __has_attribute() first. If the check fails, we check if we
+// are on GCC and assume the attribute exists on GCC (which is verified on GCC
+// 4.7).
+//
+// For sanitizer-related attributes, define the following macros
+// using -D along with the given value for -fsanitize:
+// - ADDRESS_SANITIZER with -fsanitize=address (GCC 4.8+, Clang)
+// - MEMORY_SANITIZER with -fsanitize=memory (Clang)
+// - THREAD_SANITIZER with -fsanitize=thread (GCC 4.8+, Clang)
+// - UNDEFINED_BEHAVIOR_SANITIZER with -fsanitize=undefined (GCC 4.9+, Clang)
+// - CONTROL_FLOW_INTEGRITY with -fsanitize=cfi (Clang)
+// Since these are only supported by GCC and Clang now, we only check for
+// __GNUC__ (GCC or Clang) and the above macros.
+#ifndef ABSL_BASE_ATTRIBUTES_H_
+#define ABSL_BASE_ATTRIBUTES_H_
+
+// ABSL_HAVE_ATTRIBUTE is a function-like feature checking macro.
+// It's a wrapper around __has_attribute, which is defined by GCC 5+ and Clang.
+// It evaluates to a nonzero constant integer if the attribute is supported
+// or 0 if not.
+// It evaluates to zero if __has_attribute is not defined by the compiler.
+// GCC: https://gcc.gnu.org/gcc-5/changes.html
+// Clang: https://clang.llvm.org/docs/LanguageExtensions.html
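+//
+// Illustrative usage sketch (the attribute and the MY_RETURNS_NONNULL macro
+// below are hypothetical examples, not defined by this header):
+//
+//   #if ABSL_HAVE_ATTRIBUTE(returns_nonnull)
+//   #define MY_RETURNS_NONNULL __attribute__((returns_nonnull))
+//   #else
+//   #define MY_RETURNS_NONNULL
+//   #endif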
+#ifdef __has_attribute
+#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x)
+#else
+#define ABSL_HAVE_ATTRIBUTE(x) 0
+#endif
+
+// ABSL_HAVE_CPP_ATTRIBUTE is a function-like feature checking macro that
+// accepts C++11 style attributes. It's a wrapper around __has_cpp_attribute,
+// defined by ISO C++ SD-6
+// (http://en.cppreference.com/w/cpp/experimental/feature_test). If we don't
+// find __has_cpp_attribute, this macro will evaluate to 0.
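+//
+// Illustrative usage sketch (the attribute and the MY_FALLTHROUGH macro below
+// are hypothetical examples, not defined by this header):
+//
+//   #if ABSL_HAVE_CPP_ATTRIBUTE(clang::fallthrough)
+//   #define MY_FALLTHROUGH [[clang::fallthrough]]
+//   #else
+//   #define MY_FALLTHROUGH
+//   #endif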
+#if defined(__cplusplus) && defined(__has_cpp_attribute)
+// NOTE: requiring __cplusplus above should not be necessary, but
+// works around https://bugs.llvm.org/show_bug.cgi?id=23435.
+#define ABSL_HAVE_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
+#else
+#define ABSL_HAVE_CPP_ATTRIBUTE(x) 0
+#endif
+
+// -----------------------------------------------------------------------------
+// Function Attributes
+// -----------------------------------------------------------------------------
+// GCC: https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
+// Clang: https://clang.llvm.org/docs/AttributeReference.html
+
+// ABSL_PRINTF_ATTRIBUTE, ABSL_SCANF_ATTRIBUTE
+// Tell the compiler to do printf format string checking if the
+// compiler supports it; see the 'format' attribute in
+// <http://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Function-Attributes.html>.
+//
+// N.B.: As the GCC manual states, "[s]ince non-static C++ methods
+// have an implicit 'this' argument, the arguments of such methods
+// should be counted from two, not one."
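+//
+// Illustrative usage (hypothetical function, not part of this header):
+//
+//   void MyLog(const char* format, ...) ABSL_PRINTF_ATTRIBUTE(1, 2);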
+#if ABSL_HAVE_ATTRIBUTE(format) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) \
+  __attribute__((__format__(__printf__, string_index, first_to_check)))
+#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check) \
+  __attribute__((__format__(__scanf__, string_index, first_to_check)))
+#else
+#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check)
+#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check)
+#endif
+
+// ABSL_ATTRIBUTE_ALWAYS_INLINE, ABSL_ATTRIBUTE_NOINLINE
+// For functions we want to force inline or not inline.
+// Introduced in gcc 3.1.
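+//
+// Illustrative usage (hypothetical functions, not part of this header):
+//
+//   ABSL_ATTRIBUTE_ALWAYS_INLINE inline int FastPath(int x) { return x + 1; }
+//   ABSL_ATTRIBUTE_NOINLINE int SlowPath(int x);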
+#if ABSL_HAVE_ATTRIBUTE(always_inline) || \
+    (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
+#define ABSL_HAVE_ATTRIBUTE_ALWAYS_INLINE 1
+#else
+#define ABSL_ATTRIBUTE_ALWAYS_INLINE
+#endif
+
+#if ABSL_HAVE_ATTRIBUTE(noinline) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_NOINLINE __attribute__((noinline))
+#define ABSL_HAVE_ATTRIBUTE_NOINLINE 1
+#else
+#define ABSL_ATTRIBUTE_NOINLINE
+#endif
+
+// ABSL_ATTRIBUTE_NO_TAIL_CALL
+// Prevent the compiler from optimizing away stack frames for functions which
+// end in a call to another function.
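+//
+// Illustrative usage (hypothetical function, not part of this header):
+//
+//   ABSL_ATTRIBUTE_NO_TAIL_CALL void RecordCallerStack();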
+#if ABSL_HAVE_ATTRIBUTE(disable_tail_calls)
+#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1
+#define ABSL_ATTRIBUTE_NO_TAIL_CALL __attribute__((disable_tail_calls))
+#elif defined(__GNUC__) && !defined(__clang__)
+#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1
+#define ABSL_ATTRIBUTE_NO_TAIL_CALL \
+  __attribute__((optimize("no-optimize-sibling-calls")))
+#else
+#define ABSL_ATTRIBUTE_NO_TAIL_CALL
+#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 0
+#endif
+// ABSL_ATTRIBUTE_WEAK
+// For weak functions
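+//
+// Illustrative usage (hypothetical function, not part of this header):
+//
+//   ABSL_ATTRIBUTE_WEAK void OptionalHook();  // may be overridden elsewhere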
+#if ABSL_HAVE_ATTRIBUTE(weak) || (defined(__GNUC__) && !defined(__clang__))
+#undef ABSL_ATTRIBUTE_WEAK
+#define ABSL_ATTRIBUTE_WEAK __attribute__((weak))
+#define ABSL_HAVE_ATTRIBUTE_WEAK 1
+#else
+#define ABSL_ATTRIBUTE_WEAK
+#define ABSL_HAVE_ATTRIBUTE_WEAK 0
+#endif
+// ABSL_ATTRIBUTE_NONNULL
+// Tell the compiler either that a particular function parameter
+// should be a non-null pointer, or that all pointer arguments should
+// be non-null.
+//
+// Note: As the GCC manual states, "[s]ince non-static C++ methods
+// have an implicit 'this' argument, the arguments of such methods
+// should be counted from two, not one."
+//
+// Args are indexed starting at 1.
+// For non-static class member functions, the implicit "this" argument
+// is arg 1, and the first explicit argument is arg 2.
+// For static class member functions, there is no implicit "this", and
+// the first explicit argument is arg 1.
+//
+//   /* arg_a cannot be null, but arg_b can */
+//   void Function(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(1);
+//
+//   class C {
+//     /* arg_a cannot be null, but arg_b can */
+//     void Method(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(2);
+//
+//     /* arg_a cannot be null, but arg_b can */
+//     static void StaticMethod(void* arg_a, void* arg_b)
+//     ABSL_ATTRIBUTE_NONNULL(1);
+//   };
+//
+// If no arguments are provided, then all pointer arguments should be non-null.
+//
+//  /* No pointer arguments may be null. */
+//  void Function(void* arg_a, void* arg_b, int arg_c) ABSL_ATTRIBUTE_NONNULL();
+//
+// NOTE: The GCC nonnull attribute actually accepts a list of arguments, but
+// ABSL_ATTRIBUTE_NONNULL does not.
+#if ABSL_HAVE_ATTRIBUTE(nonnull) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_NONNULL(arg_index) __attribute__((nonnull(arg_index)))
+#else
+#define ABSL_ATTRIBUTE_NONNULL(...)
+#endif
+// ABSL_ATTRIBUTE_NORETURN
+// Tell the compiler that a given function never returns
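+//
+// Illustrative usage (hypothetical function, not part of this header):
+//
+//   ABSL_ATTRIBUTE_NORETURN void FatalError(const char* message);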
+#if ABSL_HAVE_ATTRIBUTE(noreturn) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_NORETURN __attribute__((noreturn))
+#elif defined(_MSC_VER)
+#define ABSL_ATTRIBUTE_NORETURN __declspec(noreturn)
+#else
+#define ABSL_ATTRIBUTE_NORETURN
+#endif
+// ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS
+// Tell AddressSanitizer (or other memory testing tools) to ignore a given
+// function. Useful for cases when a function reads random locations on stack,
+// calls _exit from a cloned subprocess, deliberately accesses buffer
+// out of bounds or does other scary things with memory.
+// NOTE: GCC supports AddressSanitizer(asan) since 4.8.
+// https://gcc.gnu.org/gcc-4.8/changes.html
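+//
+// Illustrative usage (hypothetical function, not part of this header):
+//
+//   ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS void ReadBeyondBufferForTesting();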
+#if defined(__GNUC__) && defined(ADDRESS_SANITIZER)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
+// Tell MemorySanitizer to relax the handling of a given function. All "Use of
+// uninitialized value" warnings from such functions will be suppressed, and all
+// values loaded from memory will be considered fully initialized.
+// This is similar to the ADDRESS_SANITIZER attribute above, but deals with
+// initializedness rather than addressability issues.
+// NOTE: MemorySanitizer(msan) is supported by Clang but not GCC.
+#if defined(__GNUC__) && defined(MEMORY_SANITIZER)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_THREAD
+// Tell ThreadSanitizer to not instrument a given function.
+// If you are adding this attribute, please cc dynamic-tools@ on the cl.
+// NOTE: GCC supports ThreadSanitizer(tsan) since 4.8.
+// https://gcc.gnu.org/gcc-4.8/changes.html
+#if defined(__GNUC__) && defined(THREAD_SANITIZER)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED
+// Tell UndefinedSanitizer to ignore a given function. Useful for cases
+// where certain behavior (e.g. division by zero) is being used intentionally.
+// NOTE: GCC supports UndefinedBehaviorSanitizer(ubsan) since 4.9.
+// https://gcc.gnu.org/gcc-4.9/changes.html
+#if defined(__GNUC__) && \
+    (defined(UNDEFINED_BEHAVIOR_SANITIZER) || defined(ADDRESS_SANITIZER))
+#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \
+  __attribute__((no_sanitize("undefined")))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_CFI
+// Tell ControlFlowIntegrity sanitizer to not instrument a given function.
+// See https://clang.llvm.org/docs/ControlFlowIntegrity.html for details.
+#if defined(__GNUC__) && defined(CONTROL_FLOW_INTEGRITY)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI __attribute__((no_sanitize("cfi")))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI
+#endif
+
+// ABSL_ATTRIBUTE_SECTION
+// Labeled sections are not supported on Darwin/iOS.
+#ifdef ABSL_HAVE_ATTRIBUTE_SECTION
+#error ABSL_HAVE_ATTRIBUTE_SECTION cannot be directly set
+#elif (ABSL_HAVE_ATTRIBUTE(section) ||                \
+       (defined(__GNUC__) && !defined(__clang__))) && \
+    !defined(__APPLE__)
+#define ABSL_HAVE_ATTRIBUTE_SECTION 1
+//
+// Tell the compiler/linker to put a given function into a section and define
+// "__start_ ## name" and "__stop_ ## name" symbols to bracket the section.
+// This functionality is supported by GNU linker.
+// Any function with ABSL_ATTRIBUTE_SECTION must not be inlined, or it will
+// be placed into whatever section its caller is placed into.
+//
+#ifndef ABSL_ATTRIBUTE_SECTION
+#define ABSL_ATTRIBUTE_SECTION(name) \
+  __attribute__((section(#name))) __attribute__((noinline))
+#endif
+// Tell the compiler/linker to put a given variable into a section and define
+// "__start_ ## name" and "__stop_ ## name" symbols to bracket the section.
+// This functionality is supported by GNU linker.
+#ifndef ABSL_ATTRIBUTE_SECTION_VARIABLE
+#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) __attribute__((section(#name)))
+#endif
+//
+// Weak section declaration to be used as a global declaration
+// for ABSL_ATTRIBUTE_SECTION_START|STOP(name) to compile and link
+// even without functions with ABSL_ATTRIBUTE_SECTION(name).
+// ABSL_DEFINE_ATTRIBUTE_SECTION_VARS should appear in exactly one file; it's
+// a no-op on ELF but not on Mach-O.
+//
+#ifndef ABSL_DECLARE_ATTRIBUTE_SECTION_VARS
+#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \
+  extern char __start_##name[] ABSL_ATTRIBUTE_WEAK;    \
+  extern char __stop_##name[] ABSL_ATTRIBUTE_WEAK
+#endif
+#ifndef ABSL_DEFINE_ATTRIBUTE_SECTION_VARS
+#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name)
+#endif
+
+// Return void* pointers to start/end of a section of code with
+// functions having ABSL_ATTRIBUTE_SECTION(name).
+// Returns 0 if no such functions exist.
+// One must ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) for this to compile and
+// link.
+//
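+// Illustrative usage (the section name "my_probes" is a hypothetical example):
+//
+//   ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(my_probes);
+//   void* probes_begin = ABSL_ATTRIBUTE_SECTION_START(my_probes);
+//   void* probes_end = ABSL_ATTRIBUTE_SECTION_STOP(my_probes);
+//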
+#define ABSL_ATTRIBUTE_SECTION_START(name) \
+  (reinterpret_cast<void *>(__start_##name))
+#define ABSL_ATTRIBUTE_SECTION_STOP(name) \
+  (reinterpret_cast<void *>(__stop_##name))
+#else  // !ABSL_HAVE_ATTRIBUTE_SECTION
+
+#define ABSL_HAVE_ATTRIBUTE_SECTION 0
+
+// provide dummy definitions
+#define ABSL_ATTRIBUTE_SECTION(name)
+#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name)
+#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void *>(0))
+#define ABSL_ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void *>(0))
+#endif  // ABSL_ATTRIBUTE_SECTION
+
+// ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+// Support for aligning the stack on 32-bit x86.
+#if ABSL_HAVE_ATTRIBUTE(force_align_arg_pointer) || \
+    (defined(__GNUC__) && !defined(__clang__))
+#if defined(__i386__)
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC \
+  __attribute__((force_align_arg_pointer))
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
+#elif defined(__x86_64__)
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (1)
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+#else  // !__i386__ && !__x86_64
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+#endif  // __i386__
+#else
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
+#endif
+
+// ABSL_MUST_USE_RESULT
+// Tell the compiler to warn about unused return values for functions declared
+// with this macro. The macro must appear as the very first part of a function
+// declaration or definition:
+//
+//   ABSL_MUST_USE_RESULT Sprocket* AllocateSprocket();
+//
+// This placement has the broadest compatibility with GCC, Clang, and MSVC, with
+// both defs and decls, and with GCC-style attributes, MSVC declspec, C++11
+// and C++17 attributes.
+//
+// ABSL_MUST_USE_RESULT allows using cast-to-void to suppress the unused result
+// warning. For that, warn_unused_result is used only for clang but not for gcc.
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66425
+//
+// Note: past advice was to place the macro after the argument list.
+#if ABSL_HAVE_ATTRIBUTE(nodiscard)
+#define ABSL_MUST_USE_RESULT [[nodiscard]]
+#elif defined(__clang__) && ABSL_HAVE_ATTRIBUTE(warn_unused_result)
+#define ABSL_MUST_USE_RESULT __attribute__((warn_unused_result))
+#else
+#define ABSL_MUST_USE_RESULT
+#endif
+
+// ABSL_ATTRIBUTE_HOT, ABSL_ATTRIBUTE_COLD
+// Tell GCC that a function is hot or cold. GCC can use this information to
+// improve static analysis, i.e. a conditional branch to a cold function
+// is likely to be not-taken.
+// This annotation is used for function declarations, e.g.:
+//   int foo() ABSL_ATTRIBUTE_HOT;
+#if ABSL_HAVE_ATTRIBUTE(hot) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_HOT __attribute__((hot))
+#else
+#define ABSL_ATTRIBUTE_HOT
+#endif
+
+#if ABSL_HAVE_ATTRIBUTE(cold) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_COLD __attribute__((cold))
+#else
+#define ABSL_ATTRIBUTE_COLD
+#endif
+
+// ABSL_XRAY_ALWAYS_INSTRUMENT, ABSL_XRAY_NEVER_INSTRUMENT, ABSL_XRAY_LOG_ARGS
+//
+// We define the ABSL_XRAY_ALWAYS_INSTRUMENT and ABSL_XRAY_NEVER_INSTRUMENT
+// macros, used as attributes, to mark functions that must always or never be
+// instrumented by XRay. Currently, this is only supported in Clang/LLVM.
+//
+// For reference on the LLVM XRay instrumentation, see
+// http://llvm.org/docs/XRay.html.
+//
+// A function with the XRAY_ALWAYS_INSTRUMENT macro attribute in its declaration
+// will always get the XRay instrumentation sleds. These sleds may introduce
+// some binary size and runtime overhead and must be used sparingly.
+//
+// These attributes only take effect when the following conditions are met:
+//
+//   - The file/target is built in at least C++11 mode, with a Clang compiler
+//   that supports XRay attributes.
+//   - The file/target is built with the -fxray-instrument flag set for the
+//   Clang/LLVM compiler.
+//   - The function is defined in the translation unit (the compiler honors the
+//   attribute in either the definition or the declaration, and must match).
+//
+// There are cases when, even when building with XRay instrumentation, users
+// might want to control specifically which functions are instrumented for a
+// particular build using special-case lists provided to the compiler. These
+// special case lists are provided to Clang via the
+// -fxray-always-instrument=... and -fxray-never-instrument=... flags. The
+// attributes in source take precedence over these special-case lists.
+//
+// To disable the XRay attributes at build-time, users may define
+// ABSL_NO_XRAY_ATTRIBUTES. Do NOT define ABSL_NO_XRAY_ATTRIBUTES on specific
+// packages/targets, as this may lead to conflicting definitions of functions at
+// link-time.
+//
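+// Illustrative usage (hypothetical function, not part of this header):
+//
+//   ABSL_XRAY_NEVER_INSTRUMENT void HotInnerLoopHelper();
+//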
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_always_instrument) && \
+    !defined(ABSL_NO_XRAY_ATTRIBUTES)
+#define ABSL_XRAY_ALWAYS_INSTRUMENT [[clang::xray_always_instrument]]
+#define ABSL_XRAY_NEVER_INSTRUMENT [[clang::xray_never_instrument]]
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_log_args)
+#define ABSL_XRAY_LOG_ARGS(N) \
+    [[clang::xray_always_instrument, clang::xray_log_args(N)]]
+#else
+#define ABSL_XRAY_LOG_ARGS(N) [[clang::xray_always_instrument]]
+#endif
+#else
+#define ABSL_XRAY_ALWAYS_INSTRUMENT
+#define ABSL_XRAY_NEVER_INSTRUMENT
+#define ABSL_XRAY_LOG_ARGS(N)
+#endif
+
+// -----------------------------------------------------------------------------
+// Variable Attributes
+// -----------------------------------------------------------------------------
+
+// ABSL_ATTRIBUTE_UNUSED
+// Prevent the compiler from complaining about or optimizing away variables
+// that appear unused.
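+//
+// Illustrative usage (hypothetical variable, not part of this header):
+//
+//   ABSL_ATTRIBUTE_UNUSED static const int kDebugOnlyConstant = 42;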
+#if ABSL_HAVE_ATTRIBUTE(unused) || (defined(__GNUC__) && !defined(__clang__))
+#undef ABSL_ATTRIBUTE_UNUSED
+#define ABSL_ATTRIBUTE_UNUSED __attribute__((__unused__))
+#else
+#define ABSL_ATTRIBUTE_UNUSED
+#endif
+// ABSL_ATTRIBUTE_INITIAL_EXEC
+// Tell the compiler to use "initial-exec" mode for a thread-local variable.
+// See http://people.redhat.com/drepper/tls.pdf for the gory details.
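+//
+// Illustrative usage (hypothetical variable, not part of this header):
+//
+//   thread_local int tls_counter ABSL_ATTRIBUTE_INITIAL_EXEC = 0;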
+#if ABSL_HAVE_ATTRIBUTE(tls_model) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_INITIAL_EXEC __attribute__((tls_model("initial-exec")))
+#else
+#define ABSL_ATTRIBUTE_INITIAL_EXEC
+#endif
+
+// ABSL_ATTRIBUTE_PACKED
+// Prevent the compiler from padding a structure to natural alignment
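+//
+// Illustrative usage (hypothetical struct, not part of this header):
+//
+//   struct ABSL_ATTRIBUTE_PACKED WireFormatHeader {
+//     char tag;
+//     uint32_t length;
+//   };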
+#if ABSL_HAVE_ATTRIBUTE(packed) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_PACKED __attribute__((__packed__))
+#else
+#define ABSL_ATTRIBUTE_PACKED
+#endif
+
+// ABSL_CONST_INIT
+// A variable declaration annotated with the ABSL_CONST_INIT attribute will
+// not compile (on supported platforms) unless the variable has a constant
+// initializer. This is useful for variables with static and thread storage
+// duration, because it guarantees that they will not suffer from the so-called
+// "static init order fiasco".
+//
+// Sample usage:
+//
+// ABSL_CONST_INIT static MyType my_var = MakeMyType(...);
+//
+// Note that this attribute is redundant if the variable is declared constexpr.
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization)
+// NOLINTNEXTLINE(whitespace/braces) (b/36288871)
+#define ABSL_CONST_INIT [[clang::require_constant_initialization]]
+#else
+#define ABSL_CONST_INIT
+#endif  // ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization)
+
+#endif  // ABSL_BASE_ATTRIBUTES_H_
diff --git a/absl/base/bit_cast_test.cc b/absl/base/bit_cast_test.cc
new file mode 100644
index 000000000000..8cd878d756e3
--- /dev/null
+++ b/absl/base/bit_cast_test.cc
@@ -0,0 +1,107 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Unit test for bit_cast template.
+
+#include <cstdint>
+#include <cstring>
+
+#include "gtest/gtest.h"
+#include "absl/base/casts.h"
+#include "absl/base/macros.h"
+
+namespace absl {
+namespace {
+
+template <int N>
+struct marshall { char buf[N]; };
+
+template <typename T>
+void TestMarshall(const T values[], int num_values) {
+  for (int i = 0; i < num_values; ++i) {
+    T t0 = values[i];
+    marshall<sizeof(T)> m0 = absl::bit_cast<marshall<sizeof(T)> >(t0);
+    T t1 = absl::bit_cast<T>(m0);
+    marshall<sizeof(T)> m1 = absl::bit_cast<marshall<sizeof(T)> >(t1);
+    ASSERT_EQ(0, memcmp(&t0, &t1, sizeof(T)));
+    ASSERT_EQ(0, memcmp(&m0, &m1, sizeof(T)));
+  }
+}
+
+// Convert back and forth to an integral type.  The C++ standard does
+// not guarantee this will work, but we test that this works on all the
+// platforms we support.
+//
+// Likewise, we below make assumptions about sizeof(float) and
+// sizeof(double) which the standard does not guarantee, but which hold on the
+// platforms we support.
+
+template <typename T, typename I>
+void TestIntegral(const T values[], int num_values) {
+  for (int i = 0; i < num_values; ++i) {
+    T t0 = values[i];
+    I i0 = absl::bit_cast<I>(t0);
+    T t1 = absl::bit_cast<T>(i0);
+    I i1 = absl::bit_cast<I>(t1);
+    ASSERT_EQ(0, memcmp(&t0, &t1, sizeof(T)));
+    ASSERT_EQ(i0, i1);
+  }
+}
+
+TEST(BitCast, Bool) {
+  static const bool bool_list[] = { false, true };
+  TestMarshall<bool>(bool_list, ABSL_ARRAYSIZE(bool_list));
+}
+
+TEST(BitCast, Int32) {
+  static const int32_t int_list[] =
+    { 0, 1, 100, 2147483647, -1, -100, -2147483647, -2147483647-1 };
+  TestMarshall<int32_t>(int_list, ABSL_ARRAYSIZE(int_list));
+}
+
+TEST(BitCast, Int64) {
+  static const int64_t int64_list[] =
+    { 0, 1, 1LL << 40, -1, -(1LL<<40) };
+  TestMarshall<int64_t>(int64_list, ABSL_ARRAYSIZE(int64_list));
+}
+
+TEST(BitCast, Uint64) {
+  static const uint64_t uint64_list[] =
+    { 0, 1, 1LLU << 40, 1LLU << 63 };
+  TestMarshall<uint64_t>(uint64_list, ABSL_ARRAYSIZE(uint64_list));
+}
+
+TEST(BitCast, Float) {
+  static const float float_list[] =
+    { 0.0f, 1.0f, -1.0f, 10.0f, -10.0f,
+      1e10f, 1e20f, 1e-10f, 1e-20f,
+      2.71828f, 3.14159f };
+  TestMarshall<float>(float_list, ABSL_ARRAYSIZE(float_list));
+  TestIntegral<float, int>(float_list, ABSL_ARRAYSIZE(float_list));
+  TestIntegral<float, unsigned>(float_list, ABSL_ARRAYSIZE(float_list));
+}
+
+TEST(BitCast, Double) {
+  static const double double_list[] =
+    { 0.0, 1.0, -1.0, 10.0, -10.0,
+      1e10, 1e100, 1e-10, 1e-100,
+      2.718281828459045,
+      3.141592653589793238462643383279502884197169399375105820974944 };
+  TestMarshall<double>(double_list, ABSL_ARRAYSIZE(double_list));
+  TestIntegral<double, int64_t>(double_list, ABSL_ARRAYSIZE(double_list));
+  TestIntegral<double, uint64_t>(double_list, ABSL_ARRAYSIZE(double_list));
+}
+
+}  // namespace
+}  // namespace absl
diff --git a/absl/base/call_once.h b/absl/base/call_once.h
new file mode 100644
index 000000000000..478a41c1fafb
--- /dev/null
+++ b/absl/base/call_once.h
@@ -0,0 +1,210 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: call_once.h
+// -----------------------------------------------------------------------------
+//
+// This header file provides an Abseil version of `std::call_once` for invoking
+// a given function at most once, across all threads. This Abseil version is
+// faster than the C++11 version and incorporates the C++17 argument-passing
+// fix, so that (for example) non-const references may be passed to the invoked
+// function.
+
+#ifndef ABSL_BASE_CALL_ONCE_H_
+#define ABSL_BASE_CALL_ONCE_H_
+
+#include <atomic>
+#include <cstdint>
+#include <type_traits>
+
+#include "absl/base/internal/invoke.h"
+#include "absl/base/internal/low_level_scheduling.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock_wait.h"
+
+namespace absl {
+
+class once_flag;
+
+namespace base_internal {
+// Implementation detail.
+std::atomic<uint32_t>* ControlWord(absl::once_flag* flag);
+}  // namespace base_internal
+
+// call_once()
+//
+// For all invocations using a given `once_flag`, invokes a given `fn` exactly
+// once across all threads. The first call to `call_once()` with a particular
+// `once_flag` argument (that does not throw an exception) will run the
+// specified function with the provided `args`; other calls with the same
+// `once_flag` argument will not run the function, but will wait
+// for the provided function to finish running (if it is still running).
+//
+// This provides a safe, simple, and fast mechanism for one-time
+// initialization in a multi-threaded process.
+//
+// Example:
+//
+// class MyInitClass {
+//  public:
+//  ...
+//  mutable absl::once_flag once_;
+//
+//  MyInitClass* init() const {
+//    absl::call_once(once_, &MyInitClass::Init, this);
+//    return ptr_;
+//  }
+// };
+//
+template <typename Callable, typename... Args>
+void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args);
+
+// once_flag
+//
+// Objects of this type are used to distinguish calls to `call_once()` and
+// ensure the provided function is only invoked once across all threads. This
+// type is not copyable or movable. However, it has a `constexpr`
+// constructor, and is safe to use as a namespace-scoped global variable.
+class once_flag {
+ public:
+  constexpr once_flag() : control_(0) {}
+  once_flag(const once_flag&) = delete;
+  once_flag& operator=(const once_flag&) = delete;
+
+ private:
+  friend std::atomic<uint32_t>* base_internal::ControlWord(once_flag* flag);
+  std::atomic<uint32_t> control_;
+};
+
+//------------------------------------------------------------------------------
+// End of public interfaces.
+// Implementation details follow.
+//------------------------------------------------------------------------------
+
+namespace base_internal {
+
+// Like call_once, but uses KERNEL_ONLY scheduling. Intended to be used to
+// initialize entities used by the scheduler implementation.
+template <typename Callable, typename... Args>
+void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args);
+
+// Disables scheduling while on stack when scheduling mode is non-cooperative.
+// No effect for cooperative scheduling modes.
+class SchedulingHelper {
+ public:
+  explicit SchedulingHelper(base_internal::SchedulingMode mode) : mode_(mode) {
+    if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) {
+      guard_result_ = base_internal::SchedulingGuard::DisableRescheduling();
+    }
+  }
+
+  ~SchedulingHelper() {
+    if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) {
+      base_internal::SchedulingGuard::EnableRescheduling(guard_result_);
+    }
+  }
+
+ private:
+  base_internal::SchedulingMode mode_;
+  bool guard_result_;
+};
+
+// Bit patterns for call_once state machine values.  Internal implementation
+// detail, not for use by clients.
+//
+// The bit patterns are arbitrarily chosen from unlikely values, to aid in
+// debugging.  However, kOnceInit must be 0, so that a zero-initialized
+// once_flag will be valid for immediate use.
+enum {
+  kOnceInit = 0,
+  kOnceRunning = 0x65C2937B,
+  kOnceWaiter = 0x05A308D2,
+  kOnceDone = 0x3F2D8AB0,
+};
+
+template <typename Callable, typename... Args>
+void CallOnceImpl(std::atomic<uint32_t>* control,
+                  base_internal::SchedulingMode scheduling_mode, Callable&& fn,
+                  Args&&... args) {
+#ifndef NDEBUG
+  {
+    uint32_t old_control = control->load(std::memory_order_acquire);
+    if (old_control != kOnceInit &&
+        old_control != kOnceRunning &&
+        old_control != kOnceWaiter &&
+        old_control != kOnceDone) {
+      ABSL_RAW_LOG(
+          FATAL,
+          "Unexpected value for control word: %d. Either the control word "
+          "has non-static storage duration (where GoogleOnceDynamic might "
+          "be appropriate), or there's been a memory corruption.",
+          old_control);
+    }
+  }
+#endif  // NDEBUG
+  static const base_internal::SpinLockWaitTransition trans[] = {
+      {kOnceInit, kOnceRunning, true},
+      {kOnceRunning, kOnceWaiter, false},
+      {kOnceDone, kOnceDone, true}};
+
+  // Must do this before potentially modifying control word's state.
+  base_internal::SchedulingHelper maybe_disable_scheduling(scheduling_mode);
+  // Short circuit the simplest case to avoid procedure call overhead.
+  uint32_t old_control = kOnceInit;
+  if (control->compare_exchange_strong(old_control, kOnceRunning,
+                                       std::memory_order_acquire,
+                                       std::memory_order_relaxed) ||
+      base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans,
+                                  scheduling_mode) == kOnceInit) {
+    base_internal::Invoke(std::forward<Callable>(fn),
+                          std::forward<Args>(args)...);
+    old_control = control->load(std::memory_order_relaxed);
+    control->store(base_internal::kOnceDone, std::memory_order_release);
+    if (old_control == base_internal::kOnceWaiter) {
+      base_internal::SpinLockWake(control, true);
+    }
+  }  // else *control is already kOnceDone
+}
+
+inline std::atomic<uint32_t>* ControlWord(once_flag* flag) {
+  return &flag->control_;
+}
+
+template <typename Callable, typename... Args>
+void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args) {
+  std::atomic<uint32_t>* once = base_internal::ControlWord(flag);
+  uint32_t s = once->load(std::memory_order_acquire);
+  if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) {
+    base_internal::CallOnceImpl(once, base_internal::SCHEDULE_KERNEL_ONLY,
+                                std::forward<Callable>(fn),
+                                std::forward<Args>(args)...);
+  }
+}
+
+}  // namespace base_internal
+
+template <typename Callable, typename... Args>
+void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args) {
+  std::atomic<uint32_t>* once = base_internal::ControlWord(&flag);
+  uint32_t s = once->load(std::memory_order_acquire);
+  if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) {
+    base_internal::CallOnceImpl(
+        once, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL,
+        std::forward<Callable>(fn), std::forward<Args>(args)...);
+  }
+}
+
+}  // namespace absl
+
+#endif  // ABSL_BASE_CALL_ONCE_H_
diff --git a/absl/base/call_once_test.cc b/absl/base/call_once_test.cc
new file mode 100644
index 000000000000..de235432bb8e
--- /dev/null
+++ b/absl/base/call_once_test.cc
@@ -0,0 +1,102 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/call_once.h"
+
+#include <atomic>
+#include <thread>
+
+#include "absl/base/thread_annotations.h"
+#include "absl/synchronization/mutex.h"
+#include "gtest/gtest.h"
+
+namespace absl {
+namespace {
+
+absl::once_flag once;
+Mutex counters_mu;
+
+int running_thread_count GUARDED_BY(counters_mu) = 0;
+int call_once_invoke_count GUARDED_BY(counters_mu) = 0;
+int call_once_finished_count GUARDED_BY(counters_mu) = 0;
+int call_once_return_count GUARDED_BY(counters_mu) = 0;
+bool done_blocking GUARDED_BY(counters_mu) = false;
+
+// Function to be called from absl::call_once.  Waits for a notification.
+void WaitAndIncrement() {
+  counters_mu.Lock();
+  ++call_once_invoke_count;
+  counters_mu.Unlock();
+
+  counters_mu.LockWhen(Condition(&done_blocking));
+  ++call_once_finished_count;
+  counters_mu.Unlock();
+}
+
+void ThreadBody() {
+  counters_mu.Lock();
+  ++running_thread_count;
+  counters_mu.Unlock();
+
+  absl::call_once(once, WaitAndIncrement);
+
+  counters_mu.Lock();
+  ++call_once_return_count;
+  counters_mu.Unlock();
+}
+
+// Returns true if all threads are set up for the test.
+bool ThreadsAreSetup(void*) EXCLUSIVE_LOCKS_REQUIRED(counters_mu) {
+  // All ten threads must be running, and WaitAndIncrement should be blocked.
+  return running_thread_count == 10 && call_once_invoke_count == 1;
+}
+
+TEST(CallOnceTest, ExecutionCount) {
+  std::vector<std::thread> threads;
+
+  // Start 10 threads all calling call_once on the same once_flag.
+  for (int i = 0; i < 10; ++i) {
+    threads.emplace_back(ThreadBody);
+  }
+
+
+  // Wait until all ten threads have started, and WaitAndIncrement has been
+  // invoked.
+  counters_mu.LockWhen(Condition(ThreadsAreSetup, nullptr));
+
+  // WaitAndIncrement should have been invoked by exactly one call_once()
+  // instance.  That thread should be blocking on a notification, and all other
+  // call_once instances should be blocking as well.
+  EXPECT_EQ(call_once_invoke_count, 1);
+  EXPECT_EQ(call_once_finished_count, 0);
+  EXPECT_EQ(call_once_return_count, 0);
+
+  // Allow WaitAndIncrement to finish executing.  Once it does, the other
+  // call_once waiters will be unblocked.
+  done_blocking = true;
+  counters_mu.Unlock();
+
+  for (std::thread& thread : threads) {
+    thread.join();
+  }
+
+  counters_mu.Lock();
+  EXPECT_EQ(call_once_invoke_count, 1);
+  EXPECT_EQ(call_once_finished_count, 1);
+  EXPECT_EQ(call_once_return_count, 10);
+  counters_mu.Unlock();
+}
+
+}  // namespace
+}  // namespace absl
diff --git a/absl/base/casts.h b/absl/base/casts.h
new file mode 100644
index 000000000000..2a0adc29504b
--- /dev/null
+++ b/absl/base/casts.h
@@ -0,0 +1,141 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: casts.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines casting templates to fit use cases not covered by
+// the standard casts provided in the C++ standard. As with all cast operations,
+// use these with caution and only if alternatives do not exist.
+//
+
+#ifndef ABSL_BASE_CASTS_H_
+#define ABSL_BASE_CASTS_H_
+
+#include <cstring>
+#include <type_traits>
+
+#include "absl/base/internal/identity.h"
+
+namespace absl {
+
+// implicit_cast()
+//
+// Performs an implicit conversion between types following the language
+// rules for implicit conversion; if an implicit conversion is otherwise
+// allowed by the language in the given context, this function performs such an
+// implicit conversion.
+//
+// Example:
+//
+//   // If the context allows implicit conversion:
+//   From from;
+//   To to = from;
+//
+//   // Such code can be replaced by:
+//   implicit_cast<To>(from);
+//
+// An `implicit_cast()` may also be used to annotate numeric type conversions
+// that, although safe, may produce compiler warnings (such as `long` to `int`).
+// Additionally, an `implicit_cast()` is useful within return statements to
+// indicate a specific implicit conversion is being undertaken.
+//
+// Example:
+//
+//   return implicit_cast<double>(size_in_bytes) / capacity_;
+//
+// Annotating code with `implicit_cast()` allows you to explicitly select
+// particular overloads and template instantiations, while providing a safer
+// cast than `reinterpret_cast()` or `static_cast()`.
+//
+// Additionally, an `implicit_cast()` can be used to allow upcasting within a
+// type hierarchy where incorrect use of `static_cast()` could accidentally
+// allow downcasting.
+//
+// Finally, an `implicit_cast()` can be used to perform implicit conversions
+// from unrelated types that otherwise couldn't be implicitly cast directly;
+// C++ will normally only implicitly cast "one step" in such conversions.
+//
+// That is, if C is a type which can be implicitly converted to B, with B being
+// a type that can be implicitly converted to A, an `implicit_cast()` can be
+// used to convert C to B (which the compiler can then implicitly convert to A
+// using language rules).
+//
+// Example:
+//
+//   // Assume an object C is convertible to B, which is implicitly convertible
+//   // to A
+//   A a = implicit_cast<B>(C);
+//
+// Such implicit cast chaining may be useful within template logic.
+template <typename To>
+inline To implicit_cast(typename absl::internal::identity_t<To> to) {
+  return to;
+}
+
+// bit_cast()
+//
+// Performs a bitwise cast on a type without changing the underlying bit
+// representation of that type's value. The two types must be of the same size
+// and both types must be trivially copyable. As with most casts, use with
+// caution. A `bit_cast()` might be needed when you need to temporarily treat a
+// type as some other type, such as in the following cases:
+//
+//    * Serialization (casting temporarily to `char *` for those purposes is
+//      always allowed by the C++ standard)
+//    * Managing the individual bits of a type within mathematical operations
+//      that are not normally accessible through that type
+//    * Casting non-pointer types to pointer types (casting the other way is
+//      allowed by `reinterpret_cast()` but round-trips cannot occur the other
+//      way).
+//
+// Example:
+//
+//   float f = 3.14159265358979;
+//   int i = bit_cast<int32_t>(f);
+//   // i = 0x40490fdb
+//
+// Casting non-pointer types to pointer types and then dereferencing them
+// traditionally produces undefined behavior.
+//
+// Example:
+//
+//   // WRONG
+//   float f = 3.14159265358979;            // WRONG
+//   int i = * reinterpret_cast<int*>(&f);  // WRONG
+//
+// The address-casting method produces undefined behavior according to the ISO
+// C++ specification section [basic.lval]. Roughly, this section says: if an
+// object in memory has one type, and a program accesses it with a different
+// type, the result is undefined behavior for most values of "different type".
+//
+// Such casting results in type punning: holding an object in memory of one type
+// and reading its bits back using a different type. A `bit_cast()` avoids this
+// issue by implementing its casts using `memcpy()`, which avoids introducing
+// this undefined behavior.
+template <typename Dest, typename Source>
+inline Dest bit_cast(const Source& source) {
+  static_assert(sizeof(Dest) == sizeof(Source),
+                "Source and destination types should have equal sizes.");
+
+  Dest dest;
+  memcpy(&dest, &source, sizeof(dest));
+  return dest;
+}
+
+}  // namespace absl
+
+#endif  // ABSL_BASE_CASTS_H_
diff --git a/absl/base/config.h b/absl/base/config.h
new file mode 100644
index 000000000000..cf47f84c3356
--- /dev/null
+++ b/absl/base/config.h
@@ -0,0 +1,367 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: config.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a set of macros for checking the presence of
+// important compiler and platform features. Such macros can be used to
+// produce portable code by parameterizing compilation based on the presence or
+// lack of a given feature.
+//
+// We define a "feature" as some interface we wish to program to: for example,
+// a library function or system call. A value of `1` indicates support for
+// that feature; any other value indicates the feature support is undefined.
+//
+// Example:
+//
+// Suppose a programmer wants to write a program that uses the 'mmap()' system
+// call. The Abseil macro for that feature (`ABSL_HAVE_MMAP`) allows you to
+// selectively include the `mmap.h` header and bracket code using that feature
+// in the macro:
+//
+//   #include "absl/base/config.h"
+//
+//   #ifdef ABSL_HAVE_MMAP
+//   #include "sys/mman.h"
+//   #endif  //ABSL_HAVE_MMAP
+//
+//   ...
+//   #ifdef ABSL_HAVE_MMAP
+//   void *ptr = mmap(...);
+//   ...
+//   #endif  // ABSL_HAVE_MMAP
+
+#ifndef ABSL_BASE_CONFIG_H_
+#define ABSL_BASE_CONFIG_H_
+
+// Included for the __GLIBC__ macro (or similar macros on other systems).
+#include <limits.h>
+
+#ifdef __cplusplus
+// Included for __GLIBCXX__, _LIBCPP_VERSION
+#include <cstddef>
+#endif  // __cplusplus
+
+#include "absl/base/policy_checks.h"
+
+// -----------------------------------------------------------------------------
+// Compiler Feature Checks
+// -----------------------------------------------------------------------------
+
+// ABSL_HAVE_BUILTIN()
+//
+// Checks whether the compiler supports a Clang Feature Checking Macro, and if
+// so, checks whether it supports the provided builtin function "x" where x
+// is one of the functions noted in
+// https://clang.llvm.org/docs/LanguageExtensions.html
+//
+// Note: Use this macro to avoid an extra level of #ifdef __has_builtin check.
+// http://releases.llvm.org/3.3/tools/clang/docs/LanguageExtensions.html
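+//
+// Illustrative usage sketch (the MY_LIKELY macro below is a hypothetical
+// example, not defined by this header):
+//
+//   #if ABSL_HAVE_BUILTIN(__builtin_expect)
+//   #define MY_LIKELY(x) (__builtin_expect(!!(x), 1))
+//   #else
+//   #define MY_LIKELY(x) (x)
+//   #endif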
+#ifdef __has_builtin
+#define ABSL_HAVE_BUILTIN(x) __has_builtin(x)
+#else
+#define ABSL_HAVE_BUILTIN(x) 0
+#endif
+
+// ABSL_HAVE_TLS is defined to 1 when __thread should be supported.
+// We assume __thread is supported on Linux when compiled with Clang or compiled
+// against libstdc++ with _GLIBCXX_HAVE_TLS defined.
+#ifdef ABSL_HAVE_TLS
+#error ABSL_HAVE_TLS cannot be directly set
+#elif defined(__linux__) && (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS))
+#define ABSL_HAVE_TLS 1
+#endif
+
+// There are platforms for which TLS should not be used even though the compiler
+// makes it seem like it's supported (Android NDK < r12b for example).
+// This is primarily because of linker problems and toolchain misconfiguration;
+// Abseil does not intend to support this indefinitely. Currently, the newest
+// toolchain that we intend to support and that requires this behavior is the
+// r11 NDK; allowing a five-year support window for it means this option is
+// likely to be removed around June of 2021.
+#if defined(__ANDROID__) && defined(__clang__)
+#if __has_include(<android/ndk-version.h>)
+#include <android/ndk-version.h>
+#endif
+// TLS isn't supported until NDK r12b per
+// https://developer.android.com/ndk/downloads/revision_history.html
+// Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in
+// <android/ndk-version.h>. For NDK < r16, users should define these macros,
+// e.g. `-D__NDK_MAJOR__=11 -D__NDK_MINOR__=0` for NDK r11.
+#if defined(__NDK_MAJOR__) && defined(__NDK_MINOR__) && \
+    ((__NDK_MAJOR__ < 12) || ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1)))
+#undef ABSL_HAVE_TLS
+#endif
+#endif  // defined(__ANDROID__) && defined(__clang__)
+
+// ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
+//
+// Checks whether `std::is_trivially_destructible<T>` is supported.
+//
+// Notes: All supported compilers using libc++ support this feature, as do
+// gcc >= 4.8.1 using libstdc++, and Visual Studio.
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
+#error ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE cannot be directly set
+#elif defined(_LIBCPP_VERSION) ||                                        \
+    (!defined(__clang__) && defined(__GNUC__) && defined(__GLIBCXX__) && \
+     (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) ||        \
+    defined(_MSC_VER)
+#define ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE 1
+#endif
+
+// ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
+//
+// Checks whether `std::is_trivially_default_constructible<T>` and
+// `std::is_trivially_copy_constructible<T>` are supported.
+
+// ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
+//
+// Checks whether `std::is_trivially_copy_assignable<T>` is supported.
+
+// Notes: Clang with libc++ supports these features, as do gcc >= 5.1 with
+// either libc++ or libstdc++, and Visual Studio.
+#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE)
+#error ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set
+#elif defined(ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE)
+#error ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot be directly set
+#elif (defined(__clang__) && defined(_LIBCPP_VERSION)) ||        \
+    (!defined(__clang__) && defined(__GNUC__) &&                 \
+     (__GNUC__ > 5 || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)) && \
+     (defined(_LIBCPP_VERSION) || defined(__GLIBCXX__))) ||      \
+    defined(_MSC_VER)
+#define ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1
+#define ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1
+#endif
+
+// ABSL_HAVE_THREAD_LOCAL
+//
+// Checks whether C++11's `thread_local` storage duration specifier is
+// supported.
+//
+// Notes: Clang implements the `thread_local` keyword but Xcode did not support
+// the implementation until Xcode 8.
+#ifdef ABSL_HAVE_THREAD_LOCAL
+#error ABSL_HAVE_THREAD_LOCAL cannot be directly set
+#elif !defined(__apple_build_version__) || __apple_build_version__ >= 8000042
+#define ABSL_HAVE_THREAD_LOCAL 1
+#endif
+
+// ABSL_HAVE_INTRINSIC_INT128
+//
+// Checks whether the __int128 compiler extension for a 128-bit integral type is
+// supported.
+//
+// Notes: __SIZEOF_INT128__ is defined by Clang and GCC when __int128 is
+// supported, except on ppc64 and aarch64 where __int128 exists but exhibits
+// a sporadic compiler-crashing bug. Nvidia's nvcc also defines __GNUC__ and
+// __SIZEOF_INT128__ but not all versions actually support __int128.
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+#error ABSL_HAVE_INTRINSIC_INT128 cannot be directly set
+#elif (defined(__clang__) && defined(__SIZEOF_INT128__) &&               \
+       !defined(__ppc64__) && !defined(__aarch64__)) ||                  \
+    (defined(__CUDACC__) && defined(__SIZEOF_INT128__) &&                \
+     __CUDACC_VER__ >= 70000) ||                                         \
+    (!defined(__clang__) && !defined(__CUDACC__) && defined(__GNUC__) && \
+     defined(__SIZEOF_INT128__))
+#define ABSL_HAVE_INTRINSIC_INT128 1
+#endif
+
+// ABSL_HAVE_EXCEPTIONS
+//
+// Checks whether the compiler both supports and enables exceptions. Many
+// compilers support a "no exceptions" mode that disables exceptions.
+//
+// Generally, when ABSL_HAVE_EXCEPTIONS is not defined:
+//
+// * Code using `throw` and `try` may not compile.
+// * The `noexcept` specifier will still compile and behave as normal.
+// * The `noexcept` operator may still return `false`.
+//
+// For further details, consult the compiler's documentation.
+#ifdef ABSL_HAVE_EXCEPTIONS
+#error ABSL_HAVE_EXCEPTIONS cannot be directly set.
+
+#elif defined(__clang__)
+// TODO(calabrese)
+// Switch to using __cpp_exceptions when we no longer support versions < 3.6.
+// For details on this check, see:
+//   http://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html#the-exceptions-macro
+#if defined(__EXCEPTIONS) && __has_feature(cxx_exceptions)
+#define ABSL_HAVE_EXCEPTIONS 1
+#endif  // defined(__EXCEPTIONS) && __has_feature(cxx_exceptions)
+
+// Handle remaining special cases and default to exceptions being supported.
+#elif !(defined(__GNUC__) && (__GNUC__ < 5) && !defined(__EXCEPTIONS)) &&    \
+    !(defined(__GNUC__) && (__GNUC__ >= 5) && !defined(__cpp_exceptions)) && \
+    !(defined(_MSC_VER) && !defined(_CPPUNWIND))
+#define ABSL_HAVE_EXCEPTIONS 1
+#endif
+
+// -----------------------------------------------------------------------------
+// Platform Feature Checks
+// -----------------------------------------------------------------------------
+
+// Currently supported operating systems and associated preprocessor
+// symbols:
+//
+//   Linux and Linux-derived           __linux__
+//   Android                           __ANDROID__ (implies __linux__)
+//   Linux (non-Android)               __linux__ && !__ANDROID__
+//   Darwin (Mac OS X and iOS)         __APPLE__
+//   Akaros (http://akaros.org)        __ros__
+//   Windows                           _WIN32
+//   NaCL                              __native_client__
+//   AsmJS                             __asmjs__
+//   Fuchsia                           __Fuchsia__
+//
+// Note that since Android defines both __ANDROID__ and __linux__, one
+// may probe for either Linux or Android by simply testing for __linux__.
+
+// ABSL_HAVE_MMAP
+//
+// Checks whether the platform has an mmap(2) implementation as defined in
+// POSIX.1-2001.
+#ifdef ABSL_HAVE_MMAP
+#error ABSL_HAVE_MMAP cannot be directly set
+#elif defined(__linux__) || defined(__APPLE__) || defined(__ros__) || \
+    defined(__native_client__) || defined(__asmjs__) || defined(__Fuchsia__)
+#define ABSL_HAVE_MMAP 1
+#endif
+
+// ABSL_HAVE_PTHREAD_GETSCHEDPARAM
+//
+// Checks whether the platform implements the pthread_(get|set)schedparam(3)
+// functions as defined in POSIX.1-2001.
+#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
+#error ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set
+#elif defined(__linux__) || defined(__APPLE__) || defined(__ros__)
+#define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1
+#endif
+
+// ABSL_HAVE_SCHED_YIELD
+//
+// Checks whether the platform implements sched_yield(2) as defined in
+// POSIX.1-2001.
+#ifdef ABSL_HAVE_SCHED_YIELD
+#error ABSL_HAVE_SCHED_YIELD cannot be directly set
+#elif defined(__linux__) || defined(__ros__) || defined(__native_client__)
+#define ABSL_HAVE_SCHED_YIELD 1
+#endif
+
+// ABSL_HAVE_SEMAPHORE_H
+//
+// Checks whether the platform supports the <semaphore.h> header and sem_open(3)
+// family of functions as standardized in POSIX.1-2001.
+//
+// Note: While Apple provides <semaphore.h> for both iOS and macOS, it is
+// explicitly deprecated and will cause build failures if enabled for those
+// platforms.  We side-step the issue by not defining it here for Apple
+// platforms.
+#ifdef ABSL_HAVE_SEMAPHORE_H
+#error ABSL_HAVE_SEMAPHORE_H cannot be directly set
+#elif defined(__linux__) || defined(__ros__)
+#define ABSL_HAVE_SEMAPHORE_H 1
+#endif
+
+// ABSL_HAVE_ALARM
+//
+// Checks whether the platform supports the <signal.h> header and alarm(2)
+// function as standardized in POSIX.1-2001.
+#ifdef ABSL_HAVE_ALARM
+#error ABSL_HAVE_ALARM cannot be directly set
+#elif defined(__GOOGLE_GRTE_VERSION__)
+// feature tests for Google's GRTE
+#define ABSL_HAVE_ALARM 1
+#elif defined(__GLIBC__)
+// feature test for glibc
+#define ABSL_HAVE_ALARM 1
+#elif defined(_MSC_VER)
+// feature tests for Microsoft's library
+#elif defined(__native_client__)
+#else
+// other standard libraries
+#define ABSL_HAVE_ALARM 1
+#endif
+
+// ABSL_IS_LITTLE_ENDIAN
+// ABSL_IS_BIG_ENDIAN
+//
+// Checks the endianness of the platform.
+//
+// Notes: uses the built-in endian macros provided by GCC (since 4.6) and
+// Clang (since 3.2); see
+// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html.
+// Otherwise, if _WIN32, assume little endian. Otherwise, bail with an error.
+#if defined(ABSL_IS_BIG_ENDIAN)
+#error "ABSL_IS_BIG_ENDIAN cannot be directly set."
+#endif
+#if defined(ABSL_IS_LITTLE_ENDIAN)
+#error "ABSL_IS_LITTLE_ENDIAN cannot be directly set."
+#endif
+
+#if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
+     __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+#define ABSL_IS_LITTLE_ENDIAN 1
+#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+    __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define ABSL_IS_BIG_ENDIAN 1
+#elif defined(_WIN32)
+#define ABSL_IS_LITTLE_ENDIAN 1
+#else
+#error "absl endian detection needs to be set up for your compiler"
+#endif
+
+// ABSL_HAVE_STD_ANY
+//
+// Checks whether C++17 std::any is available by checking whether <any> exists.
+#ifdef ABSL_HAVE_STD_ANY
+#error "ABSL_HAVE_STD_ANY cannot be directly set."
+#endif
+
+#ifdef __has_include
+#if __has_include(<any>) && __cplusplus >= 201703L
+#define ABSL_HAVE_STD_ANY 1
+#endif
+#endif
+
+// ABSL_HAVE_STD_OPTIONAL
+//
+// Checks whether C++17 std::optional is available.
+#ifdef ABSL_HAVE_STD_OPTIONAL
+#error "ABSL_HAVE_STD_OPTIONAL cannot be directly set."
+#endif
+
+#ifdef __has_include
+#if __has_include(<optional>) && __cplusplus >= 201703L
+#define ABSL_HAVE_STD_OPTIONAL 1
+#endif
+#endif
+
+// ABSL_HAVE_STD_STRING_VIEW
+//
+// Checks whether C++17 std::string_view is available.
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+#error "ABSL_HAVE_STD_STRING_VIEW cannot be directly set."
+#endif
+
+#ifdef __has_include
+#if __has_include(<string_view>) && __cplusplus >= 201703L
+#define ABSL_HAVE_STD_STRING_VIEW 1
+#endif
+#endif
+
+#endif  // ABSL_BASE_CONFIG_H_
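
A usage sketch for the feature-test macros above (illustrative only; the non-thread_local fallback shown is just a placeholder for whatever a project would actually do):

  #include "absl/base/config.h"

  #ifdef ABSL_HAVE_THREAD_LOCAL
  // One counter per thread when the C++11 specifier is usable.
  thread_local int per_thread_counter = 0;
  #else
  // Illustrative fallback only: a single shared counter; callers would need
  // their own synchronization or a per-thread-TLS mechanism here.
  int per_thread_counter = 0;
  #endif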
diff --git a/absl/base/config_test.cc b/absl/base/config_test.cc
new file mode 100644
index 000000000000..578bb810b214
--- /dev/null
+++ b/absl/base/config_test.cc
@@ -0,0 +1,45 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/config.h"
+
+#include <cstdint>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace {
+
+TEST(ConfigTest, Endianness) {
+  union {
+    uint32_t value;
+    uint8_t data[sizeof(uint32_t)];
+  } number;
+  number.data[0] = 0x00;
+  number.data[1] = 0x01;
+  number.data[2] = 0x02;
+  number.data[3] = 0x03;
+#if defined(ABSL_IS_LITTLE_ENDIAN) && defined(ABSL_IS_BIG_ENDIAN)
+#error Both ABSL_IS_LITTLE_ENDIAN and ABSL_IS_BIG_ENDIAN are defined
+#elif defined(ABSL_IS_LITTLE_ENDIAN)
+  EXPECT_EQ(UINT32_C(0x03020100), number.value);
+#elif defined(ABSL_IS_BIG_ENDIAN)
+  EXPECT_EQ(UINT32_C(0x00010203), number.value);
+#else
+#error Unknown endianness
+#endif
+}
+
+}  // namespace
diff --git a/absl/base/dynamic_annotations.cc b/absl/base/dynamic_annotations.cc
new file mode 100644
index 000000000000..08c27e511f0a
--- /dev/null
+++ b/absl/base/dynamic_annotations.cc
@@ -0,0 +1,129 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "absl/base/dynamic_annotations.h"
+
+#ifndef __has_feature
+#define __has_feature(x) 0
+#endif
+
+/* Compiler-based ThreadSanitizer defines
+   DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL = 1
+   and provides its own definitions of the functions. */
+
+#ifndef DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL
+# define DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL 0
+#endif
+
+/* Each function is empty and called (via a macro) only in debug mode.
+   The arguments are captured by dynamic tools at runtime. */
+
+#if DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 && !defined(__native_client__)
+
+#if __has_feature(memory_sanitizer)
+#include <sanitizer/msan_interface.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void AnnotateRWLockCreate(const char *, int,
+                          const volatile void *){}
+void AnnotateRWLockDestroy(const char *, int,
+                           const volatile void *){}
+void AnnotateRWLockAcquired(const char *, int,
+                            const volatile void *, long){}
+void AnnotateRWLockReleased(const char *, int,
+                            const volatile void *, long){}
+void AnnotateBenignRace(const char *, int,
+                        const volatile void *,
+                        const char *){}
+void AnnotateBenignRaceSized(const char *, int,
+                             const volatile void *,
+                             size_t,
+                             const char *) {}
+void AnnotateThreadName(const char *, int,
+                        const char *){}
+void AnnotateIgnoreReadsBegin(const char *, int){}
+void AnnotateIgnoreReadsEnd(const char *, int){}
+void AnnotateIgnoreWritesBegin(const char *, int){}
+void AnnotateIgnoreWritesEnd(const char *, int){}
+void AnnotateEnableRaceDetection(const char *, int, int){}
+void AnnotateMemoryIsInitialized(const char *, int,
+                                 const volatile void *mem, size_t size) {
+#if __has_feature(memory_sanitizer)
+  __msan_unpoison(mem, size);
+#else
+  (void)mem;
+  (void)size;
+#endif
+}
+
+void AnnotateMemoryIsUninitialized(const char *, int,
+                                   const volatile void *mem, size_t size) {
+#if __has_feature(memory_sanitizer)
+  __msan_allocated_memory(mem, size);
+#else
+  (void)mem;
+  (void)size;
+#endif
+}
+
+static int GetRunningOnValgrind(void) {
+#ifdef RUNNING_ON_VALGRIND
+  if (RUNNING_ON_VALGRIND) return 1;
+#endif
+  char *running_on_valgrind_str = getenv("RUNNING_ON_VALGRIND");
+  if (running_on_valgrind_str) {
+    return strcmp(running_on_valgrind_str, "0") != 0;
+  }
+  return 0;
+}
+
+/* See the comments in dynamic_annotations.h */
+int RunningOnValgrind(void) {
+  static volatile int running_on_valgrind = -1;
+  int local_running_on_valgrind = running_on_valgrind;
+  /* C doesn't have thread-safe initialization of statics, and we
+     don't want to depend on pthread_once here, so hack it. */
+  ANNOTATE_BENIGN_RACE(&running_on_valgrind, "safe hack");
+  if (local_running_on_valgrind == -1)
+    running_on_valgrind = local_running_on_valgrind = GetRunningOnValgrind();
+  return local_running_on_valgrind;
+}
+
+/* See the comments in dynamic_annotations.h */
+double ValgrindSlowdown(void) {
+  /* Same initialization hack as in RunningOnValgrind(). */
+  static volatile double slowdown = 0.0;
+  double local_slowdown = slowdown;
+  ANNOTATE_BENIGN_RACE(&slowdown, "safe hack");
+  if (RunningOnValgrind() == 0) {
+    return 1.0;
+  }
+  if (local_slowdown == 0.0) {
+    char *env = getenv("VALGRIND_SLOWDOWN");
+    slowdown = local_slowdown = env ? atof(env) : 50.0;
+  }
+  return local_slowdown;
+}
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+#endif  /* DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */
diff --git a/absl/base/dynamic_annotations.h b/absl/base/dynamic_annotations.h
new file mode 100644
index 000000000000..b9c015ba7dbd
--- /dev/null
+++ b/absl/base/dynamic_annotations.h
@@ -0,0 +1,409 @@
+/*
+ *  Copyright 2017 The Abseil Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/* This file defines dynamic annotations for use with dynamic analysis
+   tools such as valgrind, PIN, etc.
+
+   Dynamic annotation is a source code annotation that affects
+   the generated code (that is, the annotation is not a comment).
+   Each such annotation is attached to a particular
+   instruction and/or to a particular object (address) in the program.
+
+   The annotations that should be used by users are macros in all upper-case
+   (e.g., ANNOTATE_THREAD_NAME).
+
+   Actual implementation of these macros may differ depending on the
+   dynamic analysis tool being used.
+
+   This file supports the following configurations:
+   - Dynamic Annotations enabled (with static thread-safety warnings disabled).
+     In this case, macros expand to functions implemented by Thread Sanitizer,
+     when building with TSan. When no external implementation is provided,
+     dynamic_annotations.cc provides no-op implementations.
+
+   - Static Clang thread-safety warnings enabled.
+     When building with a Clang compiler that supports thread-safety warnings,
+     a subset of annotations can be statically-checked at compile-time. We
+     expand these macros to static-inline functions that can be analyzed for
+     thread-safety, but afterwards elided when building the final binary.
+
+   - All annotations are disabled.
+     If neither Dynamic Annotations nor Clang thread-safety warnings are
+     enabled, then all annotation-macros expand to empty. */
+
+#ifndef ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
+#define ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
+
+#ifndef DYNAMIC_ANNOTATIONS_ENABLED
+# define DYNAMIC_ANNOTATIONS_ENABLED 0
+#endif
+
+#if defined(__native_client__)
+  #include "nacl/dynamic_annotations.h"
+
+  // Stub out the macros missing from the NaCl version.
+  #ifndef ANNOTATE_CONTIGUOUS_CONTAINER
+    #define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid)
+  #endif
+  #ifndef ANNOTATE_RWLOCK_CREATE_STATIC
+    #define ANNOTATE_RWLOCK_CREATE_STATIC(lock)
+  #endif
+  #ifndef ADDRESS_SANITIZER_REDZONE
+    #define ADDRESS_SANITIZER_REDZONE(name)
+  #endif
+  #ifndef ANNOTATE_MEMORY_IS_UNINITIALIZED
+    #define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size)
+  #endif
+
+#else /* !__native_client__ */
+
+#if DYNAMIC_ANNOTATIONS_ENABLED != 0
+
+  /* -------------------------------------------------------------
+     Annotations that suppress errors.  It is usually better to express the
+     program's synchronization using the other annotations, but these can
+     be used when all else fails. */
+
+  /* Report that we may have a benign race at "pointer", with size
+     "sizeof(*(pointer))". "pointer" must be a non-void* pointer.  Insert at the
+     point where "pointer" has been allocated, preferably close to the point
+     where the race happens.  See also ANNOTATE_BENIGN_RACE_STATIC. */
+  #define ANNOTATE_BENIGN_RACE(pointer, description) \
+    AnnotateBenignRaceSized(__FILE__, __LINE__, pointer, \
+                            sizeof(*(pointer)), description)
+
+  /* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
+     the memory range [address, address+size). */
+  #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
+    AnnotateBenignRaceSized(__FILE__, __LINE__, address, size, description)
+
+  /* Enable (enable!=0) or disable (enable==0) race detection for all threads.
+     This annotation could be useful if you want to skip expensive race analysis
+     during some period of program execution, e.g. during initialization. */
+  #define ANNOTATE_ENABLE_RACE_DETECTION(enable) \
+    AnnotateEnableRaceDetection(__FILE__, __LINE__, enable)
+
+  /* -------------------------------------------------------------
+     Annotations useful for debugging. */
+
+  /* Report the current thread name to a race detector. */
+  #define ANNOTATE_THREAD_NAME(name) \
+    AnnotateThreadName(__FILE__, __LINE__, name)
+
+  /* -------------------------------------------------------------
+     Annotations useful when implementing locks.  They are not
+     normally needed by modules that merely use locks.
+     The "lock" argument is a pointer to the lock object. */
+
+  /* Report that a lock has been created at address "lock". */
+  #define ANNOTATE_RWLOCK_CREATE(lock) \
+    AnnotateRWLockCreate(__FILE__, __LINE__, lock)
+
+  /* Report that a linker initialized lock has been created at address "lock".
+   */
+#ifdef THREAD_SANITIZER
+  #define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \
+    AnnotateRWLockCreateStatic(__FILE__, __LINE__, lock)
+#else
+  #define ANNOTATE_RWLOCK_CREATE_STATIC(lock) ANNOTATE_RWLOCK_CREATE(lock)
+#endif
+
+  /* Report that the lock at address "lock" is about to be destroyed. */
+  #define ANNOTATE_RWLOCK_DESTROY(lock) \
+    AnnotateRWLockDestroy(__FILE__, __LINE__, lock)
+
+  /* Report that the lock at address "lock" has been acquired.
+     is_w=1 for writer lock, is_w=0 for reader lock. */
+  #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
+    AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)
+
+  /* Report that the lock at address "lock" is about to be released. */
+  #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
+    AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)
+
+#else  /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
+
+  #define ANNOTATE_RWLOCK_CREATE(lock) /* empty */
+  #define ANNOTATE_RWLOCK_CREATE_STATIC(lock) /* empty */
+  #define ANNOTATE_RWLOCK_DESTROY(lock) /* empty */
+  #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) /* empty */
+  #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) /* empty */
+  #define ANNOTATE_BENIGN_RACE(address, description) /* empty */
+  #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) /* empty */
+  #define ANNOTATE_THREAD_NAME(name) /* empty */
+  #define ANNOTATE_ENABLE_RACE_DETECTION(enable) /* empty */
+
+#endif  /* DYNAMIC_ANNOTATIONS_ENABLED */
+
+/* These annotations are also made available to LLVM's Memory Sanitizer */
+#if DYNAMIC_ANNOTATIONS_ENABLED == 1 || defined(MEMORY_SANITIZER)
+  #define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+    AnnotateMemoryIsInitialized(__FILE__, __LINE__, address, size)
+
+  #define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
+    AnnotateMemoryIsUninitialized(__FILE__, __LINE__, address, size)
+#else
+  #define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) /* empty */
+  #define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) /* empty */
+#endif  /* DYNAMIC_ANNOTATIONS_ENABLED || MEMORY_SANITIZER */
+/* TODO(delesley) -- Replace __CLANG_SUPPORT_DYN_ANNOTATION__ with the
+   appropriate feature ID. */
+#if defined(__clang__) && (!defined(SWIG)) \
+    && defined(__CLANG_SUPPORT_DYN_ANNOTATION__)
+
+  #if DYNAMIC_ANNOTATIONS_ENABLED == 0
+    #define ANNOTALYSIS_ENABLED
+  #endif
+
+  /* When running in opt-mode, GCC will issue a warning if these attributes
+     are compiled. Only include them when compiling using Clang. */
+  #define ATTRIBUTE_IGNORE_READS_BEGIN \
+      __attribute((exclusive_lock_function("*")))
+  #define ATTRIBUTE_IGNORE_READS_END \
+      __attribute((unlock_function("*")))
+#else
+  #define ATTRIBUTE_IGNORE_READS_BEGIN  /* empty */
+  #define ATTRIBUTE_IGNORE_READS_END  /* empty */
+#endif  /* defined(__clang__) && ... */
+
+#if (DYNAMIC_ANNOTATIONS_ENABLED != 0) || defined(ANNOTALYSIS_ENABLED)
+  #define ANNOTATIONS_ENABLED
+#endif
+
+#if (DYNAMIC_ANNOTATIONS_ENABLED != 0)
+
+  /* Request the analysis tool to ignore all reads in the current thread
+     until ANNOTATE_IGNORE_READS_END is called.
+     Useful to ignore intentional racy reads, while still checking
+     other reads and all writes.
+     See also ANNOTATE_UNPROTECTED_READ. */
+  #define ANNOTATE_IGNORE_READS_BEGIN() \
+    AnnotateIgnoreReadsBegin(__FILE__, __LINE__)
+
+  /* Stop ignoring reads. */
+  #define ANNOTATE_IGNORE_READS_END() \
+    AnnotateIgnoreReadsEnd(__FILE__, __LINE__)
+
+  /* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead. */
+  #define ANNOTATE_IGNORE_WRITES_BEGIN() \
+    AnnotateIgnoreWritesBegin(__FILE__, __LINE__)
+
+  /* Stop ignoring writes. */
+  #define ANNOTATE_IGNORE_WRITES_END() \
+    AnnotateIgnoreWritesEnd(__FILE__, __LINE__)
+
+/* Clang provides limited support for static thread-safety analysis
+   through a feature called Annotalysis. We configure macro-definitions
+   according to whether Annotalysis support is available. */
+#elif defined(ANNOTALYSIS_ENABLED)
+
+  #define ANNOTATE_IGNORE_READS_BEGIN() \
+    StaticAnnotateIgnoreReadsBegin(__FILE__, __LINE__)
+
+  #define ANNOTATE_IGNORE_READS_END() \
+    StaticAnnotateIgnoreReadsEnd(__FILE__, __LINE__)
+
+  #define ANNOTATE_IGNORE_WRITES_BEGIN() \
+    StaticAnnotateIgnoreWritesBegin(__FILE__, __LINE__)
+
+  #define ANNOTATE_IGNORE_WRITES_END() \
+    StaticAnnotateIgnoreWritesEnd(__FILE__, __LINE__)
+
+#else
+  #define ANNOTATE_IGNORE_READS_BEGIN()  /* empty */
+  #define ANNOTATE_IGNORE_READS_END()  /* empty */
+  #define ANNOTATE_IGNORE_WRITES_BEGIN()  /* empty */
+  #define ANNOTATE_IGNORE_WRITES_END()  /* empty */
+#endif
+
+/* Implement the ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more
+   primitive annotations defined above. */
+#if defined(ANNOTATIONS_ENABLED)
+
+  /* Start ignoring all memory accesses (both reads and writes). */
+  #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
+    do {                                           \
+      ANNOTATE_IGNORE_READS_BEGIN();               \
+      ANNOTATE_IGNORE_WRITES_BEGIN();              \
+    } while (0)
+
+  /* Stop ignoring both reads and writes. */
+  #define ANNOTATE_IGNORE_READS_AND_WRITES_END()   \
+    do {                                           \
+      ANNOTATE_IGNORE_WRITES_END();                \
+      ANNOTATE_IGNORE_READS_END();                 \
+    } while (0)
+
+#else
+  #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN()  /* empty */
+  #define ANNOTATE_IGNORE_READS_AND_WRITES_END()  /* empty */
+#endif
+
+/* Use the macros above rather than using these functions directly. */
+#include <stddef.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+void AnnotateRWLockCreate(const char *file, int line,
+                          const volatile void *lock);
+void AnnotateRWLockCreateStatic(const char *file, int line,
+                          const volatile void *lock);
+void AnnotateRWLockDestroy(const char *file, int line,
+                           const volatile void *lock);
+void AnnotateRWLockAcquired(const char *file, int line,
+                            const volatile void *lock, long is_w);  /* NOLINT */
+void AnnotateRWLockReleased(const char *file, int line,
+                            const volatile void *lock, long is_w);  /* NOLINT */
+void AnnotateBenignRace(const char *file, int line,
+                        const volatile void *address,
+                        const char *description);
+void AnnotateBenignRaceSized(const char *file, int line,
+                        const volatile void *address,
+                        size_t size,
+                        const char *description);
+void AnnotateThreadName(const char *file, int line,
+                        const char *name);
+void AnnotateEnableRaceDetection(const char *file, int line, int enable);
+void AnnotateMemoryIsInitialized(const char *file, int line,
+                                 const volatile void *mem, size_t size);
+void AnnotateMemoryIsUninitialized(const char *file, int line,
+                                   const volatile void *mem, size_t size);
+
+/* Annotations expand to these functions, when Dynamic Annotations are enabled.
+   These functions are either implemented as no-op calls, if no Sanitizer is
+   attached, or provided with externally-linked implementations by a library
+   like ThreadSanitizer. */
+void AnnotateIgnoreReadsBegin(const char *file, int line)
+    ATTRIBUTE_IGNORE_READS_BEGIN;
+void AnnotateIgnoreReadsEnd(const char *file, int line)
+    ATTRIBUTE_IGNORE_READS_END;
+void AnnotateIgnoreWritesBegin(const char *file, int line);
+void AnnotateIgnoreWritesEnd(const char *file, int line);
+
+#if defined(ANNOTALYSIS_ENABLED)
+/* When Annotalysis is enabled without Dynamic Annotations, the use of
+   static-inline functions allows the annotations to be read at compile-time,
+   while still letting the compiler elide the functions from the final build.
+
+   TODO(delesley) -- The exclusive lock here ignores writes as well, but
+   allows IGNORE_READS_AND_WRITES to work properly. */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-function"
+static inline void StaticAnnotateIgnoreReadsBegin(const char *file, int line)
+    ATTRIBUTE_IGNORE_READS_BEGIN { (void)file; (void)line; }
+static inline void StaticAnnotateIgnoreReadsEnd(const char *file, int line)
+    ATTRIBUTE_IGNORE_READS_END { (void)file; (void)line; }
+static inline void StaticAnnotateIgnoreWritesBegin(
+    const char *file, int line) { (void)file; (void)line; }
+static inline void StaticAnnotateIgnoreWritesEnd(
+    const char *file, int line) { (void)file; (void)line; }
+#pragma GCC diagnostic pop
+#endif
+
+/* Return non-zero value if running under valgrind.
+
+  If "valgrind.h" is included into dynamic_annotations.cc,
+  the regular valgrind mechanism will be used.
+  See http://valgrind.org/docs/manual/manual-core-adv.html about
+  RUNNING_ON_VALGRIND and other valgrind "client requests".
+  The file "valgrind.h" may be obtained by doing
+     svn co svn://svn.valgrind.org/valgrind/trunk/include
+
+  If for some reason you can't use "valgrind.h" or want to fake valgrind,
+  there are two ways to make this function return non-zero:
+    - Use environment variable: export RUNNING_ON_VALGRIND=1
+    - Make your tool intercept the function RunningOnValgrind() and
+      change its return value.
+ */
+int RunningOnValgrind(void);
+
+/* ValgrindSlowdown returns:
+    * 1.0, if (RunningOnValgrind() == 0)
+    * 50.0, if (RunningOnValgrind() != 0 && getenv("VALGRIND_SLOWDOWN") == NULL)
+    * atof(getenv("VALGRIND_SLOWDOWN")) otherwise
+   This function can be used to scale timeout values:
+   EXAMPLE:
+   for (;;) {
+     DoExpensiveBackgroundTask();
+     SleepForSeconds(5 * ValgrindSlowdown());
+   }
+ */
+double ValgrindSlowdown(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+/* ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racy reads.
+
+     Instead of doing
+        ANNOTATE_IGNORE_READS_BEGIN();
+        ... = x;
+        ANNOTATE_IGNORE_READS_END();
+     one can use
+        ... = ANNOTATE_UNPROTECTED_READ(x); */
+#if defined(__cplusplus) && defined(ANNOTATIONS_ENABLED)
+template <typename T>
+inline T ANNOTATE_UNPROTECTED_READ(const volatile T &x) { /* NOLINT */
+  ANNOTATE_IGNORE_READS_BEGIN();
+  T res = x;
+  ANNOTATE_IGNORE_READS_END();
+  return res;
+}
+#else
+  #define ANNOTATE_UNPROTECTED_READ(x) (x)
+#endif
+
+#if DYNAMIC_ANNOTATIONS_ENABLED != 0 && defined(__cplusplus)
+  /* Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable. */
+  #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description)        \
+    namespace {                                                       \
+      class static_var ## _annotator {                                \
+       public:                                                        \
+        static_var ## _annotator() {                                  \
+          ANNOTATE_BENIGN_RACE_SIZED(&static_var,                     \
+                                      sizeof(static_var),             \
+            # static_var ": " description);                           \
+        }                                                             \
+      };                                                              \
+      static static_var ## _annotator the ## static_var ## _annotator;\
+    }  // namespace
+#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
+  #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description)  /* empty */
+#endif /* DYNAMIC_ANNOTATIONS_ENABLED */
+
+#ifdef ADDRESS_SANITIZER
+/* Describe the current state of a contiguous container such as e.g.
+ * std::vector or std::string. For more details see
+ * sanitizer/common_interface_defs.h, which is provided by the compiler. */
+#include <sanitizer/common_interface_defs.h>
+#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \
+  __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid)
+#define ADDRESS_SANITIZER_REDZONE(name)         \
+  struct { char x[8] __attribute__ ((aligned (8))); } name
+#else
+#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid)
+#define ADDRESS_SANITIZER_REDZONE(name)
+#endif  // ADDRESS_SANITIZER
+
+/* Undefine the macros intended only in this file. */
+#undef ANNOTALYSIS_ENABLED
+#undef ANNOTATIONS_ENABLED
+#undef ATTRIBUTE_IGNORE_READS_BEGIN
+#undef ATTRIBUTE_IGNORE_READS_END
+
+#endif /* !__native_client__ */
+
+#endif  /* ABSL_BASE_DYNAMIC_ANNOTATIONS_H_ */
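
A small sketch of the annotation macros above, assuming an intentionally unsynchronized flag read that a race detector should not report (the flag and function names are illustrative):

  #include "absl/base/dynamic_annotations.h"

  static int g_initialized = 0;  // written once, read without synchronization

  void MaybeFastPath() {
    // Tell dynamic tools that this racy read is intentional; the macro expands
    // to a plain read when annotations are disabled.
    if (ANNOTATE_UNPROTECTED_READ(g_initialized)) {
      // ... fast path ...
    }
  }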
diff --git a/absl/base/internal/atomic_hook.h b/absl/base/internal/atomic_hook.h
new file mode 100644
index 000000000000..8eee367e4f9c
--- /dev/null
+++ b/absl/base/internal/atomic_hook.h
@@ -0,0 +1,122 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
+#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
+
+#include <cassert>
+#include <atomic>
+#include <utility>
+
+namespace absl {
+namespace base_internal {
+
+// In current versions of MSVC (as of July 2017), a std::atomic<T> where T is a
+// pointer to function cannot be constant-initialized with an address constant
+// expression.  That is, the following code does not compile:
+//   void NoOp() {}
+//   constexpr std::atomic<void(*)()> ptr(NoOp);
+//
+// This is the only compiler we support that seems to have this issue.  We
+// conditionalize on MSVC here to use a fallback implementation.  But we
+// should revisit this occasionally.  If MSVC fixes this compiler bug, we
+// can then change this to be conditionalized on the value of _MSC_FULL_VER
+// instead.
+#ifdef _MSC_FULL_VER
+#define ABSL_HAVE_FUNCTION_ADDRESS_CONSTANT_EXPRESSION 0
+#else
+#define ABSL_HAVE_FUNCTION_ADDRESS_CONSTANT_EXPRESSION 1
+#endif
+
+template <typename T>
+class AtomicHook;
+
+// AtomicHook is a helper class, templatized on a raw function pointer type, for
+// implementing Abseil customization hooks.  It is a callable object that
+// dispatches to the registered hook, or performs a no-op (and returns a default
+// constructed object) if no hook has been registered.
+//
+// Reads and writes guarantee memory_order_acquire/memory_order_release
+// semantics.
+template <typename ReturnType, typename... Args>
+class AtomicHook<ReturnType (*)(Args...)> {
+ public:
+  using FnPtr = ReturnType (*)(Args...);
+
+  constexpr AtomicHook() : hook_(DummyFunction) {}
+
+  // Stores the provided function pointer as the value for this hook.
+  //
+  // This is intended to be called once.  Multiple calls are legal only if the
+  // same function pointer is provided for each call.  The store is implemented
+  // as a memory_order_release operation, and read accesses are implemented as
+  // memory_order_acquire.
+  void Store(FnPtr fn) {
+    assert(fn);
+    FnPtr expected = DummyFunction;
+    hook_.compare_exchange_strong(expected, fn, std::memory_order_acq_rel,
+                                  std::memory_order_acquire);
+    // If the compare and exchange failed, make sure that's because hook_ was
+    // already set to `fn` by an earlier call.  Any other state reflects an API
+    // violation (calling Store() multiple times with different values).
+    //
+    // Avoid ABSL_RAW_CHECK, since raw logging depends on AtomicHook.
+    assert(expected == DummyFunction || expected == fn);
+  }
+
+  // Invokes the registered callback.  If no callback has yet been registered, a
+  // default-constructed object of the appropriate type is returned instead.
+  template <typename... CallArgs>
+  ReturnType operator()(CallArgs&&... args) const {
+    FnPtr hook = hook_.load(std::memory_order_acquire);
+    if (ABSL_HAVE_FUNCTION_ADDRESS_CONSTANT_EXPRESSION || hook) {
+      return hook(std::forward<CallArgs>(args)...);
+    } else {
+      return ReturnType();
+    }
+  }
+
+  // Returns the registered callback, or nullptr if none has been registered.
+  // Useful if client code needs to conditionalize behavior based on whether a
+  // callback was registered.
+  //
+  // Note that atomic_hook.Load()() and atomic_hook() have different semantics:
+  // operator()() will perform a no-op if no callback was registered, while
+  // Load()() will dereference a null function pointer.  Prefer operator()() to
+  // Load()() unless you must conditionalize behavior on whether a hook was
+  // registered.
+  FnPtr Load() const {
+    FnPtr ptr = hook_.load(std::memory_order_acquire);
+    return (ptr == DummyFunction) ? nullptr : ptr;
+  }
+
+ private:
+#if ABSL_HAVE_FUNCTION_ADDRESS_CONSTANT_EXPRESSION
+  static ReturnType DummyFunction(Args...) {
+    return ReturnType();
+  }
+#else
+  static constexpr FnPtr DummyFunction = nullptr;
+#endif
+
+  std::atomic<FnPtr> hook_;
+};
+
+#undef ABSL_HAVE_FUNCTION_ADDRESS_CONSTANT_EXPRESSION
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
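
AtomicHook is an internal type, but a sketch of the intended call pattern (the hook type and function names here are illustrative, not part of the library) looks like:

  #include <cstdio>

  #include "absl/base/internal/atomic_hook.h"

  using LogHook = void (*)(const char*);

  // Dispatches to a no-op until Store() installs a real callback.
  static absl::base_internal::AtomicHook<LogHook> log_hook;

  void StderrLogger(const char* msg) { std::fprintf(stderr, "%s\n", msg); }

  void InstallLogger() { log_hook.Store(StderrLogger); }

  void Emit(const char* msg) {
    log_hook(msg);  // no-op before InstallLogger(), StderrLogger afterwards
  }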
diff --git a/absl/base/internal/cycleclock.cc b/absl/base/internal/cycleclock.cc
new file mode 100644
index 000000000000..a742df01f94a
--- /dev/null
+++ b/absl/base/internal/cycleclock.cc
@@ -0,0 +1,81 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The implementation of CycleClock::Frequency.
+//
+// NOTE: only i386 and x86_64 have been well tested.
+// PPC, sparc, alpha, and ia64 are based on
+//    http://peter.kuscsik.com/wordpress/?p=14
+// with modifications by m3b.  See also
+//    https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
+
+#include "absl/base/internal/cycleclock.h"
+
+#include <chrono>  // NOLINT(build/c++11)
+
+#include "absl/base/internal/unscaledcycleclock.h"
+
+namespace absl {
+namespace base_internal {
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+namespace {
+
+#ifdef NDEBUG
+#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+// Not debug mode and the UnscaledCycleClock frequency is the CPU
+// frequency.  Scale the CycleClock to prevent overflow if someone
+// tries to represent the time as cycles since the Unix epoch.
+static constexpr int32_t kShift = 1;
+#else
+// Not debug mode and the UnscaledCycleClock isn't operating at the
+// raw CPU frequency. There is no need to do any scaling, so don't
+// needlessly sacrifice precision.
+static constexpr int32_t kShift = 0;
+#endif
+#else
+// In debug mode use a different shift to discourage depending on a
+// particular shift value.
+static constexpr int32_t kShift = 2;
+#endif
+
+static constexpr double kFrequencyScale = 1.0 / (1 << kShift);
+
+}  // namespace
+
+int64_t CycleClock::Now() {
+  return base_internal::UnscaledCycleClock::Now() >> kShift;
+}
+
+double CycleClock::Frequency() {
+  return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency();
+}
+
+#else
+
+int64_t CycleClock::Now() {
+  return std::chrono::duration_cast<std::chrono::nanoseconds>(
+             std::chrono::steady_clock::now().time_since_epoch())
+      .count();
+}
+
+double CycleClock::Frequency() {
+  return 1e9;
+}
+
+#endif
+
+}  // namespace base_internal
+}  // namespace absl
diff --git a/absl/base/internal/cycleclock.h b/absl/base/internal/cycleclock.h
new file mode 100644
index 000000000000..60e971583c57
--- /dev/null
+++ b/absl/base/internal/cycleclock.h
@@ -0,0 +1,77 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// -----------------------------------------------------------------------------
+// File: cycleclock.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a `CycleClock`, which yields the value and frequency
+// of a cycle counter that increments at a rate that is approximately constant.
+//
+// NOTE:
+//
+// The cycle counter frequency is not necessarily related to the core clock
+// frequency and should not be treated as such. That is, `CycleClock` cycles are
+// not necessarily "CPU cycles" and code should not rely on that behavior, even
+// if experimentally observed.
+//
+//
+// An arbitrary offset may have been added to the counter at power on.
+//
+// On some platforms, the rate and offset of the counter may differ
+// slightly when read from different CPUs of a multiprocessor. Usually,
+// we try to ensure that the operating system adjusts values periodically
+// so that values agree approximately.   If you need stronger guarantees,
+// consider using alternate interfaces.
+//
+// The CPU is not required to maintain the ordering of a cycle counter read
+// with respect to surrounding instructions.
+
+#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+#define ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+
+#include <cstdint>
+
+namespace absl {
+namespace base_internal {
+
+// -----------------------------------------------------------------------------
+// CycleClock
+// -----------------------------------------------------------------------------
+class CycleClock {
+ public:
+  // CycleClock::Now()
+  //
+  // Returns the value of a cycle counter that counts at a rate that is
+  // approximately constant.
+  static int64_t Now();
+
+  // CycleClock::Frequency()
+  //
+  // Returns the amount by which `CycleClock::Now()` increases per second. Note
+  // that this value may not necessarily match the core CPU clock frequency.
+  static double Frequency();
+
+ private:
+  CycleClock() = delete;  // no instances
+  CycleClock(const CycleClock&) = delete;
+  CycleClock& operator=(const CycleClock&) = delete;
+};
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_CYCLECLOCK_H_
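
A sketch of how the interface above can be used to convert a tick delta into seconds (CycleClock is an internal API, and its ticks are not guaranteed to be CPU cycles; the helper name is illustrative):

  #include <cstdint>

  #include "absl/base/internal/cycleclock.h"

  double ElapsedSeconds(int64_t start_ticks, int64_t end_ticks) {
    using absl::base_internal::CycleClock;
    // Frequency() is the tick rate per second of Now(), not the CPU clock.
    return static_cast<double>(end_ticks - start_ticks) /
           CycleClock::Frequency();
  }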
diff --git a/absl/base/internal/endian.h b/absl/base/internal/endian.h
new file mode 100644
index 000000000000..602129eed207
--- /dev/null
+++ b/absl/base/internal/endian.h
@@ -0,0 +1,267 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_
+#define ABSL_BASE_INTERNAL_ENDIAN_H_
+
+// The following guarantees declaration of the byte swap functions
+#ifdef _MSC_VER
+#include <stdlib.h>  // NOLINT(build/include)
+#elif defined(__APPLE__)
+// Mac OS X / Darwin features
+#include <libkern/OSByteOrder.h>
+#elif defined(__GLIBC__)
+#include <byteswap.h>  // IWYU pragma: export
+#endif
+
+#include <cstdint>
+#include "absl/base/config.h"
+#include "absl/base/internal/unaligned_access.h"
+#include "absl/base/port.h"
+
+namespace absl {
+
+// Use compiler byte-swapping intrinsics if they are available.  32-bit
+// and 64-bit versions are available in Clang and GCC as of GCC 4.3.0.
+// The 16-bit version is available in Clang and GCC only as of GCC 4.8.0.
+// For simplicity, we enable them all only for GCC 4.8.0 or later.
+#if defined(__clang__) || \
+    (defined(__GNUC__) && \
+     ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5))
+inline uint64_t gbswap_64(uint64_t host_int) {
+  return __builtin_bswap64(host_int);
+}
+inline uint32_t gbswap_32(uint32_t host_int) {
+  return __builtin_bswap32(host_int);
+}
+inline uint16_t gbswap_16(uint16_t host_int) {
+  return __builtin_bswap16(host_int);
+}
+
+#elif defined(_MSC_VER)
+inline uint64_t gbswap_64(uint64_t host_int) {
+  return _byteswap_uint64(host_int);
+}
+inline uint32_t gbswap_32(uint32_t host_int) {
+  return _byteswap_ulong(host_int);
+}
+inline uint16_t gbswap_16(uint16_t host_int) {
+  return _byteswap_ushort(host_int);
+}
+
+#elif defined(__APPLE__)
+inline uint64_t gbswap_64(uint64_t host_int) { return OSSwapInt64(host_int); }
+inline uint32_t gbswap_32(uint32_t host_int) { return OSSwapInt32(host_int); }
+inline uint16_t gbswap_16(uint16_t host_int) { return OSSwapInt16(host_int); }
+
+#else
+inline uint64_t gbswap_64(uint64_t host_int) {
+#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__)
+  // Adapted from /usr/include/byteswap.h.  Not available on Mac.
+  if (__builtin_constant_p(host_int)) {
+    return __bswap_constant_64(host_int);
+  } else {
+    register uint64_t result;
+    __asm__("bswap %0" : "=r"(result) : "0"(host_int));
+    return result;
+  }
+#elif defined(__GLIBC__)
+  return bswap_64(host_int);
+#else
+  return (((host_int & uint64_t{0xFF}) << 56) |
+          ((host_int & uint64_t{0xFF00}) << 40) |
+          ((host_int & uint64_t{0xFF0000}) << 24) |
+          ((host_int & uint64_t{0xFF000000}) << 8) |
+          ((host_int & uint64_t{0xFF00000000}) >> 8) |
+          ((host_int & uint64_t{0xFF0000000000}) >> 24) |
+          ((host_int & uint64_t{0xFF000000000000}) >> 40) |
+          ((host_int & uint64_t{0xFF00000000000000}) >> 56));
+#endif  // bswap_64
+}
+
+inline uint32_t gbswap_32(uint32_t host_int) {
+#if defined(__GLIBC__)
+  return bswap_32(host_int);
+#else
+  return (((host_int & 0xFF) << 24) | ((host_int & 0xFF00) << 8) |
+          ((host_int & 0xFF0000) >> 8) | ((host_int & 0xFF000000) >> 24));
+#endif
+}
+
+inline uint16_t gbswap_16(uint16_t host_int) {
+#if defined(__GLIBC__)
+  return bswap_16(host_int);
+#else
+  return static_cast<uint16_t>(((host_int & 0xFF) << 8) |
+                               ((host_int & 0xFF00) >> 8));
+#endif
+}
+
+#endif  // intrinsics available
+
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+// Definitions for ntohl etc. that don't require us to include
+// netinet/in.h. We wrap gbswap_32 and gbswap_16 in functions rather
+// than just #defining them because in debug mode, gcc doesn't
+// correctly handle the (rather involved) definitions of bswap_32.
+// gcc guarantees that inline functions are as fast as macros, so
+// this isn't a performance hit.
+inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); }
+inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); }
+inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+// These definitions are simpler on big-endian machines
+// These are functions instead of macros to avoid self-assignment warnings
+// on calls such as "i = ghtnol(i);".  This also provides type checking.
+inline uint16_t ghtons(uint16_t x) { return x; }
+inline uint32_t ghtonl(uint32_t x) { return x; }
+inline uint64_t ghtonll(uint64_t x) { return x; }
+
+#else
+#error \
+    "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \
+       "ABSL_IS_LITTLE_ENDIAN must be defined"
+#endif  // byte order
+
+inline uint16_t gntohs(uint16_t x) { return ghtons(x); }
+inline uint32_t gntohl(uint32_t x) { return ghtonl(x); }
+inline uint64_t gntohll(uint64_t x) { return ghtonll(x); }
+
+// Utilities to convert numbers between the current host's native byte
+// order and little-endian byte order
+//
+// Load/Store methods are alignment safe
+namespace little_endian {
+// Conversion functions.
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return x; }
+inline uint16_t ToHost16(uint16_t x) { return x; }
+
+inline uint32_t FromHost32(uint32_t x) { return x; }
+inline uint32_t ToHost32(uint32_t x) { return x; }
+
+inline uint64_t FromHost64(uint64_t x) { return x; }
+inline uint64_t ToHost64(uint64_t x) { return x; }
+
+inline constexpr bool IsLittleEndian() { return true; }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
+inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
+
+inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
+inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
+
+inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
+inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
+
+inline constexpr bool IsLittleEndian() { return false; }
+
+#endif /* ENDIAN */
+
+// Functions to do unaligned loads and stores in little-endian order.
+inline uint16_t Load16(const void *p) {
+  return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
+}
+
+inline void Store16(void *p, uint16_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
+}
+
+inline uint32_t Load32(const void *p) {
+  return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
+}
+
+inline void Store32(void *p, uint32_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
+}
+
+inline uint64_t Load64(const void *p) {
+  return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
+}
+
+inline void Store64(void *p, uint64_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
+}
+
+}  // namespace little_endian
+
+// Utilities to convert numbers between the current host's native byte
+// order and big-endian byte order (same as network byte order)
+//
+// Load/Store methods are alignment safe
+namespace big_endian {
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
+inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
+
+inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
+inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
+
+inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
+inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
+
+inline constexpr bool IsLittleEndian() { return true; }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return x; }
+inline uint16_t ToHost16(uint16_t x) { return x; }
+
+inline uint32_t FromHost32(uint32_t x) { return x; }
+inline uint32_t ToHost32(uint32_t x) { return x; }
+
+inline uint64_t FromHost64(uint64_t x) { return x; }
+inline uint64_t ToHost64(uint64_t x) { return x; }
+
+inline constexpr bool IsLittleEndian() { return false; }
+
+#endif /* ENDIAN */
+
+// Functions to do unaligned loads and stores in big-endian order.
+inline uint16_t Load16(const void *p) {
+  return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
+}
+
+inline void Store16(void *p, uint16_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
+}
+
+inline uint32_t Load32(const void *p) {
+  return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
+}
+
+inline void Store32(void *p, uint32_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
+}
+
+inline uint64_t Load64(const void *p) {
+  return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
+}
+
+inline void Store64(void *p, uint64_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
+}
+
+}  // namespace big_endian
+
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_ENDIAN_H_
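
As a usage sketch for the Load/Store helpers above (an internal API; the function names here are illustrative), a fixed little-endian wire format can be read and written independently of host endianness:

  #include <cstdint>

  #include "absl/base/internal/endian.h"

  // Writes `len` into the first four bytes of `buf` in little-endian order.
  void WriteLengthPrefix(char* buf, uint32_t len) {
    absl::little_endian::Store32(buf, len);
  }

  // Reads the four-byte little-endian length prefix back out.
  uint32_t ReadLengthPrefix(const char* buf) {
    return absl::little_endian::Load32(buf);
  }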
diff --git a/absl/base/internal/endian_test.cc b/absl/base/internal/endian_test.cc
new file mode 100644
index 000000000000..6812214ef045
--- /dev/null
+++ b/absl/base/internal/endian_test.cc
@@ -0,0 +1,281 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/endian.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <cstdio>
+#include <limits>
+#include <random>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/base/casts.h"
+#include "absl/base/config.h"
+
+namespace absl {
+namespace {
+
+const uint64_t kInitialNumber{0x0123456789abcdef};
+const uint64_t k64Value{kInitialNumber};
+const uint32_t k32Value{0x01234567};
+const uint16_t k16Value{0x0123};
+const int kNumValuesToTest = 1000000;
+const int kRandomSeed = 12345;
+
+#ifdef ABSL_IS_BIG_ENDIAN
+const uint64_t kInitialInNetworkOrder{kInitialNumber};
+const uint64_t k64ValueLE{0xefcdab8967452301};
+const uint32_t k32ValueLE{0x67452301};
+const uint16_t k16ValueLE{0x2301};
+const uint8_t k8ValueLE{k8Value};
+const uint64_t k64IValueLE{0xefcdab89674523a1};
+const uint32_t k32IValueLE{0x67452391};
+const uint16_t k16IValueLE{0x85ff};
+const uint8_t k8IValueLE{0xff};
+const uint64_t kDoubleValueLE{0x6e861bf0f9210940};
+const uint32_t kFloatValueLE{0xd00f4940};
+const uint8_t kBoolValueLE{0x1};
+
+const uint64_t k64ValueBE{kInitialNumber};
+const uint32_t k32ValueBE{k32Value};
+const uint16_t k16ValueBE{k16Value};
+const uint8_t k8ValueBE{k8Value};
+const uint64_t k64IValueBE{0xa123456789abcdef};
+const uint32_t k32IValueBE{0x91234567};
+const uint16_t k16IValueBE{0xff85};
+const uint8_t k8IValueBE{0xff};
+const uint64_t kDoubleValueBE{0x400921f9f01b866e};
+const uint32_t kFloatValueBE{0x40490fd0};
+const uint8_t kBoolValueBE{0x1};
+#elif defined ABSL_IS_LITTLE_ENDIAN
+const uint64_t kInitialInNetworkOrder{0xefcdab8967452301};
+const uint64_t k64ValueLE{kInitialNumber};
+const uint32_t k32ValueLE{k32Value};
+const uint16_t k16ValueLE{k16Value};
+
+const uint64_t k64ValueBE{0xefcdab8967452301};
+const uint32_t k32ValueBE{0x67452301};
+const uint16_t k16ValueBE{0x2301};
+#endif
+
+template<typename T>
+std::vector<T> GenerateAllValuesForType() {
+  std::vector<T> result;
+  T next = std::numeric_limits<T>::min();
+  while (true) {
+    result.push_back(next);
+    if (next == std::numeric_limits<T>::max()) {
+      return result;
+    }
+    ++next;
+  }
+}
+
+template<typename T>
+std::vector<T> GenerateRandomIntegers(size_t numValuesToTest) {
+  std::vector<T> result;
+  std::mt19937_64 rng(kRandomSeed);
+  for (size_t i = 0; i < numValuesToTest; ++i) {
+    result.push_back(rng());
+  }
+  return result;
+}
+
+void ManualByteSwap(char* bytes, int length) {
+  if (length == 1)
+    return;
+
+  EXPECT_EQ(0, length % 2);
+  for (int i = 0; i < length / 2; ++i) {
+    int j = (length - 1) - i;
+    using std::swap;
+    swap(bytes[i], bytes[j]);
+  }
+}
+
+template<typename T>
+inline T UnalignedLoad(const char* p) {
+  static_assert(
+      sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8,
+      "Unexpected type size");
+
+  switch (sizeof(T)) {
+    case 1: return *reinterpret_cast<const T*>(p);
+    case 2:
+      return ABSL_INTERNAL_UNALIGNED_LOAD16(p);
+    case 4:
+      return ABSL_INTERNAL_UNALIGNED_LOAD32(p);
+    case 8:
+      return ABSL_INTERNAL_UNALIGNED_LOAD64(p);
+    default:
+      // Suppresses invalid "not all control paths return a value" on MSVC
+      return {};
+  }
+}
+
+template <typename T, typename ByteSwapper>
+static void GBSwapHelper(const std::vector<T>& host_values_to_test,
+                         const ByteSwapper& byte_swapper) {
+  // Test byte_swapper against a manual byte swap.
+  for (typename std::vector<T>::const_iterator it = host_values_to_test.begin();
+       it != host_values_to_test.end(); ++it) {
+    T host_value = *it;
+
+    char actual_value[sizeof(host_value)];
+    memcpy(actual_value, &host_value, sizeof(host_value));
+    byte_swapper(actual_value);
+
+    char expected_value[sizeof(host_value)];
+    memcpy(expected_value, &host_value, sizeof(host_value));
+    ManualByteSwap(expected_value, sizeof(host_value));
+
+    ASSERT_EQ(0, memcmp(actual_value, expected_value, sizeof(host_value)))
+        << "Swap output for 0x" << std::hex << host_value << " does not match. "
+        << "Expected: 0x" << UnalignedLoad<T>(expected_value) << "; "
+        << "actual: 0x" <<  UnalignedLoad<T>(actual_value);
+  }
+}
+
+void Swap16(char* bytes) {
+  ABSL_INTERNAL_UNALIGNED_STORE16(
+      bytes, gbswap_16(ABSL_INTERNAL_UNALIGNED_LOAD16(bytes)));
+}
+
+void Swap32(char* bytes) {
+  ABSL_INTERNAL_UNALIGNED_STORE32(
+      bytes, gbswap_32(ABSL_INTERNAL_UNALIGNED_LOAD32(bytes)));
+}
+
+void Swap64(char* bytes) {
+  ABSL_INTERNAL_UNALIGNED_STORE64(
+      bytes, gbswap_64(ABSL_INTERNAL_UNALIGNED_LOAD64(bytes)));
+}
+
+TEST(EndianessTest, Uint16) {
+  GBSwapHelper(GenerateAllValuesForType<uint16_t>(), &Swap16);
+}
+
+TEST(EndianessTest, Uint32) {
+  GBSwapHelper(GenerateRandomIntegers<uint32_t>(kNumValuesToTest), &Swap32);
+}
+
+TEST(EndianessTest, Uint64) {
+  GBSwapHelper(GenerateRandomIntegers<uint64_t>(kNumValuesToTest), &Swap64);
+}
+
+TEST(EndianessTest, ghtonll_gntohll) {
+  // Test that absl::ghtonl compiles correctly
+  uint32_t test = 0x01234567;
+  EXPECT_EQ(absl::gntohl(absl::ghtonl(test)), test);
+
+  uint64_t comp = absl::ghtonll(kInitialNumber);
+  EXPECT_EQ(comp, kInitialInNetworkOrder);
+  comp = absl::gntohll(kInitialInNetworkOrder);
+  EXPECT_EQ(comp, kInitialNumber);
+
+  // Test that htonll and ntohll are each others' inverse functions on a
+  // somewhat assorted batch of numbers. 37 is chosen because it is not
+  // anything particularly nice in base 2.
+  uint64_t value = 1;
+  for (int i = 0; i < 100; ++i) {
+    comp = absl::ghtonll(absl::gntohll(value));
+    EXPECT_EQ(value, comp);
+    comp = absl::gntohll(absl::ghtonll(value));
+    EXPECT_EQ(value, comp);
+    value *= 37;
+  }
+}
+
+TEST(EndianessTest, little_endian) {
+  // Check little_endian uint16_t.
+  uint64_t comp = little_endian::FromHost16(k16Value);
+  EXPECT_EQ(comp, k16ValueLE);
+  comp = little_endian::ToHost16(k16ValueLE);
+  EXPECT_EQ(comp, k16Value);
+
+  // Check little_endian uint32_t.
+  comp = little_endian::FromHost32(k32Value);
+  EXPECT_EQ(comp, k32ValueLE);
+  comp = little_endian::ToHost32(k32ValueLE);
+  EXPECT_EQ(comp, k32Value);
+
+  // Check little_endian uint64_t.
+  comp = little_endian::FromHost64(k64Value);
+  EXPECT_EQ(comp, k64ValueLE);
+  comp = little_endian::ToHost64(k64ValueLE);
+  EXPECT_EQ(comp, k64Value);
+
+  // Check little-endian Load and store functions.
+  uint16_t u16Buf;
+  uint32_t u32Buf;
+  uint64_t u64Buf;
+
+  little_endian::Store16(&u16Buf, k16Value);
+  EXPECT_EQ(u16Buf, k16ValueLE);
+  comp = little_endian::Load16(&u16Buf);
+  EXPECT_EQ(comp, k16Value);
+
+  little_endian::Store32(&u32Buf, k32Value);
+  EXPECT_EQ(u32Buf, k32ValueLE);
+  comp = little_endian::Load32(&u32Buf);
+  EXPECT_EQ(comp, k32Value);
+
+  little_endian::Store64(&u64Buf, k64Value);
+  EXPECT_EQ(u64Buf, k64ValueLE);
+  comp = little_endian::Load64(&u64Buf);
+  EXPECT_EQ(comp, k64Value);
+}
+
+TEST(EndianessTest, big_endian) {
+  // Check big-endian Load and store functions.
+  uint16_t u16Buf;
+  uint32_t u32Buf;
+  uint64_t u64Buf;
+
+  unsigned char buffer[10];
+  big_endian::Store16(&u16Buf, k16Value);
+  EXPECT_EQ(u16Buf, k16ValueBE);
+  uint64_t comp = big_endian::Load16(&u16Buf);
+  EXPECT_EQ(comp, k16Value);
+
+  big_endian::Store32(&u32Buf, k32Value);
+  EXPECT_EQ(u32Buf, k32ValueBE);
+  comp = big_endian::Load32(&u32Buf);
+  EXPECT_EQ(comp, k32Value);
+
+  big_endian::Store64(&u64Buf, k64Value);
+  EXPECT_EQ(u64Buf, k64ValueBE);
+  comp = big_endian::Load64(&u64Buf);
+  EXPECT_EQ(comp, k64Value);
+
+  big_endian::Store16(buffer + 1, k16Value);
+  EXPECT_EQ(u16Buf, k16ValueBE);
+  comp = big_endian::Load16(buffer + 1);
+  EXPECT_EQ(comp, k16Value);
+
+  big_endian::Store32(buffer + 1, k32Value);
+  EXPECT_EQ(u32Buf, k32ValueBE);
+  comp = big_endian::Load32(buffer + 1);
+  EXPECT_EQ(comp, k32Value);
+
+  big_endian::Store64(buffer + 1, k64Value);
+  EXPECT_EQ(u64Buf, k64ValueBE);
+  comp = big_endian::Load64(buffer + 1);
+  EXPECT_EQ(comp, k64Value);
+}
+
+}  // namespace
+}  // namespace absl
diff --git a/absl/base/internal/exception_testing.h b/absl/base/internal/exception_testing.h
new file mode 100644
index 000000000000..99a10734842b
--- /dev/null
+++ b/absl/base/internal/exception_testing.h
@@ -0,0 +1,24 @@
+// Testing utilities for ABSL types which throw exceptions.
+
+#ifndef ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
+#define ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+// ABSL_BASE_INTERNAL_EXPECT_FAIL tests either for a specified thrown exception
+// if exceptions are enabled, or for death with a specified text in the error
+// message.
+#ifdef ABSL_HAVE_EXCEPTIONS
+
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
+  EXPECT_THROW(expr, exception_t)
+
+#else
+
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
+  EXPECT_DEATH(expr, text)
+
+#endif
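+
+// Illustrative example (a sketch; SomeCheckedOp() is a hypothetical function
+// that throws std::out_of_range, or, when exceptions are disabled, dies with
+// "out of range" in the error message):
+//
+//   ABSL_BASE_INTERNAL_EXPECT_FAIL(SomeCheckedOp(), std::out_of_range,
+//                                  "out of range");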
+
+#endif  // ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
diff --git a/absl/base/internal/identity.h b/absl/base/internal/identity.h
new file mode 100644
index 000000000000..a6734b4d3537
--- /dev/null
+++ b/absl/base/internal/identity.h
@@ -0,0 +1,33 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_IDENTITY_H_
+#define ABSL_BASE_INTERNAL_IDENTITY_H_
+
+namespace absl {
+namespace internal {
+
+template <typename T>
+struct identity {
+  typedef T type;
+};
+
+template <typename T>
+using identity_t = typename identity<T>::type;
+
+}  //  namespace internal
+}  //  namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_IDENTITY_H_
diff --git a/absl/base/internal/invoke.h b/absl/base/internal/invoke.h
new file mode 100644
index 000000000000..8c3f4f60637c
--- /dev/null
+++ b/absl/base/internal/invoke.h
@@ -0,0 +1,188 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// absl::base_internal::Invoke(f, args...) is an implementation of
+// INVOKE(f, args...) from section [func.require] of the C++ standard.
+//
+// [func.require]
+// Define INVOKE (f, t1, t2, ..., tN) as follows:
+// 1. (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
+//    and t1 is an object of type T or a reference to an object of type T or a
+//    reference to an object of a type derived from T;
+// 2. ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
+//    class T and t1 is not one of the types described in the previous item;
+// 3. t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
+//    an object of type T or a reference to an object of type T or a reference
+//    to an object of a type derived from T;
+// 4. (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
+//    is not one of the types described in the previous item;
+// 5. f(t1, t2, ..., tN) in all other cases.
+//
+// The implementation is SFINAE-friendly: substitution failure within Invoke()
+// isn't an error.
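+//
+// Illustrative examples (a sketch added for clarity, not exhaustive):
+//
+//   struct Widget {
+//     int n;
+//     int Get() const { return n; }
+//   };
+//   Widget w{3};
+//   base_internal::Invoke(&Widget::Get, w);    // clause 1: (w.*f)()
+//   base_internal::Invoke(&Widget::Get, &w);   // clause 2: ((*(&w)).*f)()
+//   base_internal::Invoke(&Widget::n, w);      // clause 3: w.*f
+//   base_internal::Invoke(&Widget::n, &w);     // clause 4: (*(&w)).*f
+//   base_internal::Invoke([](int v) { return v; }, 1);  // clause 5: f(1)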
+
+#ifndef ABSL_BASE_INTERNAL_INVOKE_H_
+#define ABSL_BASE_INTERNAL_INVOKE_H_
+
+#include <algorithm>
+#include <type_traits>
+#include <utility>
+
+// The following code is internal implementation detail.  See the comment at the
+// top of this file for the API documentation.
+
+namespace absl {
+namespace base_internal {
+
+// The five classes below each implement one of the clauses from the definition
+// of INVOKE. The inner class template Accept<F, Args...> checks whether the
+// clause is applicable; static function template Invoke(f, args...) does the
+// invocation.
+//
+// By separating the clause selection logic from invocation we make sure that
+// Invoke() does exactly what the standard says.
+
+template <typename Derived>
+struct StrippedAccept {
+  template <typename... Args>
+  struct Accept : Derived::template AcceptImpl<typename std::remove_cv<
+                      typename std::remove_reference<Args>::type>::type...> {};
+};
+
+// (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
+// and t1 is an object of type T or a reference to an object of type T or a
+// reference to an object of a type derived from T.
+struct MemFunAndRef : StrippedAccept<MemFunAndRef> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename R, typename C, typename... Params, typename Obj,
+            typename... Args>
+  struct AcceptImpl<R (C::*)(Params...), Obj, Args...>
+      : std::is_base_of<C, Obj> {};
+
+  template <typename R, typename C, typename... Params, typename Obj,
+            typename... Args>
+  struct AcceptImpl<R (C::*)(Params...) const, Obj, Args...>
+      : std::is_base_of<C, Obj> {};
+
+  template <typename MemFun, typename Obj, typename... Args>
+  static decltype((std::declval<Obj>().*
+                   std::declval<MemFun>())(std::declval<Args>()...))
+  Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) {
+    return (std::forward<Obj>(obj).*
+            std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
+  }
+};
+
+// ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
+// class T and t1 is not one of the types described in the previous item.
+struct MemFunAndPtr : StrippedAccept<MemFunAndPtr> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename R, typename C, typename... Params, typename Ptr,
+            typename... Args>
+  struct AcceptImpl<R (C::*)(Params...), Ptr, Args...>
+      : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value> {};
+
+  template <typename R, typename C, typename... Params, typename Ptr,
+            typename... Args>
+  struct AcceptImpl<R (C::*)(Params...) const, Ptr, Args...>
+      : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value> {};
+
+  template <typename MemFun, typename Ptr, typename... Args>
+  static decltype(((*std::declval<Ptr>()).*
+                   std::declval<MemFun>())(std::declval<Args>()...))
+  Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... args) {
+    return ((*std::forward<Ptr>(ptr)).*
+            std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
+  }
+};
+
+// t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
+// an object of type T or a reference to an object of type T or a reference
+// to an object of a type derived from T.
+struct DataMemAndRef : StrippedAccept<DataMemAndRef> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename R, typename C, typename Obj>
+  struct AcceptImpl<R C::*, Obj> : std::is_base_of<C, Obj> {};
+
+  template <typename DataMem, typename Ref>
+  static decltype(std::declval<Ref>().*std::declval<DataMem>()) Invoke(
+      DataMem&& data_mem, Ref&& ref) {
+    return std::forward<Ref>(ref).*std::forward<DataMem>(data_mem);
+  }
+};
+
+// (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
+// is not one of the types described in the previous item.
+struct DataMemAndPtr : StrippedAccept<DataMemAndPtr> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename R, typename C, typename Ptr>
+  struct AcceptImpl<R C::*, Ptr>
+      : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value> {};
+
+  template <typename DataMem, typename Ptr>
+  static decltype((*std::declval<Ptr>()).*std::declval<DataMem>()) Invoke(
+      DataMem&& data_mem, Ptr&& ptr) {
+    return (*std::forward<Ptr>(ptr)).*std::forward<DataMem>(data_mem);
+  }
+};
+
+// f(t1, t2, ..., tN) in all other cases.
+struct Callable {
+  // Callable doesn't have Accept because it's the last clause that gets picked
+  // when none of the previous clauses are applicable.
+  template <typename F, typename... Args>
+  static decltype(std::declval<F>()(std::declval<Args>()...)) Invoke(
+      F&& f, Args&&... args) {
+    return std::forward<F>(f)(std::forward<Args>(args)...);
+  }
+};
+
+// Resolves to the first matching clause.
+template <typename... Args>
+struct Invoker {
+  typedef typename std::conditional<
+      MemFunAndRef::Accept<Args...>::value, MemFunAndRef,
+      typename std::conditional<
+          MemFunAndPtr::Accept<Args...>::value, MemFunAndPtr,
+          typename std::conditional<
+              DataMemAndRef::Accept<Args...>::value, DataMemAndRef,
+              typename std::conditional<DataMemAndPtr::Accept<Args...>::value,
+                                        DataMemAndPtr, Callable>::type>::type>::
+          type>::type type;
+};
+
+// The result type of Invoke<F, Args...>.
+template <typename F, typename... Args>
+using InvokeT = decltype(Invoker<F, Args...>::type::Invoke(
+    std::declval<F>(), std::declval<Args>()...));
+
+// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section
+// [func.require] of the C++ standard.
+template <typename F, typename... Args>
+InvokeT<F, Args...> Invoke(F&& f, Args&&... args) {
+  return Invoker<F, Args...>::type::Invoke(std::forward<F>(f),
+                                           std::forward<Args>(args)...);
+}
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_INVOKE_H_
diff --git a/absl/base/internal/log_severity.cc b/absl/base/internal/log_severity.cc
new file mode 100644
index 000000000000..430ac45848a0
--- /dev/null
+++ b/absl/base/internal/log_severity.cc
@@ -0,0 +1,15 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/log_severity.h"
diff --git a/absl/base/internal/log_severity.h b/absl/base/internal/log_severity.h
new file mode 100644
index 000000000000..deaf6a578925
--- /dev/null
+++ b/absl/base/internal/log_severity.h
@@ -0,0 +1,52 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_LOG_SEVERITY_H_
+#define ABSL_BASE_INTERNAL_LOG_SEVERITY_H_
+
+#include "absl/base/attributes.h"
+
+namespace absl {
+
+enum class LogSeverity : int {
+  kInfo = 0,
+  kWarning = 1,
+  kError = 2,
+  kFatal = 3,
+};
+
+constexpr const char* LogSeverityName(absl::LogSeverity s) {
+  return s == absl::LogSeverity::kInfo
+             ? "INFO"
+             : s == absl::LogSeverity::kWarning
+                   ? "WARNING"
+                   : s == absl::LogSeverity::kError
+                         ? "ERROR"
+                         : s == absl::LogSeverity::kFatal ? "FATAL" : "UNKNOWN";
+}
+
+// Note that out-of-range large severities normalize to kError, not kFatal.
+constexpr absl::LogSeverity NormalizeLogSeverity(absl::LogSeverity s) {
+  return s < absl::LogSeverity::kInfo
+             ? absl::LogSeverity::kInfo
+             : s > absl::LogSeverity::kFatal ? absl::LogSeverity::kError : s;
+}
+constexpr absl::LogSeverity NormalizeLogSeverity(int s) {
+  return NormalizeLogSeverity(static_cast<absl::LogSeverity>(s));
+}
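+
+// For example (illustrative):
+//   LogSeverityName(absl::LogSeverity::kWarning);  // "WARNING"
+//   NormalizeLogSeverity(4);   // kError (clipped from above)
+//   NormalizeLogSeverity(-1);  // kInfo  (clipped from below)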
+
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_LOG_SEVERITY_H_
diff --git a/absl/base/internal/low_level_alloc.cc b/absl/base/internal/low_level_alloc.cc
new file mode 100644
index 000000000000..c55201b38bad
--- /dev/null
+++ b/absl/base/internal/low_level_alloc.cc
@@ -0,0 +1,598 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A low-level allocator that can be used by other low-level
+// modules without introducing dependency cycles.
+// This allocator is slow and wasteful of memory;
+// it should not be used when performance is key.
+
+#include "absl/base/config.h"
+
+#include "absl/base/internal/low_level_alloc.h"
+
+// LowLevelAlloc requires that the platform support low-level
+// allocation of virtual memory. Platforms lacking this cannot use
+// LowLevelAlloc.
+#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
+
+#ifndef _WIN32
+#include <pthread.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#else
+#include <windows.h>
+#endif
+
+#include <string.h>
+#include <algorithm>
+#include <atomic>
+#include <cstddef>
+#include <cerrno>
+#include <new>                   // for placement-new
+
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/malloc_hook.h"
+#include "absl/base/internal/malloc_hook_invoke.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+
+// MAP_ANONYMOUS
+#if defined(__APPLE__)
+// For mmap, Linux defines both MAP_ANONYMOUS and MAP_ANON and says MAP_ANON is
+// deprecated. In Darwin, MAP_ANON is all there is.
+#if !defined MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif  // !MAP_ANONYMOUS
+#endif  // __APPLE__
+
+namespace absl {
+namespace base_internal {
+
+// A first-fit allocator with amortized logarithmic free() time.
+
+// ---------------------------------------------------------------------------
+static const int kMaxLevel = 30;
+
+namespace {
+// This struct describes one allocated block, or one free block.
+struct AllocList {
+  struct Header {
+    // Size of entire region, including this field. Must be
+    // first. Valid in both allocated and unallocated blocks.
+    uintptr_t size;
+
+    // kMagicAllocated or kMagicUnallocated xor this.
+    uintptr_t magic;
+
+    // Pointer to parent arena.
+    LowLevelAlloc::Arena *arena;
+
+    // Aligns regions to 0 mod 2*sizeof(void*).
+    void *dummy_for_alignment;
+  } header;
+
+  // Next two fields: in unallocated blocks: freelist skiplist data
+  //                  in allocated blocks: overlaps with client data
+
+  // Levels in skiplist used.
+  int levels;
+
+  // Actually has levels elements. The AllocList node may not have room
+  // for all kMaxLevel entries. See max_fit in LLA_SkiplistLevels().
+  AllocList *next[kMaxLevel];
+};
+}  // namespace
+
+// ---------------------------------------------------------------------------
+// A trivial skiplist implementation.  This is used to keep the freelist
+// in address order while taking only logarithmic time per insert and delete.
+
+// An integer approximation of log2(size/base)
+// Requires size >= base.
+static int IntLog2(size_t size, size_t base) {
+  int result = 0;
+  for (size_t i = size; i > base; i >>= 1) {  // i == floor(size/2**result)
+    result++;
+  }
+  //    floor(size / 2**result) <= base < floor(size / 2**(result-1))
+  // =>     log2(size/(base+1)) <= result < 1+log2(size/base)
+  // => result ~= log2(size/base)
+  return result;
+}
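+// For example (illustrative): IntLog2(4096, 16) == 8, since
+// floor(4096 / 2**8) == 16 == base.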
+
+// Return a random integer n:  p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
+static int Random(uint32_t *state) {
+  uint32_t r = *state;
+  int result = 1;
+  while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
+    result++;
+  }
+  *state = r;
+  return result;
+}
+
+// Return a number of skiplist levels for a node of size bytes, where
+// base is the minimum node size.  Compute level=log2(size / base)+n
+// where n is 1 if random is null and otherwise a random number generated with
+// the standard distribution for a skiplist:  See Random() above.
+// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
+// term, so first-fit searches touch fewer nodes.  "level" is clipped so
+// level<kMaxLevel and next[level-1] will fit in the node.
+// 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel
+static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
+  // max_fit is the maximum number of levels that will fit in a node for the
+  // given size.   We can't return more than max_fit, no matter what the
+  // random number generator says.
+  size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
+  int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
+  if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
+  if (level > kMaxLevel-1) level = kMaxLevel - 1;
+  ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
+  return level;
+}
+
+// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
+// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
+// points to the last element at level i in the AllocList less than *e, or is
+// head if no such element exists.
+static AllocList *LLA_SkiplistSearch(AllocList *head,
+                                     AllocList *e, AllocList **prev) {
+  AllocList *p = head;
+  for (int level = head->levels - 1; level >= 0; level--) {
+    for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
+    }
+    prev[level] = p;
+  }
+  return (head->levels == 0) ? nullptr : prev[0]->next[0];
+}
+
+// Insert element *e into AllocList *head.  Set prev[] as LLA_SkiplistSearch.
+// Requires that e->levels be previously set by the caller (using
+// LLA_SkiplistLevels())
+static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
+                               AllocList **prev) {
+  LLA_SkiplistSearch(head, e, prev);
+  for (; head->levels < e->levels; head->levels++) {  // extend prev pointers
+    prev[head->levels] = head;                        // to all *e's levels
+  }
+  for (int i = 0; i != e->levels; i++) {  // add element to list
+    e->next[i] = prev[i]->next[i];
+    prev[i]->next[i] = e;
+  }
+}
+
+// Remove element *e from AllocList *head.  Set prev[] as LLA_SkiplistSearch().
+// Requires that e->levels be previously set by the caller (using
+// LLA_SkiplistLevels())
+static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
+                               AllocList **prev) {
+  AllocList *found = LLA_SkiplistSearch(head, e, prev);
+  ABSL_RAW_CHECK(e == found, "element not in freelist");
+  for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
+    prev[i]->next[i] = e->next[i];
+  }
+  while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
+    head->levels--;   // reduce head->levels if level unused
+  }
+}
+
+// ---------------------------------------------------------------------------
+// Arena implementation
+
+struct LowLevelAlloc::Arena {
+  // This constructor does nothing, and relies on zero-initialization to get
+  // the proper initial state.
+  Arena() : mu(base_internal::kLinkerInitialized) {}  // NOLINT
+  explicit Arena(int)  // NOLINT(readability/casting)
+      :  // Avoid recursive cooperative scheduling w/ kernel scheduling.
+        mu(base_internal::SCHEDULE_KERNEL_ONLY),
+        // Set pagesize to zero explicitly for non-static init.
+        pagesize(0),
+        random(0) {}
+
+  base_internal::SpinLock mu;   // protects freelist, allocation_count,
+                                // pagesize, roundup, min_size
+  AllocList freelist;           // head of free list; sorted by addr (under mu)
+  int32_t allocation_count;     // count of allocated blocks (under mu)
+  std::atomic<uint32_t> flags;  // flags passed to NewArena (ro after init)
+  size_t pagesize;              // ==getpagesize()  (init under mu, then ro)
+  size_t roundup;               // lowest 2^n >= max(16,sizeof (AllocList))
+                                // (init under mu, then ro)
+  size_t min_size;              // smallest allocation block size
+                                // (init under mu, then ro)
+  uint32_t random;              // PRNG state
+};
+
+// The default arena, which is used when 0 is passed instead of an Arena
+// pointer.
+static struct LowLevelAlloc::Arena default_arena;  // NOLINT
+
+// Non-malloc-hooked arenas: used only to allocate metadata for arenas that
+// do not want malloc hook reporting, so that for them there's no malloc hook
+// reporting even during arena creation.
+static struct LowLevelAlloc::Arena unhooked_arena;  // NOLINT
+
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+static struct LowLevelAlloc::Arena unhooked_async_sig_safe_arena;  // NOLINT
+#endif
+
+// magic numbers to identify allocated and unallocated blocks
+static const uintptr_t kMagicAllocated = 0x4c833e95U;
+static const uintptr_t kMagicUnallocated = ~kMagicAllocated;
+
+namespace {
+class SCOPED_LOCKABLE ArenaLock {
+ public:
+  explicit ArenaLock(LowLevelAlloc::Arena *arena)
+      EXCLUSIVE_LOCK_FUNCTION(arena->mu)
+      : arena_(arena) {
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    if (arena == &unhooked_async_sig_safe_arena ||
+        (arena->flags.load(std::memory_order_relaxed) &
+         LowLevelAlloc::kAsyncSignalSafe) != 0) {
+      sigset_t all;
+      sigfillset(&all);
+      mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0;
+    }
+#endif
+    arena_->mu.Lock();
+  }
+  ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
+  void Leave() UNLOCK_FUNCTION() {
+    arena_->mu.Unlock();
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    if (mask_valid_) {
+      pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
+    }
+#endif
+    left_ = true;
+  }
+
+ private:
+  bool left_ = false;  // whether left region
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+  bool mask_valid_ = false;
+  sigset_t mask_;  // old mask of blocked signals
+#endif
+  LowLevelAlloc::Arena *arena_;
+  ArenaLock(const ArenaLock &) = delete;
+  ArenaLock &operator=(const ArenaLock &) = delete;
+};
+}  // namespace
+
+// create an appropriate magic number for an object at "ptr"
+// "magic" should be kMagicAllocated or kMagicUnallocated
+inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) {
+  return magic ^ reinterpret_cast<uintptr_t>(ptr);
+}
+
+// Initialize the fields of an Arena
+static void ArenaInit(LowLevelAlloc::Arena *arena) {
+  if (arena->pagesize == 0) {
+#ifdef _WIN32
+    SYSTEM_INFO system_info;
+    GetSystemInfo(&system_info);
+    arena->pagesize = std::max(system_info.dwPageSize,
+                               system_info.dwAllocationGranularity);
+#else
+    arena->pagesize = getpagesize();
+#endif
+    // Round up block sizes to a power of two close to the header size.
+    arena->roundup = 16;
+    while (arena->roundup < sizeof (arena->freelist.header)) {
+      arena->roundup += arena->roundup;
+    }
+    // Don't allocate blocks less than twice the roundup size to avoid tiny
+    // free blocks.
+    arena->min_size = 2 * arena->roundup;
+    arena->freelist.header.size = 0;
+    arena->freelist.header.magic =
+        Magic(kMagicUnallocated, &arena->freelist.header);
+    arena->freelist.header.arena = arena;
+    arena->freelist.levels = 0;
+    memset(arena->freelist.next, 0, sizeof (arena->freelist.next));
+    arena->allocation_count = 0;
+    if (arena == &default_arena) {
+      // Default arena should be hooked, e.g. for heap-checker to trace
+      // pointer chains through objects in the default arena.
+      arena->flags.store(LowLevelAlloc::kCallMallocHook,
+                         std::memory_order_relaxed);
+    }
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    else if (arena ==  // NOLINT(readability/braces)
+             &unhooked_async_sig_safe_arena) {
+      arena->flags.store(LowLevelAlloc::kAsyncSignalSafe,
+                         std::memory_order_relaxed);
+    }
+#endif
+    else {  // NOLINT(readability/braces)
+      // other arenas' flags may be overridden by client,
+      // but unhooked_arena will have 0 in 'flags'.
+      arena->flags.store(0, std::memory_order_relaxed);
+    }
+  }
+}
+
+// L < meta_data_arena->mu
+LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32_t flags,
+                                              Arena *meta_data_arena) {
+  ABSL_RAW_CHECK(meta_data_arena != nullptr, "must pass a valid arena");
+  if (meta_data_arena == &default_arena) {
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+      meta_data_arena = &unhooked_async_sig_safe_arena;
+    } else  // NOLINT(readability/braces)
+#endif
+        if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
+      meta_data_arena = &unhooked_arena;
+    }
+  }
+  // Arena(0) uses the constructor for non-static contexts
+  Arena *result =
+    new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(0);
+  ArenaInit(result);
+  result->flags.store(flags, std::memory_order_relaxed);
+  return result;
+}
+
+// L < arena->mu, L < arena->arena->mu
+bool LowLevelAlloc::DeleteArena(Arena *arena) {
+  ABSL_RAW_CHECK(
+      arena != nullptr && arena != &default_arena && arena != &unhooked_arena,
+      "may not delete default arena");
+  ArenaLock section(arena);
+  bool empty = (arena->allocation_count == 0);
+  section.Leave();
+  if (empty) {
+    while (arena->freelist.next[0] != nullptr) {
+      AllocList *region = arena->freelist.next[0];
+      size_t size = region->header.size;
+      arena->freelist.next[0] = region->next[0];
+      ABSL_RAW_CHECK(
+          region->header.magic == Magic(kMagicUnallocated, &region->header),
+          "bad magic number in DeleteArena()");
+      ABSL_RAW_CHECK(region->header.arena == arena,
+                     "bad arena pointer in DeleteArena()");
+      ABSL_RAW_CHECK(size % arena->pagesize == 0,
+                     "empty arena has non-page-aligned block size");
+      ABSL_RAW_CHECK(reinterpret_cast<uintptr_t>(region) % arena->pagesize == 0,
+                     "empty arena has non-page-aligned block");
+      int munmap_result;
+#ifdef _WIN32
+      munmap_result = VirtualFree(region, 0, MEM_RELEASE);
+      ABSL_RAW_CHECK(munmap_result != 0,
+                     "LowLevelAlloc::DeleteArena: VirtualFree failed");
+#else
+      if ((arena->flags.load(std::memory_order_relaxed) &
+           LowLevelAlloc::kAsyncSignalSafe) == 0) {
+        munmap_result = munmap(region, size);
+      } else {
+        munmap_result = MallocHook::UnhookedMUnmap(region, size);
+      }
+      if (munmap_result != 0) {
+        ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d",
+                     errno);
+      }
+#endif
+    }
+    Free(arena);
+  }
+  return empty;
+}
+
+// ---------------------------------------------------------------------------
+
+// Addition, checking for overflow.  The intent is to die if an external client
+// manages to push through a request that would cause arithmetic to fail.
+static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) {
+  uintptr_t sum = a + b;
+  ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow");
+  return sum;
+}
+
+// Return value rounded up to next multiple of align.
+// align must be a power of two.
+static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {
+  return CheckedAdd(addr, align - 1) & ~(align - 1);
+}
+
+// Equivalent to "return prev->next[i]" but with sanity checking
+// that the freelist is in the correct order, that it
+// consists of regions marked "unallocated", and that no two regions
+// are adjacent in memory (they should have been coalesced).
+// L < arena->mu
+static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
+  ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()");
+  AllocList *next = prev->next[i];
+  if (next != nullptr) {
+    ABSL_RAW_CHECK(
+        next->header.magic == Magic(kMagicUnallocated, &next->header),
+        "bad magic number in Next()");
+    ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()");
+    if (prev != &arena->freelist) {
+      ABSL_RAW_CHECK(prev < next, "unordered freelist");
+      ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
+                         reinterpret_cast<char *>(next),
+                     "malformed freelist");
+    }
+  }
+  return next;
+}
+
+// Coalesce list item "a" with its successor if they are adjacent.
+static void Coalesce(AllocList *a) {
+  AllocList *n = a->next[0];
+  if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size ==
+                          reinterpret_cast<char *>(n)) {
+    LowLevelAlloc::Arena *arena = a->header.arena;
+    a->header.size += n->header.size;
+    n->header.magic = 0;
+    n->header.arena = nullptr;
+    AllocList *prev[kMaxLevel];
+    LLA_SkiplistDelete(&arena->freelist, n, prev);
+    LLA_SkiplistDelete(&arena->freelist, a, prev);
+    a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size,
+                                   &arena->random);
+    LLA_SkiplistInsert(&arena->freelist, a, prev);
+  }
+}
+
+// Adds block at location "v" to the free list
+// L >= arena->mu
+static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
+  AllocList *f = reinterpret_cast<AllocList *>(
+                        reinterpret_cast<char *>(v) - sizeof (f->header));
+  ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
+                 "bad magic number in AddToFreelist()");
+  ABSL_RAW_CHECK(f->header.arena == arena,
+                 "bad arena pointer in AddToFreelist()");
+  f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size,
+                                 &arena->random);
+  AllocList *prev[kMaxLevel];
+  LLA_SkiplistInsert(&arena->freelist, f, prev);
+  f->header.magic = Magic(kMagicUnallocated, &f->header);
+  Coalesce(f);                  // maybe coalesce with successor
+  Coalesce(prev[0]);            // maybe coalesce with predecessor
+}
+
+// Frees storage allocated by LowLevelAlloc::Alloc().
+// L < arena->mu
+void LowLevelAlloc::Free(void *v) {
+  if (v != nullptr) {
+    AllocList *f = reinterpret_cast<AllocList *>(
+                        reinterpret_cast<char *>(v) - sizeof (f->header));
+    ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
+                   "bad magic number in Free()");
+    LowLevelAlloc::Arena *arena = f->header.arena;
+    if ((arena->flags.load(std::memory_order_relaxed) & kCallMallocHook) != 0) {
+      MallocHook::InvokeDeleteHook(v);
+    }
+    ArenaLock section(arena);
+    AddToFreelist(v, arena);
+    ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
+    arena->allocation_count--;
+    section.Leave();
+  }
+}
+
+// allocates and returns a block of size bytes, to be freed with Free()
+// L < arena->mu
+static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
+  void *result = nullptr;
+  if (request != 0) {
+    AllocList *s;       // will point to region that satisfies request
+    ArenaLock section(arena);
+    ArenaInit(arena);
+    // round up with header
+    size_t req_rnd = RoundUp(CheckedAdd(request, sizeof (s->header)),
+                             arena->roundup);
+    for (;;) {      // loop until we find a suitable region
+      // find the minimum levels that a block of this size must have
+      int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
+      if (i < arena->freelist.levels) {   // potential blocks exist
+        AllocList *before = &arena->freelist;  // predecessor of s
+        while ((s = Next(i, before, arena)) != nullptr &&
+               s->header.size < req_rnd) {
+          before = s;
+        }
+        if (s != nullptr) {       // we found a region
+          break;
+        }
+      }
+      // we unlock before mmap() both because mmap() may call a callback hook,
+      // and because it may be slow.
+      arena->mu.Unlock();
+      // mmap generous 64K chunks to decrease
+      // the chances/impact of fragmentation:
+      size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
+      void *new_pages;
+#ifdef _WIN32
+      new_pages = VirtualAlloc(0, new_pages_size,
+                               MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+      ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
+#else
+      if ((arena->flags.load(std::memory_order_relaxed) &
+           LowLevelAlloc::kAsyncSignalSafe) != 0) {
+        new_pages = MallocHook::UnhookedMMap(nullptr, new_pages_size,
+            PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+      } else {
+        new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
+                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+      }
+      if (new_pages == MAP_FAILED) {
+        ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
+      }
+#endif
+      arena->mu.Lock();
+      s = reinterpret_cast<AllocList *>(new_pages);
+      s->header.size = new_pages_size;
+      // Pretend the block is allocated; call AddToFreelist() to free it.
+      s->header.magic = Magic(kMagicAllocated, &s->header);
+      s->header.arena = arena;
+      AddToFreelist(&s->levels, arena);  // insert new region into free list
+    }
+    AllocList *prev[kMaxLevel];
+    LLA_SkiplistDelete(&arena->freelist, s, prev);    // remove from free list
+    // s points to the first free region that's big enough
+    if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
+      // big enough to split
+      AllocList *n = reinterpret_cast<AllocList *>
+                        (req_rnd + reinterpret_cast<char *>(s));
+      n->header.size = s->header.size - req_rnd;
+      n->header.magic = Magic(kMagicAllocated, &n->header);
+      n->header.arena = arena;
+      s->header.size = req_rnd;
+      AddToFreelist(&n->levels, arena);
+    }
+    s->header.magic = Magic(kMagicAllocated, &s->header);
+    ABSL_RAW_CHECK(s->header.arena == arena, "");
+    arena->allocation_count++;
+    section.Leave();
+    result = &s->levels;
+  }
+  ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
+  return result;
+}
+
+void *LowLevelAlloc::Alloc(size_t request) {
+  void *result = DoAllocWithArena(request, &default_arena);
+  if ((default_arena.flags.load(std::memory_order_relaxed) &
+       kCallMallocHook) != 0) {
+    // this call must be directly in the user-called allocator function
+    // for MallocHook::GetCallerStackTrace to work properly
+    MallocHook::InvokeNewHook(result, request);
+  }
+  return result;
+}
+
+void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
+  ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
+  void *result = DoAllocWithArena(request, arena);
+  if ((arena->flags.load(std::memory_order_relaxed) & kCallMallocHook) != 0) {
+    // this call must be directly in the user-called allocator function
+    // for MallocHook::GetCallerStackTrace to work properly
+    MallocHook::InvokeNewHook(result, request);
+  }
+  return result;
+}
+
+LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
+  return &default_arena;
+}
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING
diff --git a/absl/base/internal/low_level_alloc.h b/absl/base/internal/low_level_alloc.h
new file mode 100644
index 000000000000..d6eb813f0d6a
--- /dev/null
+++ b/absl/base/internal/low_level_alloc.h
@@ -0,0 +1,120 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
+#define ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
+
+// A simple thread-safe memory allocator that does not depend on
+// mutexes or thread-specific data.  It is intended to be used
+// sparingly, and only when malloc() would introduce an unwanted
+// dependency, such as inside the heap-checker, or the Mutex
+// implementation.
+
+// IWYU pragma: private, include "base/low_level_alloc.h"
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+
+// LowLevelAlloc requires that the platform support low-level
+// allocation of virtual memory. Platforms lacking this cannot use
+// LowLevelAlloc.
+#ifdef ABSL_LOW_LEVEL_ALLOC_MISSING
+#error ABSL_LOW_LEVEL_ALLOC_MISSING cannot be directly set
+#elif !defined(ABSL_HAVE_MMAP) && !defined(_WIN32)
+#define ABSL_LOW_LEVEL_ALLOC_MISSING 1
+#endif
+
+// Using LowLevelAlloc with kAsyncSignalSafe isn't supported on Windows.
+#ifdef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+#error ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING cannot be directly set
+#elif defined(_WIN32)
+#define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1
+#endif
+
+#include <cstddef>
+
+#include "absl/base/port.h"
+
+namespace absl {
+namespace base_internal {
+
+class LowLevelAlloc {
+ public:
+  struct Arena;       // an arena from which memory may be allocated
+
+  // Returns a pointer to a block of at least "request" bytes
+  // that have been newly allocated from the specified arena;
+  // for an Alloc() call, the DefaultArena() is used.
+  // Returns 0 if passed request==0.
+  // Does not return 0 under other circumstances; it crashes if memory
+  // is not available.
+  static void *Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+  static void *AllocWithArena(size_t request, Arena *arena)
+      ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+  // Deallocates a region of memory that was previously allocated with
+  // Alloc().   Does nothing if passed 0.   "s" must be either 0,
+  // or must have been returned from a call to Alloc() and not yet passed to
+  // Free() since that call to Alloc().  The space is returned to the arena
+  // from which it was allocated.
+  static void Free(void *s) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+  // The ABSL_ATTRIBUTE_SECTION(malloc_hook) attributes on Alloc* and Free
+  // put all callers of MallocHook::Invoke* in this module
+  // into a special section,
+  // so that MallocHook::GetCallerStackTrace can function accurately.
+
+  // Create a new arena.
+  // The root metadata for the new arena is allocated in the
+  // meta_data_arena; the DefaultArena() can be passed for meta_data_arena.
+  // These values may be ORed into flags:
+  enum {
+    // Report calls to Alloc() and Free() via the MallocHook interface.
+    // Set in the DefaultArena.
+    kCallMallocHook = 0x0001,
+
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    // Make calls to Alloc(), Free() be async-signal-safe. Not set in
+    // DefaultArena(). Not supported on all platforms.
+    kAsyncSignalSafe = 0x0002,
+#endif
+
+    // When used with DefaultArena(), the NewArena() and DeleteArena() calls
+    // obey the flags given explicitly in the NewArena() call, even if those
+    // flags differ from the settings in DefaultArena().  So the call
+    // NewArena(kAsyncSignalSafe, DefaultArena()) is itself async-signal-safe,
+  // as well as generating an arena that provides async-signal-safe
+    // Alloc/Free.
+  };
+  static Arena *NewArena(int32_t flags, Arena *meta_data_arena);
+
+  // Destroys an arena allocated by NewArena and returns true,
+  // provided no allocated blocks remain in the arena.
+  // If allocated blocks remain in the arena, does nothing and
+  // returns false.
+  // It is illegal to attempt to destroy the DefaultArena().
+  static bool DeleteArena(Arena *arena);
+
+  // The default arena that always exists.
+  static Arena *DefaultArena();
+
+ private:
+  LowLevelAlloc();      // no instances
+};
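+
+// Illustrative usage (a sketch, not part of the original header):
+//
+//   LowLevelAlloc::Arena *arena =
+//       LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
+//   void *p = LowLevelAlloc::AllocWithArena(64, arena);
+//   LowLevelAlloc::Free(p);             // returns the block to its arena
+//   LowLevelAlloc::DeleteArena(arena);  // succeeds once no blocks remain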
+
+}  // namespace base_internal
+}  // namespace absl
+#endif  // ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
diff --git a/absl/base/internal/low_level_alloc_test.cc b/absl/base/internal/low_level_alloc_test.cc
new file mode 100644
index 000000000000..5f60c3d9dfb9
--- /dev/null
+++ b/absl/base/internal/low_level_alloc_test.cc
@@ -0,0 +1,203 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/low_level_alloc.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <thread>  // NOLINT(build/c++11)
+#include <unordered_map>
+
+#include "absl/base/internal/malloc_hook.h"
+
+namespace absl {
+namespace base_internal {
+namespace {
+
+// This test doesn't use gtest since it needs to test that everything
+// works before main().
+#define TEST_ASSERT(x)                                           \
+  if (!(x)) {                                                    \
+    printf("TEST_ASSERT(%s) FAILED ON LINE %d\n", #x, __LINE__); \
+    abort();                                                     \
+  }
+
+// a block of memory obtained from the allocator
+struct BlockDesc {
+  char *ptr;      // pointer to memory
+  int len;        // number of bytes
+  int fill;       // filled with data starting with this
+};
+
+// Check that the pattern placed in the block d
+// by RandomizeBlockDesc is still there.
+static void CheckBlockDesc(const BlockDesc &d) {
+  for (int i = 0; i != d.len; i++) {
+    TEST_ASSERT((d.ptr[i] & 0xff) == ((d.fill + i) & 0xff));
+  }
+}
+
+// Fill the block "*d" with a pattern
+// starting with a random byte.
+static void RandomizeBlockDesc(BlockDesc *d) {
+  d->fill = rand() & 0xff;
+  for (int i = 0; i != d->len; i++) {
+    d->ptr[i] = (d->fill + i) & 0xff;
+  }
+}
+
+// Used to indicate to the malloc hooks that
+// this call is from LowLevelAlloc.
+static bool using_low_level_alloc = false;
+
+// n times, toss a coin, and based on the outcome
+// either allocate a new block or deallocate an old block.
+// New blocks are placed in a std::unordered_map with a random key
+// and initialized with RandomizeBlockDesc().
+// If keys conflict, the older block is freed.
+// Old blocks are always checked with CheckBlockDesc()
+// before being freed.  At the end of the run,
+// all remaining allocated blocks are freed.
+// If use_new_arena is true, use a fresh arena, and then delete it.
+// If call_malloc_hook is true and user_arena is true,
+// allocations and deallocations are reported via the MallocHook
+// interface.
+static void Test(bool use_new_arena, bool call_malloc_hook, int n) {
+  typedef std::unordered_map<int, BlockDesc> AllocMap;
+  AllocMap allocated;
+  AllocMap::iterator it;
+  BlockDesc block_desc;
+  int rnd;
+  LowLevelAlloc::Arena *arena = 0;
+  if (use_new_arena) {
+    int32_t flags = call_malloc_hook ? LowLevelAlloc::kCallMallocHook : 0;
+    arena = LowLevelAlloc::NewArena(flags, LowLevelAlloc::DefaultArena());
+  }
+  for (int i = 0; i != n; i++) {
+    if (i != 0 && i % 10000 == 0) {
+      printf(".");
+      fflush(stdout);
+    }
+
+    switch (rand() & 1) {      // toss a coin
+    case 0:     // coin came up heads: add a block
+      using_low_level_alloc = true;
+      block_desc.len = rand() & 0x3fff;
+      block_desc.ptr =
+        reinterpret_cast<char *>(
+                        arena == 0
+                        ? LowLevelAlloc::Alloc(block_desc.len)
+                        : LowLevelAlloc::AllocWithArena(block_desc.len, arena));
+      using_low_level_alloc = false;
+      RandomizeBlockDesc(&block_desc);
+      rnd = rand();
+      it = allocated.find(rnd);
+      if (it != allocated.end()) {
+        CheckBlockDesc(it->second);
+        using_low_level_alloc = true;
+        LowLevelAlloc::Free(it->second.ptr);
+        using_low_level_alloc = false;
+        it->second = block_desc;
+      } else {
+        allocated[rnd] = block_desc;
+      }
+      break;
+    case 1:     // coin came up tails: remove a block
+      it = allocated.begin();
+      if (it != allocated.end()) {
+        CheckBlockDesc(it->second);
+        using_low_level_alloc = true;
+        LowLevelAlloc::Free(it->second.ptr);
+        using_low_level_alloc = false;
+        allocated.erase(it);
+      }
+      break;
+    }
+  }
+  // remove all remaining blocks
+  while ((it = allocated.begin()) != allocated.end()) {
+    CheckBlockDesc(it->second);
+    using_low_level_alloc = true;
+    LowLevelAlloc::Free(it->second.ptr);
+    using_low_level_alloc = false;
+    allocated.erase(it);
+  }
+  if (use_new_arena) {
+    TEST_ASSERT(LowLevelAlloc::DeleteArena(arena));
+  }
+}
+
+// used for counting allocates and frees
+static int32_t allocates;
+static int32_t frees;
+
+// ignore uses of the allocator not triggered by our test
+static std::thread::id* test_tid;
+
+// called on each alloc if kCallMallocHook specified
+static void AllocHook(const void *p, size_t size) {
+  if (using_low_level_alloc) {
+    if (*test_tid == std::this_thread::get_id()) {
+      allocates++;
+    }
+  }
+}
+
+// called on each free if kCallMallocHook specified
+static void FreeHook(const void *p) {
+  if (using_low_level_alloc) {
+    if (*test_tid == std::this_thread::get_id()) {
+      frees++;
+    }
+  }
+}
+
+// LowLevelAlloc is designed to be safe to call before main().
+static struct BeforeMain {
+  BeforeMain() {
+    test_tid = new std::thread::id(std::this_thread::get_id());
+    TEST_ASSERT(MallocHook::AddNewHook(&AllocHook));
+    TEST_ASSERT(MallocHook::AddDeleteHook(&FreeHook));
+    TEST_ASSERT(allocates == 0);
+    TEST_ASSERT(frees == 0);
+    Test(false, false, 50000);
+    TEST_ASSERT(allocates != 0);  // default arena calls hooks
+    TEST_ASSERT(frees != 0);
+    for (int i = 0; i != 16; i++) {
+      bool call_hooks = ((i & 1) == 1);
+      allocates = 0;
+      frees = 0;
+      Test(true, call_hooks, 15000);
+      if (call_hooks) {
+        TEST_ASSERT(allocates > 5000);  // arena calls hooks
+        TEST_ASSERT(frees > 5000);
+      } else {
+        TEST_ASSERT(allocates == 0);  // arena doesn't call hooks
+        TEST_ASSERT(frees == 0);
+      }
+    }
+    TEST_ASSERT(MallocHook::RemoveNewHook(&AllocHook));
+    TEST_ASSERT(MallocHook::RemoveDeleteHook(&FreeHook));
+  }
+} before_main;
+
+}  // namespace
+}  // namespace base_internal
+}  // namespace absl
+
+int main(int argc, char *argv[]) {
+  // The actual test runs in the global constructor of `before_main`.
+  printf("PASS\n");
+  return 0;
+}
diff --git a/absl/base/internal/low_level_scheduling.h b/absl/base/internal/low_level_scheduling.h
new file mode 100644
index 000000000000..b9501076ec57
--- /dev/null
+++ b/absl/base/internal/low_level_scheduling.h
@@ -0,0 +1,104 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Core interfaces and definitions used by low-level //base interfaces such
+// as SpinLock.
+
+#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+#define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/macros.h"
+
+// The following two declarations exist so SchedulingGuard may friend them with
+// the appropriate language linkage.  These callbacks allow libc internals, such
+// as function level statics, to schedule cooperatively when locking.
+extern "C" bool __google_disable_rescheduling(void);
+extern "C" void __google_enable_rescheduling(bool disable_result);
+
+namespace absl {
+namespace base_internal {
+
+class SpinLock;          // To allow use of SchedulingGuard.
+class SchedulingHelper;  // To allow use of SchedulingGuard.
+
+// SchedulingGuard
+// Provides guard semantics that may be used to disable cooperative rescheduling
+// of the calling thread within specific program blocks.  This is used to
+// protect resources (e.g. low-level SpinLocks or Domain code) that cooperative
+// scheduling depends on.
+//
+// Domain implementations capable of rescheduling in reaction to involuntary
+// kernel thread actions (e.g. blocking due to a page fault or syscall) must
+// guarantee that an annotated thread is not allowed to (cooperatively)
+// reschedule until the annotated region is complete.
+//
+// It is an error to attempt to use a cooperatively scheduled resource (e.g.
+// Mutex) within a rescheduling-disabled region.
+//
+// All methods are async-signal safe.
+class SchedulingGuard {
+ public:
+  // Returns true iff the calling thread may be cooperatively rescheduled.
+  static bool ReschedulingIsAllowed();
+
+ private:
+  // Disable cooperative rescheduling of the calling thread.  It may still
+  // initiate scheduling operations (e.g. wake-ups), however, it may not itself
+  // reschedule.  Nestable.  The returned result is opaque, clients should not
+  // attempt to interpret it.
+  // REQUIRES: Result must be passed to a pairing EnableScheduling().
+  static bool DisableRescheduling();
+
+  // Marks the end of a rescheduling disabled region, previously started by
+  // DisableRescheduling().
+  // REQUIRES: Pairs with innermost call (and result) of DisableRescheduling().
+  static void EnableRescheduling(bool disable_result);
+
+  // A scoped helper for {Disable, Enable}Rescheduling().
+  // REQUIRES: destructor must run in same thread as constructor.
+  struct ScopedDisable {
+    ScopedDisable() { disabled = SchedulingGuard::DisableRescheduling(); }
+    ~ScopedDisable() { SchedulingGuard::EnableRescheduling(disabled); }
+
+    bool disabled;
+  };
+
+  // Access to SchedulingGuard is explicitly white-listed.
+  friend class SchedulingHelper;
+  friend class SpinLock;
+
+  SchedulingGuard(const SchedulingGuard&) = delete;
+  SchedulingGuard& operator=(const SchedulingGuard&) = delete;
+};
+
+//------------------------------------------------------------------------------
+// End of public interfaces.
+//------------------------------------------------------------------------------
+inline bool SchedulingGuard::ReschedulingIsAllowed() {
+  return false;
+}
+
+inline bool SchedulingGuard::DisableRescheduling() {
+  return false;
+}
+
+inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) {
+  return;
+}
+
+
+}  // namespace base_internal
+}  // namespace absl
+#endif  // ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
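SchedulingGuard::ScopedDisable is private, so only the friend classes named in the header can use it. A minimal sketch of how such a friend might bracket a critical region (the member function is hypothetical, and in this initial import the guard methods are no-op inlines anyway):

```cpp
#include "absl/base/internal/low_level_scheduling.h"

namespace absl {
namespace base_internal {

// SchedulingHelper is forward-declared and friended in the header; this body
// is purely illustrative.
class SchedulingHelper {
 public:
  void DoLowLevelWork() {
    // Cooperative rescheduling stays disabled for the lifetime of this scope.
    SchedulingGuard::ScopedDisable no_reschedule;
    // ... touch state that cooperative scheduling depends on ...
  }  // EnableRescheduling(no_reschedule.disabled) runs on scope exit.
};

}  // namespace base_internal
}  // namespace absl
```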
diff --git a/absl/base/internal/malloc_extension.cc b/absl/base/internal/malloc_extension.cc
new file mode 100644
index 000000000000..daf4bc32e768
--- /dev/null
+++ b/absl/base/internal/malloc_extension.cc
@@ -0,0 +1,197 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/malloc_extension.h"
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <atomic>
+#include <string>
+
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/malloc_extension_c.h"
+#include "absl/base/port.h"
+
+namespace absl {
+namespace base_internal {
+
+// SysAllocator implementation
+SysAllocator::~SysAllocator() {}
+void SysAllocator::GetStats(char* buffer, int) { buffer[0] = 0; }
+
+// Default implementation -- does nothing
+MallocExtension::~MallocExtension() { }
+bool MallocExtension::VerifyAllMemory() { return true; }
+bool MallocExtension::VerifyNewMemory(const void*) { return true; }
+bool MallocExtension::VerifyArrayNewMemory(const void*) { return true; }
+bool MallocExtension::VerifyMallocMemory(const void*) { return true; }
+
+bool MallocExtension::GetNumericProperty(const char*, size_t*) {
+  return false;
+}
+
+bool MallocExtension::SetNumericProperty(const char*, size_t) {
+  return false;
+}
+
+void MallocExtension::GetStats(char* buffer, int length) {
+  assert(length > 0);
+  static_cast<void>(length);
+  buffer[0] = '\0';
+}
+
+bool MallocExtension::MallocMemoryStats(int* blocks, size_t* total,
+                                        int histogram[kMallocHistogramSize]) {
+  *blocks = 0;
+  *total = 0;
+  memset(histogram, 0, sizeof(*histogram) * kMallocHistogramSize);
+  return true;
+}
+
+void MallocExtension::MarkThreadIdle() {
+  // Default implementation does nothing
+}
+
+void MallocExtension::MarkThreadBusy() {
+  // Default implementation does nothing
+}
+
+SysAllocator* MallocExtension::GetSystemAllocator() {
+  return nullptr;
+}
+
+void MallocExtension::SetSystemAllocator(SysAllocator*) {
+  // Default implementation does nothing
+}
+
+void MallocExtension::ReleaseToSystem(size_t) {
+  // Default implementation does nothing
+}
+
+void MallocExtension::ReleaseFreeMemory() {
+  ReleaseToSystem(static_cast<size_t>(-1));   // SIZE_T_MAX
+}
+
+void MallocExtension::SetMemoryReleaseRate(double) {
+  // Default implementation does nothing
+}
+
+double MallocExtension::GetMemoryReleaseRate() {
+  return -1.0;
+}
+
+size_t MallocExtension::GetEstimatedAllocatedSize(size_t size) {
+  return size;
+}
+
+size_t MallocExtension::GetAllocatedSize(const void* p) {
+  assert(GetOwnership(p) != kNotOwned);
+  static_cast<void>(p);
+  return 0;
+}
+
+MallocExtension::Ownership MallocExtension::GetOwnership(const void*) {
+  return kUnknownOwnership;
+}
+
+void MallocExtension::GetProperties(MallocExtension::StatLevel,
+                                    std::map<std::string, Property>* result) {
+  result->clear();
+}
+
+size_t MallocExtension::ReleaseCPUMemory(int) {
+  return 0;
+}
+
+// The current malloc extension object.
+
+std::atomic<MallocExtension*> MallocExtension::current_instance_;
+
+MallocExtension* MallocExtension::InitModule() {
+  MallocExtension* ext = new MallocExtension;
+  current_instance_.store(ext, std::memory_order_release);
+  return ext;
+}
+
+void MallocExtension::Register(MallocExtension* implementation) {
+  InitModuleOnce();
+  // When running under valgrind, our custom malloc is replaced with
+  // valgrind's one and malloc extensions will not work.  (Note:
+  // callers should be responsible for checking that they are the
+  // malloc that is really being run, before calling Register.  This
+  // is just here as an extra sanity check.)
+  // Under compiler-based ThreadSanitizer RunningOnValgrind() returns true,
+  // but we still want to use malloc extensions.
+#ifndef THREAD_SANITIZER
+  if (RunningOnValgrind()) {
+    return;
+  }
+#endif  // #ifndef THREAD_SANITIZER
+  current_instance_.store(implementation, std::memory_order_release);
+}
+void MallocExtension::GetHeapSample(MallocExtensionWriter*) {}
+
+void MallocExtension::GetHeapGrowthStacks(MallocExtensionWriter*) {}
+
+void MallocExtension::GetFragmentationProfile(MallocExtensionWriter*) {}
+
+}  // namespace base_internal
+}  // namespace absl
+
+// These are C shims that work on the current instance.
+
+#define C_SHIM(fn, retval, paramlist, arglist)                           \
+  extern "C" retval MallocExtension_##fn paramlist {                     \
+    return absl::base_internal::MallocExtension::instance()->fn arglist; \
+  }
+
+C_SHIM(VerifyAllMemory, int, (void), ());
+C_SHIM(VerifyNewMemory, int, (const void* p), (p));
+C_SHIM(VerifyArrayNewMemory, int, (const void* p), (p));
+C_SHIM(VerifyMallocMemory, int, (const void* p), (p));
+C_SHIM(
+    MallocMemoryStats, int,
+    (int* blocks, size_t* total,
+     int histogram[absl::base_internal::MallocExtension::kMallocHistogramSize]),
+    (blocks, total, histogram));
+
+C_SHIM(GetStats, void,
+       (char* buffer, int buffer_length), (buffer, buffer_length));
+C_SHIM(GetNumericProperty, int,
+       (const char* property, size_t* value), (property, value));
+C_SHIM(SetNumericProperty, int,
+       (const char* property, size_t value), (property, value));
+
+C_SHIM(MarkThreadIdle, void, (void), ());
+C_SHIM(MarkThreadBusy, void, (void), ());
+C_SHIM(ReleaseFreeMemory, void, (void), ());
+C_SHIM(ReleaseToSystem, void, (size_t num_bytes), (num_bytes));
+C_SHIM(GetEstimatedAllocatedSize, size_t, (size_t size), (size));
+C_SHIM(GetAllocatedSize, size_t, (const void* p), (p));
+
+// Can't use the shim here because of the need to translate the enums.
+extern "C"
+MallocExtension_Ownership MallocExtension_GetOwnership(const void* p) {
+  return static_cast<MallocExtension_Ownership>(
+      absl::base_internal::MallocExtension::instance()->GetOwnership(p));
+}
+
+// Default implementation just returns size. The expectation is that
+// the linked-in malloc implementation might provide an override of
+// this weak function with a better implementation.
+ABSL_ATTRIBUTE_WEAK ABSL_ATTRIBUTE_NOINLINE size_t nallocx(size_t size, int) {
+  return size;
+}
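For readers unfamiliar with the shim macro above: each C_SHIM invocation declares one extern "C" function that forwards to the current MallocExtension instance. For example, `C_SHIM(GetAllocatedSize, size_t, (const void* p), (p))` expands to roughly:

```cpp
extern "C" size_t MallocExtension_GetAllocatedSize(const void* p) {
  return absl::base_internal::MallocExtension::instance()->GetAllocatedSize(p);
}
```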
diff --git a/absl/base/internal/malloc_extension.h b/absl/base/internal/malloc_extension.h
new file mode 100644
index 000000000000..dee4116b207c
--- /dev/null
+++ b/absl/base/internal/malloc_extension.h
@@ -0,0 +1,424 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Extra extensions exported by some malloc implementations.  These
+// extensions are accessed through a virtual base class so an
+// application can link against a malloc that does not implement these
+// extensions, and it will get default versions that do nothing.
+//
+// NOTE FOR C USERS: If you wish to use this functionality from within
+// a C program, see malloc_extension_c.h.
+
+#ifndef ABSL_BASE_INTERNAL_MALLOC_EXTENSION_H_
+#define ABSL_BASE_INTERNAL_MALLOC_EXTENSION_H_
+
+#include <atomic>
+#include <map>
+#include <memory>
+#include <vector>
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string>
+#include "absl/base/macros.h"
+#include "absl/base/port.h"
+namespace absl {
+namespace base_internal {
+
+class MallocExtensionWriter;
+
+// Interface to a pluggable system allocator.
+class SysAllocator {
+ public:
+  SysAllocator() {
+  }
+  virtual ~SysAllocator();
+
+  // Allocates "size" bytes of memory from the system, aligned to "alignment".
+  // Returns null on failure.  Otherwise, the bytes from the returned pointer p
+  // up to and including (p + *actual_size - 1) have been allocated.
+  virtual void* Alloc(size_t size, size_t *actual_size, size_t alignment) = 0;
+
+  // Get a human-readable description of the current state of the
+  // allocator.  The state is stored as a null-terminated std::string in
+  // a prefix of buffer.
+  virtual void GetStats(char* buffer, int length);
+};
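As a rough illustration of the SysAllocator contract (not part of this change), a pluggable allocator backed by anonymous mmap might look like the sketch below. It assumes a Linux-style mmap and alignment requests no larger than the page size, which page-aligned mmap results already satisfy:

```cpp
#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>

#include "absl/base/internal/malloc_extension.h"

class MmapSysAllocator : public absl::base_internal::SysAllocator {
 public:
  void* Alloc(size_t size, size_t* actual_size, size_t alignment) override {
    static_cast<void>(alignment);  // assumed <= page size in this sketch
    const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    const size_t rounded = ((size + page - 1) / page) * page;
    void* p = mmap(nullptr, rounded, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) return nullptr;
    if (actual_size != nullptr) *actual_size = rounded;
    return p;
  }
};
```

Such an allocator would then be handed to a malloc implementation via SetSystemAllocator(), described further down in this header.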
+
+// The default implementations of the following routines do nothing.
+// All implementations should be thread-safe; the current ones
+// (DebugMallocImplementation and TCMallocImplementation) are.
+class MallocExtension {
+ public:
+  virtual ~MallocExtension();
+
+  // Verifies that all blocks are valid.  Returns true if all are; dumps
+  // core otherwise.  A no-op except in debug mode.  Even in debug mode,
+  // they may not do any checking except with certain malloc
+  // implementations.  Thread-safe.
+  virtual bool VerifyAllMemory();
+
+  // Verifies that p was returned by new, has not been deleted, and is
+  // valid.  Returns true if p is good; dumps core otherwise.  A no-op
+  // except in debug mode.  Even in debug mode, may not do any checking
+  // except with certain malloc implementations.  Thread-safe.
+  virtual bool VerifyNewMemory(const void* p);
+
+  // Verifies that p was returned by new[], has not been deleted, and is
+  // valid.  Returns true if p is good; dumps core otherwise.  A no-op
+  // except in debug mode.  Even in debug mode, may not do any checking
+  // except with certain malloc implementations.  Thread-safe.
+  virtual bool VerifyArrayNewMemory(const void* p);
+
+  // Verifies that p was returned by malloc, has not been freed, and is
+  // valid.  Returns true if p is good; dumps core otherwise.  A no-op
+  // except in debug mode.  Even in debug mode, may not do any checking
+  // except with certain malloc implementations.  Thread-safe.
+  virtual bool VerifyMallocMemory(const void* p);
+
+  // If statistics collection is enabled, sets *blocks to be the number of
+  // currently allocated blocks, sets *total to be the total size allocated
+  // over all blocks, sets histogram[n] to be the number of blocks with
+  // size between 2^n-1 and 2^(n+1), and returns true.  Returns false, and
+  // does not change *blocks, *total, or *histogram, if statistics
+  // collection is disabled.
+  //
+  // Note that these statistics reflect memory allocated by new, new[],
+  // malloc(), and realloc(), but not mmap().  They may be larger (if not
+  // all pages have been written to) or smaller (if pages have been
+  // allocated by mmap()) than the total RSS size.  They will always be
+  // smaller than the total virtual memory size.
+  static constexpr int kMallocHistogramSize = 64;
+  virtual bool MallocMemoryStats(int* blocks, size_t* total,
+                                 int histogram[kMallocHistogramSize]);
+
+  // Get a human readable description of the current state of the malloc
+  // data structures.  The state is stored as a null-terminated std::string
+  // in a prefix of "buffer[0,buffer_length-1]".
+  // REQUIRES: buffer_length > 0.
+  virtual void GetStats(char* buffer, int buffer_length);
+
+  // Outputs to "writer" a sample of live objects and the stack traces
+  // that allocated these objects. The output can be passed to pprof.
+  virtual void GetHeapSample(MallocExtensionWriter* writer);
+
+  // Outputs to "writer" the stack traces that caused growth in the
+  // address space size. The output can be passed to "pprof".
+  virtual void GetHeapGrowthStacks(MallocExtensionWriter* writer);
+
+  // Outputs to "writer" a fragmentation profile. The output can be
+  // passed to "pprof".  In particular, the result is a list of
+  // <n,total,stacktrace> tuples that says that "total" bytes in "n"
+  // objects are currently unusable because of fragmentation caused by
+  // an allocation with the specified "stacktrace".
+  virtual void GetFragmentationProfile(MallocExtensionWriter* writer);
+
+  // -------------------------------------------------------------------
+  // Control operations for getting and setting malloc implementation
+  // specific parameters.  Some currently useful properties:
+  //
+  // generic
+  // -------
+  // "generic.current_allocated_bytes"
+  //      Number of bytes currently allocated by application
+  //      This property is not writable.
+  //
+  // "generic.heap_size"
+  //      Number of bytes in the heap ==
+  //            current_allocated_bytes +
+  //            fragmentation +
+  //            freed memory regions
+  //      This property is not writable.
+  //
+  // tcmalloc
+  // --------
+  // "tcmalloc.max_total_thread_cache_bytes"
+  //      Upper limit on total number of bytes stored across all
+  //      per-thread caches.  Default: 16MB.
+  //
+  // "tcmalloc.current_total_thread_cache_bytes"
+  //      Number of bytes used across all thread caches.
+  //      This property is not writable.
+  //
+  // "tcmalloc.pageheap_free_bytes"
+  //      Number of bytes in free, mapped pages in page heap.  These
+  //      bytes can be used to fulfill allocation requests.  They
+  //      always count towards virtual memory usage, and unless the
+  //      underlying memory is swapped out by the OS, they also count
+  //      towards physical memory usage.  This property is not writable.
+  //
+  // "tcmalloc.pageheap_unmapped_bytes"
+  //      Number of bytes in free, unmapped pages in page heap.
+  //      These are bytes that have been released back to the OS,
+  //      possibly by one of the MallocExtension "Release" calls.
+  //      They can be used to fulfill allocation requests, but
+  //      typically incur a page fault.  They always count towards
+  //      virtual memory usage, and depending on the OS, typically
+  //      do not count towards physical memory usage.  This property
+  //      is not writable.
+  //
+  //  "tcmalloc.per_cpu_caches_active"
+  //      Whether tcmalloc is using per-CPU caches (1 or 0 respectively).
+  //      This property is not writable.
+  // -------------------------------------------------------------------
+
+  // Get the named "property"'s value.  Returns true if the property
+  // is known.  Returns false if the property is not a valid property
+  // name for the current malloc implementation.
+  // REQUIRES: property != null; value != null
+  virtual bool GetNumericProperty(const char* property, size_t* value);
+
+  // Set the named "property"'s value.  Returns true if the property
+  // is known and writable.  Returns false if the property is not a
+  // valid property name for the current malloc implementation, or
+  // is not writable.
+  // REQUIRES: property != null
+  virtual bool SetNumericProperty(const char* property, size_t value);
+
+  // Mark the current thread as "idle".  This routine may optionally
+  // be called by threads as a hint to the malloc implementation that
+  // any thread-specific resources should be released.  Note: this may
+  // be an expensive routine, so it should not be called too often.
+  //
+  // Also, if the code that calls this routine will go to sleep for
+  // a while, it should take care to not allocate anything between
+  // the call to this routine and the beginning of the sleep.
+  //
+  // Most malloc implementations ignore this routine.
+  virtual void MarkThreadIdle();
+
+  // Mark the current thread as "busy".  This routine should be
+  // called after MarkThreadIdle() if the thread will now do more
+  // work.  If this method is not called, performance may suffer.
+  //
+  // Most malloc implementations ignore this routine.
+  virtual void MarkThreadBusy();
+
+  // Attempt to free any resources associated with cpu <cpu> (in the sense
+  // of only being usable from that CPU.)  Returns the number of bytes
+  // previously assigned to "cpu" that were freed.  Safe to call from
+  // any processor, not just <cpu>.
+  //
+  // Most malloc implementations ignore this routine (known exceptions:
+  // tcmalloc with --tcmalloc_per_cpu_caches=true.)
+  virtual size_t ReleaseCPUMemory(int cpu);
+
+  // Gets the system allocator used by the malloc extension instance. Returns
+  // null for malloc implementations that do not support pluggable system
+  // allocators.
+  virtual SysAllocator* GetSystemAllocator();
+
+  // Sets the system allocator to the one specified.
+  //
+  // Users could register their own system allocators for malloc implementation
+  // that supports pluggable system allocators, such as TCMalloc, by doing:
+  //   alloc = new MyOwnSysAllocator();
+  //   MallocExtension::instance()->SetSystemAllocator(alloc);
+  // It's up to users whether to fall back (recommended) to the default
+  // system allocator (use GetSystemAllocator() above) or not. The caller is
+  // responsible for any necessary locking.
+  // See tcmalloc/system-alloc.h for the interface and
+  //     tcmalloc/memfs_malloc.cc for the examples.
+  //
+  // It's a no-op for malloc implementations that do not support pluggable
+  // system allocators.
+  virtual void SetSystemAllocator(SysAllocator *a);
+
+  // Try to release num_bytes of free memory back to the operating
+  // system for reuse.  Use this extension with caution -- to get this
+  // memory back may require faulting pages back in by the OS, and
+  // that may be slow.  (Currently only implemented in tcmalloc.)
+  virtual void ReleaseToSystem(size_t num_bytes);
+
+  // Same as ReleaseToSystem() but release as much memory as possible.
+  virtual void ReleaseFreeMemory();
+
+  // Sets the rate at which we release unused memory to the system.
+  // Zero means we never release memory back to the system.  Increase
+  // this flag to return memory faster; decrease it to return memory
+  // slower.  Reasonable rates are in the range [0,10].  (Currently
+  // only implemented in tcmalloc).
+  virtual void SetMemoryReleaseRate(double rate);
+
+  // Gets the release rate.  Returns a value < 0 if unknown.
+  virtual double GetMemoryReleaseRate();
+
+  // Returns the estimated number of bytes that will be allocated for
+  // a request of "size" bytes.  This is an estimate: an allocation of
+  // SIZE bytes may reserve more bytes, but will never reserve less.
+  // (Currently only implemented in tcmalloc, other implementations
+  // always return SIZE.)
+  // This is equivalent to malloc_good_size() in OS X.
+  virtual size_t GetEstimatedAllocatedSize(size_t size);
+
+  // Returns the actual number N of bytes reserved by tcmalloc for the
+  // pointer p.  This number may be equal to or greater than the
+  // number of bytes requested when p was allocated.
+  //
+  // This routine is just useful for statistics collection.  The
+  // client must *not* read or write from the extra bytes that are
+  // indicated by this call.
+  //
+  // Example, suppose the client gets memory by calling
+  //    p = malloc(10)
+  // and GetAllocatedSize(p) returns 16.  The client must only use the
+  // first 10 bytes p[0..9], and not attempt to read or write p[10..15].
+  //
+  // p must have been allocated by this malloc implementation, must
+  // not be an interior pointer -- that is, must be exactly the
+  // pointer returned by malloc() et al., not some offset from that
+  // -- and should not have been freed yet.  p may be null.
+  // (Currently only implemented in tcmalloc; other implementations
+  // will return 0.)
+  virtual size_t GetAllocatedSize(const void* p);
+
+  // Returns kOwned if this malloc implementation allocated the memory
+  // pointed to by p, or kNotOwned if some other malloc implementation
+  // allocated it or p is null.  May also return kUnknownOwnership if
+  // the malloc implementation does not keep track of ownership.
+  // REQUIRES: p must be a value returned from a previous call to
+  // malloc(), calloc(), realloc(), memalign(), posix_memalign(),
+  // valloc(), pvalloc(), new, or new[], and must refer to memory that
+  // is currently allocated (so, for instance, you should not pass in
+  // a pointer after having called free() on it).
+  enum Ownership {
+    // NOTE: Enum values MUST be kept in sync with the version in
+    // malloc_extension_c.h
+    kUnknownOwnership = 0,
+    kOwned,
+    kNotOwned
+  };
+  virtual Ownership GetOwnership(const void* p);
+
+  // The current malloc implementation.  Always non-null.
+  static MallocExtension* instance() {
+    InitModuleOnce();
+    return current_instance_.load(std::memory_order_acquire);
+  }
+
+  // Change the malloc implementation.  Typically called by the
+  // malloc implementation during initialization.
+  static void Register(MallocExtension* implementation);
+
+  // Type used by GetProperties.  See comment on GetProperties.
+  struct Property {
+    size_t value;
+    // Stores breakdown of the property value bucketed by object size.
+    struct Bucket {
+      size_t min_object_size;
+      size_t max_object_size;
+      size_t size;
+    };
+    // Empty unless detailed info was asked for and this type has buckets
+    std::vector<Bucket> buckets;
+  };
+
+  // Type used by GetProperties.  See comment on GetProperties.
+  enum StatLevel { kSummary, kDetailed };
+
+  // Stores in *result detailed statistics about the malloc
+  // implementation. *result will be a map keyed by the name of
+  // the statistic. Each statistic has at least a "value" field.
+  //
+  // Some statistics may also contain an array of buckets if
+  // level==kDetailed and the "value" can be subdivided
+  // into different buckets for different object sizes.  If
+  // such detailed statistics are not available, Property::buckets
+  // will be empty.  Otherwise Property::buckets will contain
+  // potentially many entries.  For each bucket b, b.value
+  // will count the value contributed by objects in the range
+  // [b.min_object_size, b.max_object_size].
+  //
+  // Common across malloc implementations:
+  //  generic.bytes_in_use_by_app  -- Bytes currently in use by application
+  //  generic.physical_memory_used -- Overall (including malloc internals)
+  //  generic.virtual_memory_used  -- Overall (including malloc internals)
+  //
+  // Tcmalloc specific properties
+  //  tcmalloc.cpu_free            -- Bytes in per-cpu free-lists
+  //  tcmalloc.thread_cache_free   -- Bytes in per-thread free-lists
+  //  tcmalloc.transfer_cache      -- Bytes in cross-thread transfer caches
+  //  tcmalloc.central_cache_free  -- Bytes in central cache
+  //  tcmalloc.page_heap_free      -- Bytes in page heap
+  //  tcmalloc.page_heap_unmapped  -- Bytes in page heap (no backing phys. mem)
+  //  tcmalloc.metadata_bytes      -- Used by internal data structures
+  //  tcmalloc.thread_cache_count  -- Number of thread caches in use
+  //
+  // Debug allocator
+  //  debug.free_queue             -- Recently freed objects
+  virtual void GetProperties(StatLevel level,
+                             std::map<std::string, Property>* result);
+ private:
+  static MallocExtension* InitModule();
+
+  static void InitModuleOnce() {
+    // Pointer stored here so heap leak checker will consider the default
+    // instance reachable, even if current_instance_ is later overridden by
+    // MallocExtension::Register().
+    ABSL_ATTRIBUTE_UNUSED static MallocExtension* default_instance =
+        InitModule();
+  }
+
+  static std::atomic<MallocExtension*> current_instance_;
+};
+
+// Base class that can handle output generated by GetHeapSample() and
+// GetHeapGrowthStacks().  Use the available subclass or roll your
+// own.  Useful if you want explicit control over the type of output
+// buffer used (e.g. IOBuffer, Cord, etc.)
+class MallocExtensionWriter {
+ public:
+  virtual ~MallocExtensionWriter() {}
+  virtual void Write(const char* buf, int len) = 0;
+ protected:
+  MallocExtensionWriter() {}
+  MallocExtensionWriter(const MallocExtensionWriter&) = delete;
+  MallocExtensionWriter& operator=(const MallocExtensionWriter&) = delete;
+};
+
+// A subclass that writes to the std::string "out".  NOTE: The generated
+// data is *appended* to "*out".  I.e., the old contents of "*out" are
+// preserved.
+class StringMallocExtensionWriter : public MallocExtensionWriter {
+ public:
+  explicit StringMallocExtensionWriter(std::string* out) : out_(out) {}
+  virtual void Write(const char* buf, int len) {
+    out_->append(buf, len);
+  }
+
+ private:
+  std::string* const out_;
+  StringMallocExtensionWriter(const StringMallocExtensionWriter&) = delete;
+  StringMallocExtensionWriter& operator=(const StringMallocExtensionWriter&) =
+      delete;
+};
+
+}  // namespace base_internal
+}  // namespace absl
+
+// The nallocx function allocates no memory, but it performs the same size
+// computation as the malloc function, and returns the real size of the
+// allocation that would result from the equivalent malloc function call.
+// Default weak implementation returns size unchanged, but tcmalloc overrides it
+// and returns rounded up size. See the following link for details:
+// http://www.unix.com/man-page/freebsd/3/nallocx/
+extern "C" size_t nallocx(size_t size, int flags);
+
+#ifndef MALLOCX_LG_ALIGN
+#define MALLOCX_LG_ALIGN(la) (la)
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_MALLOC_EXTENSION_H_
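A short usage sketch of the interfaces above. Which properties exist depends on the linked malloc; the default MallocExtension returns false from GetNumericProperty and an empty map from GetProperties, so the output is only illustrative:

```cpp
#include <cstddef>
#include <cstdio>
#include <map>
#include <string>

#include "absl/base/internal/malloc_extension.h"

int main() {
  using absl::base_internal::MallocExtension;
  MallocExtension* ext = MallocExtension::instance();  // always non-null

  size_t in_use = 0;
  if (ext->GetNumericProperty("generic.current_allocated_bytes", &in_use)) {
    std::printf("in use by app: %zu bytes\n", in_use);
  }

  std::map<std::string, MallocExtension::Property> props;
  ext->GetProperties(MallocExtension::kSummary, &props);
  for (const auto& kv : props) {
    std::printf("%s = %zu\n", kv.first.c_str(), kv.second.value);
  }
}
```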
diff --git a/absl/base/internal/malloc_extension_c.h b/absl/base/internal/malloc_extension_c.h
new file mode 100644
index 000000000000..5afc84a43584
--- /dev/null
+++ b/absl/base/internal/malloc_extension_c.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2017 The Abseil Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+
+ * C shims for the C++ malloc_extension.h.  See malloc_extension.h for
+ * details.  Note these C shims always work on
+ * MallocExtension::instance(); it is not possible to have more than
+ * one MallocExtension object in C applications.
+ */
+
+#ifndef ABSL_BASE_INTERNAL_MALLOC_EXTENSION_C_H_
+#define ABSL_BASE_INTERNAL_MALLOC_EXTENSION_C_H_
+
+#include <stddef.h>
+#include <sys/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define kMallocExtensionHistogramSize 64
+
+int MallocExtension_VerifyAllMemory(void);
+int MallocExtension_VerifyNewMemory(const void* p);
+int MallocExtension_VerifyArrayNewMemory(const void* p);
+int MallocExtension_VerifyMallocMemory(const void* p);
+int MallocExtension_MallocMemoryStats(int* blocks, size_t* total,
+                                      int histogram[kMallocExtensionHistogramSize]);
+
+void MallocExtension_GetStats(char* buffer, int buffer_length);
+
+/* TODO(csilvers): write a C version of these routines, that perhaps
+ * takes a function ptr and a void *.
+ */
+/* void MallocExtension_GetHeapSample(MallocExtensionWriter* result); */
+/* void MallocExtension_GetHeapGrowthStacks(MallocExtensionWriter* result); */
+
+int MallocExtension_GetNumericProperty(const char* property, size_t* value);
+int MallocExtension_SetNumericProperty(const char* property, size_t value);
+void MallocExtension_MarkThreadIdle(void);
+void MallocExtension_MarkThreadBusy(void);
+void MallocExtension_ReleaseToSystem(size_t num_bytes);
+void MallocExtension_ReleaseFreeMemory(void);
+size_t MallocExtension_GetEstimatedAllocatedSize(size_t size);
+size_t MallocExtension_GetAllocatedSize(const void* p);
+
+/*
+ * NOTE: These enum values MUST be kept in sync with the version in
+ *       malloc_extension.h
+ */
+typedef enum {
+  MallocExtension_kUnknownOwnership = 0,
+  MallocExtension_kOwned,
+  MallocExtension_kNotOwned
+} MallocExtension_Ownership;
+
+MallocExtension_Ownership MallocExtension_GetOwnership(const void* p);
+
+
+#ifdef __cplusplus
+}   // extern "C"
+#endif
+
+#endif  /* ABSL_BASE_INTERNAL_MALLOC_EXTENSION_C_H_ */
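The C entry points declared above map one-to-one onto the C++ methods and always act on MallocExtension::instance(). A small sketch of using them; with the default implementation, GetStats produces an empty string and GetNumericProperty reports nothing:

```cpp
#include <stddef.h>
#include <stdio.h>

#include "absl/base/internal/malloc_extension_c.h"

int main(void) {
  char buf[1024];
  MallocExtension_GetStats(buf, (int)sizeof(buf));
  printf("malloc stats:\n%s\n", buf);

  size_t heap_size;
  if (MallocExtension_GetNumericProperty("generic.heap_size", &heap_size)) {
    printf("heap size: %zu bytes\n", heap_size);
  }
  return 0;
}
```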
diff --git a/absl/base/internal/malloc_extension_test.cc b/absl/base/internal/malloc_extension_test.cc
new file mode 100644
index 000000000000..246875a030ef
--- /dev/null
+++ b/absl/base/internal/malloc_extension_test.cc
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2017 The Abseil Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <cstdlib>
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/malloc_extension.h"
+#include "absl/base/internal/malloc_extension_c.h"
+
+namespace absl {
+namespace base_internal {
+namespace {
+
+TEST(MallocExtension, MallocExtension) {
+  void* a = malloc(1000);
+
+  size_t cxx_bytes_used, c_bytes_used;
+  if (!MallocExtension::instance()->GetNumericProperty(
+          "generic.current_allocated_bytes", &cxx_bytes_used)) {
+    EXPECT_TRUE(ABSL_MALLOC_EXTENSION_TEST_ALLOW_MISSING_EXTENSION);
+  } else {
+    ASSERT_TRUE(MallocExtension::instance()->GetNumericProperty(
+        "generic.current_allocated_bytes", &cxx_bytes_used));
+    ASSERT_TRUE(MallocExtension_GetNumericProperty(
+        "generic.current_allocated_bytes", &c_bytes_used));
+#ifndef MEMORY_SANITIZER
+    EXPECT_GT(cxx_bytes_used, 1000);
+    EXPECT_GT(c_bytes_used, 1000);
+#endif
+
+    EXPECT_TRUE(MallocExtension::instance()->VerifyAllMemory());
+    EXPECT_TRUE(MallocExtension_VerifyAllMemory());
+
+    EXPECT_EQ(MallocExtension::kOwned,
+              MallocExtension::instance()->GetOwnership(a));
+    // TODO(csilvers): this relies on undocumented behavior that
+    // GetOwnership works on stack-allocated variables.  Use a better test.
+    EXPECT_EQ(MallocExtension::kNotOwned,
+              MallocExtension::instance()->GetOwnership(&cxx_bytes_used));
+    EXPECT_EQ(MallocExtension::kNotOwned,
+              MallocExtension::instance()->GetOwnership(nullptr));
+    EXPECT_GE(MallocExtension::instance()->GetAllocatedSize(a), 1000);
+    // This is just a sanity check.  If we allocated too much, tcmalloc is
+    // broken
+    EXPECT_LE(MallocExtension::instance()->GetAllocatedSize(a), 5000);
+    EXPECT_GE(MallocExtension::instance()->GetEstimatedAllocatedSize(1000),
+              1000);
+    for (int i = 0; i < 10; ++i) {
+      void* p = malloc(i);
+      EXPECT_GE(MallocExtension::instance()->GetAllocatedSize(p),
+                MallocExtension::instance()->GetEstimatedAllocatedSize(i));
+      free(p);
+    }
+
+    // Check the c-shim version too.
+    EXPECT_EQ(MallocExtension_kOwned, MallocExtension_GetOwnership(a));
+    EXPECT_EQ(MallocExtension_kNotOwned,
+              MallocExtension_GetOwnership(&cxx_bytes_used));
+    EXPECT_EQ(MallocExtension_kNotOwned, MallocExtension_GetOwnership(nullptr));
+    EXPECT_GE(MallocExtension_GetAllocatedSize(a), 1000);
+    EXPECT_LE(MallocExtension_GetAllocatedSize(a), 5000);
+    EXPECT_GE(MallocExtension_GetEstimatedAllocatedSize(1000), 1000);
+  }
+
+  free(a);
+}
+
+// Verify that the .cc file and .h file have the same enum values.
+TEST(GetOwnership, EnumValuesEqualForCAndCXX) {
+  EXPECT_EQ(static_cast<int>(MallocExtension::kUnknownOwnership),
+            static_cast<int>(MallocExtension_kUnknownOwnership));
+  EXPECT_EQ(static_cast<int>(MallocExtension::kOwned),
+            static_cast<int>(MallocExtension_kOwned));
+  EXPECT_EQ(static_cast<int>(MallocExtension::kNotOwned),
+            static_cast<int>(MallocExtension_kNotOwned));
+}
+
+TEST(nallocx, SaneBehavior) {
+  for (size_t size = 0; size < 64 * 1024; ++size) {
+    size_t alloc_size = nallocx(size, 0);
+    EXPECT_LE(size, alloc_size) << "size is " << size;
+    EXPECT_LE(alloc_size, std::max(size + 100, 2 * size)) << "size is " << size;
+  }
+}
+
+}  // namespace
+}  // namespace base_internal
+}  // namespace absl
diff --git a/absl/base/internal/malloc_hook.cc b/absl/base/internal/malloc_hook.cc
new file mode 100644
index 000000000000..d5d227a5f881
--- /dev/null
+++ b/absl/base/internal/malloc_hook.cc
@@ -0,0 +1,611 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/config.h"
+
+#if ABSL_HAVE_MMAP
+// Disable the glibc prototype of mremap(), as older versions of the
+// system headers define this function with only four arguments,
+// whereas newer versions allow an optional fifth argument:
+#define mremap glibc_mremap
+#include <sys/mman.h>
+#undef mremap
+#endif
+
+#include <cstddef>
+#include <cstdint>
+#include <algorithm>
+
+#include "absl/base/call_once.h"
+#include "absl/base/casts.h"
+#include "absl/base/internal/malloc_hook.h"
+#include "absl/base/internal/malloc_hook_invoke.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+
+// __THROW is defined in glibc systems.  It means, counter-intuitively,
+// "This function will never throw an exception."  It's an optional
+// optimization tool, but we may need to use it to match glibc prototypes.
+#ifndef __THROW    // I guess we're not on a glibc system
+# define __THROW   // __THROW is just an optimization, so ok to make it ""
+#endif
+
+namespace absl {
+namespace base_internal {
+namespace {
+
+void RemoveInitialHooksAndCallInitializers();  // below.
+
+absl::once_flag once;
+
+// These hooks are installed in MallocHook as the only initial hooks.  The first
+// hook that is called will run RemoveInitialHooksAndCallInitializers (see the
+// definition below) and then redispatch to any malloc hooks installed by
+// RemoveInitialHooksAndCallInitializers.
+//
+// Note(llib): there is a possibility of a race in the event that there are
+// multiple threads running before the first allocation.  This is pretty
+// difficult to achieve, but if it is then multiple threads may concurrently do
+// allocations.  The first caller will call
+// RemoveInitialHooksAndCallInitializers via one of the initial hooks.  A
+// concurrent allocation may, depending on timing either:
+// * still have its initial malloc hook installed, run that and block on waiting
+//   for the first caller to finish its call to
+//   RemoveInitialHooksAndCallInitializers, and proceed normally.
+// * occur some time during the RemoveInitialHooksAndCallInitializers call, at
+//   which point there could be no initial hooks and the subsequent hooks that
+//   are about to be set up by RemoveInitialHooksAndCallInitializers haven't
+//   been installed yet.  I think the worst we can get is that some allocations
+//   will not get reported to some hooks set by the initializers called from
+//   RemoveInitialHooksAndCallInitializers.
+
+void InitialNewHook(const void* ptr, size_t size) {
+  absl::call_once(once, RemoveInitialHooksAndCallInitializers);
+  MallocHook::InvokeNewHook(ptr, size);
+}
+
+void InitialPreMMapHook(const void* start,
+                               size_t size,
+                               int protection,
+                               int flags,
+                               int fd,
+                               off_t offset) {
+  absl::call_once(once, RemoveInitialHooksAndCallInitializers);
+  MallocHook::InvokePreMmapHook(start, size, protection, flags, fd, offset);
+}
+
+void InitialPreSbrkHook(ptrdiff_t increment) {
+  absl::call_once(once, RemoveInitialHooksAndCallInitializers);
+  MallocHook::InvokePreSbrkHook(increment);
+}
+
+// This function is called at most once by one of the above initial malloc
+// hooks.  It removes all initial hooks and initializes all other clients that
+// want to get control at the very first memory allocation.  The initializers
+// may assume that the initial malloc hooks have been removed.  The initializers
+// may set up malloc hooks and allocate memory.
+void RemoveInitialHooksAndCallInitializers() {
+  ABSL_RAW_CHECK(MallocHook::RemoveNewHook(&InitialNewHook), "");
+  ABSL_RAW_CHECK(MallocHook::RemovePreMmapHook(&InitialPreMMapHook), "");
+  ABSL_RAW_CHECK(MallocHook::RemovePreSbrkHook(&InitialPreSbrkHook), "");
+}
+
+}  // namespace
+}  // namespace base_internal
+}  // namespace absl
+
+namespace absl {
+namespace base_internal {
+
+// This lock is shared between all implementations of HookList::Add & Remove.
+// The potential for contention is very small.  This needs to be a SpinLock and
+// not a Mutex since it's possible for Mutex locking to allocate memory (e.g.,
+// per-thread allocation in debug builds), which could cause infinite recursion.
+static absl::base_internal::SpinLock hooklist_spinlock(
+    absl::base_internal::kLinkerInitialized);
+
+template <typename T>
+bool HookList<T>::Add(T value_as_t) {
+  if (value_as_t == T()) {
+    return false;
+  }
+  absl::base_internal::SpinLockHolder l(&hooklist_spinlock);
+  // Find the first slot in data that is 0.
+  int index = 0;
+  while ((index < kHookListMaxValues) &&
+         (priv_data[index].load(std::memory_order_relaxed) != 0)) {
+    ++index;
+  }
+  if (index == kHookListMaxValues) {
+    return false;
+  }
+  int prev_num_hooks = priv_end.load(std::memory_order_acquire);
+  priv_data[index].store(reinterpret_cast<intptr_t>(value_as_t),
+                         std::memory_order_release);
+  if (prev_num_hooks <= index) {
+    priv_end.store(index + 1, std::memory_order_release);
+  }
+  return true;
+}
+
+template <typename T>
+bool HookList<T>::Remove(T value_as_t) {
+  if (value_as_t == T()) {
+    return false;
+  }
+  absl::base_internal::SpinLockHolder l(&hooklist_spinlock);
+  int hooks_end = priv_end.load(std::memory_order_acquire);
+  int index = 0;
+  while (index < hooks_end &&
+         value_as_t != reinterpret_cast<T>(
+                           priv_data[index].load(std::memory_order_acquire))) {
+    ++index;
+  }
+  if (index == hooks_end) {
+    return false;
+  }
+  priv_data[index].store(0, std::memory_order_release);
+  if (hooks_end == index + 1) {
+    // Adjust hooks_end down to the lowest possible value.
+    hooks_end = index;
+    while ((hooks_end > 0) &&
+           (priv_data[hooks_end - 1].load(std::memory_order_acquire) == 0)) {
+      --hooks_end;
+    }
+    priv_end.store(hooks_end, std::memory_order_release);
+  }
+  return true;
+}
+
+template <typename T>
+int HookList<T>::Traverse(T* output_array, int n) const {
+  int hooks_end = priv_end.load(std::memory_order_acquire);
+  int actual_hooks_end = 0;
+  for (int i = 0; i < hooks_end && n > 0; ++i) {
+    T data = reinterpret_cast<T>(priv_data[i].load(std::memory_order_acquire));
+    if (data != T()) {
+      *output_array++ = data;
+      ++actual_hooks_end;
+      --n;
+    }
+  }
+  return actual_hooks_end;
+}
+
+// Initialize a HookList (optionally with the given initial_value in index 0).
+#define INIT_HOOK_LIST { {0}, {{}} }
+#define INIT_HOOK_LIST_WITH_VALUE(initial_value) \
+  { {1}, { {reinterpret_cast<intptr_t>(initial_value)} } }
+
+// Explicit instantiation for malloc_hook_test.cc.  This ensures all the methods
+// are instantiated.
+template struct HookList<MallocHook::NewHook>;
+
+HookList<MallocHook::NewHook> new_hooks_ =
+    INIT_HOOK_LIST_WITH_VALUE(&InitialNewHook);
+HookList<MallocHook::DeleteHook> delete_hooks_ = INIT_HOOK_LIST;
+HookList<MallocHook::SampledNewHook> sampled_new_hooks_ = INIT_HOOK_LIST;
+HookList<MallocHook::SampledDeleteHook> sampled_delete_hooks_ = INIT_HOOK_LIST;
+HookList<MallocHook::PreMmapHook> premmap_hooks_ =
+    INIT_HOOK_LIST_WITH_VALUE(&InitialPreMMapHook);
+HookList<MallocHook::MmapHook> mmap_hooks_ = INIT_HOOK_LIST;
+HookList<MallocHook::MunmapHook> munmap_hooks_ = INIT_HOOK_LIST;
+HookList<MallocHook::MremapHook> mremap_hooks_ = INIT_HOOK_LIST;
+HookList<MallocHook::PreSbrkHook> presbrk_hooks_ =
+    INIT_HOOK_LIST_WITH_VALUE(InitialPreSbrkHook);
+HookList<MallocHook::SbrkHook> sbrk_hooks_ = INIT_HOOK_LIST;
+
+// These lists contain either 0 or 1 hooks.
+HookList<MallocHook::MmapReplacement> mmap_replacement_ = INIT_HOOK_LIST;
+HookList<MallocHook::MunmapReplacement> munmap_replacement_ = INIT_HOOK_LIST;
+
+#undef INIT_HOOK_LIST_WITH_VALUE
+#undef INIT_HOOK_LIST
+
+}  // namespace base_internal
+}  // namespace absl
+
+// These are available as C bindings as well as C++, hence their
+// definition outside the MallocHook class.
+extern "C"
+int MallocHook_AddNewHook(MallocHook_NewHook hook) {
+  return absl::base_internal::new_hooks_.Add(hook);
+}
+
+extern "C"
+int MallocHook_RemoveNewHook(MallocHook_NewHook hook) {
+  return absl::base_internal::new_hooks_.Remove(hook);
+}
+
+extern "C"
+int MallocHook_AddDeleteHook(MallocHook_DeleteHook hook) {
+  return absl::base_internal::delete_hooks_.Add(hook);
+}
+
+extern "C"
+int MallocHook_RemoveDeleteHook(MallocHook_DeleteHook hook) {
+  return absl::base_internal::delete_hooks_.Remove(hook);
+}
+
+extern "C" int MallocHook_AddSampledNewHook(MallocHook_SampledNewHook hook) {
+  return absl::base_internal::sampled_new_hooks_.Add(hook);
+}
+
+extern "C" int MallocHook_RemoveSampledNewHook(MallocHook_SampledNewHook hook) {
+  return absl::base_internal::sampled_new_hooks_.Remove(hook);
+}
+
+extern "C" int MallocHook_AddSampledDeleteHook(
+    MallocHook_SampledDeleteHook hook) {
+  return absl::base_internal::sampled_delete_hooks_.Add(hook);
+}
+
+extern "C" int MallocHook_RemoveSampledDeleteHook(
+    MallocHook_SampledDeleteHook hook) {
+  return absl::base_internal::sampled_delete_hooks_.Remove(hook);
+}
+
+extern "C"
+int MallocHook_AddPreMmapHook(MallocHook_PreMmapHook hook) {
+  return absl::base_internal::premmap_hooks_.Add(hook);
+}
+
+extern "C"
+int MallocHook_RemovePreMmapHook(MallocHook_PreMmapHook hook) {
+  return absl::base_internal::premmap_hooks_.Remove(hook);
+}
+
+extern "C"
+int MallocHook_SetMmapReplacement(MallocHook_MmapReplacement hook) {
+  // NOTE this is a best effort CHECK. Concurrent sets could succeed since
+  // this test is outside of the Add spin lock.
+  ABSL_RAW_CHECK(absl::base_internal::mmap_replacement_.empty(),
+                 "Only one MMapReplacement is allowed.");
+  return absl::base_internal::mmap_replacement_.Add(hook);
+}
+
+extern "C"
+int MallocHook_RemoveMmapReplacement(MallocHook_MmapReplacement hook) {
+  return absl::base_internal::mmap_replacement_.Remove(hook);
+}
+
+extern "C"
+int MallocHook_AddMmapHook(MallocHook_MmapHook hook) {
+  return absl::base_internal::mmap_hooks_.Add(hook);
+}
+
+extern "C"
+int MallocHook_RemoveMmapHook(MallocHook_MmapHook hook) {
+  return absl::base_internal::mmap_hooks_.Remove(hook);
+}
+
+extern "C"
+int MallocHook_AddMunmapHook(MallocHook_MunmapHook hook) {
+  return absl::base_internal::munmap_hooks_.Add(hook);
+}
+
+extern "C"
+int MallocHook_RemoveMunmapHook(MallocHook_MunmapHook hook) {
+  return absl::base_internal::munmap_hooks_.Remove(hook);
+}
+
+extern "C"
+int MallocHook_SetMunmapReplacement(MallocHook_MunmapReplacement hook) {
+  // NOTE this is a best effort CHECK. Concurrent sets could succeed since
+  // this test is outside of the Add spin lock.
+  ABSL_RAW_CHECK(absl::base_internal::munmap_replacement_.empty(),
+                 "Only one MunmapReplacement is allowed.");
+  return absl::base_internal::munmap_replacement_.Add(hook);
+}
+
+extern "C"
+int MallocHook_RemoveMunmapReplacement(MallocHook_MunmapReplacement hook) {
+  return absl::base_internal::munmap_replacement_.Remove(hook);
+}
+
+extern "C"
+int MallocHook_AddMremapHook(MallocHook_MremapHook hook) {
+  return absl::base_internal::mremap_hooks_.Add(hook);
+}
+
+extern "C"
+int MallocHook_RemoveMremapHook(MallocHook_MremapHook hook) {
+  return absl::base_internal::mremap_hooks_.Remove(hook);
+}
+
+extern "C"
+int MallocHook_AddPreSbrkHook(MallocHook_PreSbrkHook hook) {
+  return absl::base_internal::presbrk_hooks_.Add(hook);
+}
+
+extern "C"
+int MallocHook_RemovePreSbrkHook(MallocHook_PreSbrkHook hook) {
+  return absl::base_internal::presbrk_hooks_.Remove(hook);
+}
+
+extern "C"
+int MallocHook_AddSbrkHook(MallocHook_SbrkHook hook) {
+  return absl::base_internal::sbrk_hooks_.Add(hook);
+}
+
+extern "C"
+int MallocHook_RemoveSbrkHook(MallocHook_SbrkHook hook) {
+  return absl::base_internal::sbrk_hooks_.Remove(hook);
+}
+
+namespace absl {
+namespace base_internal {
+
+// Note: embedding the function calls inside the traversal of HookList would be
+// very confusing, as it is legal for a hook to remove itself and add other
+// hooks.  Doing traversal first, and then calling the hooks ensures we only
+// call the hooks registered at the start.
+#define INVOKE_HOOKS(HookType, hook_list, args)                    \
+  do {                                                             \
+    HookType hooks[kHookListMaxValues];                            \
+    int num_hooks = hook_list.Traverse(hooks, kHookListMaxValues); \
+    for (int i = 0; i < num_hooks; ++i) {                          \
+      (*hooks[i]) args;                                            \
+    }                                                              \
+  } while (0)
+
+// There should only be one replacement. Return the result of the first
+// one, or false if there is none.
+#define INVOKE_REPLACEMENT(HookType, hook_list, args)              \
+  do {                                                             \
+    HookType hooks[kHookListMaxValues];                            \
+    int num_hooks = hook_list.Traverse(hooks, kHookListMaxValues); \
+    return (num_hooks > 0 && (*hooks[0])args);                     \
+  } while (0)
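Like C_SHIM in malloc_extension.cc, these macros are mechanical. The first use below, `INVOKE_HOOKS(NewHook, new_hooks_, (ptr, size))`, expands (with NewHook resolving to MallocHook::NewHook inside the member function) to roughly:

```cpp
do {
  MallocHook::NewHook hooks[kHookListMaxValues];
  int num_hooks = new_hooks_.Traverse(hooks, kHookListMaxValues);
  for (int i = 0; i < num_hooks; ++i) {
    (*hooks[i])(ptr, size);  // snapshot taken above, so self-removal is safe
  }
} while (0);
```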
+
+void MallocHook::InvokeNewHookSlow(const void* ptr, size_t size) {
+  INVOKE_HOOKS(NewHook, new_hooks_, (ptr, size));
+}
+
+void MallocHook::InvokeDeleteHookSlow(const void* ptr) {
+  INVOKE_HOOKS(DeleteHook, delete_hooks_, (ptr));
+}
+
+void MallocHook::InvokeSampledNewHookSlow(const SampledAlloc* sampled_alloc) {
+  INVOKE_HOOKS(SampledNewHook, sampled_new_hooks_, (sampled_alloc));
+}
+
+void MallocHook::InvokeSampledDeleteHookSlow(AllocHandle handle) {
+  INVOKE_HOOKS(SampledDeleteHook, sampled_delete_hooks_, (handle));
+}
+
+void MallocHook::InvokePreMmapHookSlow(const void* start,
+                                       size_t size,
+                                       int protection,
+                                       int flags,
+                                       int fd,
+                                       off_t offset) {
+  INVOKE_HOOKS(PreMmapHook, premmap_hooks_, (start, size, protection, flags, fd,
+                                            offset));
+}
+
+void MallocHook::InvokeMmapHookSlow(const void* result,
+                                    const void* start,
+                                    size_t size,
+                                    int protection,
+                                    int flags,
+                                    int fd,
+                                    off_t offset) {
+  INVOKE_HOOKS(MmapHook, mmap_hooks_, (result, start, size, protection, flags,
+                                       fd, offset));
+}
+
+bool MallocHook::InvokeMmapReplacementSlow(const void* start,
+                                           size_t size,
+                                           int protection,
+                                           int flags,
+                                           int fd,
+                                           off_t offset,
+                                           void** result) {
+  INVOKE_REPLACEMENT(MmapReplacement, mmap_replacement_,
+                      (start, size, protection, flags, fd, offset, result));
+}
+
+void MallocHook::InvokeMunmapHookSlow(const void* start, size_t size) {
+  INVOKE_HOOKS(MunmapHook, munmap_hooks_, (start, size));
+}
+
+bool MallocHook::InvokeMunmapReplacementSlow(const void* start,
+                                             size_t size,
+                                             int* result) {
+  INVOKE_REPLACEMENT(MunmapReplacement, munmap_replacement_,
+                     (start, size, result));
+}
+
+void MallocHook::InvokeMremapHookSlow(const void* result,
+                                      const void* old_addr,
+                                      size_t old_size,
+                                      size_t new_size,
+                                      int flags,
+                                      const void* new_addr) {
+  INVOKE_HOOKS(MremapHook, mremap_hooks_, (result, old_addr, old_size, new_size,
+                                           flags, new_addr));
+}
+
+void MallocHook::InvokePreSbrkHookSlow(ptrdiff_t increment) {
+  INVOKE_HOOKS(PreSbrkHook, presbrk_hooks_, (increment));
+}
+
+void MallocHook::InvokeSbrkHookSlow(const void* result, ptrdiff_t increment) {
+  INVOKE_HOOKS(SbrkHook, sbrk_hooks_, (result, increment));
+}
+
+#undef INVOKE_HOOKS
+#undef INVOKE_REPLACEMENT
+
+}  // namespace base_internal
+}  // namespace absl
+
+ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(google_malloc);
+ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(google_malloc);
+// actual functions are in debugallocation.cc or tcmalloc.cc
+ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(malloc_hook);
+ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(malloc_hook);
+// actual functions are in this file, malloc_hook.cc, and low_level_alloc.cc
+ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(blink_malloc);
+ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(blink_malloc);
+// actual functions are in third_party/blink_headless/.../{PartitionAlloc,
+// FastMalloc}.cpp.
+
+#define ADDR_IN_ATTRIBUTE_SECTION(addr, name)                         \
+  (reinterpret_cast<uintptr_t>(ABSL_ATTRIBUTE_SECTION_START(name)) <= \
+       reinterpret_cast<uintptr_t>(addr) &&                           \
+   reinterpret_cast<uintptr_t>(addr) <                                \
+       reinterpret_cast<uintptr_t>(ABSL_ATTRIBUTE_SECTION_STOP(name)))
+
+// Return true iff 'caller' is a return address within a function
+// that calls one of our hooks via MallocHook::Invoke*.
+// A helper for GetCallerStackTrace.
+static inline bool InHookCaller(const void* caller) {
+  return ADDR_IN_ATTRIBUTE_SECTION(caller, google_malloc) ||
+         ADDR_IN_ATTRIBUTE_SECTION(caller, malloc_hook) ||
+         ADDR_IN_ATTRIBUTE_SECTION(caller, blink_malloc);
+  // We can use one section for everything except tcmalloc_or_debug
+  // due to its special linkage mode, which prevents merging of the sections.
+}
+
+#undef ADDR_IN_ATTRIBUTE_SECTION
+
+static absl::once_flag in_hook_caller_once;
+
+static void InitializeInHookCaller() {
+  ABSL_INIT_ATTRIBUTE_SECTION_VARS(google_malloc);
+  if (ABSL_ATTRIBUTE_SECTION_START(google_malloc) ==
+      ABSL_ATTRIBUTE_SECTION_STOP(google_malloc)) {
+    ABSL_RAW_LOG(ERROR,
+                 "google_malloc section is missing, "
+                 "thus InHookCaller is broken!");
+  }
+  ABSL_INIT_ATTRIBUTE_SECTION_VARS(malloc_hook);
+  if (ABSL_ATTRIBUTE_SECTION_START(malloc_hook) ==
+      ABSL_ATTRIBUTE_SECTION_STOP(malloc_hook)) {
+    ABSL_RAW_LOG(ERROR,
+                 "malloc_hook section is missing, "
+                 "thus InHookCaller is broken!");
+  }
+  ABSL_INIT_ATTRIBUTE_SECTION_VARS(blink_malloc);
+  // The blink_malloc section is only expected to be present in binaries
+  // linking against the blink rendering engine in third_party/blink_headless.
+}
+
+// We can improve behavior/compactness of this function
+// if we pass a generic test function (with a generic arg)
+// into the implementations for get_stack_trace_fn instead of the skip_count.
+extern "C" int MallocHook_GetCallerStackTrace(
+    void** result, int max_depth, int skip_count,
+    MallocHook_GetStackTraceFn get_stack_trace_fn) {
+  if (!ABSL_HAVE_ATTRIBUTE_SECTION) {
+    // Fall back to get_stack_trace_fn and good old but fragile frame skip
+    // counts.
+    // Note: this path is inaccurate when a hook is not called directly by an
+    // allocation function but is daisy-chained through another hook; search
+    // for MallocHook::(Get|Set|Invoke)* to find such cases.
+#ifdef NDEBUG
+    return get_stack_trace_fn(result, max_depth, skip_count);
+#else
+    return get_stack_trace_fn(result, max_depth, skip_count + 1);
+#endif
+    // due to -foptimize-sibling-calls in opt mode
+    // there's no need for extra frame skip here then
+  }
+  absl::call_once(in_hook_caller_once, InitializeInHookCaller);
+  // MallocHook caller determination via InHookCaller works, use it:
+  static const int kMaxSkip = 32 + 6 + 3;
+    // Constant tuned to do just one get_stack_trace_fn call below in practice
+    // and not get many frames that we don't actually need:
+    // currently max passed max_depth is 32,
+    // max passed/needed skip_count is 6
+    // and 3 is to account for some hook daisy chaining.
+  static const int kStackSize = kMaxSkip + 1;
+  void* stack[kStackSize];
+  int depth =
+      get_stack_trace_fn(stack, kStackSize, 1);  // skip this function frame
+  if (depth == 0)
+    // silently propagate cases when get_stack_trace_fn does not work
+    return 0;
+  for (int i = depth - 1; i >= 0; --i) {  // stack[0] is our immediate caller
+    if (InHookCaller(stack[i])) {
+      i += 1;  // skip hook caller frame
+      depth -= i;  // correct depth
+      if (depth > max_depth) depth = max_depth;
+      std::copy(stack + i, stack + i + depth, result);
+      if (depth < max_depth && depth + i == kStackSize) {
+        // get frames for the missing depth
+        depth += get_stack_trace_fn(result + depth, max_depth - depth,
+                                    1 + kStackSize);
+      }
+      return depth;
+    }
+  }
+  ABSL_RAW_LOG(WARNING,
+               "Hooked allocator frame not found, returning empty trace");
+  // If this happens try increasing kMaxSkip
+  // or else something must be wrong with InHookCaller,
+  // e.g. for every section used in InHookCaller
+  // all functions in that section must be inside the same library.
+  return 0;
+}
+
+// On systems where we know how, we override mmap/munmap/mremap/sbrk
+// to provide support for calling the related hooks (in addition,
+// of course, to doing what these functions normally do).
+
+// The ABSL_MALLOC_HOOK_MMAP_DISABLE macro disables mmap/munmap interceptors.
+// Dynamic tools that intercept mmap/munmap can't be linked together with
+// malloc_hook interceptors. We disable the malloc_hook interceptors for the
+// widely-used dynamic tools, i.e. ThreadSanitizer and MemorySanitizer, but
+// still allow users to disable this in special cases that can't be easily
+// detected during compilation, via -DABSL_MALLOC_HOOK_MMAP_DISABLE or #define
+// ABSL_MALLOC_HOOK_MMAP_DISABLE.
+// TODO(b/62370839): Remove MALLOC_HOOK_MMAP_DISABLE in CROSSTOOL for tsan and
+// msan config; Replace MALLOC_HOOK_MMAP_DISABLE with
+// ABSL_MALLOC_HOOK_MMAP_DISABLE for other special cases.
+#if !defined(THREAD_SANITIZER) && !defined(MEMORY_SANITIZER) && \
+    !defined(ABSL_MALLOC_HOOK_MMAP_DISABLE) && defined(__linux__)
+#include "absl/base/internal/malloc_hook_mmap_linux.inc"
+
+#elif ABSL_HAVE_MMAP
+
+namespace absl {
+namespace base_internal {
+
+// static
+void* MallocHook::UnhookedMMap(void* start, size_t size, int protection,
+                               int flags, int fd, off_t offset) {
+  void* result;
+  if (!MallocHook::InvokeMmapReplacement(
+          start, size, protection, flags, fd, offset, &result)) {
+    result = mmap(start, size, protection, flags, fd, offset);
+  }
+  return result;
+}
+
+// static
+int MallocHook::UnhookedMUnmap(void* start, size_t size) {
+  int result;
+  if (!MallocHook::InvokeMunmapReplacement(start, size, &result)) {
+    result = munmap(start, size);
+  }
+  return result;
+}
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif
diff --git a/absl/base/internal/malloc_hook.h b/absl/base/internal/malloc_hook.h
new file mode 100644
index 000000000000..ed5cf2e65dd6
--- /dev/null
+++ b/absl/base/internal/malloc_hook.h
@@ -0,0 +1,333 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Some of our malloc implementations can invoke the following hooks whenever
+// memory is allocated or deallocated.  MallocHook is thread-safe, and things
+// you do before calling AddFooHook(MyHook) are visible to any resulting calls
+// to MyHook.  Hooks must be thread-safe.  If you write:
+//
+//   CHECK(MallocHook::AddNewHook(&MyNewHook));
+//
+// MyNewHook will be invoked in subsequent calls in the current thread, but
+// there are no guarantees on when it might be invoked in other threads.
+//
+// There are a limited number of slots available for each hook type.  Add*Hook
+// will return false if there are no slots available.  Remove*Hook will return
+// false if the given hook was not already installed.
+//
+// The order in which individual hooks are called in Invoke*Hook is undefined.
+//
+// It is safe for a hook to remove itself within Invoke*Hook and add other
+// hooks.  Any hooks added inside a hook invocation (for the same hook type)
+// will not be invoked for the current invocation.
+//
+// One important user of these hooks is the heap profiler.
+//
+// CAVEAT: If you add new MallocHook::Invoke* calls then those calls must be
+// directly in the code of the (de)allocation function that is provided to the
+// user and that function must have an ABSL_ATTRIBUTE_SECTION(malloc_hook)
+// attribute.
+//
+// Note: the Invoke*Hook() functions are defined in malloc_hook-inl.h.  If you
+// need to invoke a hook (which you shouldn't unless you're part of tcmalloc),
+// be sure to #include malloc_hook-inl.h in addition to malloc_hook.h.
+//
+// NOTE FOR C USERS: If you want to use malloc_hook functionality from
+// a C program, #include malloc_hook_c.h instead of this file.
+//
+// IWYU pragma: private, include "base/malloc_hook.h"
+
+#ifndef ABSL_BASE_INTERNAL_MALLOC_HOOK_H_
+#define ABSL_BASE_INTERNAL_MALLOC_HOOK_H_
+
+#include <sys/types.h>
+#include <cstddef>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/malloc_hook_c.h"
+#include "absl/base/port.h"
+
+namespace absl {
+namespace base_internal {
+
+// Note: malloc_hook_c.h defines MallocHook_*Hook and
+// MallocHook_{Add,Remove}*Hook.  The versions of these inside the MallocHook
+// class are defined in terms of the malloc_hook_c versions.  See malloc_hook_c.h
+// for details of these types/functions.
+
+class MallocHook {
+ public:
+  // The NewHook is invoked whenever an object is being allocated.
+  // Object pointer and size are passed in.
+  // It may be passed a null pointer if the allocator returned null.
+  typedef MallocHook_NewHook NewHook;
+  inline static bool AddNewHook(NewHook hook) {
+    return MallocHook_AddNewHook(hook);
+  }
+  inline static bool RemoveNewHook(NewHook hook) {
+    return MallocHook_RemoveNewHook(hook);
+  }
+  inline static void InvokeNewHook(const void* ptr, size_t size);
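+
+  // A minimal hook might look like this (an illustrative sketch only;
+  // `MyNewHook` is a hypothetical user-provided function):
+  //
+  //   void MyNewHook(const void* ptr, size_t size) {
+  //     // Must be thread-safe and must not allocate; ptr may be null.
+  //   }
+  //   ...
+  //   MallocHook::AddNewHook(&MyNewHook);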
+
+  // The DeleteHook is invoked whenever an object is being deallocated.
+  // Object pointer is passed in.
+  // It may be passed a null pointer if the caller is trying to delete null.
+  typedef MallocHook_DeleteHook DeleteHook;
+  inline static bool AddDeleteHook(DeleteHook hook) {
+    return MallocHook_AddDeleteHook(hook);
+  }
+  inline static bool RemoveDeleteHook(DeleteHook hook) {
+    return MallocHook_RemoveDeleteHook(hook);
+  }
+  inline static void InvokeDeleteHook(const void* ptr);
+
+  // The SampledNewHook is invoked for some subset of object allocations
+  // according to the sampling policy of an allocator such as tcmalloc.
+  // SampledAlloc has the following fields:
+  //  * AllocHandle handle: to be set to an effectively unique value (in this
+  //    process) by allocator.
+  //  * size_t allocated_size: space actually used by allocator to host
+  //    the object.
+  //  * int stack_depth and const void* stack: invocation stack for
+  //    the allocation.
+  // The allocator invoking the hook should record the handle value and later
+  // call InvokeSampledDeleteHook() with that value.
+  typedef MallocHook_SampledNewHook SampledNewHook;
+  typedef MallocHook_SampledAlloc SampledAlloc;
+  inline static bool AddSampledNewHook(SampledNewHook hook) {
+    return MallocHook_AddSampledNewHook(hook);
+  }
+  inline static bool RemoveSampledNewHook(SampledNewHook hook) {
+    return MallocHook_RemoveSampledNewHook(hook);
+  }
+  inline static void InvokeSampledNewHook(const SampledAlloc* sampled_alloc);
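+
+  // For instance, a sampling hook might forward the sample data like this
+  // (a sketch only; `RecordSample` is a hypothetical user function):
+  //
+  //   void MySampledNewHook(const MallocHook::SampledAlloc* s) {
+  //     RecordSample(s->handle, s->allocated_size, s->stack, s->stack_depth);
+  //   }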
+
+  // The SampledDeleteHook is invoked whenever an object previously chosen
+  // by an allocator for sampling is being deallocated.
+  // The handle identifying the object, as previously passed to
+  // InvokeSampledNewHook(), is passed in.
+  typedef MallocHook_SampledDeleteHook SampledDeleteHook;
+  typedef MallocHook_AllocHandle AllocHandle;
+  inline static bool AddSampledDeleteHook(SampledDeleteHook hook) {
+    return MallocHook_AddSampledDeleteHook(hook);
+  }
+  inline static bool RemoveSampledDeleteHook(SampledDeleteHook hook) {
+    return MallocHook_RemoveSampledDeleteHook(hook);
+  }
+  inline static void InvokeSampledDeleteHook(AllocHandle handle);
+
+  // The PreMmapHook is invoked with mmap's or mmap64's arguments just
+  // before the mmap/mmap64 call is actually made.  Such a hook may be useful
+  // in memory limited contexts, to catch allocations that will exceed
+  // a memory limit, and take outside actions to increase that limit.
+  typedef MallocHook_PreMmapHook PreMmapHook;
+  inline static bool AddPreMmapHook(PreMmapHook hook) {
+    return MallocHook_AddPreMmapHook(hook);
+  }
+  inline static bool RemovePreMmapHook(PreMmapHook hook) {
+    return MallocHook_RemovePreMmapHook(hook);
+  }
+  inline static void InvokePreMmapHook(const void* start,
+                                       size_t size,
+                                       int protection,
+                                       int flags,
+                                       int fd,
+                                       off_t offset);
+
+  // The MmapReplacement is invoked with mmap's arguments and place to put the
+  // result into after the PreMmapHook but before the mmap/mmap64 call is
+  // actually made.
+  // The MmapReplacement should return true if it handled the call, or false
+  // if it is still necessary to call mmap/mmap64.
+  // This should be used only by experts, and users must be
+  // extremely careful to avoid recursive calls to mmap. The replacement
+  // should be async signal safe.
+  // Only one MmapReplacement is supported. After setting an MmapReplacement
+  // you must call RemoveMmapReplacement before calling SetMmapReplacement
+  // again.
+  typedef MallocHook_MmapReplacement MmapReplacement;
+  inline static bool SetMmapReplacement(MmapReplacement hook) {
+    return MallocHook_SetMmapReplacement(hook);
+  }
+  inline static bool RemoveMmapReplacement(MmapReplacement hook) {
+    return MallocHook_RemoveMmapReplacement(hook);
+  }
+  inline static bool InvokeMmapReplacement(const void* start,
+                                           size_t size,
+                                           int protection,
+                                           int flags,
+                                           int fd,
+                                           off_t offset,
+                                           void** result);
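+
+  // For illustration, a pass-through replacement that never handles the call
+  // might look like this (a sketch only; a real replacement must be async
+  // signal safe):
+  //
+  //   int MyMmapReplacement(const void* start, size_t size, int protection,
+  //                         int flags, int fd, off_t offset, void** result) {
+  //     return 0;  // false: let the real mmap/mmap64 run.
+  //   }
+  //   MallocHook::SetMmapReplacement(&MyMmapReplacement);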
+
+
+  // The MmapHook is invoked with mmap's return value and arguments whenever
+  // a region of memory has been just mapped.
+  // It may be passed MAP_FAILED if the mmap failed.
+  typedef MallocHook_MmapHook MmapHook;
+  inline static bool AddMmapHook(MmapHook hook) {
+    return MallocHook_AddMmapHook(hook);
+  }
+  inline static bool RemoveMmapHook(MmapHook hook) {
+    return MallocHook_RemoveMmapHook(hook);
+  }
+  inline static void InvokeMmapHook(const void* result,
+                                    const void* start,
+                                    size_t size,
+                                    int protection,
+                                    int flags,
+                                    int fd,
+                                    off_t offset);
+
+  // The MunmapReplacement is invoked with munmap's arguments and place to put
+  // the result into just before the munmap call is actually made.
+  // The MunmapReplacement should return true if it handled the call, or false
+  // if it is still necessary to call munmap.
+  // This should be used only by experts. The replacement should be
+  // async signal safe.
+  // Only one MunmapReplacement is supported. After setting an
+  // MunmapReplacement you must call RemoveMunmapReplacement before
+  // calling SetMunmapReplacement again.
+  typedef MallocHook_MunmapReplacement MunmapReplacement;
+  inline static bool SetMunmapReplacement(MunmapReplacement hook) {
+    return MallocHook_SetMunmapReplacement(hook);
+  }
+  inline static bool RemoveMunmapReplacement(MunmapReplacement hook) {
+    return MallocHook_RemoveMunmapReplacement(hook);
+  }
+  inline static bool InvokeMunmapReplacement(const void* start,
+                                             size_t size,
+                                             int* result);
+
+  // The MunmapHook is invoked with munmap's arguments just before the munmap
+  // call is actually made.
+  // TODO(maxim): Rename this to PreMunmapHook for consistency with PreMmapHook
+  // and PreSbrkHook.
+  typedef MallocHook_MunmapHook MunmapHook;
+  inline static bool AddMunmapHook(MunmapHook hook) {
+    return MallocHook_AddMunmapHook(hook);
+  }
+  inline static bool RemoveMunmapHook(MunmapHook hook) {
+    return MallocHook_RemoveMunmapHook(hook);
+  }
+  inline static void InvokeMunmapHook(const void* start, size_t size);
+
+  // The MremapHook is invoked with mremap's return value and arguments
+  // whenever a region of memory has been just remapped.
+  typedef MallocHook_MremapHook MremapHook;
+  inline static bool AddMremapHook(MremapHook hook) {
+    return MallocHook_AddMremapHook(hook);
+  }
+  inline static bool RemoveMremapHook(MremapHook hook) {
+    return MallocHook_RemoveMremapHook(hook);
+  }
+  inline static void InvokeMremapHook(const void* result,
+                                      const void* old_addr,
+                                      size_t old_size,
+                                      size_t new_size,
+                                      int flags,
+                                      const void* new_addr);
+
+  // The PreSbrkHook is invoked with sbrk's argument just before sbrk is called
+  // -- except when the increment is 0.  This is because sbrk(0) is often called
+  // to get the current top of the heap, and is not actually a
+  // memory-allocation call.  It may be useful in memory-limited contexts,
+  // to catch allocations that will exceed the limit and take outside
+  // actions to increase such a limit.
+  typedef MallocHook_PreSbrkHook PreSbrkHook;
+  inline static bool AddPreSbrkHook(PreSbrkHook hook) {
+    return MallocHook_AddPreSbrkHook(hook);
+  }
+  inline static bool RemovePreSbrkHook(PreSbrkHook hook) {
+    return MallocHook_RemovePreSbrkHook(hook);
+  }
+  inline static void InvokePreSbrkHook(ptrdiff_t increment);
+
+  // The SbrkHook is invoked with sbrk's result and argument whenever sbrk
+  // has just executed -- except when the increment is 0.
+  // This is because sbrk(0) is often called to get the current top of the heap,
+  // and is not actually a memory-allocation call.
+  typedef MallocHook_SbrkHook SbrkHook;
+  inline static bool AddSbrkHook(SbrkHook hook) {
+    return MallocHook_AddSbrkHook(hook);
+  }
+  inline static bool RemoveSbrkHook(SbrkHook hook) {
+    return MallocHook_RemoveSbrkHook(hook);
+  }
+  inline static void InvokeSbrkHook(const void* result, ptrdiff_t increment);
+
+  // Pointer to an absl::GetStackTrace implementation, following the API in
+  // base/stacktrace.h.
+  using GetStackTraceFn = int (*)(void**, int, int);
+
+  // Get the current stack trace.  Try to skip all routines up to and
+  // including the caller of MallocHook::Invoke*.
+  // Use "skip_count" (similarly to absl::GetStackTrace from stacktrace.h)
+  // as a hint about how many routines to skip if better information
+  // is not available.
+  // Stack trace is filled into *result up to the size of max_depth.
+  // The actual number of stack frames filled is returned.
+  inline static int GetCallerStackTrace(void** result, int max_depth,
+                                        int skip_count,
+                                        GetStackTraceFn get_stack_trace_fn) {
+    return MallocHook_GetCallerStackTrace(result, max_depth, skip_count,
+                                          get_stack_trace_fn);
+  }
+
+#if ABSL_HAVE_MMAP
+  // Unhooked versions of mmap() and munmap().  These should be used
+  // only by experts, since they bypass heap checking, etc.
+  // Note: These do not run hooks, but they still use the MmapReplacement
+  // and MunmapReplacement.
+  static void* UnhookedMMap(void* start, size_t size, int protection, int flags,
+                            int fd, off_t offset);
+  static int UnhookedMUnmap(void* start, size_t size);
+#endif
+
+ private:
+  // Slow path versions of Invoke*Hook.
+  static void InvokeNewHookSlow(const void* ptr,
+                                size_t size) ABSL_ATTRIBUTE_COLD;
+  static void InvokeDeleteHookSlow(const void* ptr) ABSL_ATTRIBUTE_COLD;
+  static void InvokeSampledNewHookSlow(const SampledAlloc* sampled_alloc)
+      ABSL_ATTRIBUTE_COLD;
+  static void InvokeSampledDeleteHookSlow(AllocHandle handle)
+      ABSL_ATTRIBUTE_COLD;
+  static void InvokePreMmapHookSlow(const void* start, size_t size,
+                                    int protection, int flags, int fd,
+                                    off_t offset) ABSL_ATTRIBUTE_COLD;
+  static void InvokeMmapHookSlow(const void* result, const void* start,
+                                 size_t size, int protection, int flags, int fd,
+                                 off_t offset) ABSL_ATTRIBUTE_COLD;
+  static bool InvokeMmapReplacementSlow(const void* start, size_t size,
+                                        int protection, int flags, int fd,
+                                        off_t offset,
+                                        void** result) ABSL_ATTRIBUTE_COLD;
+  static void InvokeMunmapHookSlow(const void* ptr,
+                                   size_t size) ABSL_ATTRIBUTE_COLD;
+  static bool InvokeMunmapReplacementSlow(const void* ptr, size_t size,
+                                          int* result) ABSL_ATTRIBUTE_COLD;
+  static void InvokeMremapHookSlow(const void* result, const void* old_addr,
+                                   size_t old_size, size_t new_size, int flags,
+                                   const void* new_addr) ABSL_ATTRIBUTE_COLD;
+  static void InvokePreSbrkHookSlow(ptrdiff_t increment) ABSL_ATTRIBUTE_COLD;
+  static void InvokeSbrkHookSlow(const void* result,
+                                 ptrdiff_t increment) ABSL_ATTRIBUTE_COLD;
+};
+
+}  // namespace base_internal
+}  // namespace absl
+#endif  // ABSL_BASE_INTERNAL_MALLOC_HOOK_H_
diff --git a/absl/base/internal/malloc_hook_c.h b/absl/base/internal/malloc_hook_c.h
new file mode 100644
index 000000000000..03b84ca627d2
--- /dev/null
+++ b/absl/base/internal/malloc_hook_c.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2017 The Abseil Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * C shims for the C++ malloc_hook.h.  See malloc_hook.h for details
+ * on how to use these.
+ */
+#ifndef ABSL_BASE_INTERNAL_MALLOC_HOOK_C_H_
+#define ABSL_BASE_INTERNAL_MALLOC_HOOK_C_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif  /* __cplusplus */
+
+/* Get the current stack trace.  Try to skip all routines up to and
+ * including the caller of MallocHook::Invoke*.
+ * Use "skip_count" (similarly to absl::GetStackTrace from stacktrace.h)
+ * as a hint about how many routines to skip if better information
+ * is not available.
+ */
+typedef int (*MallocHook_GetStackTraceFn)(void**, int, int);
+int MallocHook_GetCallerStackTrace(void** result, int max_depth, int skip_count,
+                                   MallocHook_GetStackTraceFn fn);
+
+/* All the MallocHook_{Add,Remove}*Hook functions below return 1 on success
+ * and 0 on failure.
+ */
+
+typedef void (*MallocHook_NewHook)(const void* ptr, size_t size);
+int MallocHook_AddNewHook(MallocHook_NewHook hook);
+int MallocHook_RemoveNewHook(MallocHook_NewHook hook);
+
+typedef void (*MallocHook_DeleteHook)(const void* ptr);
+int MallocHook_AddDeleteHook(MallocHook_DeleteHook hook);
+int MallocHook_RemoveDeleteHook(MallocHook_DeleteHook hook);
+
+typedef int64_t MallocHook_AllocHandle;
+typedef struct {
+  /* See malloc_hook.h  for documentation for this struct. */
+  MallocHook_AllocHandle handle;
+  size_t allocated_size;
+  int stack_depth;
+  const void* stack;
+} MallocHook_SampledAlloc;
+typedef void (*MallocHook_SampledNewHook)(
+    const MallocHook_SampledAlloc* sampled_alloc);
+int MallocHook_AddSampledNewHook(MallocHook_SampledNewHook hook);
+int MallocHook_RemoveSampledNewHook(MallocHook_SampledNewHook hook);
+
+typedef void (*MallocHook_SampledDeleteHook)(MallocHook_AllocHandle handle);
+int MallocHook_AddSampledDeleteHook(MallocHook_SampledDeleteHook hook);
+int MallocHook_RemoveSampledDeleteHook(MallocHook_SampledDeleteHook hook);
+
+typedef void (*MallocHook_PreMmapHook)(const void *start,
+                                       size_t size,
+                                       int protection,
+                                       int flags,
+                                       int fd,
+                                       off_t offset);
+int MallocHook_AddPreMmapHook(MallocHook_PreMmapHook hook);
+int MallocHook_RemovePreMmapHook(MallocHook_PreMmapHook hook);
+
+typedef void (*MallocHook_MmapHook)(const void* result,
+                                    const void* start,
+                                    size_t size,
+                                    int protection,
+                                    int flags,
+                                    int fd,
+                                    off_t offset);
+int MallocHook_AddMmapHook(MallocHook_MmapHook hook);
+int MallocHook_RemoveMmapHook(MallocHook_MmapHook hook);
+
+typedef int (*MallocHook_MmapReplacement)(const void* start,
+                                          size_t size,
+                                          int protection,
+                                          int flags,
+                                          int fd,
+                                          off_t offset,
+                                          void** result);
+int MallocHook_SetMmapReplacement(MallocHook_MmapReplacement hook);
+int MallocHook_RemoveMmapReplacement(MallocHook_MmapReplacement hook);
+
+typedef void (*MallocHook_MunmapHook)(const void* start, size_t size);
+int MallocHook_AddMunmapHook(MallocHook_MunmapHook hook);
+int MallocHook_RemoveMunmapHook(MallocHook_MunmapHook hook);
+
+typedef int (*MallocHook_MunmapReplacement)(const void* start,
+                                            size_t size,
+                                            int* result);
+int MallocHook_SetMunmapReplacement(MallocHook_MunmapReplacement hook);
+int MallocHook_RemoveMunmapReplacement(MallocHook_MunmapReplacement hook);
+
+typedef void (*MallocHook_MremapHook)(const void* result,
+                                      const void* old_addr,
+                                      size_t old_size,
+                                      size_t new_size,
+                                      int flags,
+                                      const void* new_addr);
+int MallocHook_AddMremapHook(MallocHook_MremapHook hook);
+int MallocHook_RemoveMremapHook(MallocHook_MremapHook hook);
+
+typedef void (*MallocHook_PreSbrkHook)(ptrdiff_t increment);
+int MallocHook_AddPreSbrkHook(MallocHook_PreSbrkHook hook);
+int MallocHook_RemovePreSbrkHook(MallocHook_PreSbrkHook hook);
+
+typedef void (*MallocHook_SbrkHook)(const void* result, ptrdiff_t increment);
+int MallocHook_AddSbrkHook(MallocHook_SbrkHook hook);
+int MallocHook_RemoveSbrkHook(MallocHook_SbrkHook hook);
+
+#ifdef __cplusplus
+}  /* extern "C" */
+#endif  /* __cplusplus */
+
+#endif  /* ABSL_BASE_INTERNAL_MALLOC_HOOK_C_H_ */
diff --git a/absl/base/internal/malloc_hook_invoke.h b/absl/base/internal/malloc_hook_invoke.h
new file mode 100644
index 000000000000..c08220cbcf36
--- /dev/null
+++ b/absl/base/internal/malloc_hook_invoke.h
@@ -0,0 +1,198 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// This has the implementation details of malloc_hook that are needed
+// to use malloc_hook inside the tcmalloc system.  It does not hold
+// any of the client-facing calls that are used to add new hooks.
+//
+// IWYU pragma: private, include "base/malloc_hook-inl.h"
+
+#ifndef ABSL_BASE_INTERNAL_MALLOC_HOOK_INVOKE_H_
+#define ABSL_BASE_INTERNAL_MALLOC_HOOK_INVOKE_H_
+
+#include <sys/types.h>
+#include <atomic>
+#include <cstddef>
+
+#include "absl/base/internal/malloc_hook.h"
+
+namespace absl {
+namespace base_internal {
+
+// Maximum of 7 hooks means that HookList is 8 words.
+static constexpr int kHookListMaxValues = 7;
+
+// HookList: a class that provides synchronized insertions and removals and
+// lockless traversal.  Most of the implementation is in malloc_hook.cc.
+template <typename T>
+struct HookList {
+  static_assert(sizeof(T) <= sizeof(intptr_t), "T_should_fit_in_intptr_t");
+
+  // Adds value to the list.  Note that duplicates are allowed.  Thread-safe and
+  // blocking (acquires hooklist_spinlock).  Returns true on success; false
+  // otherwise (failures include invalid value and no space left).
+  bool Add(T value);
+
+  // Removes the first entry matching value from the list.  Thread-safe and
+  // blocking (acquires hooklist_spinlock).  Returns true on success; false
+  // otherwise (failures include invalid value and no value found).
+  bool Remove(T value);
+
+  // Store up to n values of the list in output_array, and return the number of
+  // elements stored.  Thread-safe and non-blocking.  This is fast (one memory
+  // access) if the list is empty.
+  int Traverse(T* output_array, int n) const;
+
+  // Fast inline implementation for fast path of Invoke*Hook.
+  bool empty() const {
+    // empty() is only used as an optimization to determine if we should call
+    // Traverse which has proper acquire loads.  Memory reordering around a
+    // call to empty will either lead to an unnecessary Traverse call, or will
+    // miss invoking hooks, neither of which is a problem.
+    return priv_end.load(std::memory_order_relaxed) == 0;
+  }
+
+  // This internal data is not private so that the class is an aggregate and can
+  // be initialized by the linker.  Don't access this directly.  Use the
+  // INIT_HOOK_LIST macro in malloc_hook.cc.
+
+  // One more than the index of the last valid element in priv_data.  During
+  // 'Remove' this may be past the last valid element in priv_data, but
+  // subsequent values will be 0.
+  std::atomic<int> priv_end;
+  std::atomic<intptr_t> priv_data[kHookListMaxValues];
+};
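+
+// For example, a slow-path Invoke helper can snapshot the installed hooks
+// with Traverse and then call each one (a sketch of the pattern used in
+// malloc_hook.cc, not a verbatim copy; `ptr` and `size` stand for the
+// arguments of the allocation being reported):
+//
+//   MallocHook::NewHook hooks[kHookListMaxValues];
+//   int n = new_hooks_.Traverse(hooks, kHookListMaxValues);
+//   for (int i = 0; i < n; ++i) hooks[i](ptr, size);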
+
+extern template struct HookList<MallocHook::NewHook>;
+
+extern HookList<MallocHook::NewHook> new_hooks_;
+extern HookList<MallocHook::DeleteHook> delete_hooks_;
+extern HookList<MallocHook::SampledNewHook> sampled_new_hooks_;
+extern HookList<MallocHook::SampledDeleteHook> sampled_delete_hooks_;
+extern HookList<MallocHook::PreMmapHook> premmap_hooks_;
+extern HookList<MallocHook::MmapHook> mmap_hooks_;
+extern HookList<MallocHook::MmapReplacement> mmap_replacement_;
+extern HookList<MallocHook::MunmapHook> munmap_hooks_;
+extern HookList<MallocHook::MunmapReplacement> munmap_replacement_;
+extern HookList<MallocHook::MremapHook> mremap_hooks_;
+extern HookList<MallocHook::PreSbrkHook> presbrk_hooks_;
+extern HookList<MallocHook::SbrkHook> sbrk_hooks_;
+
+inline void MallocHook::InvokeNewHook(const void* ptr, size_t size) {
+  if (!absl::base_internal::new_hooks_.empty()) {
+    InvokeNewHookSlow(ptr, size);
+  }
+}
+
+inline void MallocHook::InvokeDeleteHook(const void* ptr) {
+  if (!absl::base_internal::delete_hooks_.empty()) {
+    InvokeDeleteHookSlow(ptr);
+  }
+}
+
+inline void MallocHook::InvokeSampledNewHook(
+    const SampledAlloc* sampled_alloc) {
+  if (!absl::base_internal::sampled_new_hooks_.empty()) {
+    InvokeSampledNewHookSlow(sampled_alloc);
+  }
+}
+
+inline void MallocHook::InvokeSampledDeleteHook(AllocHandle handle) {
+  if (!absl::base_internal::sampled_delete_hooks_.empty()) {
+    InvokeSampledDeleteHookSlow(handle);
+  }
+}
+
+inline void MallocHook::InvokePreMmapHook(const void* start,
+                                          size_t size,
+                                          int protection,
+                                          int flags,
+                                          int fd,
+                                          off_t offset) {
+  if (!absl::base_internal::premmap_hooks_.empty()) {
+    InvokePreMmapHookSlow(start, size, protection, flags, fd, offset);
+  }
+}
+
+inline void MallocHook::InvokeMmapHook(const void* result,
+                                       const void* start,
+                                       size_t size,
+                                       int protection,
+                                       int flags,
+                                       int fd,
+                                       off_t offset) {
+  if (!absl::base_internal::mmap_hooks_.empty()) {
+    InvokeMmapHookSlow(result, start, size, protection, flags, fd, offset);
+  }
+}
+
+inline bool MallocHook::InvokeMmapReplacement(const void* start,
+                                              size_t size,
+                                              int protection,
+                                              int flags,
+                                              int fd,
+                                              off_t offset,
+                                              void** result) {
+  if (!absl::base_internal::mmap_replacement_.empty()) {
+    return InvokeMmapReplacementSlow(start, size,
+                                     protection, flags,
+                                     fd, offset,
+                                     result);
+  }
+  return false;
+}
+
+inline void MallocHook::InvokeMunmapHook(const void* start, size_t size) {
+  if (!absl::base_internal::munmap_hooks_.empty()) {
+    InvokeMunmapHookSlow(start, size);
+  }
+}
+
+inline bool MallocHook::InvokeMunmapReplacement(
+    const void* start, size_t size, int* result) {
+  if (!absl::base_internal::mmap_replacement_.empty()) {
+    return InvokeMunmapReplacementSlow(start, size, result);
+  }
+  return false;
+}
+
+inline void MallocHook::InvokeMremapHook(const void* result,
+                                         const void* old_addr,
+                                         size_t old_size,
+                                         size_t new_size,
+                                         int flags,
+                                         const void* new_addr) {
+  if (!absl::base_internal::mremap_hooks_.empty()) {
+    InvokeMremapHookSlow(result, old_addr, old_size, new_size, flags, new_addr);
+  }
+}
+
+inline void MallocHook::InvokePreSbrkHook(ptrdiff_t increment) {
+  if (!absl::base_internal::presbrk_hooks_.empty() && increment != 0) {
+    InvokePreSbrkHookSlow(increment);
+  }
+}
+
+inline void MallocHook::InvokeSbrkHook(const void* result,
+                                       ptrdiff_t increment) {
+  if (!absl::base_internal::sbrk_hooks_.empty() && increment != 0) {
+    InvokeSbrkHookSlow(result, increment);
+  }
+}
+
+}  // namespace base_internal
+}  // namespace absl
+#endif  // ABSL_BASE_INTERNAL_MALLOC_HOOK_INVOKE_H_
diff --git a/absl/base/internal/malloc_hook_mmap_linux.inc b/absl/base/internal/malloc_hook_mmap_linux.inc
new file mode 100644
index 000000000000..059ded57626d
--- /dev/null
+++ b/absl/base/internal/malloc_hook_mmap_linux.inc
@@ -0,0 +1,236 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// We define mmap() and mmap64(), which somewhat reimplement libc's mmap
+// syscall stubs.  Unfortunately libc only exports the stubs via weak symbols
+// (which we're overriding with our mmap64() and mmap() wrappers) so we can't
+// just call through to them.
+
+#ifndef __linux__
+# error Should only be including malloc_hook_mmap_linux.inc on Linux systems.
+#endif
+
+#include <sys/mman.h>
+#include <sys/types.h>
+#ifdef __BIONIC__
+#include <sys/syscall.h>
+#else
+#include <syscall.h>
+#endif
+
+#include <linux/unistd.h>
+#include <unistd.h>
+#include <cerrno>
+#include <cstdarg>
+#include <cstdint>
+
+#ifdef __mips__
+// Include definitions of the ABI currently in use.
+#ifdef __BIONIC__
+// Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the
+// definitions we need.
+#include <asm/sgidefs.h>
+#else
+#include <sgidefs.h>
+#endif  // __BIONIC__
+#endif  // __mips__
+
+// SYS_mmap, SYS_munmap, and SYS_mremap are not defined in Android.
+#ifdef __BIONIC__
+extern "C" void *__mmap2(void *, size_t, int, int, int, long);
+#if defined(__NR_mmap) && !defined(SYS_mmap)
+#define SYS_mmap __NR_mmap
+#endif
+#ifndef SYS_munmap
+#define SYS_munmap __NR_munmap
+#endif
+#ifndef SYS_mremap
+#define SYS_mremap __NR_mremap
+#endif
+#endif  // __BIONIC__
+
+// Platform specific logic extracted from
+// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h
+static inline void* do_mmap64(void* start, size_t length, int prot,
+                              int flags, int fd, off64_t offset) __THROW {
+#if defined(__i386__) ||                                                \
+  defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) ||                   \
+  (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) ||                \
+  (defined(__PPC__) && !defined(__PPC64__)) ||                          \
+  (defined(__s390__) && !defined(__s390x__))
+  // On these architectures, implement mmap with mmap2.
+  static int pagesize = 0;
+  if (pagesize == 0) {
+    pagesize = getpagesize();
+  }
+  if (offset < 0 || offset % pagesize != 0) {
+    errno = EINVAL;
+    return MAP_FAILED;
+  }
+#ifdef __BIONIC__
+  // SYS_mmap2 has problems on Android API level <= 16.
+  // Workaround by invoking __mmap2() instead.
+  return __mmap2(start, length, prot, flags, fd, offset / pagesize);
+#else
+  return reinterpret_cast<void*>(
+      syscall(SYS_mmap2, start, length, prot, flags, fd,
+              static_cast<off_t>(offset / pagesize)));
+#endif
+#elif defined(__s390x__)
+  // On s390x, mmap() arguments are passed in memory.
+  // Use unsigned long (64 bits on s390x) so pointers and sizes are not
+  // truncated when packed into the argument block.
+  unsigned long buf[6] = {reinterpret_cast<unsigned long>(start),
+                          static_cast<unsigned long>(length),
+                          static_cast<unsigned long>(prot),
+                          static_cast<unsigned long>(flags),
+                          static_cast<unsigned long>(fd),
+                          static_cast<unsigned long>(offset)};
+  return reinterpret_cast<void*>(syscall(SYS_mmap, buf));
+#elif defined(__x86_64__)
+  // The x32 ABI has 32 bit longs, but the syscall interface is 64 bit.
+  // We need to explicitly cast to an unsigned 64 bit type to avoid implicit
+  // sign extension.  We can't cast pointers directly because those are
+  // 32 bits, and gcc will dump ugly warnings about casting from a pointer
+  // to an integer of a different size. We also need to make sure __off64_t
+  // isn't truncated to 32-bits under x32.
+  #define MMAP_SYSCALL_ARG(x) ((uint64_t)(uintptr_t)(x))
+  return reinterpret_cast<void*>(
+      syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length),
+              MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags),
+              MMAP_SYSCALL_ARG(fd), static_cast<uint64_t>(offset)));
+  #undef MMAP_SYSCALL_ARG
+#else  // Remaining 64-bit architectures.
+  static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit");
+  return reinterpret_cast<void*>(
+      syscall(SYS_mmap, start, length, prot, flags, fd, offset));
+#endif
+}
+
+// We use the do_mmap64 abstraction to put MallocHook::InvokeMmapHook
+// calls right into mmap and mmap64, so that the stack frames in the caller's
+// stack are at the same offsets for all the calls of memory allocating
+// functions.
+
+// Put all callers of MallocHook::Invoke* in this module into the malloc_hook
+// section, so that MallocHook::GetCallerStackTrace can function accurately:
+
+// Make sure mmap doesn't get #define'd away by <sys/mman.h>
+# undef mmap
+
+extern "C" {
+ABSL_ATTRIBUTE_SECTION(malloc_hook)
+void* mmap64(void* start, size_t length, int prot, int flags, int fd,
+             off64_t offset) __THROW;
+ABSL_ATTRIBUTE_SECTION(malloc_hook)
+void* mmap(void* start, size_t length, int prot, int flags, int fd,
+           off_t offset) __THROW;
+ABSL_ATTRIBUTE_SECTION(malloc_hook)
+int munmap(void* start, size_t length) __THROW;
+ABSL_ATTRIBUTE_SECTION(malloc_hook)
+void* mremap(void* old_addr, size_t old_size, size_t new_size, int flags,
+             ...) __THROW;
+ABSL_ATTRIBUTE_SECTION(malloc_hook) void* sbrk(ptrdiff_t increment) __THROW;
+}
+
+extern "C" void* mmap64(void *start, size_t length, int prot, int flags,
+                        int fd, off64_t offset) __THROW {
+  absl::base_internal::MallocHook::InvokePreMmapHook(start, length, prot, flags,
+                                                     fd, offset);
+  void *result;
+  if (!absl::base_internal::MallocHook::InvokeMmapReplacement(
+          start, length, prot, flags, fd, offset, &result)) {
+    result = do_mmap64(start, length, prot, flags, fd, offset);
+  }
+  absl::base_internal::MallocHook::InvokeMmapHook(result, start, length, prot,
+                                                  flags, fd, offset);
+  return result;
+}
+
+# if !defined(__USE_FILE_OFFSET64) || !defined(__REDIRECT_NTH)
+
+extern "C" void* mmap(void *start, size_t length, int prot, int flags,
+                      int fd, off_t offset) __THROW {
+  absl::base_internal::MallocHook::InvokePreMmapHook(start, length, prot, flags,
+                                                     fd, offset);
+  void *result;
+  if (!absl::base_internal::MallocHook::InvokeMmapReplacement(
+          start, length, prot, flags, fd, offset, &result)) {
+    result = do_mmap64(start, length, prot, flags, fd,
+                       static_cast<size_t>(offset)); // avoid sign extension
+  }
+  absl::base_internal::MallocHook::InvokeMmapHook(result, start, length, prot,
+                                                  flags, fd, offset);
+  return result;
+}
+
+# endif  // !defined(__USE_FILE_OFFSET64) || !defined(__REDIRECT_NTH)
+
+extern "C" int munmap(void* start, size_t length) __THROW {
+  absl::base_internal::MallocHook::InvokeMunmapHook(start, length);
+  int result;
+  if (!absl::base_internal::MallocHook::InvokeMunmapReplacement(start, length,
+                                                                &result)) {
+    result = syscall(SYS_munmap, start, length);
+  }
+  return result;
+}
+
+extern "C" void* mremap(void* old_addr, size_t old_size, size_t new_size,
+                        int flags, ...) __THROW {
+  va_list ap;
+  va_start(ap, flags);
+  void *new_address = va_arg(ap, void *);
+  va_end(ap);
+  void* result = reinterpret_cast<void*>(
+      syscall(SYS_mremap, old_addr, old_size, new_size, flags, new_address));
+  absl::base_internal::MallocHook::InvokeMremapHook(
+      result, old_addr, old_size, new_size, flags, new_address);
+  return result;
+}
+
+// sbrk cannot be intercepted on Android as there is no mechanism to
+// invoke the original sbrk (since there is no __sbrk as with glibc).
+#if !defined(__BIONIC__)
+// libc's version:
+extern "C" void* __sbrk(ptrdiff_t increment);
+
+extern "C" void* sbrk(ptrdiff_t increment) __THROW {
+  absl::base_internal::MallocHook::InvokePreSbrkHook(increment);
+  void *result = __sbrk(increment);
+  absl::base_internal::MallocHook::InvokeSbrkHook(result, increment);
+  return result;
+}
+#endif  // !defined(__BIONIC__)
+
+namespace absl {
+namespace base_internal {
+
+/*static*/void* MallocHook::UnhookedMMap(void *start, size_t length, int prot,
+                                         int flags, int fd, off_t offset) {
+  void* result;
+  if (!MallocHook::InvokeMmapReplacement(
+          start, length, prot, flags, fd, offset, &result)) {
+    result = do_mmap64(start, length, prot, flags, fd, offset);
+  }
+  return result;
+}
+
+/*static*/int MallocHook::UnhookedMUnmap(void *start, size_t length) {
+  int result;
+  if (!MallocHook::InvokeMunmapReplacement(start, length, &result)) {
+    result = syscall(SYS_munmap, start, length);
+  }
+  return result;
+}
+
+}  // namespace base_internal
+}  // namespace absl
diff --git a/absl/base/internal/per_thread_tls.h b/absl/base/internal/per_thread_tls.h
new file mode 100644
index 000000000000..7397451031b9
--- /dev/null
+++ b/absl/base/internal/per_thread_tls.h
@@ -0,0 +1,48 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
+#define ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
+
+// This header defines two macros:
+// If the platform supports thread-local storage:
+//   ABSL_PER_THREAD_TLS_KEYWORD is the C keyword needed to declare a
+//   thread-local variable
+//   ABSL_PER_THREAD_TLS is 1
+//
+// Otherwise:
+//   ABSL_PER_THREAD_TLS_KEYWORD is empty
+//   ABSL_PER_THREAD_TLS is 0
+//
+// Microsoft C supports thread-local storage.
+// GCC supports it if the appropriate version of glibc is available,
+// which the program can indicate by defining ABSL_HAVE_TLS.
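+//
+// Example usage (illustrative only):
+//
+//   ABSL_PER_THREAD_TLS_KEYWORD int per_thread_counter = 0;
+//
+// On platforms without TLS support (ABSL_PER_THREAD_TLS == 0) the keyword is
+// empty and this declares an ordinary global, so code that needs true
+// per-thread storage should check ABSL_PER_THREAD_TLS first.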
+
+#include "absl/base/port.h"  // For ABSL_HAVE_TLS
+
+#if defined(ABSL_PER_THREAD_TLS)
+#error ABSL_PER_THREAD_TLS cannot be directly set
+#elif defined(ABSL_PER_THREAD_TLS_KEYWORD)
+#error ABSL_PER_THREAD_TLS_KEYWORD cannot be directly set
+#elif defined(ABSL_HAVE_TLS)
+#define ABSL_PER_THREAD_TLS_KEYWORD __thread
+#define ABSL_PER_THREAD_TLS 1
+#elif defined(_MSC_VER)
+#define ABSL_PER_THREAD_TLS_KEYWORD __declspec(thread)
+#define ABSL_PER_THREAD_TLS 1
+#else
+#define ABSL_PER_THREAD_TLS_KEYWORD
+#define ABSL_PER_THREAD_TLS 0
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
diff --git a/absl/base/internal/raw_logging.cc b/absl/base/internal/raw_logging.cc
new file mode 100644
index 000000000000..d197d444a6f4
--- /dev/null
+++ b/absl/base/internal/raw_logging.cc
@@ -0,0 +1,225 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <atomic>
+#include <cassert>
+#include <cstdarg>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/internal/log_severity.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/port.h"
+
+// We know how to perform low-level writes to stderr in POSIX and Windows.  For
+// these platforms, we define the token ABSL_LOW_LEVEL_WRITE_SUPPORTED.
+// Much of raw_logging.cc becomes a no-op when we can't output messages,
+// although a FATAL ABSL_RAW_LOG message will still abort the process.
+
+// ABSL_HAVE_POSIX_WRITE is defined when the platform provides posix write()
+// (as from unistd.h)
+//
+// This preprocessor token is also defined in raw_io.cc.  If you need to copy
+// this, consider moving both to config.h instead.
+#if defined(__linux__) || defined(__APPLE__) || defined(__Fuchsia__) || \
+    defined(__GENCLAVE__)
+#include <unistd.h>
+#define ABSL_HAVE_POSIX_WRITE 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_POSIX_WRITE
+#endif
+
+// ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall
+//   syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len);
+// for low level operations that want to avoid libc.
+#if defined(__linux__) && !defined(__ANDROID__)
+#include <sys/syscall.h>
+#define ABSL_HAVE_SYSCALL_WRITE 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_SYSCALL_WRITE
+#endif
+
+#ifdef _WIN32
+#include <io.h>
+#define ABSL_HAVE_RAW_IO 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_RAW_IO
+#endif
+
+// TODO(gfalcon): We want raw-logging to work on as many platforms as possible.
+// Explicitly #error out when not ABSL_LOW_LEVEL_WRITE_SUPPORTED, except for a
+// whitelisted set of platforms for which we expect not to be able to raw log.
+
+ABSL_CONST_INIT static absl::base_internal::AtomicHook<
+    absl::raw_logging_internal::LogPrefixHook> log_prefix_hook;
+ABSL_CONST_INIT static absl::base_internal::AtomicHook<
+    absl::raw_logging_internal::AbortHook> abort_hook;
+
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+static const char kTruncated[] = " ... (message truncated)\n";
+
+// vsnprintf the format to the buffer, adjusting *buf and *size to reflect the
+// consumed bytes, and return whether the message fit without truncation.  If
+// truncation occurred, if possible leave room in the buffer for the message
+// kTruncated[].
+inline static bool VADoRawLog(char** buf, int* size,
+                              const char* format, va_list ap) {
+  int n = vsnprintf(*buf, *size, format, ap);
+  bool result = true;
+  if (n < 0 || n > *size) {
+    result = false;
+    if (static_cast<size_t>(*size) > sizeof(kTruncated)) {
+      n = *size - sizeof(kTruncated);  // room for truncation message
+    } else {
+      n = 0;                           // no room for truncation message
+    }
+  }
+  *size -= n;
+  *buf += n;
+  return result;
+}
+#endif  // ABSL_LOW_LEVEL_WRITE_SUPPORTED
+
+static constexpr int kLogBufSize = 3000;
+
+namespace absl {
+namespace raw_logging_internal {
+void SafeWriteToStderr(const char *s, size_t len);
+}  // namespace raw_logging_internal
+}  // namespace absl
+
+namespace {
+
+// CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths
+// that invoke malloc() and getenv() that might acquire some locks.
+// If this becomes a problem we should reimplement a subset of vsnprintf
+// that does not need locks and malloc.
+// E.g. google3/third_party/clearsilver/core/util/snprintf.c
+// looks like such a reimplementation.
+
+// Helper for RawLog below.
+// *DoRawLog writes to *buf of *size and moves them past the written portion.
+// It returns true iff there was no overflow or error.
+bool DoRawLog(char** buf, int* size, const char* format, ...)
+    ABSL_PRINTF_ATTRIBUTE(3, 4);
+bool DoRawLog(char** buf, int* size, const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  int n = vsnprintf(*buf, *size, format, ap);
+  va_end(ap);
+  if (n < 0 || n > *size) return false;
+  *size -= n;
+  *buf += n;
+  return true;
+}
+
+void RawLogVA(absl::LogSeverity severity, const char* file, int line,
+              const char* format, va_list ap) {
+  char buffer[kLogBufSize];
+  char* buf = buffer;
+  int size = sizeof(buffer);
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  bool enabled = true;
+#else
+  bool enabled = false;
+#endif
+
+#ifdef ABSL_MIN_LOG_LEVEL
+  if (static_cast<int>(severity) < ABSL_MIN_LOG_LEVEL &&
+      severity < absl::LogSeverity::kFatal) {
+    enabled = false;
+  }
+#endif
+
+  auto log_prefix_hook_ptr = log_prefix_hook.Load();
+  if (log_prefix_hook_ptr) {
+    enabled = log_prefix_hook_ptr(severity, file, line, &buf, &size);
+  } else {
+    if (enabled) {
+      DoRawLog(&buf, &size, "[%s : %d] RAW: ", file, line);
+    }
+  }
+  const char* const prefix_end = buf;
+
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  if (enabled) {
+    bool no_chop = VADoRawLog(&buf, &size, format, ap);
+    if (no_chop) {
+      DoRawLog(&buf, &size, "\n");
+    } else {
+      DoRawLog(&buf, &size, "%s", kTruncated);
+    }
+    absl::raw_logging_internal::SafeWriteToStderr(buffer, strlen(buffer));
+  }
+#else
+  static_cast<void>(format);
+  static_cast<void>(ap);
+#endif
+
+  // Abort the process after logging a FATAL message, even if the output itself
+  // was suppressed.
+  if (severity == absl::LogSeverity::kFatal) {
+    abort_hook(file, line, buffer, prefix_end, buffer + kLogBufSize);
+    abort();
+  }
+}
+
+}  // namespace
+
+namespace absl {
+namespace raw_logging_internal {
+
+// Writes the provided buffer directly to stderr, in a safe, low-level manner.
+//
+// In POSIX this means calling write(), which is async-signal safe and does
+// not malloc.  If the platform supports the SYS_write syscall, we invoke that
+// directly to side-step any libc interception.
+void SafeWriteToStderr(const char *s, size_t len) {
+#if defined(ABSL_HAVE_SYSCALL_WRITE)
+  syscall(SYS_write, STDERR_FILENO, s, len);
+#elif defined(ABSL_HAVE_POSIX_WRITE)
+  write(STDERR_FILENO, s, len);
+#elif defined(ABSL_HAVE_RAW_IO)
+  _write(/* stderr */ 2, s, len);
+#else
+  // stderr logging unsupported on this platform
+  (void) s;
+  (void) len;
+#endif
+}
+
+void RawLog(absl::LogSeverity severity, const char* file, int line,
+            const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  RawLogVA(severity, file, line, format, ap);
+  va_end(ap);
+}
+
+bool RawLoggingFullySupported() {
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  return true;
+#else  // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  return false;
+#endif  // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
+}
+
+}  // namespace raw_logging_internal
+}  // namespace absl
diff --git a/absl/base/internal/raw_logging.h b/absl/base/internal/raw_logging.h
new file mode 100644
index 000000000000..47cf4373db0b
--- /dev/null
+++ b/absl/base/internal/raw_logging.h
@@ -0,0 +1,129 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Thread-safe logging routines that do not allocate any memory or
+// acquire any locks, and can therefore be used by low-level memory
+// allocation, synchronization, and signal-handling code.
+
+#ifndef ABSL_BASE_INTERNAL_RAW_LOGGING_H_
+#define ABSL_BASE_INTERNAL_RAW_LOGGING_H_
+
+#include "absl/base/internal/log_severity.h"
+#include "absl/base/macros.h"
+#include "absl/base/port.h"
+
+// This is similar to LOG(severity) << format..., but
+// * it is to be used ONLY by low-level modules that can't use normal LOG()
+// * it is designed to be a low-level logger that does not allocate any
+//   memory and does not need any locks, hence:
+// * it logs straight and ONLY to STDERR w/o buffering
+// * it uses an explicit printf-format and arguments list
+// * it will silently chop off really long message strings
+// Usage example:
+//   ABSL_RAW_LOG(ERROR, "Failed foo with %i: %s", status, error);
+// This will print an almost standard log line like this to stderr only:
+//   E0821 211317 file.cc:123] RAW: Failed foo with 22: bad_file
+#define ABSL_RAW_LOG(severity, ...)                                            \
+  do {                                                                         \
+    constexpr const char* absl_raw_logging_internal_basename =                 \
+        ::absl::raw_logging_internal::Basename(__FILE__,                       \
+                                               sizeof(__FILE__) - 1);          \
+    ::absl::raw_logging_internal::RawLog(ABSL_RAW_LOGGING_INTERNAL_##severity, \
+                                         absl_raw_logging_internal_basename,   \
+                                         __LINE__, __VA_ARGS__);               \
+  } while (0)
+
+// Similar to CHECK(condition) << message, but for low-level modules:
+// we use only ABSL_RAW_LOG that does not allocate memory.
+// We do not want to provide args list here to encourage this usage:
+//   if (!cond)  ABSL_RAW_LOG(FATAL, "foo ...", hard_to_compute_args);
+// so that the args are not computed when not needed.
+#define ABSL_RAW_CHECK(condition, message)                             \
+  do {                                                                 \
+    if (ABSL_PREDICT_FALSE(!(condition))) {                            \
+      ABSL_RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \
+    }                                                                  \
+  } while (0)
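+
+// For example (the condition and message here are purely illustrative):
+//   ABSL_RAW_CHECK(ptr != nullptr, "unexpected null pointer");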
+
+#define ABSL_RAW_LOGGING_INTERNAL_INFO ::absl::LogSeverity::kInfo
+#define ABSL_RAW_LOGGING_INTERNAL_WARNING ::absl::LogSeverity::kWarning
+#define ABSL_RAW_LOGGING_INTERNAL_ERROR ::absl::LogSeverity::kError
+#define ABSL_RAW_LOGGING_INTERNAL_FATAL ::absl::LogSeverity::kFatal
+#define ABSL_RAW_LOGGING_INTERNAL_LEVEL(severity) \
+  ::absl::NormalizeLogSeverity(severity)
+
+namespace absl {
+namespace raw_logging_internal {
+
+// Helper function to implement ABSL_RAW_LOG
+// Logs format... at "severity" level, reporting it
+// as called from file:line.
+// This does not allocate memory or acquire locks.
+void RawLog(absl::LogSeverity severity, const char* file, int line,
+            const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5);
+
+// compile-time function to get the "base" filename, that is, the part of
+// a filename after the last "/" or "\" path separator.  The search starts at
+// the end of the string; the second parameter is the length of the string.
+constexpr const char* Basename(const char* fname, int offset) {
+  return offset == 0 || fname[offset - 1] == '/' || fname[offset - 1] == '\\'
+             ? fname + offset
+             : Basename(fname, offset - 1);
+}
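+
+// For example, Basename("dir/file.cc", sizeof("dir/file.cc") - 1) yields a
+// pointer to "file.cc".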
+
+// For testing only.
+// Returns true if raw logging is fully supported. When it is not
+// fully supported, no messages will be emitted, but a log at FATAL
+// severity will cause an abort.
+//
+// TODO(gfalcon): Come up with a better name for this method.
+bool RawLoggingFullySupported();
+
+// Function type for a raw_logging customization hook for suppressing messages
+// by severity, and for writing custom prefixes on non-suppressed messages.
+//
+// The installed hook is called for every raw log invocation.  The message will
+// be logged to stderr only if the hook returns true.  FATAL errors will cause
+// the process to abort, even if writing to stderr is suppressed.  The hook is
+// also provided with an output buffer, where it can write a custom log message
+// prefix.
+//
+// The raw_logging system does not allocate memory or grab locks.  User-provided
+// hooks must avoid these operations, and must not throw exceptions.
+//
+// 'severity' is the severity level of the message being written.
+// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
+// was located.
+// 'buffer' and 'buf_size' are pointers to the buffer and buffer size.  If the
+// hook writes a prefix, it must increment *buffer and decrement *buf_size
+// accordingly.
+using LogPrefixHook = bool (*)(absl::LogSeverity severity, const char* file,
+                               int line, char** buffer, int* buf_size);
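+//
+// A minimal hook sketch (illustrative; MyPrefixHook is a hypothetical name):
+//
+//   bool MyPrefixHook(absl::LogSeverity severity, const char* file, int line,
+//                     char** buffer, int* buf_size) {
+//     int n = snprintf(*buffer, *buf_size, "[myapp] ");
+//     if (n > 0 && n < *buf_size) { *buffer += n; *buf_size -= n; }
+//     return severity >= absl::LogSeverity::kWarning;  // suppress INFO
+//   }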
+
+// Function type for a raw_logging customization hook called to abort a process
+// when a FATAL message is logged.  If the provided AbortHook() returns, the
+// logging system will call abort().
+//
+// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
+// was located.
+// The null-terminated logged message lives in the buffer between 'buf_start'
+// and 'buf_end'.  'prefix_end' points to the first non-prefix character of the
+// buffer (as written by the LogPrefixHook.)
+using AbortHook = void (*)(const char* file, int line, const char* buf_start,
+                           const char* prefix_end, const char* buf_end);
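+//
+// A no-op hook sketch (illustrative; MyAbortHook is a hypothetical name):
+//
+//   void MyAbortHook(const char* file, int line, const char* buf_start,
+//                    const char* prefix_end, const char* buf_end) {
+//     // Flush any custom log sinks here; abort() runs after this returns.
+//   }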
+
+}  // namespace raw_logging_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_RAW_LOGGING_H_
diff --git a/absl/base/internal/scheduling_mode.h b/absl/base/internal/scheduling_mode.h
new file mode 100644
index 000000000000..b7560f30d793
--- /dev/null
+++ b/absl/base/internal/scheduling_mode.h
@@ -0,0 +1,54 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Core interfaces and definitions used by low-level //base interfaces such
+// as SpinLock.
+
+#ifndef ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+#define ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+
+namespace absl {
+namespace base_internal {
+
+// Used to describe how a thread may be scheduled.  Typically associated with
+// the declaration of a resource supporting synchronized access.
+//
+// SCHEDULE_COOPERATIVE_AND_KERNEL:
+// Specifies that when waiting, a cooperative thread (e.g. a Fiber) may
+// reschedule (using base::scheduling semantics); allowing other cooperative
+// threads to proceed.
+//
+// SCHEDULE_KERNEL_ONLY: (Also described as "non-cooperative")
+// Specifies that no cooperative scheduling semantics may be used, even if the
+// current thread is itself cooperatively scheduled.  This means that
+// cooperative threads will NOT allow other cooperative threads to execute in
+// their place while waiting for a resource of this type.  Host operating system
+// semantics (e.g. a futex) may still be used.
+//
+// When optional, clients should strongly prefer SCHEDULE_COOPERATIVE_AND_KERNEL
+// by default.  SCHEDULE_KERNEL_ONLY should only be used for resources on which
+// base::scheduling (e.g. the implementation of a Scheduler) may depend.
+//
+// NOTE: Cooperative resources may not be nested below non-cooperative ones.
+// This means that it is invalid to acquire a SCHEDULE_COOPERATIVE_AND_KERNEL
+// resource if a SCHEDULE_KERNEL_ONLY resource is already held.
+enum SchedulingMode {
+  SCHEDULE_KERNEL_ONLY = 0,         // Allow scheduling only the host OS.
+  SCHEDULE_COOPERATIVE_AND_KERNEL,  // Also allow cooperative scheduling.
+};
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
diff --git a/absl/base/internal/spinlock.cc b/absl/base/internal/spinlock.cc
new file mode 100644
index 000000000000..6257bfcec6f8
--- /dev/null
+++ b/absl/base/internal/spinlock.cc
@@ -0,0 +1,243 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/spinlock.h"
+
+#include <algorithm>
+#include <atomic>
+
+#include "absl/base/casts.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/internal/cycleclock.h"
+#include "absl/base/internal/spinlock_wait.h"
+#include "absl/base/internal/sysinfo.h" /* For NumCPUs() */
+
+// Description of lock-word:
+//  31..00: [............................3][2][1][0]
+//
+//     [0]: kSpinLockHeld
+//     [1]: kSpinLockCooperative
+//     [2]: kSpinLockDisabledScheduling
+// [31..3]: ONLY kSpinLockSleeper OR
+//          Wait time in cycles >> PROFILE_TIMESTAMP_SHIFT
+//
+// Detailed descriptions:
+//
+// Bit [0]: The lock is considered held iff kSpinLockHeld is set.
+//
+// Bit [1]: Eligible waiters (e.g. Fibers) may co-operatively reschedule when
+//          contended iff kSpinLockCooperative is set.
+//
+// Bit [2]: This bit is exclusive from bit [1].  It is used only by a
+//          non-cooperative lock.  When set, indicates that scheduling was
+//          successfully disabled when the lock was acquired.  May be unset,
+//          even if non-cooperative, if a ThreadIdentity did not yet exist at
+//          time of acquisition.
+//
+// Bit [3]: If this is the only upper bit ([31..3]) set, then this lock was
+//          acquired without contention; however, at least one waiter exists.
+//
+//          Otherwise, bits [31..3] represent the time spent by the current lock
+//          holder to acquire the lock.  There may be outstanding waiter(s).
+
+namespace absl {
+namespace base_internal {
+
+static int adaptive_spin_count = 0;
+
+namespace {
+struct SpinLock_InitHelper {
+  SpinLock_InitHelper() {
+    // On multi-cpu machines, spin for longer before yielding
+    // the processor or sleeping.  Reduces idle time significantly.
+    if (base_internal::NumCPUs() > 1) {
+      adaptive_spin_count = 1000;
+    }
+  }
+};
+
+// Hook into global constructor execution:
+// We do not do adaptive spinning before that,
+// but nothing lock-intensive should be going on at that time.
+static SpinLock_InitHelper init_helper;
+
+ABSL_CONST_INIT static base_internal::AtomicHook<void (*)(const void *lock,
+                                                          int64_t wait_cycles)>
+    submit_profile_data;
+
+}  // namespace
+
+void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
+                                         int64_t wait_cycles)) {
+  submit_profile_data.Store(fn);
+}
+
+static inline bool IsCooperative(
+    base_internal::SchedulingMode scheduling_mode) {
+  return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+}
+
+// Uncommon constructors.
+SpinLock::SpinLock(base_internal::SchedulingMode mode)
+    : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
+  ABSL_TSAN_MUTEX_CREATE(this, 0);
+}
+
+SpinLock::SpinLock(base_internal::LinkerInitialized,
+                   base_internal::SchedulingMode mode) {
+  ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_linker_init);
+  if (IsCooperative(mode)) {
+    InitLinkerInitializedAndCooperative();
+  }
+  // Otherwise, lockword_ is already initialized.
+}
+
+// Static (linker initialized) spinlocks always start life as functional
+// non-cooperative locks.  When their static constructor does run, it will call
+// this initializer to augment the lockword with the cooperative bit.  By
+// actually taking the lock when we do this we avoid the need for an atomic
+// operation in the regular unlock path.
+//
+// SlowLock() must be careful to re-test for this bit so that any outstanding
+// waiters may be upgraded to cooperative status.
+void SpinLock::InitLinkerInitializedAndCooperative() {
+  Lock();
+  lockword_.fetch_or(kSpinLockCooperative, std::memory_order_relaxed);
+  Unlock();
+}
+
+// Monitor the lock to see if its value changes within some time period
+// (adaptive_spin_count loop iterations).  A timestamp indicating
+// when the thread initially started waiting for the lock is passed in via
+// the initial_wait_timestamp value.  The total wait time in cycles for the
+// lock is returned in the wait_cycles parameter.  The last value read
+// from the lock is returned from the method.
+uint32_t SpinLock::SpinLoop(int64_t initial_wait_timestamp,
+                            uint32_t *wait_cycles) {
+  int c = adaptive_spin_count;
+  uint32_t lock_value;
+  do {
+    lock_value = lockword_.load(std::memory_order_relaxed);
+  } while ((lock_value & kSpinLockHeld) != 0 && --c > 0);
+  uint32_t spin_loop_wait_cycles =
+      EncodeWaitCycles(initial_wait_timestamp, CycleClock::Now());
+  *wait_cycles = spin_loop_wait_cycles;
+
+  return TryLockInternal(lock_value, spin_loop_wait_cycles);
+}
+
+void SpinLock::SlowLock() {
+  // The lock was not obtained initially, so this thread needs to wait for
+  // it.  Record the current timestamp in the local variable wait_start_time
+  // so the total wait time can be stored in the lockword once this thread
+  // obtains the lock.
+  int64_t wait_start_time = CycleClock::Now();
+  uint32_t wait_cycles;
+  uint32_t lock_value = SpinLoop(wait_start_time, &wait_cycles);
+
+  int lock_wait_call_count = 0;
+  while ((lock_value & kSpinLockHeld) != 0) {
+    // If the lock is currently held, but not marked as having a sleeper, mark
+    // it as having a sleeper.
+    if ((lock_value & kWaitTimeMask) == 0) {
+      // Here, just "mark" that the thread is going to sleep.  Don't store the
+      // lock wait time in the lock as that will cause the current lock
+      // owner to think it experienced contention.
+      if (lockword_.compare_exchange_strong(
+              lock_value, lock_value | kSpinLockSleeper,
+              std::memory_order_acquire, std::memory_order_relaxed)) {
+        // Successfully transitioned to kSpinLockSleeper.  Pass
+        // kSpinLockSleeper to the SpinLockWait routine to properly indicate
+        // the last lock_value observed.
+        lock_value |= kSpinLockSleeper;
+      } else if ((lock_value & kSpinLockHeld) == 0) {
+        // Lock is free again, so try and acquire it before sleeping.  The
+        // new lock state will be the number of cycles this thread waited if
+        // this thread obtains the lock.
+        lock_value = TryLockInternal(lock_value, wait_cycles);
+        continue;   // Skip the delay at the end of the loop.
+      }
+    }
+
+    base_internal::SchedulingMode scheduling_mode;
+    if ((lock_value & kSpinLockCooperative) != 0) {
+      scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+    } else {
+      scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY;
+    }
+    // SpinLockDelay() calls into fiber scheduler, we need to see
+    // synchronization there to avoid false positives.
+    ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
+    // Wait for an OS specific delay.
+    base_internal::SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count,
+                                 scheduling_mode);
+    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
+    // Spin again after returning from the wait routine to give this thread
+    // some chance of obtaining the lock.
+    lock_value = SpinLoop(wait_start_time, &wait_cycles);
+  }
+}
+
+void SpinLock::SlowUnlock(uint32_t lock_value) {
+  base_internal::SpinLockWake(&lockword_,
+                              false);  // wake waiter if necessary
+
+  // If our acquisition was contended, collect contentionz profile info.  We
+  // reserve a unitary wait time to represent that a waiter exists without our
+  // own acquisition having been contended.
+  if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) {
+    const uint64_t wait_cycles = DecodeWaitCycles(lock_value);
+    ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
+    submit_profile_data(this, wait_cycles);
+    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
+  }
+}
+
+// We use the upper 29 bits of the lock word to store the time spent waiting to
+// acquire this lock.  This is reported by contentionz profiling.  Since the
+// lower bits of the cycle counter wrap very quickly on high-frequency
+// processors we divide to reduce the granularity to 2^PROFILE_TIMESTAMP_SHIFT
+// sized units.  On a 4 GHz machine this will lose track of wait times greater
+// than (2^29/4 GHz)*128 =~ 17.2 seconds.  Such waits should be extremely rare.
+enum { PROFILE_TIMESTAMP_SHIFT = 7 };
+enum { LOCKWORD_RESERVED_SHIFT = 3 };  // We currently reserve the lower 3 bits.
+
+uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
+                                    int64_t wait_end_time) {
+  static const int64_t kMaxWaitTime =
+      std::numeric_limits<uint32_t>::max() >> LOCKWORD_RESERVED_SHIFT;
+  int64_t scaled_wait_time =
+      (wait_end_time - wait_start_time) >> PROFILE_TIMESTAMP_SHIFT;
+
+  // Return a representation of the time spent waiting that can be stored in
+  // the lock word's upper bits.  bit_cast is required as Atomic32 is signed.
+  const uint32_t clamped = static_cast<uint32_t>(
+      std::min(scaled_wait_time, kMaxWaitTime) << LOCKWORD_RESERVED_SHIFT);
+
+  // Bump up the value if necessary to avoid returning kSpinLockSleeper.
+  const uint32_t after_spinlock_sleeper =
+     kSpinLockSleeper + (1 << LOCKWORD_RESERVED_SHIFT);
+  return clamped == kSpinLockSleeper ? after_spinlock_sleeper : clamped;
+}
+
+uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
+  // Cast to uint32_t first to ensure bits [63:32] are cleared.
+  const uint64_t scaled_wait_time =
+      static_cast<uint32_t>(lock_value & kWaitTimeMask);
+  return scaled_wait_time
+      << (PROFILE_TIMESTAMP_SHIFT - LOCKWORD_RESERVED_SHIFT);
+}
+
+}  // namespace base_internal
+}  // namespace absl
diff --git a/absl/base/internal/spinlock.h b/absl/base/internal/spinlock.h
new file mode 100644
index 000000000000..fa64ba65bbf2
--- /dev/null
+++ b/absl/base/internal/spinlock.h
@@ -0,0 +1,227 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+//  Most users requiring mutual exclusion should use Mutex.
+//  SpinLock is provided for use in three situations:
+//   - for use in code that Mutex itself depends on
+//   - to get a faster fast-path release under low contention (without an
+//     atomic read-modify-write).  In return, SpinLock has worse behaviour under
+//     contention, which is why Mutex is preferred in most situations.
+//   - for async signal safety (see below)
+
+// SpinLock is async signal safe.  If a spinlock is used within a signal
+// handler, all code that acquires the lock must ensure that the signal cannot
+// arrive while they are holding the lock.  Typically, this is done by blocking
+// the signal.
+
+#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
+#define ABSL_BASE_INTERNAL_SPINLOCK_H_
+
+#include <atomic>
+
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/low_level_scheduling.h"
+#include "absl/base/internal/tsan_mutex_interface.h"
+#include "absl/base/port.h"
+#include "absl/base/thread_annotations.h"
+
+namespace absl {
+namespace base_internal {
+
+class LOCKABLE SpinLock {
+ public:
+  SpinLock() : lockword_(kSpinLockCooperative) {
+    ABSL_TSAN_MUTEX_CREATE(this, 0);
+  }
+
+  // Special constructor for use with static SpinLock objects.  E.g.,
+  //
+  //    static SpinLock lock(base_internal::kLinkerInitialized);
+  //
+  // When initialized using this constructor, we depend on the fact
+  // that the linker has already initialized the memory appropriately.
+  // A SpinLock constructed like this can be freely used from global
+  // initializers without worrying about the order in which global
+  // initializers run.
+  explicit SpinLock(base_internal::LinkerInitialized) {
+    // Does nothing; lockword_ is already initialized
+    ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_linker_init);
+  }
+
+  // Constructors that allow non-cooperative spinlocks to be created for use
+  // inside thread schedulers.  Normal clients should not use these.
+  explicit SpinLock(base_internal::SchedulingMode mode);
+  SpinLock(base_internal::LinkerInitialized,
+           base_internal::SchedulingMode mode);
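+  // For example (illustrative), a lock used inside a thread scheduler might
+  // be declared as:
+  //
+  //   SpinLock lock(base_internal::SCHEDULE_KERNEL_ONLY);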
+
+  ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, 0); }
+
+  // Acquire this SpinLock.
+  inline void Lock() EXCLUSIVE_LOCK_FUNCTION() {
+    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+    if (!TryLockImpl()) {
+      SlowLock();
+    }
+    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+  }
+
+  // Try to acquire this SpinLock without blocking and return true if the
+  // acquisition was successful.  If the lock was not acquired, false is
+  // returned.  If this SpinLock is free at the time of the call, TryLock
+  // will return true with high probability.
+  inline bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+    ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
+    bool res = TryLockImpl();
+    ABSL_TSAN_MUTEX_POST_LOCK(
+        this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
+        0);
+    return res;
+  }
+
+  // Release this SpinLock, which must be held by the calling thread.
+  inline void Unlock() UNLOCK_FUNCTION() {
+    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
+    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
+    lockword_.store(lock_value & kSpinLockCooperative,
+                    std::memory_order_release);
+
+    if ((lock_value & kSpinLockDisabledScheduling) != 0) {
+      base_internal::SchedulingGuard::EnableRescheduling(true);
+    }
+    if ((lock_value & kWaitTimeMask) != 0) {
+      // Collect contentionz profile info, and speed the wakeup of any waiter.
+      // The wait_cycles value indicates how long this thread spent waiting
+      // for the lock.
+      SlowUnlock(lock_value);
+    }
+    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
+  }
+
+  // Determine if the lock is held.  When the lock is held by the invoking
+  // thread, true will always be returned. Intended to be used as
+  // CHECK(lock.IsHeld()).
+  inline bool IsHeld() const {
+    return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
+  }
+
+ protected:
+  // These should not be exported except for testing.
+
+  // Store number of cycles between wait_start_time and wait_end_time in a
+  // lock value.
+  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
+                                   int64_t wait_end_time);
+
+  // Extract number of wait cycles in a lock value.
+  static uint64_t DecodeWaitCycles(uint32_t lock_value);
+
+  // Provide access to protected method above.  Use for testing only.
+  friend struct SpinLockTest;
+
+ private:
+  // lockword_ is used to store the following:
+  //
+  // bit[0] encodes whether a lock is being held.
+  // bit[1] encodes whether a lock uses cooperative scheduling.
+  // bit[2] encodes whether a lock disables scheduling.
+  // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
+  enum { kSpinLockHeld = 1 };
+  enum { kSpinLockCooperative = 2 };
+  enum { kSpinLockDisabledScheduling = 4 };
+  enum { kSpinLockSleeper = 8 };
+  enum { kWaitTimeMask =                      // Includes kSpinLockSleeper.
+    ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling) };
+
+  uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
+  void InitLinkerInitializedAndCooperative();
+  void SlowLock() ABSL_ATTRIBUTE_COLD;
+  void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
+  uint32_t SpinLoop(int64_t initial_wait_timestamp, uint32_t* wait_cycles);
+
+  inline bool TryLockImpl() {
+    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
+    return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
+  }
+
+  std::atomic<uint32_t> lockword_;
+
+  SpinLock(const SpinLock&) = delete;
+  SpinLock& operator=(const SpinLock&) = delete;
+};
+
+// Corresponding locker object that arranges to acquire a spinlock for
+// the duration of a C++ scope.
+class SCOPED_LOCKABLE SpinLockHolder {
+ public:
+  inline explicit SpinLockHolder(SpinLock* l) EXCLUSIVE_LOCK_FUNCTION(l)
+      : lock_(l) {
+    l->Lock();
+  }
+  inline ~SpinLockHolder() UNLOCK_FUNCTION() { lock_->Unlock(); }
+
+  SpinLockHolder(const SpinLockHolder&) = delete;
+  SpinLockHolder& operator=(const SpinLockHolder&) = delete;
+
+ private:
+  SpinLock* lock_;
+};
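+
+// Usage sketch (illustrative; counter_lock and counter are hypothetical):
+//
+//    static SpinLock counter_lock(base_internal::kLinkerInitialized);
+//    static int64_t counter = 0;
+//
+//    void Increment() {
+//      SpinLockHolder l(&counter_lock);
+//      ++counter;
+//    }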
+
+// Register a hook for profiling support.
+//
+// The function pointer registered here will be called whenever a spinlock is
+// contended.  The callback is given an opaque handle to the contended spinlock
+// and the number of wait cycles.  This is thread-safe, but only a single
+// profiler can be registered.  It is an error to call this function multiple
+// times with different arguments.
+void RegisterSpinLockProfiler(void (*fn)(const void* lock,
+                                         int64_t wait_cycles));
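+//
+// For example (illustrative; MyContentionProfiler is a hypothetical name):
+//
+//   void MyContentionProfiler(const void* lock, int64_t wait_cycles) {
+//     // Record the event; must not allocate or acquire spinlocks itself.
+//   }
+//   ...
+//   RegisterSpinLockProfiler(&MyContentionProfiler);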
+
+//------------------------------------------------------------------------------
+// Public interface ends here.
+//------------------------------------------------------------------------------
+
+// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
+// Otherwise, returns last observed value for lockword_.
+inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
+                                          uint32_t wait_cycles) {
+  if ((lock_value & kSpinLockHeld) != 0) {
+    return lock_value;
+  }
+
+  uint32_t sched_disabled_bit = 0;
+  if ((lock_value & kSpinLockCooperative) == 0) {
+    // For non-cooperative locks we must make sure we mark ourselves as
+    // non-reschedulable before we attempt to CompareAndSwap.
+    if (base_internal::SchedulingGuard::DisableRescheduling()) {
+      sched_disabled_bit = kSpinLockDisabledScheduling;
+    }
+  }
+
+  if (lockword_.compare_exchange_strong(
+          lock_value,
+          kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
+          std::memory_order_acquire, std::memory_order_relaxed)) {
+  } else {
+    base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit);
+  }
+
+  return lock_value;
+}
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SPINLOCK_H_
diff --git a/absl/base/internal/spinlock_posix.inc b/absl/base/internal/spinlock_posix.inc
new file mode 100644
index 000000000000..0098c1c76016
--- /dev/null
+++ b/absl/base/internal/spinlock_posix.inc
@@ -0,0 +1,46 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Posix-specific part of spinlock_wait.cc
+
+#include <sched.h>
+#include <atomic>
+#include <ctime>
+#include <cerrno>
+
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/port.h"
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
+    absl::base_internal::SchedulingMode /* mode */) {
+  int save_errno = errno;
+  if (loop == 0) {
+  } else if (loop == 1) {
+    sched_yield();
+  } else {
+    struct timespec tm;
+    tm.tv_sec = 0;
+    tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop);
+    nanosleep(&tm, nullptr);
+  }
+  errno = save_errno;
+}
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+}  // extern "C"
diff --git a/absl/base/internal/spinlock_wait.cc b/absl/base/internal/spinlock_wait.cc
new file mode 100644
index 000000000000..0fd36286629d
--- /dev/null
+++ b/absl/base/internal/spinlock_wait.cc
@@ -0,0 +1,77 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The OS-specific header included below must provide two calls:
+// AbslInternalSpinLockDelay() and AbslInternalSpinLockWake().
+// See spinlock_wait.h for the specs.
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/internal/spinlock_wait.h"
+
+#if defined(_WIN32)
+#include "absl/base/internal/spinlock_win32.inc"
+#else
+#include "absl/base/internal/spinlock_posix.inc"
+#endif
+
+namespace absl {
+namespace base_internal {
+
+// See spinlock_wait.h for spec.
+uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
+                      const SpinLockWaitTransition trans[],
+                      base_internal::SchedulingMode scheduling_mode) {
+  for (int loop = 0; ; loop++) {
+    uint32_t v = w->load(std::memory_order_acquire);
+    int i;
+    for (i = 0; i != n && v != trans[i].from; i++) {
+    }
+    if (i == n) {
+      SpinLockDelay(w, v, loop, scheduling_mode);  // no matching transition
+    } else if (trans[i].to == v ||                 // null transition
+               w->compare_exchange_strong(v, trans[i].to,
+                                          std::memory_order_acquire,
+                                          std::memory_order_relaxed)) {
+      if (trans[i].done) return v;
+    }
+  }
+}
+
+static std::atomic<uint64_t> delay_rand;
+
+// Return a suggested delay in nanoseconds for iteration number "loop"
+int SpinLockSuggestedDelayNS(int loop) {
+  // Weak pseudo-random number generator to get some spread between threads
+  // when many are spinning.
+  uint64_t r = delay_rand.load(std::memory_order_relaxed);
+  r = 0x5deece66dLL * r + 0xb;   // numbers from nrand48()
+  delay_rand.store(r, std::memory_order_relaxed);
+
+  r <<= 16;   // 48-bit random number now in top 48-bits.
+  if (loop < 0 || loop > 32) {   // limit loop to 0..32
+    loop = 32;
+  }
+  // loop>>3 cannot exceed 4 because loop cannot exceed 32.
+  // Select top 20..24 bits of lower 48 bits,
+  // giving approximately 0ms to 16ms.
+  // Mean is exponential in loop for first 32 iterations, then 8ms.
+  // The futex path multiplies this by 16, since we expect explicit wakeups
+  // almost always on that path.
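+  // For example (illustrative): at loop >= 32 the shift is 44 - 4 = 40, so
+  // the result is a 24-bit value of at most ~16.7ms (2^24 ns); for loop < 8
+  // the shift is 44, giving a 20-bit value of at most ~1ms.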
+  return r >> (44 - (loop >> 3));
+}
+
+}  // namespace base_internal
+}  // namespace absl
diff --git a/absl/base/internal/spinlock_wait.h b/absl/base/internal/spinlock_wait.h
new file mode 100644
index 000000000000..5432c1ce2094
--- /dev/null
+++ b/absl/base/internal/spinlock_wait.h
@@ -0,0 +1,94 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
+#define ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
+
+// Operations to make atomic transitions on a word, and to allow
+// waiting for those transitions to become possible.
+
+// This file is used internally in spinlock.cc and once.cc, and a few other
+// places listed in //base:spinlock_wait_users.  If you need to use it outside
+// of //base, please request permission to be added to that list.
+
+#include <atomic>
+
+#include "absl/base/internal/scheduling_mode.h"
+
+namespace absl {
+namespace base_internal {
+
+// SpinLockWait() waits until it can perform one of several transitions from
+// "from" to "to".  It returns when it performs a transition where done==true.
+struct SpinLockWaitTransition {
+  uint32_t from;
+  uint32_t to;
+  bool done;
+};
+
+// Wait until *w can transition from trans[i].from to trans[i].to for some i
+// satisfying 0<=i<n && trans[i].done, atomically make the transition,
+// then return the old value of *w.   Make any other atomic transitions
+// where !trans[i].done, but continue waiting.
+uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
+                      const SpinLockWaitTransition trans[],
+                      SchedulingMode scheduling_mode);
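+//
+// Example (illustrative): wait until *w transitions from 0 to 1, delaying
+// while any other value is observed:
+//
+//   const SpinLockWaitTransition trans[] = {{0, 1, true}};
+//   SpinLockWait(w, 1, trans, SCHEDULE_COOPERATIVE_AND_KERNEL);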
+
+// If possible, wake some thread that has called SpinLockDelay(w, ...). If
+// "all" is true, wake all such threads.  This call is a hint, and on some
+// systems it may be a no-op; threads calling SpinLockDelay() will always wake
+// eventually even if SpinLockWake() is never called.
+void SpinLockWake(std::atomic<uint32_t> *w, bool all);
+
+// Wait for an appropriate spin delay on iteration "loop" of a
+// spin loop on location *w, whose previously observed value was "value".
+// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
+// or may wait for a delay that can be truncated by a call to SpinLockWake(w).
+// In all cases, it must return in bounded time even if SpinLockWake() is not
+// called.
+void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop,
+                   base_internal::SchedulingMode scheduling_mode);
+
+// Helper used by AbslInternalSpinLockDelay.
+// Returns a suggested delay in nanoseconds for iteration number "loop".
+int SpinLockSuggestedDelayNS(int loop);
+
+}  // namespace base_internal
+}  // namespace absl
+
+// In some build configurations we pass --detect-odr-violations to the
+// gold linker.  This causes it to flag weak symbol overrides as ODR
+// violations.  Because ODR only applies to C++ and not C,
+// --detect-odr-violations ignores symbols not mangled with C++ names.
+// By changing our extension points to be extern "C", we dodge this
+// check.
+extern "C" {
+void AbslInternalSpinLockWake(std::atomic<uint32_t> *w, bool all);
+void AbslInternalSpinLockDelay(
+    std::atomic<uint32_t> *w, uint32_t value, int loop,
+    absl::base_internal::SchedulingMode scheduling_mode);
+}
+
+inline void absl::base_internal::SpinLockWake(std::atomic<uint32_t> *w,
+                                              bool all) {
+  AbslInternalSpinLockWake(w, all);
+}
+
+inline void absl::base_internal::SpinLockDelay(
+    std::atomic<uint32_t> *w, uint32_t value, int loop,
+    base_internal::SchedulingMode scheduling_mode) {
+  AbslInternalSpinLockDelay(w, value, loop, scheduling_mode);
+}
+
+#endif  // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
diff --git a/absl/base/internal/spinlock_win32.inc b/absl/base/internal/spinlock_win32.inc
new file mode 100644
index 000000000000..32c8fc0bb51d
--- /dev/null
+++ b/absl/base/internal/spinlock_win32.inc
@@ -0,0 +1,37 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Win32-specific part of spinlock_wait.cc
+
+#include <windows.h>
+#include <atomic>
+#include "absl/base/internal/scheduling_mode.h"
+
+extern "C" {
+
+void AbslInternalSpinLockDelay(std::atomic<uint32_t>* /* lock_word */,
+                               uint32_t /* value */, int loop,
+                               absl::base_internal::SchedulingMode /* mode */) {
+  if (loop == 0) {
+  } else if (loop == 1) {
+    Sleep(0);
+  } else {
+    Sleep(absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000);
+  }
+}
+
+void AbslInternalSpinLockWake(std::atomic<uint32_t>* /* lock_word */,
+                              bool /* all */) {}
+
+}  // extern "C"
diff --git a/absl/base/internal/sysinfo.cc b/absl/base/internal/sysinfo.cc
new file mode 100644
index 000000000000..11863eab20ea
--- /dev/null
+++ b/absl/base/internal/sysinfo.cc
@@ -0,0 +1,370 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/sysinfo.h"
+
+#ifdef _WIN32
+#include <shlwapi.h>
+#include <windows.h>
+#else
+#include <fcntl.h>
+#include <pthread.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#ifdef __linux__
+#include <sys/syscall.h>
+#endif
+
+#ifdef __APPLE__
+#include <sys/sysctl.h>
+#endif
+
+#include <string.h>
+#include <cassert>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <ctime>
+#include <limits>
+#include <thread>  // NOLINT(build/c++11)
+#include <utility>
+#include <vector>
+
+#include "absl/base/call_once.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/internal/unscaledcycleclock.h"
+#include "absl/base/thread_annotations.h"
+
+namespace absl {
+namespace base_internal {
+
+static once_flag init_system_info_once;
+static int num_cpus = 0;
+static double nominal_cpu_frequency = 1.0;  // 0.0 might be dangerous.
+
+static int GetNumCPUs() {
+#if defined(__myriad2__) || defined(__GENCLAVE__)
+  // TODO(b/28296132): Calling std::thread::hardware_concurrency() induces a
+  // link error on myriad2 builds.
+  // TODO(b/62709537): Support std::thread::hardware_concurrency() in gEnclave.
+  return 1;
+#else
+  // Other possibilities:
+  //  - Read /sys/devices/system/cpu/online and use cpumask_parse()
+  //  - sysconf(_SC_NPROCESSORS_ONLN)
+  return std::thread::hardware_concurrency();
+#endif
+}
+
+#if defined(_WIN32)
+
+static double GetNominalCPUFrequency() {
+  DWORD data;
+  DWORD data_size = sizeof(data);
+  #pragma comment(lib, "shlwapi.lib")  // For SHGetValue().
+  if (SUCCEEDED(
+          SHGetValueA(HKEY_LOCAL_MACHINE,
+                      "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
+                      "~MHz", nullptr, &data, &data_size))) {
+    return data * 1e6;  // Value is MHz.
+  }
+  return 1.0;
+}
+
+#elif defined(CTL_HW) && defined(HW_CPU_FREQ)
+
+static double GetNominalCPUFrequency() {
+  unsigned freq;
+  size_t size = sizeof(freq);
+  int mib[2] = {CTL_HW, HW_CPU_FREQ};
+  if (sysctl(mib, 2, &freq, &size, nullptr, 0) == 0) {
+    return static_cast<double>(freq);
+  }
+  return 1.0;
+}
+
+#else
+
+// Helper function for reading a long from a file. Returns true if successful
+// and the memory location pointed to by value is set to the value read.
+static bool ReadLongFromFile(const char *file, long *value) {
+  bool ret = false;
+  int fd = open(file, O_RDONLY);
+  if (fd != -1) {
+    char line[1024];
+    char *err;
+    memset(line, '\0', sizeof(line));
+    int len = read(fd, line, sizeof(line) - 1);
+    if (len <= 0) {
+      ret = false;
+    } else {
+      const long temp_value = strtol(line, &err, 10);
+      if (line[0] != '\0' && (*err == '\n' || *err == '\0')) {
+        *value = temp_value;
+        ret = true;
+      }
+    }
+    close(fd);
+  }
+  return ret;
+}
+
+#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
+
+// Reads a monotonic time source and returns a value in
+// nanoseconds. The returned value uses an arbitrary epoch, not the
+// Unix epoch.
+static int64_t ReadMonotonicClockNanos() {
+  struct timespec t;
+#ifdef CLOCK_MONOTONIC_RAW
+  int rc = clock_gettime(CLOCK_MONOTONIC_RAW, &t);
+#else
+  int rc = clock_gettime(CLOCK_MONOTONIC, &t);
+#endif
+  if (rc != 0) {
+    perror("clock_gettime() failed");
+    abort();
+  }
+  return int64_t{t.tv_sec} * 1000000000 + t.tv_nsec;
+}
+
+class UnscaledCycleClockWrapperForInitializeFrequency {
+ public:
+  static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); }
+};
+
+struct TimeTscPair {
+  int64_t time;  // From ReadMonotonicClockNanos().
+  int64_t tsc;   // From UnscaledCycleClock::Now().
+};
+
+// Returns a pair of values (monotonic kernel time, TSC ticks) that
+// approximately correspond to each other.  This is accomplished by
+// doing several reads and picking the reading with the lowest
+// latency.  This approach is used to minimize the probability that
+// our thread was preempted between clock reads.
+static TimeTscPair GetTimeTscPair() {
+  int64_t best_latency = std::numeric_limits<int64_t>::max();
+  TimeTscPair best;
+  for (int i = 0; i < 10; ++i) {
+    int64_t t0 = ReadMonotonicClockNanos();
+    int64_t tsc = UnscaledCycleClockWrapperForInitializeFrequency::Now();
+    int64_t t1 = ReadMonotonicClockNanos();
+    int64_t latency = t1 - t0;
+    if (latency < best_latency) {
+      best_latency = latency;
+      best.time = t0;
+      best.tsc = tsc;
+    }
+  }
+  return best;
+}
+
+// Measures and returns the TSC frequency by taking a pair of
+// measurements approximately `sleep_nanoseconds` apart.
+static double MeasureTscFrequencyWithSleep(int sleep_nanoseconds) {
+  auto t0 = GetTimeTscPair();
+  struct timespec ts;
+  ts.tv_sec = 0;
+  ts.tv_nsec = sleep_nanoseconds;
+  while (nanosleep(&ts, &ts) != 0 && errno == EINTR) {}
+  auto t1 = GetTimeTscPair();
+  double elapsed_ticks = t1.tsc - t0.tsc;
+  double elapsed_time = (t1.time - t0.time) * 1e-9;
+  return elapsed_ticks / elapsed_time;
+}
+
+// Measures and returns the TSC frequency by calling
+// MeasureTscFrequencyWithSleep(), doubling the sleep interval until the
+// frequency measurement stabilizes.
+static double MeasureTscFrequency() {
+  double last_measurement = -1.0;
+  int sleep_nanoseconds = 1000000;  // 1 millisecond.
+  for (int i = 0; i < 8; ++i) {
+    double measurement = MeasureTscFrequencyWithSleep(sleep_nanoseconds);
+    if (measurement * 0.99 < last_measurement &&
+        last_measurement < measurement * 1.01) {
+      // Use the current measurement if it is within 1% of the
+      // previous measurement.
+      return measurement;
+    }
+    last_measurement = measurement;
+    sleep_nanoseconds *= 2;
+  }
+  return last_measurement;
+}
+
+#endif  // ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+
+static double GetNominalCPUFrequency() {
+  long freq = 0;
+
+  // Google's production kernel has a patch to export the TSC
+  // frequency through sysfs. If the kernel is exporting the TSC
+  // frequency use that. There are issues where cpuinfo_max_freq
+  // cannot be relied on because the BIOS may be exporting an invalid
+  // p-state (on x86) or p-states may be used to put the processor in
+  // a new mode (turbo mode). Essentially, those frequencies cannot
+  // always be relied upon. The same reasons apply to /proc/cpuinfo as
+  // well.
+  if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)) {
+    return freq * 1e3;  // Value is kHz.
+  }
+
+#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
+  // On these platforms, the TSC frequency is the nominal CPU
+  // frequency.  But without having the kernel export it directly
+  // though /sys/devices/system/cpu/cpu0/tsc_freq_khz, there is no
+  // other way to reliably get the TSC frequency, so we have to
+  // measure it ourselves.  Some CPUs abuse cpuinfo_max_freq by
+  // exporting "fake" frequencies for implementing new features. For
+  // example, Intel's turbo mode is enabled by exposing a p-state
+  // value with a higher frequency than that of the real TSC
+  // rate. Because of this, we prefer to measure the TSC rate
+  // ourselves on i386 and x86-64.
+  return MeasureTscFrequency();
+#else
+
+  // If CPU scaling is in effect, we want to use the *maximum*
+  // frequency, not whatever CPU speed some random processor happens
+  // to be using now.
+  if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
+                       &freq)) {
+    return freq * 1e3;  // Value is kHz.
+  }
+
+  return 1.0;
+#endif  // !ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+}
+
+#endif
+
+// InitializeSystemInfo() may be called before main() and before
+// malloc is properly initialized, therefore this must not allocate
+// memory.
+static void InitializeSystemInfo() {
+  num_cpus = GetNumCPUs();
+  nominal_cpu_frequency = GetNominalCPUFrequency();
+}
+
+int NumCPUs() {
+  base_internal::LowLevelCallOnce(&init_system_info_once, InitializeSystemInfo);
+  return num_cpus;
+}
+
+double NominalCPUFrequency() {
+  base_internal::LowLevelCallOnce(&init_system_info_once, InitializeSystemInfo);
+  return nominal_cpu_frequency;
+}
+
+#if defined(_WIN32)
+
+pid_t GetTID() {
+  return GetCurrentThreadId();
+}
+
+#elif defined(__linux__)
+
+#ifndef SYS_gettid
+#define SYS_gettid __NR_gettid
+#endif
+
+pid_t GetTID() {
+  return syscall(SYS_gettid);
+}
+
+#else
+
+// Fallback implementation of GetTID using pthread_getspecific.
+static once_flag tid_once;
+static pthread_key_t tid_key;
+static absl::base_internal::SpinLock tid_lock(
+    absl::base_internal::kLinkerInitialized);
+
+// We set a bit per thread in this array to indicate that an ID is in
+// use. ID 0 is unused because it is the default value returned by
+// pthread_getspecific().
+static std::vector<uint32_t>* tid_array GUARDED_BY(tid_lock) = nullptr;
+static constexpr int kBitsPerWord = 32;  // tid_array is uint32_t.
+
+// Returns the TID to tid_array.
+static void FreeTID(void *v) {
+  intptr_t tid = reinterpret_cast<intptr_t>(v);
+  int word = tid / kBitsPerWord;
+  uint32_t mask = ~(1u << (tid % kBitsPerWord));
+  absl::base_internal::SpinLockHolder lock(&tid_lock);
+  assert(0 <= word && static_cast<size_t>(word) < tid_array->size());
+  (*tid_array)[word] &= mask;
+}
+
+static void InitGetTID() {
+  if (pthread_key_create(&tid_key, FreeTID) != 0) {
+    // The logging system calls GetTID() so it can't be used here.
+    perror("pthread_key_create failed");
+    abort();
+  }
+
+  // Initialize tid_array.
+  absl::base_internal::SpinLockHolder lock(&tid_lock);
+  tid_array = new std::vector<uint32_t>(1);
+  (*tid_array)[0] = 1;  // ID 0 is never-allocated.
+}
+
+// Return a per-thread small integer ID from pthread's thread-specific data.
+pid_t GetTID() {
+  absl::call_once(tid_once, InitGetTID);
+
+  intptr_t tid = reinterpret_cast<intptr_t>(pthread_getspecific(tid_key));
+  if (tid != 0) {
+    return tid;
+  }
+
+  int bit;  // tid_array[word] = 1u << bit;
+  size_t word;
+  {
+    // Search for the first unused ID.
+    absl::base_internal::SpinLockHolder lock(&tid_lock);
+    // First search for a word in the array that is not all ones.
+    word = 0;
+    while (word < tid_array->size() && ~(*tid_array)[word] == 0) {
+      ++word;
+    }
+    if (word == tid_array->size()) {
+      tid_array->push_back(0);  // No space left, add kBitsPerWord more IDs.
+    }
+    // Search for a zero bit in the word.
+    bit = 0;
+    while (bit < kBitsPerWord && (((*tid_array)[word] >> bit) & 1) != 0) {
+      ++bit;
+    }
+    tid = (word * kBitsPerWord) + bit;
+    (*tid_array)[word] |= 1u << bit;  // Mark the TID as allocated.
+  }
+
+  if (pthread_setspecific(tid_key, reinterpret_cast<void *>(tid)) != 0) {
+    perror("pthread_setspecific failed");
+    abort();
+  }
+
+  return static_cast<pid_t>(tid);
+}
+
+#endif
+
+}  // namespace base_internal
+}  // namespace absl
diff --git a/absl/base/internal/sysinfo.h b/absl/base/internal/sysinfo.h
new file mode 100644
index 000000000000..f21de14325c7
--- /dev/null
+++ b/absl/base/internal/sysinfo.h
@@ -0,0 +1,64 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file includes routines to find out characteristics
+// of the machine a program is running on.  It is undoubtedly
+// system-dependent.
+
+// Functions listed here that accept a pid_t as an argument act on the
+// current process if the pid_t argument is 0.
+// All functions here are thread-hostile due to file caching unless
+// commented otherwise.
+
+#ifndef ABSL_BASE_INTERNAL_SYSINFO_H_
+#define ABSL_BASE_INTERNAL_SYSINFO_H_
+
+#ifndef _WIN32
+#include <sys/types.h>
+#else
+#include <intsafe.h>
+#endif
+
+#include "absl/base/port.h"
+
+namespace absl {
+namespace base_internal {
+
+// Nominal core processor cycles per second of each processor.   This is _not_
+// necessarily the frequency of the CycleClock counter (see cycleclock.h).
+// Thread-safe.
+double NominalCPUFrequency();
+
+// Number of logical processors (hyperthreads) in system. See
+// //base/cpuid/cpuid.h for more CPU-related info.  Thread-safe.
+int NumCPUs();
+
+// Return the thread id of the current thread, as told by the system.
+// No two currently-live threads implemented by the OS shall have the same ID.
+// Thread ids of exited threads may be reused.   Multiple user-level threads
+// may have the same thread ID if multiplexed on the same OS thread.
+//
+// On Linux, you may send a signal to the resulting ID with kill().  However,
+// it is recommended for portability that you use pthread_kill() instead.
+#ifdef _WIN32
+// On Windows, process ids and thread ids are the same type: the return types
+// of GetProcessId() and GetThreadId() are both DWORD.
+using pid_t = DWORD;
+#endif
+pid_t GetTID();
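+
+// Usage sketch (illustrative):
+//
+//   int cpus = absl::base_internal::NumCPUs();
+//   double cycles_per_second = absl::base_internal::NominalCPUFrequency();
+//   pid_t tid = absl::base_internal::GetTID();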
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SYSINFO_H_
diff --git a/absl/base/internal/sysinfo_test.cc b/absl/base/internal/sysinfo_test.cc
new file mode 100644
index 000000000000..4c7d66b7f9c3
--- /dev/null
+++ b/absl/base/internal/sysinfo_test.cc
@@ -0,0 +1,99 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/sysinfo.h"
+
+#ifndef _WIN32
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#include <thread>  // NOLINT(build/c++11)
+#include <unordered_set>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/synchronization/barrier.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+namespace base_internal {
+namespace {
+
+TEST(SysinfoTest, NumCPUs) {
+  EXPECT_NE(NumCPUs(), 0)
+      << "NumCPUs() should not have the default value of 0";
+}
+
+TEST(SysinfoTest, NominalCPUFrequency) {
+#if !(defined(__aarch64__) && defined(__linux__))
+  EXPECT_GE(NominalCPUFrequency(), 1000.0)
+      << "NominalCPUFrequency() did not return a reasonable value";
+#else
+  // TODO(b/37919252): Aarch64 cannot read the CPU frequency from sysfs, so we
+  // get back 1.0. Fix once the value is available.
+  EXPECT_EQ(NominalCPUFrequency(), 1.0)
+      << "CPU frequency detection was fixed! Please update unittest and "
+         "b/37919252";
+#endif
+}
+
+TEST(SysinfoTest, GetTID) {
+  EXPECT_EQ(GetTID(), GetTID());  // Basic compile and equality test.
+#ifdef __native_client__
+  // Native Client has a race condition bug that leads to memory
+  // exhaustion when repeatedly creating and joining threads.
+  // https://bugs.chromium.org/p/nativeclient/issues/detail?id=1027
+  return;
+#endif
+  // Test that TIDs are unique to each thread.
+  // Uses a few loops to exercise implementations that reallocate IDs.
+  for (int i = 0; i < 32; ++i) {
+    constexpr int kNumThreads = 64;
+    Barrier all_threads_done(kNumThreads);
+    std::vector<std::thread> threads;
+
+    Mutex mutex;
+    std::unordered_set<pid_t> tids;
+
+    for (int j = 0; j < kNumThreads; ++j) {
+      threads.push_back(std::thread([&]() {
+        pid_t id = GetTID();
+        {
+          MutexLock lock(&mutex);
+          ASSERT_TRUE(tids.find(id) == tids.end());
+          tids.insert(id);
+        }
+        // We can't simply join the threads here. The threads need to
+        // be alive otherwise the TID might have been reallocated to
+        // another live thread.
+        all_threads_done.Block();
+      }));
+    }
+    for (auto& thread : threads) {
+      thread.join();
+    }
+  }
+}
+
+#ifdef __linux__
+TEST(SysinfoTest, LinuxGetTID) {
+  // On Linux, for the main thread, GetTID()==getpid() is guaranteed by the API.
+  EXPECT_EQ(GetTID(), getpid());
+}
+#endif
+
+}  // namespace
+}  // namespace base_internal
+}  // namespace absl
diff --git a/absl/base/internal/thread_identity.cc b/absl/base/internal/thread_identity.cc
new file mode 100644
index 000000000000..ee96a5883293
--- /dev/null
+++ b/absl/base/internal/thread_identity.cc
@@ -0,0 +1,126 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/thread_identity.h"
+
+#ifndef _WIN32
+#include <pthread.h>
+#include <signal.h>
+#endif
+
+#include <atomic>
+#include <cassert>
+#include <memory>
+
+#include "absl/base/call_once.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+
+namespace absl {
+namespace base_internal {
+
+#if ABSL_THREAD_IDENTITY_MODE != ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+namespace {
+// Used to co-ordinate one-time creation of our pthread_key
+absl::once_flag init_thread_identity_key_once;
+pthread_key_t thread_identity_pthread_key;
+std::atomic<bool> pthread_key_initialized(false);
+
+void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) {
+  pthread_key_create(&thread_identity_pthread_key, reclaimer);
+  pthread_key_initialized.store(true, std::memory_order_release);
+}
+}  // namespace
+#endif
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+// The actual TLS storage for a thread's currently associated ThreadIdentity.
+// This is referenced by inline accessors in the header.
+// "protected" visibility ensures that if multiple copies of //base exist in a
+// process (via dlopen() or similar), references to
+// thread_identity_ptr from each copy of the code will refer to
+// *different* instances of this ptr. See extensive discussion of this choice
+// in cl/90634708
+// TODO(ahh): hard deprecate multiple copies of //base; remove this.
+#ifdef __GNUC__
+__attribute__((visibility("protected")))
+#endif  // __GNUC__
+  ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr;
+#endif  // TLS or CPP11
+
+void SetCurrentThreadIdentity(
+    ThreadIdentity* identity, ThreadIdentityReclaimerFunction reclaimer) {
+  assert(CurrentThreadIdentityIfPresent() == nullptr);
+  // Associate our destructor.
+  // NOTE: This call to pthread_setspecific is currently the only immovable
+  // barrier to CurrentThreadIdentity() always being async signal safe.
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+  // NOTE: Not async-safe.  But can be open-coded.
+  absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
+                  reclaimer);
+  // b/18366710:
+  // We must mask signals around the call to setspecific as with current glibc,
+  // a concurrent getspecific (needed for GetCurrentThreadIdentityIfPresent())
+  // may zero our value.
+  //
+  // While not officially async-signal safe, getspecific within a signal handler
+  // is otherwise OK.
+  sigset_t all_signals;
+  sigset_t curr_signals;
+  sigfillset(&all_signals);
+  pthread_sigmask(SIG_SETMASK, &all_signals, &curr_signals);
+  pthread_setspecific(thread_identity_pthread_key,
+                      reinterpret_cast<void*>(identity));
+  pthread_sigmask(SIG_SETMASK, &curr_signals, nullptr);
+#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS
+  // NOTE: Not async-safe.  But can be open-coded.
+  absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
+                  reclaimer);
+  pthread_setspecific(thread_identity_pthread_key,
+                      reinterpret_cast<void*>(identity));
+  thread_identity_ptr = identity;
+#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+  thread_local std::unique_ptr<ThreadIdentity, ThreadIdentityReclaimerFunction>
+      holder(identity, reclaimer);
+  thread_identity_ptr = identity;
+#else
+#error Unimplemented ABSL_THREAD_IDENTITY_MODE
+#endif
+}
+
+void ClearCurrentThreadIdentity() {
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+  thread_identity_ptr = nullptr;
+#elif ABSL_THREAD_IDENTITY_MODE == \
+      ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+  // pthread_setspecific is expected to clear the value on destruction.
+  assert(CurrentThreadIdentityIfPresent() == nullptr);
+#endif
+}
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+ThreadIdentity* CurrentThreadIdentityIfPresent() {
+  bool initialized = pthread_key_initialized.load(std::memory_order_acquire);
+  if (!initialized) {
+    return nullptr;
+  }
+  return reinterpret_cast<ThreadIdentity*>(
+      pthread_getspecific(thread_identity_pthread_key));
+}
+#endif
+
+}  // namespace base_internal
+}  // namespace absl
diff --git a/absl/base/internal/thread_identity.h b/absl/base/internal/thread_identity.h
new file mode 100644
index 000000000000..914d5da7ef40
--- /dev/null
+++ b/absl/base/internal/thread_identity.h
@@ -0,0 +1,240 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Each active thread has a ThreadIdentity that may represent the thread in
+// various lower-level interfaces.  ThreadIdentity objects are never
+// deallocated.  When a thread terminates, its ThreadIdentity object may be
+// reused for a thread created later.
+
+#ifndef ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+#define ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+
+#ifndef _WIN32
+#include <pthread.h>
+// Defines __GOOGLE_GRTE_VERSION__ (via glibc-specific features.h) when
+// supported.
+#include <unistd.h>
+#endif
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/internal/per_thread_tls.h"
+
+namespace absl {
+
+struct SynchLocksHeld;
+struct SynchWaitParams;
+
+namespace base_internal {
+
+class SpinLock;
+struct ThreadIdentity;
+
+// Used by the implementation of base::Mutex and base::CondVar.
+struct PerThreadSynch {
+  // The internal representation of base::Mutex and base::CondVar rely
+  // on the alignment of PerThreadSynch. Both store the address of the
+  // PerThreadSynch in the high-order bits of their internal state,
+  // which means the low kLowZeroBits of the address of PerThreadSynch
+  // must be zero.
+  static constexpr int kLowZeroBits = 8;
+  static constexpr int kAlignment = 1 << kLowZeroBits;
+
+  // Returns the associated ThreadIdentity.
+  // This can be implemented as a cast because we guarantee
+  // PerThreadSynch is the first element of ThreadIdentity.
+  ThreadIdentity* thread_identity() {
+    return reinterpret_cast<ThreadIdentity*>(this);
+  }
+
+  PerThreadSynch *next;  // Circular waiter queue; initialized to 0.
+  PerThreadSynch *skip;  // If non-zero, all entries in Mutex queue
+                         // up to and including "skip" have the same
+                         // condition as this, and will be woken later
+  bool may_skip;         // if false while on mutex queue, a mutex unlocker
+                         // is using this PerThreadSynch as a terminator.  Its
+                         // skip field must not be filled in because the loop
+                         // might then skip over the terminator.
+
+  // The wait parameters of the current wait.  waitp is null if the
+  // thread is not waiting. Transitions from null to non-null must
+  // occur before the enqueue commit point (state = kQueued in
+  // Enqueue() and CondVarEnqueue()). Transitions from non-null to
+  // null must occur after the wait is finished (state = kAvailable in
+  // Mutex::Block() and CondVar::WaitCommon()). This field may be
+  // changed only by the thread that describes this PerThreadSynch.  A
+  // special case is Fer(), which calls Enqueue() on another thread,
+  // but with an identical SynchWaitParams pointer, thus leaving the
+  // pointer unchanged.
+  SynchWaitParams *waitp;
+
+  bool suppress_fatal_errors;  // If true, try to proceed even in the face of
+                               // broken invariants.  This is used within fatal
+                               // signal handlers to improve the chances of
+                               // debug logging information being output
+                               // successfully.
+
+  intptr_t readers;     // Number of readers in mutex.
+  int priority;         // Priority of thread (updated every so often).
+
+  // When priority will next be read (cycles).
+  int64_t next_priority_read_cycles;
+
+  // State values:
+  //   kAvailable: This PerThreadSynch is available.
+  //   kQueued: This PerThreadSynch is unavailable; it's currently queued on a
+  //            Mutex or CondVar waiter list.
+  //
+  // Transitions from kQueued to kAvailable require a release
+  // barrier. This is needed as a waiter may use "state" to
+  // independently observe that it's no longer queued.
+  //
+  // Transitions from kAvailable to kQueued require no barrier, they
+  // are externally ordered by the Mutex.
+  enum State {
+    kAvailable,
+    kQueued
+  };
+  std::atomic<State> state;
+
+  bool maybe_unlocking;  // Valid at head of Mutex waiter queue;
+                         // true if UnlockSlow could be searching
+                         // for a waiter to wake.  Used for an optimization
+                         // in Enqueue().  true is always a valid value.
+                         // Can be reset to false when the unlocker or any
+                         // writer releases the lock, or a reader fully releases
+                         // the lock.  It may not be set to false by a reader
+                         // that decrements the count to non-zero.
+                         // protected by mutex spinlock
+
+  bool wake;  // This thread is to be woken from a Mutex.
+
+  // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
+  // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
+  //
+  // The value of "x->cond_waiter" is meaningless if "x" is not on a
+  // Mutex waiter list.
+  bool cond_waiter;
+
+  // Locks held; used during deadlock detection.
+  // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
+  SynchLocksHeld *all_locks;
+};
+
+struct ThreadIdentity {
+  // Must be the first member.  The Mutex implementation requires that
+  // the PerThreadSynch object associated with each thread is
+  // PerThreadSynch::kAlignment aligned.  We provide this alignment on
+  // ThreadIdentity itself.
+  PerThreadSynch per_thread_synch;
+
+  // Private: Reserved for absl::synchronization_internal::Waiter.
+  struct WaiterState {
+    char data[128];
+  } waiter_state;
+
+  // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
+  std::atomic<int>* blocked_count_ptr;
+
+  // The following variables are mostly read/written just by the
+  // thread itself.  The only exception is that these are read by
+  // a ticker thread as a hint.
+  std::atomic<int> ticker;      // Tick counter, incremented once per second.
+  std::atomic<int> wait_start;  // Ticker value when thread started waiting.
+  std::atomic<bool> is_idle;    // Has thread become idle yet?
+
+  ThreadIdentity* next;
+};
+
+// Returns the ThreadIdentity object representing the calling thread; it is
+// guaranteed to be unique for its lifetime.  The returned object remains valid
+// for the program's lifetime, although it may be re-assigned to a subsequent
+// thread.  If no identity exists for the calling thread, returns nullptr.
+//
+// Does not malloc(*), and is async-signal safe.
+// [*] Technically pthread_setspecific() does malloc on first use; however this
+// is handled internally within tcmalloc's initialization already.
+//
+// New ThreadIdentity objects can be constructed and associated with a thread
+// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
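+//
+// Illustrative use (a sketch; mirrors the pattern in thread_identity_test.cc):
+//
+//   ThreadIdentity* id = CurrentThreadIdentityIfPresent();
+//   if (id == nullptr) {
+//     // No identity has been associated with this thread yet.
+//   }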
+ThreadIdentity* CurrentThreadIdentityIfPresent();
+
+using ThreadIdentityReclaimerFunction = void (*)(void*);
+
+// Sets the current thread identity to the given value.  'reclaimer' is a
+// pointer to the global function for cleaning up instances on thread
+// destruction.
+void SetCurrentThreadIdentity(ThreadIdentity* identity,
+                              ThreadIdentityReclaimerFunction reclaimer);
+
+// Removes the currently associated ThreadIdentity from the running thread.
+// This must be called from inside the ThreadIdentityReclaimerFunction, and only
+// from that function.
+void ClearCurrentThreadIdentity();
+
+// May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE=<mode
+// index>
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS
+#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE
+#error ABSL_THREAD_IDENTITY_MODE cannot be directly set
+#elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE
+#elif defined(_WIN32)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
+    (__GOOGLE_GRTE_VERSION__ >= 20140228L)
+// Support for async-safe TLS was specifically added in GRTEv4.  It's not
+// present in the upstream eglibc.
+// Note:  Current default for production systems.
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS
+#else
+#define ABSL_THREAD_IDENTITY_MODE \
+  ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#endif
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+
+extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr;
+
+inline ThreadIdentity* CurrentThreadIdentityIfPresent() {
+  return thread_identity_ptr;
+}
+
+#elif ABSL_THREAD_IDENTITY_MODE != \
+    ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#error Unknown ABSL_THREAD_IDENTITY_MODE
+#endif
+
+}  // namespace base_internal
+}  // namespace absl
+#endif  // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
diff --git a/absl/base/internal/thread_identity_test.cc b/absl/base/internal/thread_identity_test.cc
new file mode 100644
index 000000000000..a2b053d96ada
--- /dev/null
+++ b/absl/base/internal/thread_identity_test.cc
@@ -0,0 +1,124 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/thread_identity.h"
+
+#include <thread>  // NOLINT(build/c++11)
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/synchronization/internal/per_thread_sem.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+namespace base_internal {
+namespace {
+
+// protects num_identities_reused
+static absl::base_internal::SpinLock map_lock(
+    absl::base_internal::kLinkerInitialized);
+static int num_identities_reused;
+
+static const void* const kCheckNoIdentity = reinterpret_cast<void*>(1);
+
+static void TestThreadIdentityCurrent(const void* assert_no_identity) {
+  ThreadIdentity* identity;
+
+  // We have to test this conditionally, because if the test framework relies
+  // on Abseil, then some previous action may have already allocated an
+  // identity.
+  if (assert_no_identity == kCheckNoIdentity) {
+    identity = CurrentThreadIdentityIfPresent();
+    EXPECT_TRUE(identity == nullptr);
+  }
+
+  identity = synchronization_internal::GetOrCreateCurrentThreadIdentity();
+  EXPECT_TRUE(identity != nullptr);
+  ThreadIdentity* identity_no_init;
+  identity_no_init = CurrentThreadIdentityIfPresent();
+  EXPECT_TRUE(identity == identity_no_init);
+
+  // Check that per_thread_synch is correctly aligned.
+  EXPECT_EQ(0, reinterpret_cast<intptr_t>(&identity->per_thread_synch) %
+                   PerThreadSynch::kAlignment);
+  EXPECT_EQ(identity, identity->per_thread_synch.thread_identity());
+
+  absl::base_internal::SpinLockHolder l(&map_lock);
+  num_identities_reused++;
+}
+
+TEST(ThreadIdentityTest, BasicIdentityWorks) {
+  // This tests for the main() thread.
+  TestThreadIdentityCurrent(nullptr);
+}
+
+TEST(ThreadIdentityTest, BasicIdentityWorksThreaded) {
+  // Now try the same basic test with multiple threads being created and
+  // destroyed.  This makes sure that:
+  // - New threads are created without a ThreadIdentity.
+  // - We re-allocate ThreadIdentity objects from the free-list.
+  // - If a thread implementation chooses to recycle threads, correct
+  //   re-initialization occurs.
+  static const int kNumLoops = 3;
+  static const int kNumThreads = 400;
+  for (int iter = 0; iter < kNumLoops; iter++) {
+    std::vector<std::thread> threads;
+    for (int i = 0; i < kNumThreads; ++i) {
+      threads.push_back(
+          std::thread(TestThreadIdentityCurrent, kCheckNoIdentity));
+    }
+    for (auto& thread : threads) {
+      thread.join();
+    }
+  }
+
+  // We should have recycled ThreadIdentity objects above; while (external)
+  // library threads allocating their own identities may preclude some
+  // reuse, we should have sufficient repetitions to exclude this.
+  EXPECT_LT(kNumThreads, num_identities_reused);
+}
+
+TEST(ThreadIdentityTest, ReusedThreadIdentityMutexTest) {
+  // This test repeatedly creates and joins a series of threads, each of
+  // which acquires and releases shared Mutex locks. This verifies
+  // Mutex operations work correctly under a reused
+  // ThreadIdentity. Note that the most likely failure mode of this
+  // test is a crash or deadlock.
+  static const int kNumLoops = 10;
+  static const int kNumThreads = 12;
+  static const int kNumMutexes = 3;
+  static const int kNumLockLoops = 5;
+
+  Mutex mutexes[kNumMutexes];
+  for (int iter = 0; iter < kNumLoops; ++iter) {
+    std::vector<std::thread> threads;
+    for (int thread = 0; thread < kNumThreads; ++thread) {
+      threads.push_back(std::thread([&]() {
+        for (int l = 0; l < kNumLockLoops; ++l) {
+          for (int m = 0; m < kNumMutexes; ++m) {
+            MutexLock lock(&mutexes[m]);
+          }
+        }
+      }));
+    }
+    for (auto& thread : threads) {
+      thread.join();
+    }
+  }
+}
+
+}  // namespace
+}  // namespace base_internal
+}  // namespace absl
diff --git a/absl/base/internal/throw_delegate.cc b/absl/base/internal/throw_delegate.cc
new file mode 100644
index 000000000000..46dc573cfa84
--- /dev/null
+++ b/absl/base/internal/throw_delegate.cc
@@ -0,0 +1,106 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/throw_delegate.h"
+
+#include <cstdlib>
+#include <functional>
+#include <new>
+#include <stdexcept>
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+
+namespace absl {
+namespace base_internal {
+
+namespace {
+template <typename T>
+[[noreturn]] void Throw(const T& error) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw error;
+#else
+  ABSL_RAW_LOG(ERROR, "%s", error.what());
+  abort();
+#endif
+}
+}  // namespace
+
+void ThrowStdLogicError(const std::string& what_arg) {
+  Throw(std::logic_error(what_arg));
+}
+void ThrowStdLogicError(const char* what_arg) {
+  Throw(std::logic_error(what_arg));
+}
+void ThrowStdInvalidArgument(const std::string& what_arg) {
+  Throw(std::invalid_argument(what_arg));
+}
+void ThrowStdInvalidArgument(const char* what_arg) {
+  Throw(std::invalid_argument(what_arg));
+}
+
+void ThrowStdDomainError(const std::string& what_arg) {
+  Throw(std::domain_error(what_arg));
+}
+void ThrowStdDomainError(const char* what_arg) {
+  Throw(std::domain_error(what_arg));
+}
+
+void ThrowStdLengthError(const std::string& what_arg) {
+  Throw(std::length_error(what_arg));
+}
+void ThrowStdLengthError(const char* what_arg) {
+  Throw(std::length_error(what_arg));
+}
+
+void ThrowStdOutOfRange(const std::string& what_arg) {
+  Throw(std::out_of_range(what_arg));
+}
+void ThrowStdOutOfRange(const char* what_arg) {
+  Throw(std::out_of_range(what_arg));
+}
+
+void ThrowStdRuntimeError(const std::string& what_arg) {
+  Throw(std::runtime_error(what_arg));
+}
+void ThrowStdRuntimeError(const char* what_arg) {
+  Throw(std::runtime_error(what_arg));
+}
+
+void ThrowStdRangeError(const std::string& what_arg) {
+  Throw(std::range_error(what_arg));
+}
+void ThrowStdRangeError(const char* what_arg) {
+  Throw(std::range_error(what_arg));
+}
+
+void ThrowStdOverflowError(const std::string& what_arg) {
+  Throw(std::overflow_error(what_arg));
+}
+void ThrowStdOverflowError(const char* what_arg) {
+  Throw(std::overflow_error(what_arg));
+}
+
+void ThrowStdUnderflowError(const std::string& what_arg) {
+  Throw(std::underflow_error(what_arg));
+}
+void ThrowStdUnderflowError(const char* what_arg) {
+  Throw(std::underflow_error(what_arg));
+}
+
+void ThrowStdBadFunctionCall() { Throw(std::bad_function_call()); }
+
+void ThrowStdBadAlloc() { Throw(std::bad_alloc()); }
+
+}  // namespace base_internal
+}  // namespace absl
diff --git a/absl/base/internal/throw_delegate.h b/absl/base/internal/throw_delegate.h
new file mode 100644
index 000000000000..70e2d7709e5b
--- /dev/null
+++ b/absl/base/internal/throw_delegate.h
@@ -0,0 +1,71 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
+#define ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
+
+#include <string>
+
+namespace absl {
+namespace base_internal {
+
+// Helper functions that allow throwing exceptions consistently from anywhere.
+// The main use case is for header-based libraries (e.g., templates), as they
+// will be built by many different targets with their own compiler options.
+// In particular, this will allow a safe way to throw exceptions even if the
+// caller is compiled with -fno-exceptions.  This is intended for implementing
+// things like map<>::at(), which the standard documents as throwing an
+// exception on error.
+//
+// Using other techniques like #if tricks could lead to ODR violations.
+//
+// You shouldn't use it unless you're writing code that you know will be built
+// both with and without exceptions and you need to conform to an interface
+// that uses exceptions.
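+//
+// Example (a sketch only; `MyVector` is a hypothetical container type):
+//
+//   int MyVector::at(size_t i) const {
+//     if (i >= size_) {
+//       absl::base_internal::ThrowStdOutOfRange("MyVector::at");
+//     }
+//     return data_[i];
+//   }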
+
+[[noreturn]] void ThrowStdLogicError(const std::string& what_arg);
+[[noreturn]] void ThrowStdLogicError(const char* what_arg);
+[[noreturn]] void ThrowStdInvalidArgument(const std::string& what_arg);
+[[noreturn]] void ThrowStdInvalidArgument(const char* what_arg);
+[[noreturn]] void ThrowStdDomainError(const std::string& what_arg);
+[[noreturn]] void ThrowStdDomainError(const char* what_arg);
+[[noreturn]] void ThrowStdLengthError(const std::string& what_arg);
+[[noreturn]] void ThrowStdLengthError(const char* what_arg);
+[[noreturn]] void ThrowStdOutOfRange(const std::string& what_arg);
+[[noreturn]] void ThrowStdOutOfRange(const char* what_arg);
+[[noreturn]] void ThrowStdRuntimeError(const std::string& what_arg);
+[[noreturn]] void ThrowStdRuntimeError(const char* what_arg);
+[[noreturn]] void ThrowStdRangeError(const std::string& what_arg);
+[[noreturn]] void ThrowStdRangeError(const char* what_arg);
+[[noreturn]] void ThrowStdOverflowError(const std::string& what_arg);
+[[noreturn]] void ThrowStdOverflowError(const char* what_arg);
+[[noreturn]] void ThrowStdUnderflowError(const std::string& what_arg);
+[[noreturn]] void ThrowStdUnderflowError(const char* what_arg);
+
+[[noreturn]] void ThrowStdBadFunctionCall();
+[[noreturn]] void ThrowStdBadAlloc();
+
+// ThrowStdBadArrayNewLength() cannot be consistently supported because
+// std::bad_array_new_length is missing in libstdc++ until 4.9.0.
+// https://gcc.gnu.org/onlinedocs/gcc-4.8.3/libstdc++/api/a01379_source.html
+// https://gcc.gnu.org/onlinedocs/gcc-4.9.0/libstdc++/api/a01327_source.html
+// libcxx (as of 3.2) and msvc (as of 2015) both have it.
+// [[noreturn]] void ThrowStdBadArrayNewLength();
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
diff --git a/absl/base/internal/tsan_mutex_interface.h b/absl/base/internal/tsan_mutex_interface.h
new file mode 100644
index 000000000000..a1303e67c8f5
--- /dev/null
+++ b/absl/base/internal/tsan_mutex_interface.h
@@ -0,0 +1,51 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is intended solely for spinlock.h.
+// It provides ThreadSanitizer annotations for custom mutexes.
+// See <sanitizer/tsan_interface.h> for meaning of these annotations.
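+//
+// Sketch of intended use around a custom lock (illustrative only; the argument
+// lists follow the <sanitizer/tsan_interface.h> contract):
+//
+//   ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+//   // ... acquire the lock ...
+//   ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);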
+
+#ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
+#define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
+
+#ifdef THREAD_SANITIZER
+#include <sanitizer/tsan_interface.h>
+
+#define ABSL_TSAN_MUTEX_CREATE __tsan_mutex_create
+#define ABSL_TSAN_MUTEX_DESTROY __tsan_mutex_destroy
+#define ABSL_TSAN_MUTEX_PRE_LOCK __tsan_mutex_pre_lock
+#define ABSL_TSAN_MUTEX_POST_LOCK __tsan_mutex_post_lock
+#define ABSL_TSAN_MUTEX_PRE_UNLOCK __tsan_mutex_pre_unlock
+#define ABSL_TSAN_MUTEX_POST_UNLOCK __tsan_mutex_post_unlock
+#define ABSL_TSAN_MUTEX_PRE_SIGNAL __tsan_mutex_pre_signal
+#define ABSL_TSAN_MUTEX_POST_SIGNAL __tsan_mutex_post_signal
+#define ABSL_TSAN_MUTEX_PRE_DIVERT __tsan_mutex_pre_divert
+#define ABSL_TSAN_MUTEX_POST_DIVERT __tsan_mutex_post_divert
+
+#else
+
+#define ABSL_TSAN_MUTEX_CREATE(...)
+#define ABSL_TSAN_MUTEX_DESTROY(...)
+#define ABSL_TSAN_MUTEX_PRE_LOCK(...)
+#define ABSL_TSAN_MUTEX_POST_LOCK(...)
+#define ABSL_TSAN_MUTEX_PRE_UNLOCK(...)
+#define ABSL_TSAN_MUTEX_POST_UNLOCK(...)
+#define ABSL_TSAN_MUTEX_PRE_SIGNAL(...)
+#define ABSL_TSAN_MUTEX_POST_SIGNAL(...)
+#define ABSL_TSAN_MUTEX_PRE_DIVERT(...)
+#define ABSL_TSAN_MUTEX_POST_DIVERT(...)
+
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
diff --git a/absl/base/internal/unaligned_access.h b/absl/base/internal/unaligned_access.h
new file mode 100644
index 000000000000..ea30829b0e18
--- /dev/null
+++ b/absl/base/internal/unaligned_access.h
@@ -0,0 +1,256 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
+#define ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
+
+#include <string.h>
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+
+// unaligned APIs
+
+// Portable handling of unaligned loads, stores, and copies.
+// On some platforms, like ARM, the copy functions can be more efficient
+// than a load and a store.
+//
+// It is possible to implement all of these using constant-length memcpy
+// calls, which is portable and will usually be inlined into simple loads and
+// stores if the architecture supports it. However, such inlining usually
+// happens in a pass that's quite late in compilation, which means the resulting
+// loads and stores cannot participate in many other optimizations, leading to
+// overall worse code.
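+//
+// Illustrative use of the macros defined below (a sketch; `src` and `dst` are
+// arbitrary, possibly misaligned byte pointers):
+//
+//   uint32_t v = ABSL_INTERNAL_UNALIGNED_LOAD32(src);
+//   ABSL_INTERNAL_UNALIGNED_STORE32(dst, v);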
+
+// The unaligned API is C++ only.  The declarations use C++ features
+// (namespaces, inline) which are absent or incompatible in C.
+#if defined(__cplusplus)
+
+#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) ||\
+    defined(MEMORY_SANITIZER)
+// Consider we have an unaligned load/store of 4 bytes from address 0x...05.
+// AddressSanitizer will treat it as a 3-byte access to the range 05:07 and
+// will miss a bug if 08 is the first unaddressable byte.
+// ThreadSanitizer will also treat this as a 3-byte access to 05:07 and will
+// miss a race between this access and some other accesses to 08.
+// MemorySanitizer will correctly propagate the shadow on unaligned stores
+// and correctly report bugs on unaligned loads, but it may not properly
+// update and report the origin of the uninitialized memory.
+// For all three tools, replacing an unaligned access with a tool-specific
+// callback solves the problem.
+
+// Make sure uint16_t/uint32_t/uint64_t are defined.
+#include <stdint.h>
+
+extern "C" {
+uint16_t __sanitizer_unaligned_load16(const void *p);
+uint32_t __sanitizer_unaligned_load32(const void *p);
+uint64_t __sanitizer_unaligned_load64(const void *p);
+void __sanitizer_unaligned_store16(void *p, uint16_t v);
+void __sanitizer_unaligned_store32(void *p, uint32_t v);
+void __sanitizer_unaligned_store64(void *p, uint64_t v);
+}  // extern "C"
+
+namespace absl {
+
+inline uint16_t UnalignedLoad16(const void *p) {
+  return __sanitizer_unaligned_load16(p);
+}
+
+inline uint32_t UnalignedLoad32(const void *p) {
+  return __sanitizer_unaligned_load32(p);
+}
+
+inline uint64_t UnalignedLoad64(const void *p) {
+  return __sanitizer_unaligned_load64(p);
+}
+
+inline void UnalignedStore16(void *p, uint16_t v) {
+  __sanitizer_unaligned_store16(p, v);
+}
+
+inline void UnalignedStore32(void *p, uint32_t v) {
+  __sanitizer_unaligned_store32(p, v);
+}
+
+inline void UnalignedStore64(void *p, uint64_t v) {
+  __sanitizer_unaligned_store64(p, v);
+}
+
+}  // namespace absl
+
+#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) (absl::UnalignedLoad16(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) (absl::UnalignedLoad32(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) (absl::UnalignedLoad64(_p))
+
+#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
+  (absl::UnalignedStore16(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
+  (absl::UnalignedStore32(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
+  (absl::UnalignedStore64(_p, _val))
+
+#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386) || \
+    defined(_M_IX86) || defined(__ppc__) || defined(__PPC__) ||    \
+    defined(__ppc64__) || defined(__PPC64__)
+
+// x86 and x86-64 can perform unaligned loads/stores directly;
+// modern PowerPC hardware can also do unaligned integer loads and stores;
+// but note: the FPU still sends unaligned loads and stores to a trap handler!
+
+#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
+  (*reinterpret_cast<const uint16_t *>(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
+  (*reinterpret_cast<const uint32_t *>(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
+  (*reinterpret_cast<const uint64_t *>(_p))
+
+#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
+  (*reinterpret_cast<uint16_t *>(_p) = (_val))
+#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
+  (*reinterpret_cast<uint32_t *>(_p) = (_val))
+#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
+  (*reinterpret_cast<uint64_t *>(_p) = (_val))
+
+#elif defined(__arm__) && \
+      !defined(__ARM_ARCH_5__) && \
+      !defined(__ARM_ARCH_5T__) && \
+      !defined(__ARM_ARCH_5TE__) && \
+      !defined(__ARM_ARCH_5TEJ__) && \
+      !defined(__ARM_ARCH_6__) && \
+      !defined(__ARM_ARCH_6J__) && \
+      !defined(__ARM_ARCH_6K__) && \
+      !defined(__ARM_ARCH_6Z__) && \
+      !defined(__ARM_ARCH_6ZK__) && \
+      !defined(__ARM_ARCH_6T2__)
+
+
+// ARMv7 and newer support native unaligned accesses, but only of 16-bit
+// and 32-bit values (not 64-bit); older versions either raise a fatal signal,
+// do an unaligned read and rotate the words around a bit, or do the reads very
+// slowly (trip through kernel mode). There's no simple #define that says just
+// "ARMv7 or higher", so we have to filter away all ARMv5 and ARMv6
+// sub-architectures. Newer gcc (>= 4.6) sets an __ARM_FEATURE_ALIGNED #define,
+// so in time, maybe we can move on to that.
+//
+// This is a mess, but there's not much we can do about it.
+//
+// To further complicate matters, only LDR instructions (single reads) are
+// allowed to be unaligned, not LDRD (two reads) or LDM (many reads). Unless we
+// explicitly tell the compiler that these accesses can be unaligned, it can and
+// will combine accesses. On armcc, the way to signal this is to access
+// through the type (uint32_t __packed *), but GCC has no such attribute
+// (it ignores __attribute__((packed)) on individual variables). However,
+// we can tell it that a _struct_ is unaligned, which has the same effect,
+// so we do that.
+
+namespace absl {
+namespace internal {
+
+struct Unaligned16Struct {
+  uint16_t value;
+  uint8_t dummy;  // To make the size non-power-of-two.
+} ABSL_ATTRIBUTE_PACKED;
+
+struct Unaligned32Struct {
+  uint32_t value;
+  uint8_t dummy;  // To make the size non-power-of-two.
+} ABSL_ATTRIBUTE_PACKED;
+
+}  // namespace internal
+}  // namespace absl
+
+#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
+  ((reinterpret_cast<const ::absl::internal::Unaligned16Struct *>(_p))->value)
+#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
+  ((reinterpret_cast<const ::absl::internal::Unaligned32Struct *>(_p))->value)
+
+#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val)                          \
+  ((reinterpret_cast< ::absl::internal::Unaligned16Struct *>(_p))->value = \
+       (_val))
+#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val)                          \
+  ((reinterpret_cast< ::absl::internal::Unaligned32Struct *>(_p))->value = \
+       (_val))
+
+namespace absl {
+
+inline uint64_t UnalignedLoad64(const void *p) {
+  uint64_t t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }
+
+}  // namespace absl
+
+#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) (absl::UnalignedLoad64(_p))
+#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
+  (absl::UnalignedStore64(_p, _val))
+
+#else
+
+// ABSL_INTERNAL_NEED_ALIGNED_LOADS is defined when the underlying platform
+// doesn't support unaligned access.
+#define ABSL_INTERNAL_NEED_ALIGNED_LOADS
+
+// These functions are provided for architectures that don't support
+// unaligned loads and stores.
+
+namespace absl {
+
+inline uint16_t UnalignedLoad16(const void *p) {
+  uint16_t t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline uint32_t UnalignedLoad32(const void *p) {
+  uint32_t t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline uint64_t UnalignedLoad64(const void *p) {
+  uint64_t t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline void UnalignedStore16(void *p, uint16_t v) { memcpy(p, &v, sizeof v); }
+
+inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); }
+
+inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }
+
+}  // namespace absl
+
+#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) (absl::UnalignedLoad16(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) (absl::UnalignedLoad32(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) (absl::UnalignedLoad64(_p))
+
+#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
+  (absl::UnalignedStore16(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
+  (absl::UnalignedStore32(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
+  (absl::UnalignedStore64(_p, _val))
+
+#endif
+
+#endif  // defined(__cplusplus), end of unaligned API
+
+#endif  // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
diff --git a/absl/base/internal/unscaledcycleclock.cc b/absl/base/internal/unscaledcycleclock.cc
new file mode 100644
index 000000000000..a12d68bd10af
--- /dev/null
+++ b/absl/base/internal/unscaledcycleclock.cc
@@ -0,0 +1,101 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/unscaledcycleclock.h"
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+#if defined(_WIN32)
+#include <intrin.h>
+#endif
+
+#if defined(__powerpc__) || defined(__ppc__)
+#include <sys/platform/ppc.h>
+#endif
+
+#include "absl/base/internal/sysinfo.h"
+
+namespace absl {
+namespace base_internal {
+
+#if defined(__i386__)
+
+int64_t UnscaledCycleClock::Now() {
+  int64_t ret;
+  __asm__ volatile("rdtsc" : "=A"(ret));
+  return ret;
+}
+
+double UnscaledCycleClock::Frequency() {
+  return base_internal::NominalCPUFrequency();
+}
+
+#elif defined(__x86_64__)
+
+int64_t UnscaledCycleClock::Now() {
+  uint64_t low, high;
+  __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
+  return (high << 32) | low;
+}
+
+double UnscaledCycleClock::Frequency() {
+  return base_internal::NominalCPUFrequency();
+}
+
+#elif defined(__powerpc__) || defined(__ppc__)
+
+int64_t UnscaledCycleClock::Now() {
+  return __ppc_get_timebase();
+}
+
+double UnscaledCycleClock::Frequency() {
+  return __ppc_get_timebase_freq();
+}
+
+#elif defined(__aarch64__)
+
+// System timer of ARMv8 runs at a different frequency than the CPU's.
+// The frequency is fixed, typically in the range 1-50MHz.  It can be
+// read from the CNTFRQ special register.  We assume the OS has set up
+// the virtual timer properly.
+int64_t UnscaledCycleClock::Now() {
+  int64_t virtual_timer_value;
+  asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
+  return virtual_timer_value;
+}
+
+double UnscaledCycleClock::Frequency() {
+  uint64_t aarch64_timer_frequency;
+  asm volatile("mrs %0, cntfrq_el0" : "=r"(aarch64_timer_frequency));
+  return aarch64_timer_frequency;
+}
+
+#elif defined(_M_IX86) || defined(_M_X64)
+
+#pragma intrinsic(__rdtsc)
+
+int64_t UnscaledCycleClock::Now() {
+  return __rdtsc();
+}
+
+double UnscaledCycleClock::Frequency() {
+  return base_internal::NominalCPUFrequency();
+}
+
+#endif
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_USE_UNSCALED_CYCLECLOCK
diff --git a/absl/base/internal/unscaledcycleclock.h b/absl/base/internal/unscaledcycleclock.h
new file mode 100644
index 000000000000..ddf6a5e5a79e
--- /dev/null
+++ b/absl/base/internal/unscaledcycleclock.h
@@ -0,0 +1,118 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// UnscaledCycleClock
+//    An UnscaledCycleClock yields the value and frequency of a cycle counter
+//    that increments at a rate that is approximately constant.
+//    This class is for internal / whitelisted use only; you should consider
+//    using CycleClock instead.
+//
+// Notes:
+// The cycle counter frequency is not necessarily the core clock frequency.
+// That is, CycleCounter cycles are not necessarily "CPU cycles".
+//
+// An arbitrary offset may have been added to the counter at power on.
+//
+// On some platforms, the rate and offset of the counter may differ
+// slightly when read from different CPUs of a multiprocessor.  Usually,
+// we try to ensure that the operating system adjusts values periodically
+// so that values agree approximately.   If you need stronger guarantees,
+// consider using alternate interfaces.
+//
+// The CPU is not required to maintain the ordering of a cycle counter read
+// with respect to surrounding instructions.
+
+#ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
+#define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
+
+#include <cstdint>
+
+#if defined(__APPLE__)
+#include <TargetConditionals.h>
+#endif
+
+#include "absl/base/port.h"
+
+// The following platforms have an implementation of a hardware counter.
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
+  defined(__powerpc__) || defined(__ppc__) || \
+  defined(_M_IX86) || defined(_M_X64)
+#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1
+#else
+#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0
+#endif
+
+// The following platforms often disable access to the hardware
+// counter (through a sandbox) even if the underlying hardware has a
+// usable counter. The CycleTimer interface also requires a *scaled*
+// CycleClock that runs at a minimum of 1 MHz. We've found some Android
+// ARM64 devices where this is not the case, so we disable it by
+// default on Android ARM64.
+#if defined(__native_client__) || TARGET_OS_IPHONE || \
+    (defined(__ANDROID__) && defined(__aarch64__))
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0
+#else
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 1
+#endif
+
+// UnscaledCycleClock is an optional internal feature.
+// Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence.
+// Can be overridden at compile-time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1
+#if !defined(ABSL_USE_UNSCALED_CYCLECLOCK)
+#define ABSL_USE_UNSCALED_CYCLECLOCK               \
+  (ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \
+   ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT)
+#endif
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+// This macro can be used to test if UnscaledCycleClock::Frequency()
+// is NominalCPUFrequency() on a particular platform.
+#if  (defined(__i386__) || defined(__x86_64__) || \
+      defined(_M_IX86) || defined(_M_X64))
+#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+#endif
+namespace absl {
+namespace time_internal {
+class UnscaledCycleClockWrapperForGetCurrentTime;
+}  // namespace time_internal
+
+namespace base_internal {
+class CycleClock;
+class UnscaledCycleClockWrapperForInitializeFrequency;
+class UnscaledCycleClock {
+ private:
+  UnscaledCycleClock() = delete;
+
+  // Return the value of a cycle counter that counts at a rate that is
+  // approximately constant.
+  static int64_t Now();
+
+  // Return how much UnscaledCycleClock::Now() increases per second.
+  // This is not necessarily the core CPU clock frequency.
+  // It may be the nominal value reported by the kernel, rather than a measured
+  // value.
+  static double Frequency();
+
+  // Whitelisted friends.
+  friend class base_internal::CycleClock;
+  friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime;
+  friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;
+};
+
+}  // namespace base_internal
+}  // namespace absl
+#endif  // ABSL_USE_UNSCALED_CYCLECLOCK
+
+#endif  // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
diff --git a/absl/base/invoke_test.cc b/absl/base/invoke_test.cc
new file mode 100644
index 000000000000..2bbfc47752b6
--- /dev/null
+++ b/absl/base/invoke_test.cc
@@ -0,0 +1,199 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/invoke.h"
+
+#include <functional>
+#include <memory>
+#include <string>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/memory/memory.h"
+#include "absl/strings/str_cat.h"
+
+namespace absl {
+namespace base_internal {
+namespace {
+
+int Function(int a, int b) { return a - b; }
+
+int Sink(std::unique_ptr<int> p) {
+  return *p;
+}
+
+std::unique_ptr<int> Factory(int n) {
+  return make_unique<int>(n);
+}
+
+void NoOp() {}
+
+struct ConstFunctor {
+  int operator()(int a, int b) const { return a - b; }
+};
+
+struct MutableFunctor {
+  int operator()(int a, int b) { return a - b; }
+};
+
+struct EphemeralFunctor {
+  int operator()(int a, int b) && { return a - b; }
+};
+
+struct OverloadedFunctor {
+  template <typename... Args>
+  std::string operator()(const Args&... args) & {
+    return StrCat("&", args...);
+  }
+  template <typename... Args>
+  std::string operator()(const Args&... args) const& {
+    return StrCat("const&", args...);
+  }
+  template <typename... Args>
+  std::string operator()(const Args&... args) && {
+    return StrCat("&&", args...);
+  }
+};
+
+struct Class {
+  int Method(int a, int b) { return a - b; }
+  int ConstMethod(int a, int b) const { return a - b; }
+
+  int member;
+};
+
+struct FlipFlop {
+  int ConstMethod() const { return member; }
+  FlipFlop operator*() const { return {-member}; }
+
+  int member;
+};
+
+// CallMaybeWithArg(f) resolves either to Invoke(f) or Invoke(f, 42), depending
+// on which one is valid.
+template <typename F>
+decltype(Invoke(std::declval<const F&>())) CallMaybeWithArg(const F& f) {
+  return Invoke(f);
+}
+
+template <typename F>
+decltype(Invoke(std::declval<const F&>(), 42)) CallMaybeWithArg(const F& f) {
+  return Invoke(f, 42);
+}
+
+TEST(InvokeTest, Function) {
+  EXPECT_EQ(1, Invoke(Function, 3, 2));
+  EXPECT_EQ(1, Invoke(&Function, 3, 2));
+}
+
+TEST(InvokeTest, NonCopyableArgument) {
+  EXPECT_EQ(42, Invoke(Sink, make_unique<int>(42)));
+}
+
+TEST(InvokeTest, NonCopyableResult) {
+  EXPECT_THAT(Invoke(Factory, 42), ::testing::Pointee(42));
+}
+
+TEST(InvokeTest, VoidResult) {
+  Invoke(NoOp);
+}
+
+TEST(InvokeTest, ConstFunctor) {
+  EXPECT_EQ(1, Invoke(ConstFunctor(), 3, 2));
+}
+
+TEST(InvokeTest, MutableFunctor) {
+  MutableFunctor f;
+  EXPECT_EQ(1, Invoke(f, 3, 2));
+  EXPECT_EQ(1, Invoke(MutableFunctor(), 3, 2));
+}
+
+TEST(InvokeTest, EphemeralFunctor) {
+  EphemeralFunctor f;
+  EXPECT_EQ(1, Invoke(std::move(f), 3, 2));
+  EXPECT_EQ(1, Invoke(EphemeralFunctor(), 3, 2));
+}
+
+TEST(InvokeTest, OverloadedFunctor) {
+  OverloadedFunctor f;
+  const OverloadedFunctor& cf = f;
+
+  EXPECT_EQ("&", Invoke(f));
+  EXPECT_EQ("& 42", Invoke(f, " 42"));
+
+  EXPECT_EQ("const&", Invoke(cf));
+  EXPECT_EQ("const& 42", Invoke(cf, " 42"));
+
+  EXPECT_EQ("&&", Invoke(std::move(f)));
+  EXPECT_EQ("&& 42", Invoke(std::move(f), " 42"));
+}
+
+TEST(InvokeTest, ReferenceWrapper) {
+  ConstFunctor cf;
+  MutableFunctor mf;
+  EXPECT_EQ(1, Invoke(std::cref(cf), 3, 2));
+  EXPECT_EQ(1, Invoke(std::ref(cf), 3, 2));
+  EXPECT_EQ(1, Invoke(std::ref(mf), 3, 2));
+}
+
+TEST(InvokeTest, MemberFunction) {
+  std::unique_ptr<Class> p(new Class);
+  std::unique_ptr<const Class> cp(new Class);
+  EXPECT_EQ(1, Invoke(&Class::Method, p, 3, 2));
+  EXPECT_EQ(1, Invoke(&Class::Method, p.get(), 3, 2));
+
+  EXPECT_EQ(1, Invoke(&Class::ConstMethod, p, 3, 2));
+  EXPECT_EQ(1, Invoke(&Class::ConstMethod, p.get(), 3, 2));
+  EXPECT_EQ(1, Invoke(&Class::ConstMethod, *p, 3, 2));
+
+  EXPECT_EQ(1, Invoke(&Class::ConstMethod, cp, 3, 2));
+  EXPECT_EQ(1, Invoke(&Class::ConstMethod, cp.get(), 3, 2));
+  EXPECT_EQ(1, Invoke(&Class::ConstMethod, *cp, 3, 2));
+
+  EXPECT_EQ(1, Invoke(&Class::Method, make_unique<Class>(), 3, 2));
+  EXPECT_EQ(1, Invoke(&Class::ConstMethod, make_unique<Class>(), 3, 2));
+  EXPECT_EQ(1, Invoke(&Class::ConstMethod, make_unique<const Class>(), 3, 2));
+}
+
+TEST(InvokeTest, DataMember) {
+  std::unique_ptr<Class> p(new Class{42});
+  std::unique_ptr<const Class> cp(new Class{42});
+  EXPECT_EQ(42, Invoke(&Class::member, p));
+  EXPECT_EQ(42, Invoke(&Class::member, *p));
+  EXPECT_EQ(42, Invoke(&Class::member, p.get()));
+
+  Invoke(&Class::member, p) = 42;
+  Invoke(&Class::member, p.get()) = 42;
+
+  EXPECT_EQ(42, Invoke(&Class::member, cp));
+  EXPECT_EQ(42, Invoke(&Class::member, *cp));
+  EXPECT_EQ(42, Invoke(&Class::member, cp.get()));
+}
+
+TEST(InvokeTest, FlipFlop) {
+  FlipFlop obj = {42};
+  // This call could resolve to (obj.*&FlipFlop::ConstMethod)() or
+  // ((*obj).*&FlipFlop::ConstMethod)(). We verify that it's the former.
+  EXPECT_EQ(42, Invoke(&FlipFlop::ConstMethod, obj));
+  EXPECT_EQ(42, Invoke(&FlipFlop::member, obj));
+}
+
+TEST(InvokeTest, SfinaeFriendly) {
+  CallMaybeWithArg(NoOp);
+  EXPECT_THAT(CallMaybeWithArg(Factory), ::testing::Pointee(42));
+}
+
+}  // namespace
+}  // namespace base_internal
+}  // namespace absl
diff --git a/absl/base/macros.h b/absl/base/macros.h
new file mode 100644
index 000000000000..259970fea89a
--- /dev/null
+++ b/absl/base/macros.h
@@ -0,0 +1,201 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: macros.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the set of language macros used within Abseil code.
+// For the set of macros used to determine supported compilers and platforms,
+// see absl/base/config.h instead.
+//
+// This code is compiled directly on many platforms, including client
+// platforms like Windows, Mac, and embedded systems.  Before making
+// any changes here, make sure that you're not breaking any platforms.
+//
+
+#ifndef ABSL_BASE_MACROS_H_
+#define ABSL_BASE_MACROS_H_
+
+#include <cstddef>
+
+#include "absl/base/port.h"
+
+// ABSL_ARRAYSIZE()
+//
+// Returns the # of elements in an array as a compile-time constant, which can
+// be used in defining new arrays. If you use this macro on a pointer by
+// mistake, you will get a compile-time error.
+//
+// Note: this template function declaration is used in defining ABSL_ARRAYSIZE;
+// the function doesn't need an implementation, as we only use its type.
+namespace absl {
+namespace macros_internal {
+template <typename T, size_t N>
+char (&ArraySizeHelper(T (&array)[N]))[N];
+}  // namespace macros_internal
+}  // namespace absl
+#define ABSL_ARRAYSIZE(array) \
+  (sizeof(::absl::macros_internal::ArraySizeHelper(array)))
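+
+// Example (illustrative only; `kNames` is a hypothetical array):
+//
+//   const char* kNames[] = {"foo", "bar", "baz"};
+//   static_assert(ABSL_ARRAYSIZE(kNames) == 3, "");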
+
+// kLinkerInitialized
+//
+// An enum used only as a constructor argument to indicate that a variable has
+// static storage duration, and that the constructor should do nothing to its
+// state. Use of this macro indicates to the reader that it is legal to
+// declare a static instance of the class, provided the constructor is given
+// the absl::base_internal::kLinkerInitialized argument.
+//
+// Normally, it is unsafe to declare a static variable that has a constructor or
+// a destructor because invocation order is undefined. However, if the type can
+// be zero-initialized (which the loader does for static variables) into a valid
+// state and the type's destructor does not affect storage, then a constructor
+// for static initialization can be declared.
+//
+// Example:
+//       // Declaration
+//       explicit MyClass(absl::base_internal::LinkerInitialized x) {}
+//
+//       // Invocation
+//       static MyClass my_global(absl::base_internal::kLinkerInitialized);
+namespace absl {
+namespace base_internal {
+enum LinkerInitialized {
+  kLinkerInitialized = 0,
+};
+}  // namespace base_internal
+}  // namespace absl
+
+// ABSL_FALLTHROUGH_INTENDED
+//
+// Annotates implicit fall-through between switch labels, allowing a case to
+// indicate intentional fallthrough and turn off warnings about any lack of a
+// `break` statement. The ABSL_FALLTHROUGH_INTENDED macro should be followed by
+// a semicolon and can be used in most places where `break` can, provided that
+// no statements exist between it and the next switch label.
+//
+// Example:
+//
+//  switch (x) {
+//    case 40:
+//    case 41:
+//      if (truth_is_out_there) {
+//        ++x;
+//        ABSL_FALLTHROUGH_INTENDED;  // Use instead of/along with annotations
+//                                    // in comments
+//      } else {
+//        return x;
+//      }
+//    case 42:
+//      ...
+//
+// Notes: when compiled with clang in C++11 mode, the ABSL_FALLTHROUGH_INTENDED
+// macro is expanded to the [[clang::fallthrough]] attribute, which is analysed
+// when performing the switch-label fall-through diagnostic
+// (`-Wimplicit-fallthrough`). See clang documentation on language extensions
+// for details:
+// http://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough
+//
+// When used with unsupported compilers, the ABSL_FALLTHROUGH_INTENDED macro
+// has no effect on diagnostics. In any case this macro has no effect on runtime
+// behavior and performance of code.
+#ifdef ABSL_FALLTHROUGH_INTENDED
+#error "ABSL_FALLTHROUGH_INTENDED should not be defined."
+#endif
+
+// TODO(zhangxy): Use c++17 standard [[fallthrough]] macro, when supported.
+#if defined(__clang__) && defined(__has_warning)
+#if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough")
+#define ABSL_FALLTHROUGH_INTENDED [[clang::fallthrough]]
+#endif
+#elif defined(__GNUC__) && __GNUC__ >= 7
+#define ABSL_FALLTHROUGH_INTENDED [[gnu::fallthrough]]
+#endif
+
+#ifndef ABSL_FALLTHROUGH_INTENDED
+#define ABSL_FALLTHROUGH_INTENDED \
+  do {                            \
+  } while (0)
+#endif
+
+// ABSL_DEPRECATED()
+//
+// Marks deprecated class, struct, enum, function, method, and variable
+// declarations. The macro argument is used as a custom diagnostic message (e.g.
+// suggestion of a better alternative).
+//
+// Example:
+//
+//   class ABSL_DEPRECATED("Use Bar instead") Foo {...};
+//   ABSL_DEPRECATED("Use Baz instead") void Bar() {...}
+//
+// Every usage of a deprecated entity will trigger a warning when compiled with
+// clang's `-Wdeprecated-declarations` option. This option is turned off by
+// default, but the warnings will be reported by go/clang-tidy.
+#if defined(__clang__) && __cplusplus >= 201103L && defined(__has_warning)
+#define ABSL_DEPRECATED(message) __attribute__((deprecated(message)))
+#endif
+
+#ifndef ABSL_DEPRECATED
+#define ABSL_DEPRECATED(message)
+#endif
+
+// ABSL_BAD_CALL_IF()
+//
+// Used on a function overload to trap bad calls: any call that matches the
+// overload will cause a compile-time error. This macro uses a clang-specific
+// "enable_if" attribute, as described at
+// http://clang.llvm.org/docs/AttributeReference.html#enable-if
+//
+// Overloads which use this macro should be bracketed by
+// `#ifdef ABSL_BAD_CALL_IF`.
+//
+// Example:
+//
+//   int isdigit(int c);
+//   #ifdef ABSL_BAD_CALL_IF
+//   int isdigit(int c)
+//     ABSL_BAD_CALL_IF(c <= -1 || c > 255,
+//                       "'c' must have the value of an unsigned char or EOF");
+//   #endif // ABSL_BAD_CALL_IF
+
+#if defined(__clang__)
+# if __has_attribute(enable_if)
+#  define ABSL_BAD_CALL_IF(expr, msg) \
+    __attribute__((enable_if(expr, "Bad call trap"), unavailable(msg)))
+# endif
+#endif
+
+// ABSL_ASSERT()
+//
+// In C++11, `assert` can't be used portably within constexpr functions.
+// ABSL_ASSERT functions as a runtime assert but works in C++11 constexpr
+// functions.  Example:
+//
+// constexpr double Divide(double a, double b) {
+//   return ABSL_ASSERT(b != 0), a / b;
+// }
+//
+// This macro is inspired by
+// https://akrzemi1.wordpress.com/2017/05/18/asserts-in-constexpr-functions/
+#if defined(NDEBUG)
+#define ABSL_ASSERT(expr) (false ? (void)(expr) : (void)0)
+#else
+#define ABSL_ASSERT(expr) \
+  (ABSL_PREDICT_TRUE((expr)) ? (void)0 : [] { assert(false && #expr); }())
+#endif
+
+#endif  // ABSL_BASE_MACROS_H_
diff --git a/absl/base/optimization.h b/absl/base/optimization.h
new file mode 100644
index 000000000000..db8beafa3f2e
--- /dev/null
+++ b/absl/base/optimization.h
@@ -0,0 +1,164 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: optimization.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines portable macros for performance optimization.
+
+#ifndef ABSL_BASE_OPTIMIZATION_H_
+#define ABSL_BASE_OPTIMIZATION_H_
+
+#include "absl/base/config.h"
+
+// ABSL_BLOCK_TAIL_CALL_OPTIMIZATION
+//
+// Instructs the compiler to avoid optimizing tail-call recursion. Use of this
+// macro is useful when you wish to preserve the existing function order within
+// a stack trace for logging, debugging, or profiling purposes.
+//
+// Example:
+//
+//   int f() {
+//     int result = g();
+//     ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
+//     return result;
+//   }
+#if defined(__pnacl__)
+#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; }
+#elif defined(__clang__)
+// Clang will not tail call given inline volatile assembly.
+#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("")
+#elif defined(__GNUC__)
+// GCC will not tail call given inline volatile assembly.
+#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("")
+#elif defined(_MSC_VER)
+// The __nop() intrinsic blocks the optimization.
+#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __nop()
+#else
+#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; }
+#endif
+
+// ABSL_CACHELINE_SIZE
+//
+// Explicitly defines the size of the L1 cache for purposes of alignment.
+// Setting the cacheline size allows you to specify that certain objects be
+// aligned on a cacheline boundary with `ABSL_CACHELINE_ALIGNED` declarations.
+// (See below.)
+//
+// NOTE: this macro should be replaced with the following C++17 features, when
+// those are generally available:
+//
+//   * `std::hardware_constructive_interference_size`
+//   * `std::hardware_destructive_interference_size`
+//
+// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html
+// for more information.
+#if defined(__GNUC__)
+// Cache line alignment
+#if defined(__i386__) || defined(__x86_64__)
+#define ABSL_CACHELINE_SIZE 64
+#elif defined(__powerpc64__)
+#define ABSL_CACHELINE_SIZE 128
+#elif defined(__aarch64__)
+// We would need to read special register ctr_el0 to find out L1 dcache size.
+// This value is a good estimate based on a real aarch64 machine.
+#define ABSL_CACHELINE_SIZE 64
+#elif defined(__arm__)
+// Cache line sizes for ARM: These values are not strictly correct since
+// cache line sizes depend on implementations, not architectures.  There
+// are even implementations with cache line sizes configurable at boot
+// time.
+#if defined(__ARM_ARCH_5T__)
+#define ABSL_CACHELINE_SIZE 32
+#elif defined(__ARM_ARCH_7A__)
+#define ABSL_CACHELINE_SIZE 64
+#endif
+#endif
+
+#ifndef ABSL_CACHELINE_SIZE
+// A reasonable default guess.  Note that overestimates tend to waste more
+// space, while underestimates tend to waste more time.
+#define ABSL_CACHELINE_SIZE 64
+#endif
+
+// ABSL_CACHELINE_ALIGNED
+//
+// Indicates that the declared object should be cache-aligned using
+// `ABSL_CACHELINE_SIZE` (see above). Cacheline-aligning objects allows you to
+// load a set of related objects into the L1 cache for performance improvements.
+// Cacheline aligning objects properly allows constructive memory sharing and
+// prevents destructive (or "false") memory sharing.
+//
+// NOTE: this macro should be replaced with usage of `alignas()` using
+// `std::hardware_constructive_interference_size` and/or
+// `std::hardware_destructive_interference_size` when available within C++17.
+//
+// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html
+// for more information.
+//
+// On some compilers, `ABSL_CACHELINE_ALIGNED` expands to
+// `__attribute__((aligned(ABSL_CACHELINE_SIZE)))`. For compilers where this is
+// not known to work, the macro expands to nothing.
+//
+// No further guarantees are made here. The result of applying the macro
+// to variables and types is always implementation-defined.
+//
+// WARNING: It is easy to use this attribute incorrectly, even to the point
+// of causing bugs that are difficult to diagnose, crashes, etc. It does not
+// by itself guarantee that objects are aligned to a cache line.
+//
+// Recommendations:
+//
+// 1) Consult compiler documentation; this comment is not kept in sync as
+//    toolchains evolve.
+// 2) Verify your use has the intended effect. This often requires inspecting
+//    the generated machine code.
+// 3) Prefer applying this attribute to individual variables. Avoid
+//    applying it to types. This tends to localize the effect.
+#define ABSL_CACHELINE_ALIGNED __attribute__((aligned(ABSL_CACHELINE_SIZE)))
+
+#else  // not GCC
+#define ABSL_CACHELINE_SIZE 64
+#define ABSL_CACHELINE_ALIGNED
+#endif
+
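+// Example (an illustrative sketch, not part of the original header; the
+// `hot_counter` variable is hypothetical):
+//
+//   // Place the frequently written counter at the start of its own cache
+//   // line to reduce false sharing with neighboring data.
+//   ABSL_CACHELINE_ALIGNED static std::atomic<int64_t> hot_counter;
+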
+// ABSL_PREDICT_TRUE, ABSL_PREDICT_FALSE
+//
+// Tells the compiler which branch of a boolean condition is likely to be
+// taken, so that it can optimize code layout and scheduling for that path.
+//
+// Example:
+//
+//   if (ABSL_PREDICT_TRUE(expression)) {
+//     return result;                        // Faster if more likely
+//   } else {
+//     return 0;
+//   }
+//
+// Compilers can use the information that a certain branch is not likely to be
+// taken (for instance, a CHECK failure) to optimize for the common case in
+// the absence of better information (i.e., profile data such as that gathered
+// by compiling with `-fprofile-arcs`).
+#if ABSL_HAVE_BUILTIN(__builtin_expect) || \
+    (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_PREDICT_FALSE(x) (__builtin_expect(x, 0))
+#define ABSL_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
+#else
+#define ABSL_PREDICT_FALSE(x) x
+#define ABSL_PREDICT_TRUE(x) x
+#endif
+
+#endif  // ABSL_BASE_OPTIMIZATION_H_
diff --git a/absl/base/policy_checks.h b/absl/base/policy_checks.h
new file mode 100644
index 000000000000..17c05c14681a
--- /dev/null
+++ b/absl/base/policy_checks.h
@@ -0,0 +1,99 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: policy_checks.h
+// -----------------------------------------------------------------------------
+//
+// This header enforces a minimum set of policies at build time, such as the
+// supported compiler and library versions. Unsupported configurations are
+// reported with `#error`. This enforcement is best effort, so successfully
+// compiling this header does not guarantee a supported configuration.
+
+#ifndef ABSL_BASE_POLICY_CHECKS_H_
+#define ABSL_BASE_POLICY_CHECKS_H_
+
+// Included for the __GLIBC_PREREQ macro used below.
+#include <limits.h>
+
+// Included for the _STLPORT_VERSION macro used below.
+#if defined(__cplusplus)
+#include <cstddef>
+#endif
+
+// -----------------------------------------------------------------------------
+// Operating System Check
+// -----------------------------------------------------------------------------
+
+#if defined(__CYGWIN__)
+#error "Cygwin is not supported."
+#endif
+
+// -----------------------------------------------------------------------------
+// Compiler Check
+// -----------------------------------------------------------------------------
+
+// We support MSVC++ 14.0 update 2 and later.
+// This minimum will go up.
+#if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023918
+#error "This package requires Visual Studio 2015 Update 2 or higher"
+#endif
+
+// We support gcc 4.7 and later.
+// This minimum will go up.
+#if defined(__GNUC__) && !defined(__clang__)
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 7)
+#error "This package requires gcc 4.7 or higher"
+#endif
+#endif
+
+// We support Apple Xcode clang 4.2.1 (version 421.11.65) and later.
+// This corresponds to Apple Xcode version 4.5.
+// This minimum will go up.
+#if defined(__apple_build_version__) && __apple_build_version__ < 4211165
+#error "This package requires __apple_build_version__ of 4211165 or higher"
+#endif
+
+// -----------------------------------------------------------------------------
+// C++ Version Check
+// -----------------------------------------------------------------------------
+
+// Enforce C++11 as the minimum.  Note that Visual Studio has not
+// advanced __cplusplus even though its C++11 support is good enough for
+// our purposes, so we exempt it from the check.
+#if defined(__cplusplus) && !defined(_MSC_VER)
+#if __cplusplus < 201103L
+#error "C++ versions less than C++11 are not supported."
+#endif
+#endif
+
+// -----------------------------------------------------------------------------
+// Standard Library Check
+// -----------------------------------------------------------------------------
+
+// We have chosen glibc 2.12 as the minimum as it was tagged for release
+// in May, 2010 and includes some functionality used in Google software
+// (for instance pthread_setname_np):
+// https://sourceware.org/ml/libc-alpha/2010-05/msg00000.html
+#ifdef __GLIBC_PREREQ
+#if !__GLIBC_PREREQ(2, 12)
+#error "Minimum required version of glibc is 2.12."
+#endif
+#endif
+
+#if defined(_STLPORT_VERSION)
+#error "STLPort is not supported."
+#endif
+
+#endif  // ABSL_BASE_POLICY_CHECKS_H_
diff --git a/absl/base/port.h b/absl/base/port.h
new file mode 100644
index 000000000000..1c67257fd8f3
--- /dev/null
+++ b/absl/base/port.h
@@ -0,0 +1,26 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a forwarding header for other headers containing various
+// portability macros and functions.
+// This file is used for both C and C++!
+
+#ifndef ABSL_BASE_PORT_H_
+#define ABSL_BASE_PORT_H_
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/optimization.h"
+
+#endif  // ABSL_BASE_PORT_H_
diff --git a/absl/base/raw_logging_test.cc b/absl/base/raw_logging_test.cc
new file mode 100644
index 000000000000..dae4b35138c5
--- /dev/null
+++ b/absl/base/raw_logging_test.cc
@@ -0,0 +1,50 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This test serves primarily as a compilation test for base/raw_logging.h.
+// Raw logging testing is covered by logging_unittest.cc, which is not as
+// portable as this test.
+
+#include "absl/base/internal/raw_logging.h"
+
+#include "gtest/gtest.h"
+
+namespace {
+
+TEST(RawLoggingCompilationTest, Log) {
+  ABSL_RAW_LOG(INFO, "RAW INFO: %d", 1);
+  ABSL_RAW_LOG(ERROR, "RAW ERROR: %d", 1);
+}
+
+TEST(RawLoggingCompilationTest, PassingCheck) {
+  ABSL_RAW_CHECK(true, "RAW CHECK");
+}
+
+// Not all platforms support output from raw log, so we don't verify any
+// particular output for RAW check failures (expecting the empty std::string
+// accomplishes this).  This test is primarily a compilation test, but we
+// are verifying process death when EXPECT_DEATH works for a platform.
+const char kExpectedDeathOutput[] = "";
+
+TEST(RawLoggingDeathTest, FailingCheck) {
+  EXPECT_DEATH_IF_SUPPORTED(ABSL_RAW_CHECK(1 == 0, "explanation"),
+                            kExpectedDeathOutput);
+}
+
+TEST(RawLoggingDeathTest, LogFatal) {
+  EXPECT_DEATH_IF_SUPPORTED(ABSL_RAW_LOG(FATAL, "my dog has fleas"),
+                            kExpectedDeathOutput);
+}
+
+}  // namespace
diff --git a/absl/base/spinlock_test_common.cc b/absl/base/spinlock_test_common.cc
new file mode 100644
index 000000000000..b1212eaf7e91
--- /dev/null
+++ b/absl/base/spinlock_test_common.cc
@@ -0,0 +1,265 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A bunch of threads repeatedly hash an array of ints protected by a
+// spinlock.  If the spinlock is working properly, all elements of the
+// array should be equal at the end of the test.
+
+#include <cstdint>
+#include <limits>
+#include <random>
+#include <thread>  // NOLINT(build/c++11)
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/low_level_scheduling.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/internal/sysinfo.h"
+#include "absl/synchronization/blocking_counter.h"
+#include "absl/synchronization/notification.h"
+
+constexpr int32_t kNumThreads = 10;
+constexpr int32_t kIters = 1000;
+
+namespace absl {
+namespace base_internal {
+
+// This is defined outside of anonymous namespace so that it can be
+// a friend of SpinLock to access protected methods for testing.
+struct SpinLockTest {
+  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
+                                   int64_t wait_end_time) {
+    return SpinLock::EncodeWaitCycles(wait_start_time, wait_end_time);
+  }
+  static uint64_t DecodeWaitCycles(uint32_t lock_value) {
+    return SpinLock::DecodeWaitCycles(lock_value);
+  }
+};
+
+namespace {
+
+static constexpr int kArrayLength = 10;
+static uint32_t values[kArrayLength];
+
+static SpinLock static_spinlock(base_internal::kLinkerInitialized);
+static SpinLock static_cooperative_spinlock(
+    base_internal::kLinkerInitialized,
+    base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
+static SpinLock static_noncooperative_spinlock(
+    base_internal::kLinkerInitialized, base_internal::SCHEDULE_KERNEL_ONLY);
+
+// Simple integer hash function based on the public domain lookup2 hash.
+// http://burtleburtle.net/bob/c/lookup2.c
+static uint32_t Hash32(uint32_t a, uint32_t c) {
+  uint32_t b = 0x9e3779b9UL;  // The golden ratio; an arbitrary value.
+  a -= b; a -= c; a ^= (c >> 13);
+  b -= c; b -= a; b ^= (a << 8);
+  c -= a; c -= b; c ^= (b >> 13);
+  a -= b; a -= c; a ^= (c >> 12);
+  b -= c; b -= a; b ^= (a << 16);
+  c -= a; c -= b; c ^= (b >> 5);
+  a -= b; a -= c; a ^= (c >> 3);
+  b -= c; b -= a; b ^= (a << 10);
+  c -= a; c -= b; c ^= (b >> 15);
+  return c;
+}
+
+static void TestFunction(int thread_salt, SpinLock* spinlock) {
+  for (int i = 0; i < kIters; i++) {
+    SpinLockHolder h(spinlock);
+    for (int j = 0; j < kArrayLength; j++) {
+      const int index = (j + thread_salt) % kArrayLength;
+      values[index] = Hash32(values[index], thread_salt);
+      std::this_thread::yield();
+    }
+  }
+}
+
+static void ThreadedTest(SpinLock* spinlock) {
+  std::vector<std::thread> threads;
+  for (int i = 0; i < kNumThreads; ++i) {
+    threads.push_back(std::thread(TestFunction, i, spinlock));
+  }
+  for (auto& thread : threads) {
+    thread.join();
+  }
+
+  SpinLockHolder h(spinlock);
+  for (int i = 1; i < kArrayLength; i++) {
+    EXPECT_EQ(values[0], values[i]);
+  }
+}
+
+TEST(SpinLock, StackNonCooperativeDisablesScheduling) {
+  SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
+  spinlock.Lock();
+  EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed());
+  spinlock.Unlock();
+}
+
+TEST(SpinLock, StaticNonCooperativeDisablesScheduling) {
+  static_noncooperative_spinlock.Lock();
+  EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed());
+  static_noncooperative_spinlock.Unlock();
+}
+
+TEST(SpinLock, WaitCyclesEncoding) {
+  // These are implementation details not exported by SpinLock.
+  const int kProfileTimestampShift = 7;
+  const int kLockwordReservedShift = 3;
+  const uint32_t kSpinLockSleeper = 8;
+
+  // We should be able to encode up to (2^kMaxCyclesShift - 1) cycles without
+  // clamping, but the lower kProfileTimestampShift bits will be dropped.
+  const int kMaxCyclesShift =
+    32 - kLockwordReservedShift + kProfileTimestampShift;
+  const uint64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;
+
+  // These bits should be zero after encoding.
+  const uint32_t kLockwordReservedMask = (1 << kLockwordReservedShift) - 1;
+
+  // These bits are dropped when wait cycles are encoded.
+  const uint64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1;
+
+  // Test a bunch of random values
+  std::default_random_engine generator;
+  // Shift to avoid overflow below.
+  std::uniform_int_distribution<uint64_t> time_distribution(
+      0, std::numeric_limits<uint64_t>::max() >> 4);
+  std::uniform_int_distribution<uint64_t> cycle_distribution(0, kMaxCycles);
+
+  for (int i = 0; i < 100; i++) {
+    int64_t start_time = time_distribution(generator);
+    int64_t cycles = cycle_distribution(generator);
+    int64_t end_time = start_time + cycles;
+    uint32_t lock_value = SpinLockTest::EncodeWaitCycles(start_time, end_time);
+    EXPECT_EQ(0, lock_value & kLockwordReservedMask);
+    uint64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value);
+    EXPECT_EQ(0, decoded & kProfileTimestampMask);
+    EXPECT_EQ(cycles & ~kProfileTimestampMask, decoded);
+  }
+
+  // Test corner cases
+  int64_t start_time = time_distribution(generator);
+  EXPECT_EQ(0, SpinLockTest::EncodeWaitCycles(start_time, start_time));
+  EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(0));
+  EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(kLockwordReservedMask));
+  EXPECT_EQ(kMaxCycles & ~kProfileTimestampMask,
+            SpinLockTest::DecodeWaitCycles(~kLockwordReservedMask));
+
+  // Check that we cannot produce kSpinLockSleeper during encoding.
+  int64_t sleeper_cycles =
+      kSpinLockSleeper << (kProfileTimestampShift - kLockwordReservedShift);
+  uint32_t sleeper_value =
+      SpinLockTest::EncodeWaitCycles(start_time, start_time + sleeper_cycles);
+  EXPECT_NE(sleeper_value, kSpinLockSleeper);
+
+  // Test clamping
+  uint32_t max_value =
+    SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles);
+  uint64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value);
+  uint64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask;
+  EXPECT_EQ(expected_max_value_decoded, max_value_decoded);
+
+  const int64_t step = (1 << kProfileTimestampShift);
+  uint32_t after_max_value =
+    SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles + step);
+  uint64_t after_max_value_decoded =
+      SpinLockTest::DecodeWaitCycles(after_max_value);
+  EXPECT_EQ(expected_max_value_decoded, after_max_value_decoded);
+
+  uint32_t before_max_value = SpinLockTest::EncodeWaitCycles(
+      start_time, start_time + kMaxCycles - step);
+  uint64_t before_max_value_decoded =
+    SpinLockTest::DecodeWaitCycles(before_max_value);
+  EXPECT_GT(expected_max_value_decoded, before_max_value_decoded);
+}
+
+TEST(SpinLockWithThreads, StaticSpinLock) {
+  ThreadedTest(&static_spinlock);
+}
+
+TEST(SpinLockWithThreads, StackSpinLock) {
+  SpinLock spinlock;
+  ThreadedTest(&spinlock);
+}
+
+TEST(SpinLockWithThreads, StackCooperativeSpinLock) {
+  SpinLock spinlock(base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
+  ThreadedTest(&spinlock);
+}
+
+TEST(SpinLockWithThreads, StackNonCooperativeSpinLock) {
+  SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
+  ThreadedTest(&spinlock);
+}
+
+TEST(SpinLockWithThreads, StaticCooperativeSpinLock) {
+  ThreadedTest(&static_cooperative_spinlock);
+}
+
+TEST(SpinLockWithThreads, StaticNonCooperativeSpinLock) {
+  ThreadedTest(&static_noncooperative_spinlock);
+}
+
+TEST(SpinLockWithThreads, DoesNotDeadlock) {
+  struct Helper {
+    static void NotifyThenLock(Notification* locked, SpinLock* spinlock,
+                               BlockingCounter* b) {
+      locked->WaitForNotification();  // Wait for LockThenWait() to hold "s".
+      b->DecrementCount();
+      SpinLockHolder l(spinlock);
+    }
+
+    static void LockThenWait(Notification* locked, SpinLock* spinlock,
+                             BlockingCounter* b) {
+      SpinLockHolder l(spinlock);
+      locked->Notify();
+      b->Wait();
+    }
+
+    static void DeadlockTest(SpinLock* spinlock, int num_spinners) {
+      Notification locked;
+      BlockingCounter counter(num_spinners);
+      std::vector<std::thread> threads;
+
+      threads.push_back(
+          std::thread(Helper::LockThenWait, &locked, spinlock, &counter));
+      for (int i = 0; i < num_spinners; ++i) {
+        threads.push_back(
+            std::thread(Helper::NotifyThenLock, &locked, spinlock, &counter));
+      }
+
+      for (auto& thread : threads) {
+        thread.join();
+      }
+    }
+  };
+
+  SpinLock stack_cooperative_spinlock(
+      base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
+  SpinLock stack_noncooperative_spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
+  Helper::DeadlockTest(&stack_cooperative_spinlock,
+                       base_internal::NumCPUs() * 2);
+  Helper::DeadlockTest(&stack_noncooperative_spinlock,
+                       base_internal::NumCPUs() * 2);
+  Helper::DeadlockTest(&static_cooperative_spinlock,
+                       base_internal::NumCPUs() * 2);
+  Helper::DeadlockTest(&static_noncooperative_spinlock,
+                       base_internal::NumCPUs() * 2);
+}
+
+}  // namespace
+}  // namespace base_internal
+}  // namespace absl
diff --git a/absl/base/thread_annotations.h b/absl/base/thread_annotations.h
new file mode 100644
index 000000000000..025a8548d5d7
--- /dev/null
+++ b/absl/base/thread_annotations.h
@@ -0,0 +1,247 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: thread_annotations.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains macro definitions for thread safety annotations
+// that allow developers to document the locking policies of multi-threaded
+// code. The annotations can also help program analysis tools to identify
+// potential thread safety issues.
+//
+//
+// These annotations are implemented using compiler attributes. Using the macros
+// defined here instead of raw attributes allows for portability and future
+// compatibility.
+//
+// When referring to mutexes in the arguments of the attributes, you should
+// use variable names or more complex expressions (e.g. my_object->mutex_)
+// that evaluate to a concrete mutex object whenever possible. If the mutex
+// you want to refer to is not in scope, you may use a member pointer
+// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object.
+//
+
+#ifndef ABSL_BASE_THREAD_ANNOTATIONS_H_
+#define ABSL_BASE_THREAD_ANNOTATIONS_H_
+#if defined(__clang__)
+#define THREAD_ANNOTATION_ATTRIBUTE__(x)   __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE__(x)   // no-op
+#endif
+
+// GUARDED_BY()
+//
+// Documents if a shared variable/field needs to be protected by a mutex.
+// GUARDED_BY() allows the user to specify a particular mutex that should be
+// held when accessing the annotated variable.
+//
+// Example:
+//
+//   Mutex mu;
+//   int p1 GUARDED_BY(mu);
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+#define GUARDED_VAR   THREAD_ANNOTATION_ATTRIBUTE__(guarded)
+
+// PT_GUARDED_BY()
+//
+// Documents if the memory location pointed to by a pointer should be guarded
+// by a mutex when dereferencing the pointer.
+//
+// Example:
+//   Mutex mu;
+//   int *p1 PT_GUARDED_BY(mu);
+//
+// Note that a pointer variable to a shared memory location could itself be a
+// shared variable.
+//
+// Example:
+//
+//     // `q`, guarded by `mu1`, points to a shared memory location that is
+//     // guarded by `mu2`:
+//     int *q GUARDED_BY(mu1) PT_GUARDED_BY(mu2);
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
+#define PT_GUARDED_VAR   THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded)
+
+// ACQUIRED_AFTER() / ACQUIRED_BEFORE()
+//
+// Documents the acquisition order between locks that can be held
+// simultaneously by a thread. For any two locks that need to be annotated
+// to establish an acquisition order, only one of them needs the annotation.
+// (i.e., you don't have to annotate both locks with both ACQUIRED_AFTER
+// and ACQUIRED_BEFORE.)
+//
+// Example:
+//
+//   Mutex m1;
+//   Mutex m2 ACQUIRED_AFTER(m1);
+#define ACQUIRED_AFTER(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
+
+#define ACQUIRED_BEFORE(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
+
+// EXCLUSIVE_LOCKS_REQUIRED() / SHARED_LOCKS_REQUIRED()
+//
+// Documents a function that expects a mutex to be held prior to entry.
+// The mutex is expected to be held both on entry to, and exit from, the
+// function.
+//
+// Example:
+//
+//   Mutex mu1, mu2;
+//   int a GUARDED_BY(mu1);
+//   int b GUARDED_BY(mu2);
+//
+//   void foo() EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... };
+#define EXCLUSIVE_LOCKS_REQUIRED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
+
+#define SHARED_LOCKS_REQUIRED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
+
+// LOCKS_EXCLUDED()
+//
+// Documents the locks acquired in the body of the function. These locks
+// cannot be held when calling this function (as Abseil's `Mutex` locks are
+// non-reentrant).
+#define LOCKS_EXCLUDED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
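+
+// Example (an illustrative sketch, not part of the original header; `mu` and
+// `Flush()` are hypothetical):
+//
+//   Mutex mu;
+//
+//   // Flush() acquires `mu` internally, so callers must not already hold it.
+//   void Flush() LOCKS_EXCLUDED(mu);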
+
+// LOCK_RETURNED()
+//
+// Documents a function that returns a mutex without acquiring it.  For example,
+// a public getter method that returns a pointer to a private mutex should
+// be annotated with LOCK_RETURNED.
+#define LOCK_RETURNED(x) \
+  THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
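+
+// Example (an illustrative sketch, not part of the original header; the
+// `MyContainer` class and `mu_` member are hypothetical):
+//
+//   class MyContainer {
+//    public:
+//     Mutex* mutex() LOCK_RETURNED(mu_) { return &mu_; }
+//
+//    private:
+//     Mutex mu_;
+//   };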
+
+// LOCKABLE
+//
+// Documents if a class/type is a lockable type (such as the `Mutex` class).
+#define LOCKABLE \
+  THREAD_ANNOTATION_ATTRIBUTE__(lockable)
+
+// SCOPED_LOCKABLE
+//
+// Documents if a class does RAII locking (such as the `MutexLock` class).
+// The constructor should use `EXCLUSIVE_LOCK_FUNCTION()` (or
+// `SHARED_LOCK_FUNCTION()`) to specify the mutex that is
+// acquired, and the destructor should use `UNLOCK_FUNCTION()` with no
+// arguments; the analysis will assume that the destructor unlocks whatever the
+// constructor locked.
+#define SCOPED_LOCKABLE \
+  THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+
+// EXCLUSIVE_LOCK_FUNCTION()
+//
+// Documents functions that acquire a lock in the body of a function, and do
+// not release it.
+#define EXCLUSIVE_LOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
+
+// SHARED_LOCK_FUNCTION()
+//
+// Documents functions that acquire a shared (reader) lock in the body of a
+// function, and do not release it.
+#define SHARED_LOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
+
+// UNLOCK_FUNCTION()
+//
+// Documents functions that expect a lock to be held on entry to the function,
+// and release it in the body of the function.
+#define UNLOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
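+
+// Example (an illustrative sketch, not part of the original header; the
+// `MyScopedLock` class is hypothetical and mirrors the RAII pattern described
+// under SCOPED_LOCKABLE above):
+//
+//   class SCOPED_LOCKABLE MyScopedLock {
+//    public:
+//     explicit MyScopedLock(Mutex* mu) EXCLUSIVE_LOCK_FUNCTION(mu)
+//         : mu_(mu) { mu_->Lock(); }
+//     ~MyScopedLock() UNLOCK_FUNCTION() { mu_->Unlock(); }
+//
+//    private:
+//     Mutex* const mu_;
+//   };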
+
+// EXCLUSIVE_TRYLOCK_FUNCTION() / SHARED_TRYLOCK_FUNCTION()
+//
+// Documents functions that try to acquire a lock, and return success or failure
+// (or a non-boolean value that can be interpreted as a boolean).
+// The first argument should be `true` for functions that return `true` on
+// success, or `false` for functions that return `false` on success. The second
+// argument specifies the mutex that is locked on success. If unspecified, this
+// mutex is assumed to be `this`.
+#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
+
+#define SHARED_TRYLOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
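+
+// Example (an illustrative sketch, not part of the original header; `mu` and
+// `TryStartWork()` are hypothetical):
+//
+//   Mutex mu;
+//
+//   // Returns true and holds `mu` on success; returns false without holding
+//   // `mu` otherwise.
+//   bool TryStartWork() EXCLUSIVE_TRYLOCK_FUNCTION(true, mu);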
+
+// ASSERT_EXCLUSIVE_LOCK() / ASSERT_SHARED_LOCK()
+//
+// Documents functions that dynamically check to see if a lock is held, and fail
+// if it is not held.
+#define ASSERT_EXCLUSIVE_LOCK(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__))
+
+#define ASSERT_SHARED_LOCK(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__))
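+
+// Example (an illustrative sketch, not part of the original header; `mu` and
+// `AssertReaderHeld()` are hypothetical):
+//
+//   Mutex mu;
+//
+//   // Fails at runtime if `mu` is not held in shared mode at the call site;
+//   // afterwards the analysis assumes the shared lock is held.
+//   void AssertReaderHeld() ASSERT_SHARED_LOCK(mu);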
+
+// NO_THREAD_SAFETY_ANALYSIS
+//
+// Turns off thread safety checking within the body of a particular function.
+// This annotation is used to mark functions that are known to be correct, but
+// whose locking behavior is more complicated than the analyzer can handle.
+#define NO_THREAD_SAFETY_ANALYSIS \
+  THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
+
+//------------------------------------------------------------------------------
+// Tool-Supplied Annotations
+//------------------------------------------------------------------------------
+
+// TS_UNCHECKED should be placed around lock expressions that are not valid
+// C++ syntax, but which are present for documentation purposes.  These
+// annotations will be ignored by the analysis.
+#define TS_UNCHECKED(x) ""
+
+// TS_FIXME is used to mark lock expressions that are not valid C++ syntax.
+// It is used by automated tools to mark and disable invalid expressions.
+// The annotation should either be fixed, or changed to TS_UNCHECKED.
+#define TS_FIXME(x) ""
+
+// Like NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body of
+// a particular function.  However, this attribute is used to mark functions
+// that are incorrect and need to be fixed.  It is used by automated tools to
+// avoid breaking the build when the analysis is updated.
+// Code owners are expected to eventually fix the routine.
+#define NO_THREAD_SAFETY_ANALYSIS_FIXME  NO_THREAD_SAFETY_ANALYSIS
+
+// Similar to NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a GUARDED_BY
+// annotation that needs to be fixed because it is producing a thread safety
+// warning.  It disables the GUARDED_BY annotation.
+#define GUARDED_BY_FIXME(x)
+
+// Disables warnings for a single read operation.  This can be used to avoid
+// warnings when it is known that the read is not actually involved in a race,
+// but the compiler cannot confirm that.
+#define TS_UNCHECKED_READ(x) thread_safety_analysis::ts_unchecked_read(x)
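+
+// Example (an illustrative sketch, not part of the original header; `mu`,
+// `counter`, and `LogCounter()` are hypothetical):
+//
+//   Mutex mu;
+//   int counter GUARDED_BY(mu);
+//
+//   // A best-effort read used only for logging; the guarded-access check is
+//   // suppressed for this single read.
+//   void LogCounter() { std::printf("%d\n", TS_UNCHECKED_READ(counter)); }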
+
+
+namespace thread_safety_analysis {
+
+// Takes a reference to a guarded data member, and returns an unguarded
+// reference.
+template <typename T>
+inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS {
+  return v;
+}
+
+template <typename T>
+inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS {
+  return v;
+}
+
+}  // namespace thread_safety_analysis
+
+#endif  // ABSL_BASE_THREAD_ANNOTATIONS_H_
diff --git a/absl/base/throw_delegate_test.cc b/absl/base/throw_delegate_test.cc
new file mode 100644
index 000000000000..f9494452e032
--- /dev/null
+++ b/absl/base/throw_delegate_test.cc
@@ -0,0 +1,95 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/throw_delegate.h"
+
+#include <functional>
+#include <new>
+#include <stdexcept>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace {
+
+using absl::base_internal::ThrowStdLogicError;
+using absl::base_internal::ThrowStdInvalidArgument;
+using absl::base_internal::ThrowStdDomainError;
+using absl::base_internal::ThrowStdLengthError;
+using absl::base_internal::ThrowStdOutOfRange;
+using absl::base_internal::ThrowStdRuntimeError;
+using absl::base_internal::ThrowStdRangeError;
+using absl::base_internal::ThrowStdOverflowError;
+using absl::base_internal::ThrowStdUnderflowError;
+using absl::base_internal::ThrowStdBadFunctionCall;
+using absl::base_internal::ThrowStdBadAlloc;
+
+constexpr const char* what_arg = "The quick brown fox jumps over the lazy dog";
+
+template <typename E>
+void ExpectThrowChar(void (*f)(const char*)) {
+  try {
+    f(what_arg);
+    FAIL() << "Didn't throw";
+  } catch (const E& e) {
+    EXPECT_STREQ(e.what(), what_arg);
+  }
+}
+
+template <typename E>
+void ExpectThrowString(void (*f)(const std::string&)) {
+  try {
+    f(what_arg);
+    FAIL() << "Didn't throw";
+  } catch (const E& e) {
+    EXPECT_STREQ(e.what(), what_arg);
+  }
+}
+
+template <typename E>
+void ExpectThrowNoWhat(void (*f)()) {
+  try {
+    f();
+    FAIL() << "Didn't throw";
+  } catch (const E& e) {
+  }
+}
+
+TEST(ThrowHelper, Test) {
+  // Not using EXPECT_THROW because we want to check the .what() message too.
+  ExpectThrowChar<std::logic_error>(ThrowStdLogicError);
+  ExpectThrowChar<std::invalid_argument>(ThrowStdInvalidArgument);
+  ExpectThrowChar<std::domain_error>(ThrowStdDomainError);
+  ExpectThrowChar<std::length_error>(ThrowStdLengthError);
+  ExpectThrowChar<std::out_of_range>(ThrowStdOutOfRange);
+  ExpectThrowChar<std::runtime_error>(ThrowStdRuntimeError);
+  ExpectThrowChar<std::range_error>(ThrowStdRangeError);
+  ExpectThrowChar<std::overflow_error>(ThrowStdOverflowError);
+  ExpectThrowChar<std::underflow_error>(ThrowStdUnderflowError);
+
+  ExpectThrowString<std::logic_error>(ThrowStdLogicError);
+  ExpectThrowString<std::invalid_argument>(ThrowStdInvalidArgument);
+  ExpectThrowString<std::domain_error>(ThrowStdDomainError);
+  ExpectThrowString<std::length_error>(ThrowStdLengthError);
+  ExpectThrowString<std::out_of_range>(ThrowStdOutOfRange);
+  ExpectThrowString<std::runtime_error>(ThrowStdRuntimeError);
+  ExpectThrowString<std::range_error>(ThrowStdRangeError);
+  ExpectThrowString<std::overflow_error>(ThrowStdOverflowError);
+  ExpectThrowString<std::underflow_error>(ThrowStdUnderflowError);
+
+  ExpectThrowNoWhat<std::bad_function_call>(ThrowStdBadFunctionCall);
+  ExpectThrowNoWhat<std::bad_alloc>(ThrowStdBadAlloc);
+}
+
+}  // namespace