author    | Vincent Ambo <tazjin@google.com> | 2020-05-20T01·32+0100
committer | Vincent Ambo <tazjin@google.com> | 2020-05-20T01·32+0100
commit    | fc8dc48020ac5b52731d0828a96ea4d2526c77ba (patch)
tree      | 353204eea3268095a9ad3f5345720f32c2615c69 /third_party/abseil_cpp/absl/base
parent    | ffb2ae54beb5796cd408fbe15d2d2da09ff37adf (diff)
parent    | 768eb2ca2857342673fcd462792ce04b8bac3fa3 (diff)
Add 'third_party/abseil_cpp/' from commit '768eb2ca2857342673fcd462792ce04b8bac3fa3' (ref: r/781)
git-subtree-dir: third_party/abseil_cpp
git-subtree-mainline: ffb2ae54beb5796cd408fbe15d2d2da09ff37adf
git-subtree-split: 768eb2ca2857342673fcd462792ce04b8bac3fa3
Diffstat (limited to 'third_party/abseil_cpp/absl/base')
100 files changed, 16826 insertions, 0 deletions
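The git-subtree trailers above (git-subtree-dir, git-subtree-mainline, git-subtree-split) identify this merge as a `git subtree` import of the Abseil sources at commit 768eb2ca2857. As a hedged illustration only — the page does not record the actual invocation, and the remote name `abseil` and the upstream URL are assumptions — a merge with these two parents is typically produced along these lines:

# Hypothetical reconstruction; remote name and URL are assumed, not taken from the commit.
$ git remote add abseil https://github.com/abseil/abseil-cpp.git
$ git fetch abseil
# Without --squash, the full upstream history is merged, which is what yields the
# mainline and split parents recorded in the trailers above.
$ git subtree add --prefix=third_party/abseil_cpp abseil 768eb2ca2857342673fcd462792ce04b8bac3fa3

The diff below (limited to absl/base) is the verbatim content brought in by that import.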
diff --git a/third_party/abseil_cpp/absl/base/BUILD.bazel b/third_party/abseil_cpp/absl/base/BUILD.bazel new file mode 100644 index 000000000000..76122dab30fe --- /dev/null +++ b/third_party/abseil_cpp/absl/base/BUILD.bazel @@ -0,0 +1,793 @@ +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test") +load( + "//absl:copts/configure_copts.bzl", + "ABSL_DEFAULT_COPTS", + "ABSL_DEFAULT_LINKOPTS", + "ABSL_TEST_COPTS", +) + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +cc_library( + name = "atomic_hook", + hdrs = ["internal/atomic_hook.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + ":core_headers", + ], +) + +cc_library( + name = "errno_saver", + hdrs = ["internal/errno_saver.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [":config"], +) + +cc_library( + name = "log_severity", + srcs = ["log_severity.cc"], + hdrs = ["log_severity.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":config", + ":core_headers", + ], +) + +cc_library( + name = "raw_logging_internal", + srcs = ["internal/raw_logging.cc"], + hdrs = ["internal/raw_logging.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":atomic_hook", + ":config", + ":core_headers", + ":log_severity", + ], +) + +cc_library( + name = "spinlock_wait", + srcs = [ + "internal/spinlock_akaros.inc", + "internal/spinlock_linux.inc", + "internal/spinlock_posix.inc", + "internal/spinlock_wait.cc", + "internal/spinlock_win32.inc", + ], + hdrs = ["internal/spinlock_wait.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl/base:__pkg__", + ], + deps = [ + ":base_internal", + ":core_headers", + ":errno_saver", + ], +) + +cc_library( + name = "config", + hdrs = [ + "config.h", + "options.h", + "policy_checks.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, +) + +cc_library( + name = "dynamic_annotations", + srcs = ["dynamic_annotations.cc"], + hdrs = ["dynamic_annotations.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, +) + +cc_library( + name = "core_headers", + srcs = [ + "internal/thread_annotations.h", + ], + hdrs = [ + "attributes.h", + "const_init.h", + "macros.h", + "optimization.h", + "port.h", + "thread_annotations.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":config", + ], +) + +cc_library( + name = "malloc_internal", + srcs = [ + "internal/low_level_alloc.cc", + ], + hdrs = [ + "internal/direct_mmap.h", + "internal/low_level_alloc.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = select({ + "//absl:windows": [], + "//conditions:default": ["-pthread"], + }) + 
ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//visibility:public", + ], + deps = [ + ":base", + ":base_internal", + ":config", + ":core_headers", + ":dynamic_annotations", + ":raw_logging_internal", + ], +) + +cc_library( + name = "base_internal", + hdrs = [ + "internal/hide_ptr.h", + "internal/identity.h", + "internal/inline_variable.h", + "internal/invoke.h", + "internal/scheduling_mode.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + "//absl/meta:type_traits", + ], +) + +cc_library( + name = "base", + srcs = [ + "internal/cycleclock.cc", + "internal/spinlock.cc", + "internal/sysinfo.cc", + "internal/thread_identity.cc", + "internal/unscaledcycleclock.cc", + ], + hdrs = [ + "call_once.h", + "casts.h", + "internal/cycleclock.h", + "internal/low_level_scheduling.h", + "internal/per_thread_tls.h", + "internal/spinlock.h", + "internal/sysinfo.h", + "internal/thread_identity.h", + "internal/tsan_mutex_interface.h", + "internal/unscaledcycleclock.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = select({ + "//absl:windows": [ + "-DEFAULTLIB:advapi32.lib", + ], + "//conditions:default": ["-pthread"], + }) + ABSL_DEFAULT_LINKOPTS, + deps = [ + ":atomic_hook", + ":base_internal", + ":config", + ":core_headers", + ":dynamic_annotations", + ":log_severity", + ":raw_logging_internal", + ":spinlock_wait", + "//absl/meta:type_traits", + ], +) + +cc_library( + name = "atomic_hook_test_helper", + testonly = 1, + srcs = ["internal/atomic_hook_test_helper.cc"], + hdrs = ["internal/atomic_hook_test_helper.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":atomic_hook", + ":core_headers", + ], +) + +cc_test( + name = "atomic_hook_test", + size = "small", + srcs = ["internal/atomic_hook_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":atomic_hook", + ":atomic_hook_test_helper", + ":core_headers", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "bit_cast_test", + size = "small", + srcs = [ + "bit_cast_test.cc", + ], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base", + ":core_headers", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "throw_delegate", + srcs = ["internal/throw_delegate.cc"], + hdrs = ["internal/throw_delegate.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + ":raw_logging_internal", + ], +) + +cc_test( + name = "throw_delegate_test", + srcs = ["throw_delegate_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":config", + ":throw_delegate", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "errno_saver_test", + size = "small", + srcs = ["internal/errno_saver_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":errno_saver", + ":strerror", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "exception_testing", + testonly = 1, + hdrs = ["internal/exception_testing.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "pretty_function", + hdrs = ["internal/pretty_function.h"], + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//absl:__subpackages__"], +) + +cc_library( + name = 
"exception_safety_testing", + testonly = 1, + srcs = ["internal/exception_safety_testing.cc"], + hdrs = ["internal/exception_safety_testing.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":config", + ":pretty_function", + "//absl/memory", + "//absl/meta:type_traits", + "//absl/strings", + "//absl/utility", + "@com_google_googletest//:gtest", + ], +) + +cc_test( + name = "exception_safety_testing_test", + srcs = ["exception_safety_testing_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":exception_safety_testing", + "//absl/memory", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "inline_variable_test", + size = "small", + srcs = [ + "inline_variable_test.cc", + "inline_variable_test_a.cc", + "inline_variable_test_b.cc", + "internal/inline_variable_testing.h", + ], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base_internal", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "invoke_test", + size = "small", + srcs = ["invoke_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base_internal", + "//absl/memory", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +# Common test library made available for use in non-absl code that overrides +# AbslInternalSpinLockDelay and AbslInternalSpinLockWake. +cc_library( + name = "spinlock_test_common", + testonly = 1, + srcs = ["spinlock_test_common.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base", + ":base_internal", + ":core_headers", + "//absl/synchronization", + "@com_google_googletest//:gtest", + ], + alwayslink = 1, +) + +cc_test( + name = "spinlock_test", + size = "medium", + srcs = ["spinlock_test_common.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base", + ":base_internal", + ":core_headers", + "//absl/synchronization", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "spinlock_benchmark_common", + testonly = 1, + srcs = ["internal/spinlock_benchmark.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl/base:__pkg__", + ], + deps = [ + ":base", + ":base_internal", + ":raw_logging_internal", + "//absl/synchronization", + "@com_github_google_benchmark//:benchmark_main", + ], + alwayslink = 1, +) + +cc_binary( + name = "spinlock_benchmark", + testonly = 1, + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + visibility = ["//visibility:private"], + deps = [ + ":spinlock_benchmark_common", + ], +) + +cc_library( + name = "endian", + hdrs = [ + "internal/endian.h", + "internal/unaligned_access.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":config", + ":core_headers", + ], +) + +cc_test( + name = "endian_test", + srcs = ["internal/endian_test.cc"], + copts = ABSL_TEST_COPTS, + deps = [ + ":config", + ":endian", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "config_test", + srcs = ["config_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":config", + "//absl/synchronization:thread_pool", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "call_once_test", + srcs = ["call_once_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base", + ":core_headers", + "//absl/synchronization", + 
"@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "raw_logging_test", + srcs = ["raw_logging_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":raw_logging_internal", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "sysinfo_test", + size = "small", + srcs = ["internal/sysinfo_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base", + "//absl/synchronization", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "low_level_alloc_test", + size = "medium", + srcs = ["internal/low_level_alloc_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["no_test_ios_x86_64"], + deps = [ + ":malloc_internal", + "//absl/container:node_hash_map", + ], +) + +cc_test( + name = "thread_identity_test", + size = "small", + srcs = ["internal/thread_identity_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base", + ":core_headers", + "//absl/synchronization", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "thread_identity_benchmark", + srcs = ["internal/thread_identity_benchmark.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + visibility = ["//visibility:private"], + deps = [ + ":base", + "//absl/synchronization", + "@com_github_google_benchmark//:benchmark_main", + ], +) + +cc_library( + name = "bits", + hdrs = ["internal/bits.h"], + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + ":core_headers", + ], +) + +cc_test( + name = "bits_test", + size = "small", + srcs = ["internal/bits_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":bits", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "exponential_biased", + srcs = ["internal/exponential_biased.cc"], + hdrs = ["internal/exponential_biased.h"], + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + ":core_headers", + ], +) + +cc_test( + name = "exponential_biased_test", + size = "small", + srcs = ["internal/exponential_biased_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//visibility:private"], + deps = [ + ":exponential_biased", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "periodic_sampler", + srcs = ["internal/periodic_sampler.cc"], + hdrs = ["internal/periodic_sampler.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":core_headers", + ":exponential_biased", + ], +) + +cc_test( + name = "periodic_sampler_test", + size = "small", + srcs = ["internal/periodic_sampler_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//visibility:private"], + deps = [ + ":core_headers", + ":periodic_sampler", + "@com_google_googletest//:gtest_main", + ], +) + +cc_binary( + name = "periodic_sampler_benchmark", + testonly = 1, + srcs = ["internal/periodic_sampler_benchmark.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + visibility = ["//visibility:private"], + deps = [ + ":core_headers", + ":periodic_sampler", + "@com_github_google_benchmark//:benchmark_main", + ], +) + +cc_library( + name = "scoped_set_env", + testonly = 1, + srcs = ["internal/scoped_set_env.cc"], + hdrs = 
["internal/scoped_set_env.h"], + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + ":raw_logging_internal", + ], +) + +cc_test( + name = "scoped_set_env_test", + size = "small", + srcs = ["internal/scoped_set_env_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":scoped_set_env", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "log_severity_test", + size = "small", + srcs = ["log_severity_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":log_severity", + "//absl/flags:flag_internal", + "//absl/flags:marshalling", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "strerror", + srcs = ["internal/strerror.cc"], + hdrs = ["internal/strerror.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + ":core_headers", + ":errno_saver", + ], +) + +cc_test( + name = "strerror_test", + size = "small", + srcs = ["internal/strerror_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":strerror", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) + +cc_binary( + name = "strerror_benchmark", + testonly = 1, + srcs = ["internal/strerror_benchmark.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + visibility = ["//visibility:private"], + deps = [ + ":strerror", + "@com_github_google_benchmark//:benchmark_main", + ], +) + +cc_library( + name = "fast_type_id", + hdrs = ["internal/fast_type_id.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + ], +) + +cc_test( + name = "fast_type_id_test", + size = "small", + srcs = ["internal/fast_type_id_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":fast_type_id", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "unique_small_name_test", + size = "small", + srcs = ["internal/unique_small_name_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + linkstatic = 1, + deps = [ + ":core_headers", + "//absl/strings", + "@com_google_googletest//:gtest_main", + ], +) diff --git a/third_party/abseil_cpp/absl/base/CMakeLists.txt b/third_party/abseil_cpp/absl/base/CMakeLists.txt new file mode 100644 index 000000000000..292998b3bf8d --- /dev/null +++ b/third_party/abseil_cpp/absl/base/CMakeLists.txt @@ -0,0 +1,700 @@ +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +find_library(LIBRT rt) + +absl_cc_library( + NAME + atomic_hook + HDRS + "internal/atomic_hook.h" + DEPS + absl::config + absl::core_headers + COPTS + ${ABSL_DEFAULT_COPTS} +) + +absl_cc_library( + NAME + errno_saver + HDRS + "internal/errno_saver.h" + DEPS + absl::config + COPTS + ${ABSL_DEFAULT_COPTS} +) + +absl_cc_library( + NAME + log_severity + HDRS + "log_severity.h" + SRCS + "log_severity.cc" + DEPS + absl::core_headers + COPTS + ${ABSL_DEFAULT_COPTS} +) + +absl_cc_library( + NAME + raw_logging_internal + HDRS + "internal/raw_logging.h" + SRCS + "internal/raw_logging.cc" + DEPS + absl::atomic_hook + absl::config + absl::core_headers + absl::log_severity + COPTS + ${ABSL_DEFAULT_COPTS} +) + +absl_cc_library( + NAME + spinlock_wait + HDRS + "internal/spinlock_wait.h" + SRCS + "internal/spinlock_akaros.inc" + "internal/spinlock_linux.inc" + "internal/spinlock_posix.inc" + "internal/spinlock_wait.cc" + "internal/spinlock_win32.inc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base_internal + absl::core_headers + absl::errno_saver +) + +absl_cc_library( + NAME + config + HDRS + "config.h" + "options.h" + "policy_checks.h" + COPTS + ${ABSL_DEFAULT_COPTS} + PUBLIC +) + +absl_cc_library( + NAME + dynamic_annotations + HDRS + "dynamic_annotations.h" + SRCS + "dynamic_annotations.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + PUBLIC +) + +absl_cc_library( + NAME + core_headers + HDRS + "attributes.h" + "const_init.h" + "macros.h" + "optimization.h" + "port.h" + "thread_annotations.h" + "internal/thread_annotations.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) + +absl_cc_library( + NAME + malloc_internal + HDRS + "internal/direct_mmap.h" + "internal/low_level_alloc.h" + SRCS + "internal/low_level_alloc.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::base_internal + absl::config + absl::core_headers + absl::dynamic_annotations + absl::raw_logging_internal + Threads::Threads +) + +absl_cc_library( + NAME + base_internal + HDRS + "internal/hide_ptr.h" + "internal/identity.h" + "internal/inline_variable.h" + "internal/invoke.h" + "internal/scheduling_mode.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::type_traits +) + +absl_cc_library( + NAME + base + HDRS + "call_once.h" + "casts.h" + "internal/cycleclock.h" + "internal/low_level_scheduling.h" + "internal/per_thread_tls.h" + "internal/spinlock.h" + "internal/sysinfo.h" + "internal/thread_identity.h" + "internal/tsan_mutex_interface.h" + "internal/unscaledcycleclock.h" + SRCS + "internal/cycleclock.cc" + "internal/spinlock.cc" + "internal/sysinfo.cc" + "internal/thread_identity.cc" + "internal/unscaledcycleclock.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + $<$<BOOL:${LIBRT}>:-lrt> + $<$<BOOL:${MINGW}>:"advapi32"> + DEPS + absl::atomic_hook + absl::base_internal + absl::config + absl::core_headers + absl::dynamic_annotations + absl::log_severity + absl::raw_logging_internal + absl::spinlock_wait + absl::type_traits + Threads::Threads + PUBLIC +) + +absl_cc_library( + NAME + throw_delegate + HDRS + "internal/throw_delegate.h" + SRCS + "internal/throw_delegate.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::raw_logging_internal +) + +absl_cc_library( + NAME + exception_testing + HDRS + "internal/exception_testing.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + gtest + TESTONLY +) + +absl_cc_library( + NAME + pretty_function + HDRS + "internal/pretty_function.h" + COPTS + ${ABSL_DEFAULT_COPTS} +) + +absl_cc_library( + NAME + 
exception_safety_testing + HDRS + "internal/exception_safety_testing.h" + SRCS + "internal/exception_safety_testing.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::config + absl::pretty_function + absl::memory + absl::meta + absl::strings + absl::utility + gtest + TESTONLY +) + +absl_cc_test( + NAME + absl_exception_safety_testing_test + SRCS + "exception_safety_testing_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::exception_safety_testing + absl::memory + gtest_main +) + +absl_cc_library( + NAME + atomic_hook_test_helper + SRCS + "internal/atomic_hook_test_helper.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::atomic_hook + absl::core_headers + TESTONLY +) + +absl_cc_test( + NAME + atomic_hook_test + SRCS + "internal/atomic_hook_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::atomic_hook_test_helper + absl::atomic_hook + absl::core_headers + gmock + gtest_main +) + +absl_cc_test( + NAME + bit_cast_test + SRCS + "bit_cast_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::core_headers + gtest_main +) + +absl_cc_test( + NAME + errno_saver_test + SRCS + "internal/errno_saver_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::errno_saver + absl::strerror + gmock + gtest_main +) + +absl_cc_test( + NAME + throw_delegate_test + SRCS + "throw_delegate_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::config + absl::throw_delegate + gtest_main +) + +absl_cc_test( + NAME + inline_variable_test + SRCS + "internal/inline_variable_testing.h" + "inline_variable_test.cc" + "inline_variable_test_a.cc" + "inline_variable_test_b.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base_internal + gtest_main +) + +absl_cc_test( + NAME + invoke_test + SRCS + "invoke_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base_internal + absl::memory + absl::strings + gmock + gtest_main +) + +absl_cc_library( + NAME + spinlock_test_common + SRCS + "spinlock_test_common.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::base_internal + absl::core_headers + absl::synchronization + gtest + TESTONLY +) + +# On bazel BUILD this target use "alwayslink = 1" which is not implemented here +absl_cc_test( + NAME + spinlock_test + SRCS + "spinlock_test_common.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::base_internal + absl::core_headers + absl::synchronization + gtest_main +) + +absl_cc_library( + NAME + endian + HDRS + "internal/endian.h" + "internal/unaligned_access.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers + PUBLIC +) + +absl_cc_test( + NAME + endian_test + SRCS + "internal/endian_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::config + absl::endian + gtest_main +) + +absl_cc_test( + NAME + config_test + SRCS + "config_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::config + absl::synchronization + gtest_main +) + +absl_cc_test( + NAME + call_once_test + SRCS + "call_once_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::core_headers + absl::synchronization + gtest_main +) + +absl_cc_test( + NAME + raw_logging_test + SRCS + "raw_logging_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::raw_logging_internal + absl::strings + gtest_main +) + +absl_cc_test( + NAME + sysinfo_test + SRCS + "internal/sysinfo_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::synchronization + gtest_main +) + +absl_cc_test( + NAME + low_level_alloc_test + SRCS + "internal/low_level_alloc_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::malloc_internal + absl::node_hash_map + 
Threads::Threads +) + +absl_cc_test( + NAME + thread_identity_test + SRCS + "internal/thread_identity_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::core_headers + absl::synchronization + Threads::Threads + gtest_main +) + +absl_cc_library( + NAME + bits + HDRS + "internal/bits.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers +) + +absl_cc_test( + NAME + bits_test + SRCS + "internal/bits_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::bits + gtest_main +) + +absl_cc_library( + NAME + exponential_biased + SRCS + "internal/exponential_biased.cc" + HDRS + "internal/exponential_biased.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers +) + +absl_cc_test( + NAME + exponential_biased_test + SRCS + "internal/exponential_biased_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::exponential_biased + absl::strings + gmock_main +) + +absl_cc_library( + NAME + periodic_sampler + SRCS + "internal/periodic_sampler.cc" + HDRS + "internal/periodic_sampler.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers + absl::exponential_biased +) + +absl_cc_test( + NAME + periodic_sampler_test + SRCS + "internal/periodic_sampler_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::core_headers + absl::periodic_sampler + gmock_main +) + +absl_cc_library( + NAME + scoped_set_env + SRCS + "internal/scoped_set_env.cc" + HDRS + "internal/scoped_set_env.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::raw_logging_internal +) + +absl_cc_test( + NAME + scoped_set_env_test + SRCS + "internal/scoped_set_env_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::scoped_set_env + gtest_main +) + +absl_cc_test( + NAME + cmake_thread_test + SRCS + "internal/cmake_thread_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base +) + +absl_cc_test( + NAME + log_severity_test + SRCS + "log_severity_test.cc" + DEPS + absl::flags_internal + absl::flags_marshalling + absl::log_severity + absl::strings + gmock + gtest_main +) + +absl_cc_library( + NAME + strerror + SRCS + "internal/strerror.cc" + HDRS + "internal/strerror.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::errno_saver +) + +absl_cc_test( + NAME + strerror_test + SRCS + "internal/strerror_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::strerror + absl::strings + gmock + gtest_main +) + +absl_cc_library( + NAME + fast_type_id + HDRS + "internal/fast_type_id.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config +) + +absl_cc_test( + NAME + fast_type_id_test + SRCS + "internal/fast_type_id_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::fast_type_id + gtest_main +) diff --git a/third_party/abseil_cpp/absl/base/attributes.h b/third_party/abseil_cpp/absl/base/attributes.h new file mode 100644 index 000000000000..c4fd81b002fe --- /dev/null +++ b/third_party/abseil_cpp/absl/base/attributes.h @@ -0,0 +1,623 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// This header file defines macros for declaring attributes for functions, +// types, and variables. +// +// These macros are used within Abseil and allow the compiler to optimize, where +// applicable, certain function calls. +// +// This file is used for both C and C++! +// +// Most macros here are exposing GCC or Clang features, and are stubbed out for +// other compilers. +// +// GCC attributes documentation: +// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Function-Attributes.html +// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Variable-Attributes.html +// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Type-Attributes.html +// +// Most attributes in this file are already supported by GCC 4.7. However, some +// of them are not supported in older version of Clang. Thus, we check +// `__has_attribute()` first. If the check fails, we check if we are on GCC and +// assume the attribute exists on GCC (which is verified on GCC 4.7). +// +// ----------------------------------------------------------------------------- +// Sanitizer Attributes +// ----------------------------------------------------------------------------- +// +// Sanitizer-related attributes are not "defined" in this file (and indeed +// are not defined as such in any file). To utilize the following +// sanitizer-related attributes within your builds, define the following macros +// within your build using a `-D` flag, along with the given value for +// `-fsanitize`: +// +// * `ADDRESS_SANITIZER` + `-fsanitize=address` (Clang, GCC 4.8) +// * `MEMORY_SANITIZER` + `-fsanitize=memory` (Clang-only) +// * `THREAD_SANITIZER` + `-fsanitize=thread` (Clang, GCC 4.8+) +// * `UNDEFINED_BEHAVIOR_SANITIZER` + `-fsanitize=undefined` (Clang, GCC 4.9+) +// * `CONTROL_FLOW_INTEGRITY` + `-fsanitize=cfi` (Clang-only) +// +// Example: +// +// // Enable branches in the Abseil code that are tagged for ASan: +// $ bazel build --copt=-DADDRESS_SANITIZER --copt=-fsanitize=address +// --linkopt=-fsanitize=address *target* +// +// Since these macro names are only supported by GCC and Clang, we only check +// for `__GNUC__` (GCC or Clang) and the above macros. +#ifndef ABSL_BASE_ATTRIBUTES_H_ +#define ABSL_BASE_ATTRIBUTES_H_ + +// ABSL_HAVE_ATTRIBUTE +// +// A function-like feature checking macro that is a wrapper around +// `__has_attribute`, which is defined by GCC 5+ and Clang and evaluates to a +// nonzero constant integer if the attribute is supported or 0 if not. +// +// It evaluates to zero if `__has_attribute` is not defined by the compiler. +// +// GCC: https://gcc.gnu.org/gcc-5/changes.html +// Clang: https://clang.llvm.org/docs/LanguageExtensions.html +#ifdef __has_attribute +#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x) +#else +#define ABSL_HAVE_ATTRIBUTE(x) 0 +#endif + +// ABSL_HAVE_CPP_ATTRIBUTE +// +// A function-like feature checking macro that accepts C++11 style attributes. +// It's a wrapper around `__has_cpp_attribute`, defined by ISO C++ SD-6 +// (https://en.cppreference.com/w/cpp/experimental/feature_test). If we don't +// find `__has_cpp_attribute`, will evaluate to 0. +#if defined(__cplusplus) && defined(__has_cpp_attribute) +// NOTE: requiring __cplusplus above should not be necessary, but +// works around https://bugs.llvm.org/show_bug.cgi?id=23435. 
+#define ABSL_HAVE_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) +#else +#define ABSL_HAVE_CPP_ATTRIBUTE(x) 0 +#endif + +// ----------------------------------------------------------------------------- +// Function Attributes +// ----------------------------------------------------------------------------- +// +// GCC: https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html +// Clang: https://clang.llvm.org/docs/AttributeReference.html + +// ABSL_PRINTF_ATTRIBUTE +// ABSL_SCANF_ATTRIBUTE +// +// Tells the compiler to perform `printf` format string checking if the +// compiler supports it; see the 'format' attribute in +// <https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Function-Attributes.html>. +// +// Note: As the GCC manual states, "[s]ince non-static C++ methods +// have an implicit 'this' argument, the arguments of such methods +// should be counted from two, not one." +#if ABSL_HAVE_ATTRIBUTE(format) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) \ + __attribute__((__format__(__printf__, string_index, first_to_check))) +#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check) \ + __attribute__((__format__(__scanf__, string_index, first_to_check))) +#else +#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) +#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check) +#endif + +// ABSL_ATTRIBUTE_ALWAYS_INLINE +// ABSL_ATTRIBUTE_NOINLINE +// +// Forces functions to either inline or not inline. Introduced in gcc 3.1. +#if ABSL_HAVE_ATTRIBUTE(always_inline) || \ + (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline)) +#define ABSL_HAVE_ATTRIBUTE_ALWAYS_INLINE 1 +#else +#define ABSL_ATTRIBUTE_ALWAYS_INLINE +#endif + +#if ABSL_HAVE_ATTRIBUTE(noinline) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_NOINLINE __attribute__((noinline)) +#define ABSL_HAVE_ATTRIBUTE_NOINLINE 1 +#else +#define ABSL_ATTRIBUTE_NOINLINE +#endif + +// ABSL_ATTRIBUTE_NO_TAIL_CALL +// +// Prevents the compiler from optimizing away stack frames for functions which +// end in a call to another function. +#if ABSL_HAVE_ATTRIBUTE(disable_tail_calls) +#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1 +#define ABSL_ATTRIBUTE_NO_TAIL_CALL __attribute__((disable_tail_calls)) +#elif defined(__GNUC__) && !defined(__clang__) +#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1 +#define ABSL_ATTRIBUTE_NO_TAIL_CALL \ + __attribute__((optimize("no-optimize-sibling-calls"))) +#else +#define ABSL_ATTRIBUTE_NO_TAIL_CALL +#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 0 +#endif + +// ABSL_ATTRIBUTE_WEAK +// +// Tags a function as weak for the purposes of compilation and linking. +// Weak attributes currently do not work properly in LLVM's Windows backend, +// so disable them there. See https://bugs.llvm.org/show_bug.cgi?id=37598 +// for further information. +// The MinGW compiler doesn't complain about the weak attribute until the link +// step, presumably because Windows doesn't use ELF binaries. 
+#if (ABSL_HAVE_ATTRIBUTE(weak) || \ + (defined(__GNUC__) && !defined(__clang__))) && \ + !(defined(__llvm__) && defined(_WIN32)) && !defined(__MINGW32__) +#undef ABSL_ATTRIBUTE_WEAK +#define ABSL_ATTRIBUTE_WEAK __attribute__((weak)) +#define ABSL_HAVE_ATTRIBUTE_WEAK 1 +#else +#define ABSL_ATTRIBUTE_WEAK +#define ABSL_HAVE_ATTRIBUTE_WEAK 0 +#endif + +// ABSL_ATTRIBUTE_NONNULL +// +// Tells the compiler either (a) that a particular function parameter +// should be a non-null pointer, or (b) that all pointer arguments should +// be non-null. +// +// Note: As the GCC manual states, "[s]ince non-static C++ methods +// have an implicit 'this' argument, the arguments of such methods +// should be counted from two, not one." +// +// Args are indexed starting at 1. +// +// For non-static class member functions, the implicit `this` argument +// is arg 1, and the first explicit argument is arg 2. For static class member +// functions, there is no implicit `this`, and the first explicit argument is +// arg 1. +// +// Example: +// +// /* arg_a cannot be null, but arg_b can */ +// void Function(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(1); +// +// class C { +// /* arg_a cannot be null, but arg_b can */ +// void Method(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(2); +// +// /* arg_a cannot be null, but arg_b can */ +// static void StaticMethod(void* arg_a, void* arg_b) +// ABSL_ATTRIBUTE_NONNULL(1); +// }; +// +// If no arguments are provided, then all pointer arguments should be non-null. +// +// /* No pointer arguments may be null. */ +// void Function(void* arg_a, void* arg_b, int arg_c) ABSL_ATTRIBUTE_NONNULL(); +// +// NOTE: The GCC nonnull attribute actually accepts a list of arguments, but +// ABSL_ATTRIBUTE_NONNULL does not. +#if ABSL_HAVE_ATTRIBUTE(nonnull) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_NONNULL(arg_index) __attribute__((nonnull(arg_index))) +#else +#define ABSL_ATTRIBUTE_NONNULL(...) +#endif + +// ABSL_ATTRIBUTE_NORETURN +// +// Tells the compiler that a given function never returns. +#if ABSL_HAVE_ATTRIBUTE(noreturn) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_NORETURN __attribute__((noreturn)) +#elif defined(_MSC_VER) +#define ABSL_ATTRIBUTE_NORETURN __declspec(noreturn) +#else +#define ABSL_ATTRIBUTE_NORETURN +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS +// +// Tells the AddressSanitizer (or other memory testing tools) to ignore a given +// function. Useful for cases when a function reads random locations on stack, +// calls _exit from a cloned subprocess, deliberately accesses buffer +// out of bounds or does other scary things with memory. +// NOTE: GCC supports AddressSanitizer(asan) since 4.8. +// https://gcc.gnu.org/gcc-4.8/changes.html +#if defined(__GNUC__) +#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address)) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY +// +// Tells the MemorySanitizer to relax the handling of a given function. All +// "Use of uninitialized value" warnings from such functions will be suppressed, +// and all values loaded from memory will be considered fully initialized. +// This attribute is similar to the ADDRESS_SANITIZER attribute above, but deals +// with initialized-ness rather than addressability issues. +// NOTE: MemorySanitizer(msan) is supported by Clang but not GCC. 
+#if defined(__clang__) +#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory)) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_THREAD +// +// Tells the ThreadSanitizer to not instrument a given function. +// NOTE: GCC supports ThreadSanitizer(tsan) since 4.8. +// https://gcc.gnu.org/gcc-4.8/changes.html +#if defined(__GNUC__) +#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread)) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED +// +// Tells the UndefinedSanitizer to ignore a given function. Useful for cases +// where certain behavior (eg. division by zero) is being used intentionally. +// NOTE: GCC supports UndefinedBehaviorSanitizer(ubsan) since 4.9. +// https://gcc.gnu.org/gcc-4.9/changes.html +#if defined(__GNUC__) && \ + (defined(UNDEFINED_BEHAVIOR_SANITIZER) || defined(ADDRESS_SANITIZER)) +#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \ + __attribute__((no_sanitize("undefined"))) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_CFI +// +// Tells the ControlFlowIntegrity sanitizer to not instrument a given function. +// See https://clang.llvm.org/docs/ControlFlowIntegrity.html for details. +#if defined(__GNUC__) && defined(CONTROL_FLOW_INTEGRITY) +#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI __attribute__((no_sanitize("cfi"))) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK +// +// Tells the SafeStack to not instrument a given function. +// See https://clang.llvm.org/docs/SafeStack.html for details. +#if defined(__GNUC__) && defined(SAFESTACK_SANITIZER) +#define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK \ + __attribute__((no_sanitize("safe-stack"))) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK +#endif + +// ABSL_ATTRIBUTE_RETURNS_NONNULL +// +// Tells the compiler that a particular function never returns a null pointer. +#if ABSL_HAVE_ATTRIBUTE(returns_nonnull) || \ + (defined(__GNUC__) && \ + (__GNUC__ > 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)) && \ + !defined(__clang__)) +#define ABSL_ATTRIBUTE_RETURNS_NONNULL __attribute__((returns_nonnull)) +#else +#define ABSL_ATTRIBUTE_RETURNS_NONNULL +#endif + +// ABSL_HAVE_ATTRIBUTE_SECTION +// +// Indicates whether labeled sections are supported. Weak symbol support is +// a prerequisite. Labeled sections are not supported on Darwin/iOS. +#ifdef ABSL_HAVE_ATTRIBUTE_SECTION +#error ABSL_HAVE_ATTRIBUTE_SECTION cannot be directly set +#elif (ABSL_HAVE_ATTRIBUTE(section) || \ + (defined(__GNUC__) && !defined(__clang__))) && \ + !defined(__APPLE__) && ABSL_HAVE_ATTRIBUTE_WEAK +#define ABSL_HAVE_ATTRIBUTE_SECTION 1 + +// ABSL_ATTRIBUTE_SECTION +// +// Tells the compiler/linker to put a given function into a section and define +// `__start_ ## name` and `__stop_ ## name` symbols to bracket the section. +// This functionality is supported by GNU linker. Any function annotated with +// `ABSL_ATTRIBUTE_SECTION` must not be inlined, or it will be placed into +// whatever section its caller is placed into. +// +#ifndef ABSL_ATTRIBUTE_SECTION +#define ABSL_ATTRIBUTE_SECTION(name) \ + __attribute__((section(#name))) __attribute__((noinline)) +#endif + + +// ABSL_ATTRIBUTE_SECTION_VARIABLE +// +// Tells the compiler/linker to put a given variable into a section and define +// `__start_ ## name` and `__stop_ ## name` symbols to bracket the section. +// This functionality is supported by GNU linker. 
+#ifndef ABSL_ATTRIBUTE_SECTION_VARIABLE +#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) __attribute__((section(#name))) +#endif + +// ABSL_DECLARE_ATTRIBUTE_SECTION_VARS +// +// A weak section declaration to be used as a global declaration +// for ABSL_ATTRIBUTE_SECTION_START|STOP(name) to compile and link +// even without functions with ABSL_ATTRIBUTE_SECTION(name). +// ABSL_DEFINE_ATTRIBUTE_SECTION should be in the exactly one file; it's +// a no-op on ELF but not on Mach-O. +// +#ifndef ABSL_DECLARE_ATTRIBUTE_SECTION_VARS +#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \ + extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \ + extern char __stop_##name[] ABSL_ATTRIBUTE_WEAK +#endif +#ifndef ABSL_DEFINE_ATTRIBUTE_SECTION_VARS +#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name) +#define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name) +#endif + +// ABSL_ATTRIBUTE_SECTION_START +// +// Returns `void*` pointers to start/end of a section of code with +// functions having ABSL_ATTRIBUTE_SECTION(name). +// Returns 0 if no such functions exist. +// One must ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) for this to compile and +// link. +// +#define ABSL_ATTRIBUTE_SECTION_START(name) \ + (reinterpret_cast<void *>(__start_##name)) +#define ABSL_ATTRIBUTE_SECTION_STOP(name) \ + (reinterpret_cast<void *>(__stop_##name)) + +#else // !ABSL_HAVE_ATTRIBUTE_SECTION + +#define ABSL_HAVE_ATTRIBUTE_SECTION 0 + +// provide dummy definitions +#define ABSL_ATTRIBUTE_SECTION(name) +#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) +#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name) +#define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name) +#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) +#define ABSL_ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void *>(0)) +#define ABSL_ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void *>(0)) + +#endif // ABSL_ATTRIBUTE_SECTION + +// ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC +// +// Support for aligning the stack on 32-bit x86. +#if ABSL_HAVE_ATTRIBUTE(force_align_arg_pointer) || \ + (defined(__GNUC__) && !defined(__clang__)) +#if defined(__i386__) +#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC \ + __attribute__((force_align_arg_pointer)) +#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0) +#elif defined(__x86_64__) +#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (1) +#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC +#else // !__i386__ && !__x86_64 +#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0) +#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC +#endif // __i386__ +#else +#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC +#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0) +#endif + +// ABSL_MUST_USE_RESULT +// +// Tells the compiler to warn about unused results. +// +// When annotating a function, it must appear as the first part of the +// declaration or definition. The compiler will warn if the return value from +// such a function is unused: +// +// ABSL_MUST_USE_RESULT Sprocket* AllocateSprocket(); +// AllocateSprocket(); // Triggers a warning. +// +// When annotating a class, it is equivalent to annotating every function which +// returns an instance. +// +// class ABSL_MUST_USE_RESULT Sprocket {}; +// Sprocket(); // Triggers a warning. +// +// Sprocket MakeSprocket(); +// MakeSprocket(); // Triggers a warning. +// +// Note that references and pointers are not instances: +// +// Sprocket* SprocketPointer(); +// SprocketPointer(); // Does *not* trigger a warning. +// +// ABSL_MUST_USE_RESULT allows using cast-to-void to suppress the unused result +// warning. 
For that, warn_unused_result is used only for clang but not for gcc. +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66425 +// +// Note: past advice was to place the macro after the argument list. +#if ABSL_HAVE_ATTRIBUTE(nodiscard) +#define ABSL_MUST_USE_RESULT [[nodiscard]] +#elif defined(__clang__) && ABSL_HAVE_ATTRIBUTE(warn_unused_result) +#define ABSL_MUST_USE_RESULT __attribute__((warn_unused_result)) +#else +#define ABSL_MUST_USE_RESULT +#endif + +// ABSL_ATTRIBUTE_HOT, ABSL_ATTRIBUTE_COLD +// +// Tells GCC that a function is hot or cold. GCC can use this information to +// improve static analysis, i.e. a conditional branch to a cold function +// is likely to be not-taken. +// This annotation is used for function declarations. +// +// Example: +// +// int foo() ABSL_ATTRIBUTE_HOT; +#if ABSL_HAVE_ATTRIBUTE(hot) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_HOT __attribute__((hot)) +#else +#define ABSL_ATTRIBUTE_HOT +#endif + +#if ABSL_HAVE_ATTRIBUTE(cold) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_COLD __attribute__((cold)) +#else +#define ABSL_ATTRIBUTE_COLD +#endif + +// ABSL_XRAY_ALWAYS_INSTRUMENT, ABSL_XRAY_NEVER_INSTRUMENT, ABSL_XRAY_LOG_ARGS +// +// We define the ABSL_XRAY_ALWAYS_INSTRUMENT and ABSL_XRAY_NEVER_INSTRUMENT +// macro used as an attribute to mark functions that must always or never be +// instrumented by XRay. Currently, this is only supported in Clang/LLVM. +// +// For reference on the LLVM XRay instrumentation, see +// http://llvm.org/docs/XRay.html. +// +// A function with the XRAY_ALWAYS_INSTRUMENT macro attribute in its declaration +// will always get the XRay instrumentation sleds. These sleds may introduce +// some binary size and runtime overhead and must be used sparingly. +// +// These attributes only take effect when the following conditions are met: +// +// * The file/target is built in at least C++11 mode, with a Clang compiler +// that supports XRay attributes. +// * The file/target is built with the -fxray-instrument flag set for the +// Clang/LLVM compiler. +// * The function is defined in the translation unit (the compiler honors the +// attribute in either the definition or the declaration, and must match). +// +// There are cases when, even when building with XRay instrumentation, users +// might want to control specifically which functions are instrumented for a +// particular build using special-case lists provided to the compiler. These +// special case lists are provided to Clang via the +// -fxray-always-instrument=... and -fxray-never-instrument=... flags. The +// attributes in source take precedence over these special-case lists. +// +// To disable the XRay attributes at build-time, users may define +// ABSL_NO_XRAY_ATTRIBUTES. Do NOT define ABSL_NO_XRAY_ATTRIBUTES on specific +// packages/targets, as this may lead to conflicting definitions of functions at +// link-time. 
+// +// XRay isn't currently supported on Android: +// https://github.com/android/ndk/issues/368 +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_always_instrument) && \ + !defined(ABSL_NO_XRAY_ATTRIBUTES) && !defined(__ANDROID__) +#define ABSL_XRAY_ALWAYS_INSTRUMENT [[clang::xray_always_instrument]] +#define ABSL_XRAY_NEVER_INSTRUMENT [[clang::xray_never_instrument]] +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_log_args) +#define ABSL_XRAY_LOG_ARGS(N) \ + [[clang::xray_always_instrument, clang::xray_log_args(N)]] +#else +#define ABSL_XRAY_LOG_ARGS(N) [[clang::xray_always_instrument]] +#endif +#else +#define ABSL_XRAY_ALWAYS_INSTRUMENT +#define ABSL_XRAY_NEVER_INSTRUMENT +#define ABSL_XRAY_LOG_ARGS(N) +#endif + +// ABSL_ATTRIBUTE_REINITIALIZES +// +// Indicates that a member function reinitializes the entire object to a known +// state, independent of the previous state of the object. +// +// The clang-tidy check bugprone-use-after-move allows member functions marked +// with this attribute to be called on objects that have been moved from; +// without the attribute, this would result in a use-after-move warning. +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::reinitializes) +#define ABSL_ATTRIBUTE_REINITIALIZES [[clang::reinitializes]] +#else +#define ABSL_ATTRIBUTE_REINITIALIZES +#endif + +// ----------------------------------------------------------------------------- +// Variable Attributes +// ----------------------------------------------------------------------------- + +// ABSL_ATTRIBUTE_UNUSED +// +// Prevents the compiler from complaining about variables that appear unused. +#if ABSL_HAVE_ATTRIBUTE(unused) || (defined(__GNUC__) && !defined(__clang__)) +#undef ABSL_ATTRIBUTE_UNUSED +#define ABSL_ATTRIBUTE_UNUSED __attribute__((__unused__)) +#else +#define ABSL_ATTRIBUTE_UNUSED +#endif + +// ABSL_ATTRIBUTE_INITIAL_EXEC +// +// Tells the compiler to use "initial-exec" mode for a thread-local variable. +// See http://people.redhat.com/drepper/tls.pdf for the gory details. +#if ABSL_HAVE_ATTRIBUTE(tls_model) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_INITIAL_EXEC __attribute__((tls_model("initial-exec"))) +#else +#define ABSL_ATTRIBUTE_INITIAL_EXEC +#endif + +// ABSL_ATTRIBUTE_PACKED +// +// Instructs the compiler not to use natural alignment for a tagged data +// structure, but instead to reduce its alignment to 1. This attribute can +// either be applied to members of a structure or to a structure in its +// entirety. Applying this attribute (judiciously) to a structure in its +// entirety to optimize the memory footprint of very commonly-used structs is +// fine. Do not apply this attribute to a structure in its entirety if the +// purpose is to control the offsets of the members in the structure. Instead, +// apply this attribute only to structure members that need it. +// +// When applying ABSL_ATTRIBUTE_PACKED only to specific structure members the +// natural alignment of structure members not annotated is preserved. Aligned +// member accesses are faster than non-aligned member accesses even if the +// targeted microprocessor supports non-aligned accesses. 
+#if ABSL_HAVE_ATTRIBUTE(packed) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_PACKED __attribute__((__packed__)) +#else +#define ABSL_ATTRIBUTE_PACKED +#endif + +// ABSL_ATTRIBUTE_FUNC_ALIGN +// +// Tells the compiler to align the function start at least to certain +// alignment boundary +#if ABSL_HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_FUNC_ALIGN(bytes) __attribute__((aligned(bytes))) +#else +#define ABSL_ATTRIBUTE_FUNC_ALIGN(bytes) +#endif + +// ABSL_CONST_INIT +// +// A variable declaration annotated with the `ABSL_CONST_INIT` attribute will +// not compile (on supported platforms) unless the variable has a constant +// initializer. This is useful for variables with static and thread storage +// duration, because it guarantees that they will not suffer from the so-called +// "static init order fiasco". Prefer to put this attribute on the most visible +// declaration of the variable, if there's more than one, because code that +// accesses the variable can then use the attribute for optimization. +// +// Example: +// +// class MyClass { +// public: +// ABSL_CONST_INIT static MyType my_var; +// }; +// +// MyType MyClass::my_var = MakeMyType(...); +// +// Note that this attribute is redundant if the variable is declared constexpr. +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization) +#define ABSL_CONST_INIT [[clang::require_constant_initialization]] +#else +#define ABSL_CONST_INIT +#endif // ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization) + +#endif // ABSL_BASE_ATTRIBUTES_H_ diff --git a/third_party/abseil_cpp/absl/base/bit_cast_test.cc b/third_party/abseil_cpp/absl/base/bit_cast_test.cc new file mode 100644 index 000000000000..8a3a41ea0231 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/bit_cast_test.cc @@ -0,0 +1,109 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Unit test for bit_cast template. + +#include <cstdint> +#include <cstring> + +#include "gtest/gtest.h" +#include "absl/base/casts.h" +#include "absl/base/macros.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace { + +template <int N> +struct marshall { char buf[N]; }; + +template <typename T> +void TestMarshall(const T values[], int num_values) { + for (int i = 0; i < num_values; ++i) { + T t0 = values[i]; + marshall<sizeof(T)> m0 = absl::bit_cast<marshall<sizeof(T)> >(t0); + T t1 = absl::bit_cast<T>(m0); + marshall<sizeof(T)> m1 = absl::bit_cast<marshall<sizeof(T)> >(t1); + ASSERT_EQ(0, memcmp(&t0, &t1, sizeof(T))); + ASSERT_EQ(0, memcmp(&m0, &m1, sizeof(T))); + } +} + +// Convert back and forth to an integral type. The C++ standard does +// not guarantee this will work, but we test that this works on all the +// platforms we support. +// +// Likewise, we below make assumptions about sizeof(float) and +// sizeof(double) which the standard does not guarantee, but which hold on the +// platforms we support. 
+ +template <typename T, typename I> +void TestIntegral(const T values[], int num_values) { + for (int i = 0; i < num_values; ++i) { + T t0 = values[i]; + I i0 = absl::bit_cast<I>(t0); + T t1 = absl::bit_cast<T>(i0); + I i1 = absl::bit_cast<I>(t1); + ASSERT_EQ(0, memcmp(&t0, &t1, sizeof(T))); + ASSERT_EQ(i0, i1); + } +} + +TEST(BitCast, Bool) { + static const bool bool_list[] = { false, true }; + TestMarshall<bool>(bool_list, ABSL_ARRAYSIZE(bool_list)); +} + +TEST(BitCast, Int32) { + static const int32_t int_list[] = + { 0, 1, 100, 2147483647, -1, -100, -2147483647, -2147483647-1 }; + TestMarshall<int32_t>(int_list, ABSL_ARRAYSIZE(int_list)); +} + +TEST(BitCast, Int64) { + static const int64_t int64_list[] = + { 0, 1, 1LL << 40, -1, -(1LL<<40) }; + TestMarshall<int64_t>(int64_list, ABSL_ARRAYSIZE(int64_list)); +} + +TEST(BitCast, Uint64) { + static const uint64_t uint64_list[] = + { 0, 1, 1LLU << 40, 1LLU << 63 }; + TestMarshall<uint64_t>(uint64_list, ABSL_ARRAYSIZE(uint64_list)); +} + +TEST(BitCast, Float) { + static const float float_list[] = + { 0.0f, 1.0f, -1.0f, 10.0f, -10.0f, + 1e10f, 1e20f, 1e-10f, 1e-20f, + 2.71828f, 3.14159f }; + TestMarshall<float>(float_list, ABSL_ARRAYSIZE(float_list)); + TestIntegral<float, int>(float_list, ABSL_ARRAYSIZE(float_list)); + TestIntegral<float, unsigned>(float_list, ABSL_ARRAYSIZE(float_list)); +} + +TEST(BitCast, Double) { + static const double double_list[] = + { 0.0, 1.0, -1.0, 10.0, -10.0, + 1e10, 1e100, 1e-10, 1e-100, + 2.718281828459045, + 3.141592653589793238462643383279502884197169399375105820974944 }; + TestMarshall<double>(double_list, ABSL_ARRAYSIZE(double_list)); + TestIntegral<double, int64_t>(double_list, ABSL_ARRAYSIZE(double_list)); + TestIntegral<double, uint64_t>(double_list, ABSL_ARRAYSIZE(double_list)); +} + +} // namespace +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/call_once.h b/third_party/abseil_cpp/absl/base/call_once.h new file mode 100644 index 000000000000..bc5ec9370413 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/call_once.h @@ -0,0 +1,226 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: call_once.h +// ----------------------------------------------------------------------------- +// +// This header file provides an Abseil version of `std::call_once` for invoking +// a given function at most once, across all threads. This Abseil version is +// faster than the C++11 version and incorporates the C++17 argument-passing +// fix, so that (for example) non-const references may be passed to the invoked +// function. 
+ +#ifndef ABSL_BASE_CALL_ONCE_H_ +#define ABSL_BASE_CALL_ONCE_H_ + +#include <algorithm> +#include <atomic> +#include <cstdint> +#include <type_traits> +#include <utility> + +#include "absl/base/internal/invoke.h" +#include "absl/base/internal/low_level_scheduling.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/internal/scheduling_mode.h" +#include "absl/base/internal/spinlock_wait.h" +#include "absl/base/macros.h" +#include "absl/base/optimization.h" +#include "absl/base/port.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +class once_flag; + +namespace base_internal { +std::atomic<uint32_t>* ControlWord(absl::once_flag* flag); +} // namespace base_internal + +// call_once() +// +// For all invocations using a given `once_flag`, invokes a given `fn` exactly +// once across all threads. The first call to `call_once()` with a particular +// `once_flag` argument (that does not throw an exception) will run the +// specified function with the provided `args`; other calls with the same +// `once_flag` argument will not run the function, but will wait +// for the provided function to finish running (if it is still running). +// +// This mechanism provides a safe, simple, and fast mechanism for one-time +// initialization in a multi-threaded process. +// +// Example: +// +// class MyInitClass { +// public: +// ... +// mutable absl::once_flag once_; +// +// MyInitClass* init() const { +// absl::call_once(once_, &MyInitClass::Init, this); +// return ptr_; +// } +// +template <typename Callable, typename... Args> +void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args); + +// once_flag +// +// Objects of this type are used to distinguish calls to `call_once()` and +// ensure the provided function is only invoked once across all threads. This +// type is not copyable or movable. However, it has a `constexpr` +// constructor, and is safe to use as a namespace-scoped global variable. +class once_flag { + public: + constexpr once_flag() : control_(0) {} + once_flag(const once_flag&) = delete; + once_flag& operator=(const once_flag&) = delete; + + private: + friend std::atomic<uint32_t>* base_internal::ControlWord(once_flag* flag); + std::atomic<uint32_t> control_; +}; + +//------------------------------------------------------------------------------ +// End of public interfaces. +// Implementation details follow. +//------------------------------------------------------------------------------ + +namespace base_internal { + +// Like call_once, but uses KERNEL_ONLY scheduling. Intended to be used to +// initialize entities used by the scheduler implementation. +template <typename Callable, typename... Args> +void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args); + +// Disables scheduling while on stack when scheduling mode is non-cooperative. +// No effect for cooperative scheduling modes. +class SchedulingHelper { + public: + explicit SchedulingHelper(base_internal::SchedulingMode mode) : mode_(mode) { + if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) { + guard_result_ = base_internal::SchedulingGuard::DisableRescheduling(); + } + } + + ~SchedulingHelper() { + if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) { + base_internal::SchedulingGuard::EnableRescheduling(guard_result_); + } + } + + private: + base_internal::SchedulingMode mode_; + bool guard_result_; +}; + +// Bit patterns for call_once state machine values. Internal implementation +// detail, not for use by clients. 
+// +// The bit patterns are arbitrarily chosen from unlikely values, to aid in +// debugging. However, kOnceInit must be 0, so that a zero-initialized +// once_flag will be valid for immediate use. +enum { + kOnceInit = 0, + kOnceRunning = 0x65C2937B, + kOnceWaiter = 0x05A308D2, + // A very small constant is chosen for kOnceDone so that it fit in a single + // compare with immediate instruction for most common ISAs. This is verified + // for x86, POWER and ARM. + kOnceDone = 221, // Random Number +}; + +template <typename Callable, typename... Args> +ABSL_ATTRIBUTE_NOINLINE +void CallOnceImpl(std::atomic<uint32_t>* control, + base_internal::SchedulingMode scheduling_mode, Callable&& fn, + Args&&... args) { +#ifndef NDEBUG + { + uint32_t old_control = control->load(std::memory_order_relaxed); + if (old_control != kOnceInit && + old_control != kOnceRunning && + old_control != kOnceWaiter && + old_control != kOnceDone) { + ABSL_RAW_LOG(FATAL, "Unexpected value for control word: 0x%lx", + static_cast<unsigned long>(old_control)); // NOLINT + } + } +#endif // NDEBUG + static const base_internal::SpinLockWaitTransition trans[] = { + {kOnceInit, kOnceRunning, true}, + {kOnceRunning, kOnceWaiter, false}, + {kOnceDone, kOnceDone, true}}; + + // Must do this before potentially modifying control word's state. + base_internal::SchedulingHelper maybe_disable_scheduling(scheduling_mode); + // Short circuit the simplest case to avoid procedure call overhead. + // The base_internal::SpinLockWait() call returns either kOnceInit or + // kOnceDone. If it returns kOnceDone, it must have loaded the control word + // with std::memory_order_acquire and seen a value of kOnceDone. + uint32_t old_control = kOnceInit; + if (control->compare_exchange_strong(old_control, kOnceRunning, + std::memory_order_relaxed) || + base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans, + scheduling_mode) == kOnceInit) { + base_internal::Invoke(std::forward<Callable>(fn), + std::forward<Args>(args)...); + // The call to SpinLockWake below is an optimization, because the waiter + // in SpinLockWait is waiting with a short timeout. The atomic load/store + // sequence is slightly faster than an atomic exchange: + // old_control = control->exchange(base_internal::kOnceDone, + // std::memory_order_release); + // We opt for a slightly faster case when there are no waiters, in spite + // of longer tail latency when there are waiters. + old_control = control->load(std::memory_order_relaxed); + control->store(base_internal::kOnceDone, std::memory_order_release); + if (old_control == base_internal::kOnceWaiter) { + base_internal::SpinLockWake(control, true); + } + } // else *control is already kOnceDone +} + +inline std::atomic<uint32_t>* ControlWord(once_flag* flag) { + return &flag->control_; +} + +template <typename Callable, typename... Args> +void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args) { + std::atomic<uint32_t>* once = base_internal::ControlWord(flag); + uint32_t s = once->load(std::memory_order_acquire); + if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) { + base_internal::CallOnceImpl(once, base_internal::SCHEDULE_KERNEL_ONLY, + std::forward<Callable>(fn), + std::forward<Args>(args)...); + } +} + +} // namespace base_internal + +template <typename Callable, typename... Args> +void call_once(absl::once_flag& flag, Callable&& fn, Args&&... 
args) { + std::atomic<uint32_t>* once = base_internal::ControlWord(&flag); + uint32_t s = once->load(std::memory_order_acquire); + if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) { + base_internal::CallOnceImpl( + once, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL, + std::forward<Callable>(fn), std::forward<Args>(args)...); + } +} + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_CALL_ONCE_H_ diff --git a/third_party/abseil_cpp/absl/base/call_once_test.cc b/third_party/abseil_cpp/absl/base/call_once_test.cc new file mode 100644 index 000000000000..11d26c44d1a4 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/call_once_test.cc @@ -0,0 +1,107 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/call_once.h" + +#include <thread> +#include <vector> + +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/const_init.h" +#include "absl/base/thread_annotations.h" +#include "absl/synchronization/mutex.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace { + +absl::once_flag once; + +ABSL_CONST_INIT Mutex counters_mu(absl::kConstInit); + +int running_thread_count ABSL_GUARDED_BY(counters_mu) = 0; +int call_once_invoke_count ABSL_GUARDED_BY(counters_mu) = 0; +int call_once_finished_count ABSL_GUARDED_BY(counters_mu) = 0; +int call_once_return_count ABSL_GUARDED_BY(counters_mu) = 0; +bool done_blocking ABSL_GUARDED_BY(counters_mu) = false; + +// Function to be called from absl::call_once. Waits for a notification. +void WaitAndIncrement() { + counters_mu.Lock(); + ++call_once_invoke_count; + counters_mu.Unlock(); + + counters_mu.LockWhen(Condition(&done_blocking)); + ++call_once_finished_count; + counters_mu.Unlock(); +} + +void ThreadBody() { + counters_mu.Lock(); + ++running_thread_count; + counters_mu.Unlock(); + + absl::call_once(once, WaitAndIncrement); + + counters_mu.Lock(); + ++call_once_return_count; + counters_mu.Unlock(); +} + +// Returns true if all threads are set up for the test. +bool ThreadsAreSetup(void*) ABSL_EXCLUSIVE_LOCKS_REQUIRED(counters_mu) { + // All ten threads must be running, and WaitAndIncrement should be blocked. + return running_thread_count == 10 && call_once_invoke_count == 1; +} + +TEST(CallOnceTest, ExecutionCount) { + std::vector<std::thread> threads; + + // Start 10 threads all calling call_once on the same once_flag. + for (int i = 0; i < 10; ++i) { + threads.emplace_back(ThreadBody); + } + + + // Wait until all ten threads have started, and WaitAndIncrement has been + // invoked. + counters_mu.LockWhen(Condition(ThreadsAreSetup, nullptr)); + + // WaitAndIncrement should have been invoked by exactly one call_once() + // instance. That thread should be blocking on a notification, and all other + // call_once instances should be blocking as well. + EXPECT_EQ(call_once_invoke_count, 1); + EXPECT_EQ(call_once_finished_count, 0); + EXPECT_EQ(call_once_return_count, 0); + + // Allow WaitAndIncrement to finish executing. 
Once it does, the other + // call_once waiters will be unblocked. + done_blocking = true; + counters_mu.Unlock(); + + for (std::thread& thread : threads) { + thread.join(); + } + + counters_mu.Lock(); + EXPECT_EQ(call_once_invoke_count, 1); + EXPECT_EQ(call_once_finished_count, 1); + EXPECT_EQ(call_once_return_count, 10); + counters_mu.Unlock(); +} + +} // namespace +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/casts.h b/third_party/abseil_cpp/absl/base/casts.h new file mode 100644 index 000000000000..322cc1d243f6 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/casts.h @@ -0,0 +1,184 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: casts.h +// ----------------------------------------------------------------------------- +// +// This header file defines casting templates to fit use cases not covered by +// the standard casts provided in the C++ standard. As with all cast operations, +// use these with caution and only if alternatives do not exist. + +#ifndef ABSL_BASE_CASTS_H_ +#define ABSL_BASE_CASTS_H_ + +#include <cstring> +#include <memory> +#include <type_traits> +#include <utility> + +#include "absl/base/internal/identity.h" +#include "absl/base/macros.h" +#include "absl/meta/type_traits.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +namespace internal_casts { + +template <class Dest, class Source> +struct is_bitcastable + : std::integral_constant< + bool, + sizeof(Dest) == sizeof(Source) && + type_traits_internal::is_trivially_copyable<Source>::value && + type_traits_internal::is_trivially_copyable<Dest>::value && + std::is_default_constructible<Dest>::value> {}; + +} // namespace internal_casts + +// implicit_cast() +// +// Performs an implicit conversion between types following the language +// rules for implicit conversion; if an implicit conversion is otherwise +// allowed by the language in the given context, this function performs such an +// implicit conversion. +// +// Example: +// +// // If the context allows implicit conversion: +// From from; +// To to = from; +// +// // Such code can be replaced by: +// implicit_cast<To>(from); +// +// An `implicit_cast()` may also be used to annotate numeric type conversions +// that, although safe, may produce compiler warnings (such as `long` to `int`). +// Additionally, an `implicit_cast()` is also useful within return statements to +// indicate a specific implicit conversion is being undertaken. +// +// Example: +// +// return implicit_cast<double>(size_in_bytes) / capacity_; +// +// Annotating code with `implicit_cast()` allows you to explicitly select +// particular overloads and template instantiations, while providing a safer +// cast than `reinterpret_cast()` or `static_cast()`. 
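+//
+// For instance (an illustrative sketch; `Log` is a hypothetical overload set,
+// not part of Abseil):
+//
+//   void Log(long value);
+//   void Log(double value);
+//
+//   int n = 42;
+//   Log(n);                       // error: ambiguous (int -> long vs. double)
+//   Log(implicit_cast<long>(n));  // OK: unambiguously selects Log(long)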
+// +// Additionally, an `implicit_cast()` can be used to allow upcasting within a +// type hierarchy where incorrect use of `static_cast()` could accidentally +// allow downcasting. +// +// Finally, an `implicit_cast()` can be used to perform implicit conversions +// from unrelated types that otherwise couldn't be implicitly cast directly; +// C++ will normally only implicitly cast "one step" in such conversions. +// +// That is, if C is a type which can be implicitly converted to B, with B being +// a type that can be implicitly converted to A, an `implicit_cast()` can be +// used to convert C to B (which the compiler can then implicitly convert to A +// using language rules). +// +// Example: +// +// // Assume an object C is convertible to B, which is implicitly convertible +// // to A +// A a = implicit_cast<B>(C); +// +// Such implicit cast chaining may be useful within template logic. +template <typename To> +constexpr To implicit_cast(typename absl::internal::identity_t<To> to) { + return to; +} + +// bit_cast() +// +// Performs a bitwise cast on a type without changing the underlying bit +// representation of that type's value. The two types must be of the same size +// and both types must be trivially copyable. As with most casts, use with +// caution. A `bit_cast()` might be needed when you need to temporarily treat a +// type as some other type, such as in the following cases: +// +// * Serialization (casting temporarily to `char *` for those purposes is +// always allowed by the C++ standard) +// * Managing the individual bits of a type within mathematical operations +// that are not normally accessible through that type +// * Casting non-pointer types to pointer types (casting the other way is +// allowed by `reinterpret_cast()` but round-trips cannot occur the other +// way). +// +// Example: +// +// float f = 3.14159265358979; +// int i = bit_cast<int32_t>(f); +// // i = 0x40490fdb +// +// Casting non-pointer types to pointer types and then dereferencing them +// traditionally produces undefined behavior. +// +// Example: +// +// // WRONG +// float f = 3.14159265358979; // WRONG +// int i = * reinterpret_cast<int*>(&f); // WRONG +// +// The address-casting method produces undefined behavior according to the ISO +// C++ specification section [basic.lval]. Roughly, this section says: if an +// object in memory has one type, and a program accesses it with a different +// type, the result is undefined behavior for most values of "different type". +// +// Such casting results in type punning: holding an object in memory of one type +// and reading its bits back using a different type. A `bit_cast()` avoids this +// issue by implementing its casts using `memcpy()`, which avoids introducing +// this undefined behavior. +// +// NOTE: The requirements here are more strict than the bit_cast of standard +// proposal p0476 due to the need for workarounds and lack of intrinsics. +// Specifically, this implementation also requires `Dest` to be +// default-constructible. +template < + typename Dest, typename Source, + typename std::enable_if<internal_casts::is_bitcastable<Dest, Source>::value, + int>::type = 0> +inline Dest bit_cast(const Source& source) { + Dest dest; + memcpy(static_cast<void*>(std::addressof(dest)), + static_cast<const void*>(std::addressof(source)), sizeof(dest)); + return dest; +} + +// NOTE: This overload is only picked if the requirements of bit_cast are not +// met. 
It is therefore UB, but is provided temporarily as previous versions of +// this function template were unchecked. Do not use this in new code. +template < + typename Dest, typename Source, + typename std::enable_if< + !internal_casts::is_bitcastable<Dest, Source>::value, int>::type = 0> +ABSL_DEPRECATED( + "absl::bit_cast type requirements were violated. Update the types being " + "used such that they are the same size and are both TriviallyCopyable.") +inline Dest bit_cast(const Source& source) { + static_assert(sizeof(Dest) == sizeof(Source), + "Source and destination types should have equal sizes."); + + Dest dest; + memcpy(&dest, &source, sizeof(dest)); + return dest; +} + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_CASTS_H_ diff --git a/third_party/abseil_cpp/absl/base/config.h b/third_party/abseil_cpp/absl/base/config.h new file mode 100644 index 000000000000..f54466deebad --- /dev/null +++ b/third_party/abseil_cpp/absl/base/config.h @@ -0,0 +1,664 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: config.h +// ----------------------------------------------------------------------------- +// +// This header file defines a set of macros for checking the presence of +// important compiler and platform features. Such macros can be used to +// produce portable code by parameterizing compilation based on the presence or +// lack of a given feature. +// +// We define a "feature" as some interface we wish to program to: for example, +// a library function or system call. A value of `1` indicates support for +// that feature; any other value indicates the feature support is undefined. +// +// Example: +// +// Suppose a programmer wants to write a program that uses the 'mmap()' system +// call. The Abseil macro for that feature (`ABSL_HAVE_MMAP`) allows you to +// selectively include the `mmap.h` header and bracket code using that feature +// in the macro: +// +// #include "absl/base/config.h" +// +// #ifdef ABSL_HAVE_MMAP +// #include "sys/mman.h" +// #endif //ABSL_HAVE_MMAP +// +// ... +// #ifdef ABSL_HAVE_MMAP +// void *ptr = mmap(...); +// ... +// #endif // ABSL_HAVE_MMAP + +#ifndef ABSL_BASE_CONFIG_H_ +#define ABSL_BASE_CONFIG_H_ + +// Included for the __GLIBC__ macro (or similar macros on other systems). +#include <limits.h> + +#ifdef __cplusplus +// Included for __GLIBCXX__, _LIBCPP_VERSION +#include <cstddef> +#endif // __cplusplus + +#if defined(__APPLE__) +// Included for TARGET_OS_IPHONE, __IPHONE_OS_VERSION_MIN_REQUIRED, +// __IPHONE_8_0. +#include <Availability.h> +#include <TargetConditionals.h> +#endif + +#include "absl/base/options.h" +#include "absl/base/policy_checks.h" + +// Helper macro to convert a CPP variable to a string literal. 
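+// For example (illustrative; `FOO` is a hypothetical macro):
+//   #define FOO bar
+//   ABSL_INTERNAL_TOKEN_STR(FOO)     // "bar" (argument is macro-expanded)
+//   ABSL_INTERNAL_DO_TOKEN_STR(FOO)  // "FOO" (argument is not expanded)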
+#define ABSL_INTERNAL_DO_TOKEN_STR(x) #x +#define ABSL_INTERNAL_TOKEN_STR(x) ABSL_INTERNAL_DO_TOKEN_STR(x) + +// ----------------------------------------------------------------------------- +// Abseil namespace annotations +// ----------------------------------------------------------------------------- + +// ABSL_NAMESPACE_BEGIN/ABSL_NAMESPACE_END +// +// An annotation placed at the beginning/end of each `namespace absl` scope. +// This is used to inject an inline namespace. +// +// The proper way to write Abseil code in the `absl` namespace is: +// +// namespace absl { +// ABSL_NAMESPACE_BEGIN +// +// void Foo(); // absl::Foo(). +// +// ABSL_NAMESPACE_END +// } // namespace absl +// +// Users of Abseil should not use these macros, because users of Abseil should +// not write `namespace absl {` in their own code for any reason. (Abseil does +// not support forward declarations of its own types, nor does it support +// user-provided specialization of Abseil templates. Code that violates these +// rules may be broken without warning.) +#if !defined(ABSL_OPTION_USE_INLINE_NAMESPACE) || \ + !defined(ABSL_OPTION_INLINE_NAMESPACE_NAME) +#error options.h is misconfigured. +#endif + +// Check that ABSL_OPTION_INLINE_NAMESPACE_NAME is neither "head" nor "" +#if defined(__cplusplus) && ABSL_OPTION_USE_INLINE_NAMESPACE == 1 + +#define ABSL_INTERNAL_INLINE_NAMESPACE_STR \ + ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME) + +static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != '\0', + "options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must " + "not be empty."); +static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || + ABSL_INTERNAL_INLINE_NAMESPACE_STR[1] != 'e' || + ABSL_INTERNAL_INLINE_NAMESPACE_STR[2] != 'a' || + ABSL_INTERNAL_INLINE_NAMESPACE_STR[3] != 'd' || + ABSL_INTERNAL_INLINE_NAMESPACE_STR[4] != '\0', + "options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must " + "be changed to a new, unique identifier name."); + +#endif + +#if ABSL_OPTION_USE_INLINE_NAMESPACE == 0 +#define ABSL_NAMESPACE_BEGIN +#define ABSL_NAMESPACE_END +#elif ABSL_OPTION_USE_INLINE_NAMESPACE == 1 +#define ABSL_NAMESPACE_BEGIN \ + inline namespace ABSL_OPTION_INLINE_NAMESPACE_NAME { +#define ABSL_NAMESPACE_END } +#else +#error options.h is misconfigured. +#endif + +// ----------------------------------------------------------------------------- +// Compiler Feature Checks +// ----------------------------------------------------------------------------- + +// ABSL_HAVE_BUILTIN() +// +// Checks whether the compiler supports a Clang Feature Checking Macro, and if +// so, checks whether it supports the provided builtin function "x" where x +// is one of the functions noted in +// https://clang.llvm.org/docs/LanguageExtensions.html +// +// Note: Use this macro to avoid an extra level of #ifdef __has_builtin check. +// http://releases.llvm.org/3.3/tools/clang/docs/LanguageExtensions.html +#ifdef __has_builtin +#define ABSL_HAVE_BUILTIN(x) __has_builtin(x) +#else +#define ABSL_HAVE_BUILTIN(x) 0 +#endif + +#if defined(__is_identifier) +#define ABSL_INTERNAL_HAS_KEYWORD(x) !(__is_identifier(x)) +#else +#define ABSL_INTERNAL_HAS_KEYWORD(x) 0 +#endif + +// ABSL_HAVE_TLS is defined to 1 when __thread should be supported. +// We assume __thread is supported on Linux when compiled with Clang or compiled +// against libstdc++ with _GLIBCXX_HAVE_TLS defined. 
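+//
+// Typical usage is to guard a `__thread` declaration (a sketch, not taken
+// from this header):
+//
+//   #ifdef ABSL_HAVE_TLS
+//   static __thread int tls_counter;  // fast compiler-supported TLS
+//   #else
+//   // fall back to pthread_getspecific()/pthread_setspecific() or similar
+//   #endif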
+#ifdef ABSL_HAVE_TLS +#error ABSL_HAVE_TLS cannot be directly set +#elif defined(__linux__) && (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS)) +#define ABSL_HAVE_TLS 1 +#endif + +// ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE +// +// Checks whether `std::is_trivially_destructible<T>` is supported. +// +// Notes: All supported compilers using libc++ support this feature, as does +// gcc >= 4.8.1 using libstdc++, and Visual Studio. +#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE +#error ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE cannot be directly set +#elif defined(_LIBCPP_VERSION) || \ + (!defined(__clang__) && defined(__GNUC__) && defined(__GLIBCXX__) && \ + (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) || \ + defined(_MSC_VER) +#define ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE 1 +#endif + +// ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE +// +// Checks whether `std::is_trivially_default_constructible<T>` and +// `std::is_trivially_copy_constructible<T>` are supported. + +// ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE +// +// Checks whether `std::is_trivially_copy_assignable<T>` is supported. + +// Notes: Clang with libc++ supports these features, as does gcc >= 5.1 with +// either libc++ or libstdc++, and Visual Studio (but not NVCC). +#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) +#error ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set +#elif defined(ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE) +#error ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot directly set +#elif (defined(__clang__) && defined(_LIBCPP_VERSION)) || \ + (!defined(__clang__) && defined(__GNUC__) && \ + (__GNUC__ > 7 || (__GNUC__ == 7 && __GNUC_MINOR__ >= 4)) && \ + (defined(_LIBCPP_VERSION) || defined(__GLIBCXX__))) || \ + (defined(_MSC_VER) && !defined(__NVCC__)) +#define ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1 +#define ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1 +#endif + +// ABSL_HAVE_SOURCE_LOCATION_CURRENT +// +// Indicates whether `absl::SourceLocation::current()` will return useful +// information in some contexts. +#ifndef ABSL_HAVE_SOURCE_LOCATION_CURRENT +#if ABSL_INTERNAL_HAS_KEYWORD(__builtin_LINE) && \ + ABSL_INTERNAL_HAS_KEYWORD(__builtin_FILE) +#define ABSL_HAVE_SOURCE_LOCATION_CURRENT 1 +#endif +#endif + +// ABSL_HAVE_THREAD_LOCAL +// +// Checks whether C++11's `thread_local` storage duration specifier is +// supported. +#ifdef ABSL_HAVE_THREAD_LOCAL +#error ABSL_HAVE_THREAD_LOCAL cannot be directly set +#elif defined(__APPLE__) +// Notes: +// * Xcode's clang did not support `thread_local` until version 8, and +// even then not for all iOS < 9.0. +// * Xcode 9.3 started disallowing `thread_local` for 32-bit iOS simulator +// targeting iOS 9.x. +// * Xcode 10 moves the deployment target check for iOS < 9.0 to link time +// making __has_feature unreliable there. +// +// Otherwise, `__has_feature` is only supported by Clang so it has be inside +// `defined(__APPLE__)` check. +#if __has_feature(cxx_thread_local) && \ + !(TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0) +#define ABSL_HAVE_THREAD_LOCAL 1 +#endif +#else // !defined(__APPLE__) +#define ABSL_HAVE_THREAD_LOCAL 1 +#endif + +// There are platforms for which TLS should not be used even though the compiler +// makes it seem like it's supported (Android NDK < r12b for example). +// This is primarily because of linker problems and toolchain misconfiguration: +// Abseil does not intend to support this indefinitely. 
Currently, the newest +// toolchain that we intend to support that requires this behavior is the +// r11 NDK - allowing for a 5 year support window on that means this option +// is likely to be removed around June of 2021. +// TLS isn't supported until NDK r12b per +// https://developer.android.com/ndk/downloads/revision_history.html +// Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in +// <android/ndk-version.h>. For NDK < r16, users should define these macros, +// e.g. `-D__NDK_MAJOR__=11 -D__NKD_MINOR__=0` for NDK r11. +#if defined(__ANDROID__) && defined(__clang__) +#if __has_include(<android/ndk-version.h>) +#include <android/ndk-version.h> +#endif // __has_include(<android/ndk-version.h>) +#if defined(__ANDROID__) && defined(__clang__) && defined(__NDK_MAJOR__) && \ + defined(__NDK_MINOR__) && \ + ((__NDK_MAJOR__ < 12) || ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1))) +#undef ABSL_HAVE_TLS +#undef ABSL_HAVE_THREAD_LOCAL +#endif +#endif // defined(__ANDROID__) && defined(__clang__) + +// ABSL_HAVE_INTRINSIC_INT128 +// +// Checks whether the __int128 compiler extension for a 128-bit integral type is +// supported. +// +// Note: __SIZEOF_INT128__ is defined by Clang and GCC when __int128 is +// supported, but we avoid using it in certain cases: +// * On Clang: +// * Building using Clang for Windows, where the Clang runtime library has +// 128-bit support only on LP64 architectures, but Windows is LLP64. +// * On Nvidia's nvcc: +// * nvcc also defines __GNUC__ and __SIZEOF_INT128__, but not all versions +// actually support __int128. +#ifdef ABSL_HAVE_INTRINSIC_INT128 +#error ABSL_HAVE_INTRINSIC_INT128 cannot be directly set +#elif defined(__SIZEOF_INT128__) +#if (defined(__clang__) && !defined(_WIN32)) || \ + (defined(__CUDACC__) && __CUDACC_VER_MAJOR__ >= 9) || \ + (defined(__GNUC__) && !defined(__clang__) && !defined(__CUDACC__)) +#define ABSL_HAVE_INTRINSIC_INT128 1 +#elif defined(__CUDACC__) +// __CUDACC_VER__ is a full version number before CUDA 9, and is defined to a +// string explaining that it has been removed starting with CUDA 9. We use +// nested #ifs because there is no short-circuiting in the preprocessor. +// NOTE: `__CUDACC__` could be undefined while `__CUDACC_VER__` is defined. +#if __CUDACC_VER__ >= 70000 +#define ABSL_HAVE_INTRINSIC_INT128 1 +#endif // __CUDACC_VER__ >= 70000 +#endif // defined(__CUDACC__) +#endif // ABSL_HAVE_INTRINSIC_INT128 + +// ABSL_HAVE_EXCEPTIONS +// +// Checks whether the compiler both supports and enables exceptions. Many +// compilers support a "no exceptions" mode that disables exceptions. +// +// Generally, when ABSL_HAVE_EXCEPTIONS is not defined: +// +// * Code using `throw` and `try` may not compile. +// * The `noexcept` specifier will still compile and behave as normal. +// * The `noexcept` operator may still return `false`. +// +// For further details, consult the compiler's documentation. +#ifdef ABSL_HAVE_EXCEPTIONS +#error ABSL_HAVE_EXCEPTIONS cannot be directly set. 
+ +#elif defined(__clang__) + +#if __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 6) +// Clang >= 3.6 +#if __has_feature(cxx_exceptions) +#define ABSL_HAVE_EXCEPTIONS 1 +#endif // __has_feature(cxx_exceptions) +#else +// Clang < 3.6 +// http://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html#the-exceptions-macro +#if defined(__EXCEPTIONS) && __has_feature(cxx_exceptions) +#define ABSL_HAVE_EXCEPTIONS 1 +#endif // defined(__EXCEPTIONS) && __has_feature(cxx_exceptions) +#endif // __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 6) + +// Handle remaining special cases and default to exceptions being supported. +#elif !(defined(__GNUC__) && (__GNUC__ < 5) && !defined(__EXCEPTIONS)) && \ + !(defined(__GNUC__) && (__GNUC__ >= 5) && !defined(__cpp_exceptions)) && \ + !(defined(_MSC_VER) && !defined(_CPPUNWIND)) +#define ABSL_HAVE_EXCEPTIONS 1 +#endif + +// ----------------------------------------------------------------------------- +// Platform Feature Checks +// ----------------------------------------------------------------------------- + +// Currently supported operating systems and associated preprocessor +// symbols: +// +// Linux and Linux-derived __linux__ +// Android __ANDROID__ (implies __linux__) +// Linux (non-Android) __linux__ && !__ANDROID__ +// Darwin (macOS and iOS) __APPLE__ +// Akaros (http://akaros.org) __ros__ +// Windows _WIN32 +// NaCL __native_client__ +// AsmJS __asmjs__ +// WebAssembly __wasm__ +// Fuchsia __Fuchsia__ +// +// Note that since Android defines both __ANDROID__ and __linux__, one +// may probe for either Linux or Android by simply testing for __linux__. + +// ABSL_HAVE_MMAP +// +// Checks whether the platform has an mmap(2) implementation as defined in +// POSIX.1-2001. +#ifdef ABSL_HAVE_MMAP +#error ABSL_HAVE_MMAP cannot be directly set +#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ + defined(__ros__) || defined(__native_client__) || defined(__asmjs__) || \ + defined(__wasm__) || defined(__Fuchsia__) || defined(__sun) || \ + defined(__ASYLO__) +#define ABSL_HAVE_MMAP 1 +#endif + +// ABSL_HAVE_PTHREAD_GETSCHEDPARAM +// +// Checks whether the platform implements the pthread_(get|set)schedparam(3) +// functions as defined in POSIX.1-2001. +#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM +#error ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set +#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ + defined(__ros__) +#define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1 +#endif + +// ABSL_HAVE_SCHED_YIELD +// +// Checks whether the platform implements sched_yield(2) as defined in +// POSIX.1-2001. +#ifdef ABSL_HAVE_SCHED_YIELD +#error ABSL_HAVE_SCHED_YIELD cannot be directly set +#elif defined(__linux__) || defined(__ros__) || defined(__native_client__) +#define ABSL_HAVE_SCHED_YIELD 1 +#endif + +// ABSL_HAVE_SEMAPHORE_H +// +// Checks whether the platform supports the <semaphore.h> header and sem_init(3) +// family of functions as standardized in POSIX.1-2001. +// +// Note: While Apple provides <semaphore.h> for both iOS and macOS, it is +// explicitly deprecated and will cause build failures if enabled for those +// platforms. We side-step the issue by not defining it here for Apple +// platforms. 
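+//
+// As with the ABSL_HAVE_MMAP example in the file header, the intended pattern
+// is to bracket both the include and its uses (an illustrative sketch):
+//
+//   #ifdef ABSL_HAVE_SEMAPHORE_H
+//   #include <semaphore.h>
+//   // ... sem_init()/sem_wait()/sem_post() calls here ...
+//   #endif  // ABSL_HAVE_SEMAPHORE_H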
+#ifdef ABSL_HAVE_SEMAPHORE_H +#error ABSL_HAVE_SEMAPHORE_H cannot be directly set +#elif defined(__linux__) || defined(__ros__) +#define ABSL_HAVE_SEMAPHORE_H 1 +#endif + +// ABSL_HAVE_ALARM +// +// Checks whether the platform supports the <signal.h> header and alarm(2) +// function as standardized in POSIX.1-2001. +#ifdef ABSL_HAVE_ALARM +#error ABSL_HAVE_ALARM cannot be directly set +#elif defined(__GOOGLE_GRTE_VERSION__) +// feature tests for Google's GRTE +#define ABSL_HAVE_ALARM 1 +#elif defined(__GLIBC__) +// feature test for glibc +#define ABSL_HAVE_ALARM 1 +#elif defined(_MSC_VER) +// feature tests for Microsoft's library +#elif defined(__MINGW32__) +// mingw32 doesn't provide alarm(2): +// https://osdn.net/projects/mingw/scm/git/mingw-org-wsl/blobs/5.2-trunk/mingwrt/include/unistd.h +// mingw-w64 provides a no-op implementation: +// https://sourceforge.net/p/mingw-w64/mingw-w64/ci/master/tree/mingw-w64-crt/misc/alarm.c +#elif defined(__EMSCRIPTEN__) +// emscripten doesn't support signals +#elif defined(__Fuchsia__) +// Signals don't exist on fuchsia. +#elif defined(__native_client__) +#else +// other standard libraries +#define ABSL_HAVE_ALARM 1 +#endif + +// ABSL_IS_LITTLE_ENDIAN +// ABSL_IS_BIG_ENDIAN +// +// Checks the endianness of the platform. +// +// Notes: uses the built in endian macros provided by GCC (since 4.6) and +// Clang (since 3.2); see +// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html. +// Otherwise, if _WIN32, assume little endian. Otherwise, bail with an error. +#if defined(ABSL_IS_BIG_ENDIAN) +#error "ABSL_IS_BIG_ENDIAN cannot be directly set." +#endif +#if defined(ABSL_IS_LITTLE_ENDIAN) +#error "ABSL_IS_LITTLE_ENDIAN cannot be directly set." +#endif + +#if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \ + __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) +#define ABSL_IS_LITTLE_ENDIAN 1 +#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \ + __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +#define ABSL_IS_BIG_ENDIAN 1 +#elif defined(_WIN32) +#define ABSL_IS_LITTLE_ENDIAN 1 +#else +#error "absl endian detection needs to be set up for your compiler" +#endif + +// macOS 10.13 and iOS 10.11 don't let you use <any>, <optional>, or <variant> +// even though the headers exist and are publicly noted to work. See +// https://github.com/abseil/abseil-cpp/issues/207 and +// https://developer.apple.com/documentation/xcode_release_notes/xcode_10_release_notes +// libc++ spells out the availability requirements in the file +// llvm-project/libcxx/include/__config via the #define +// _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS. +#if defined(__APPLE__) && defined(_LIBCPP_VERSION) && \ + ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101400) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 120000) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 50000)) +#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1 +#else +#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0 +#endif + +// ABSL_HAVE_STD_ANY +// +// Checks whether C++17 std::any is available by checking whether <any> exists. +#ifdef ABSL_HAVE_STD_ANY +#error "ABSL_HAVE_STD_ANY cannot be directly set." 
+#endif + +#ifdef __has_include +#if __has_include(<any>) && __cplusplus >= 201703L && \ + !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE +#define ABSL_HAVE_STD_ANY 1 +#endif +#endif + +// ABSL_HAVE_STD_OPTIONAL +// +// Checks whether C++17 std::optional is available. +#ifdef ABSL_HAVE_STD_OPTIONAL +#error "ABSL_HAVE_STD_OPTIONAL cannot be directly set." +#endif + +#ifdef __has_include +#if __has_include(<optional>) && __cplusplus >= 201703L && \ + !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE +#define ABSL_HAVE_STD_OPTIONAL 1 +#endif +#endif + +// ABSL_HAVE_STD_VARIANT +// +// Checks whether C++17 std::variant is available. +#ifdef ABSL_HAVE_STD_VARIANT +#error "ABSL_HAVE_STD_VARIANT cannot be directly set." +#endif + +#ifdef __has_include +#if __has_include(<variant>) && __cplusplus >= 201703L && \ + !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE +#define ABSL_HAVE_STD_VARIANT 1 +#endif +#endif + +// ABSL_HAVE_STD_STRING_VIEW +// +// Checks whether C++17 std::string_view is available. +#ifdef ABSL_HAVE_STD_STRING_VIEW +#error "ABSL_HAVE_STD_STRING_VIEW cannot be directly set." +#endif + +#ifdef __has_include +#if __has_include(<string_view>) && __cplusplus >= 201703L +#define ABSL_HAVE_STD_STRING_VIEW 1 +#endif +#endif + +// For MSVC, `__has_include` is supported in VS 2017 15.3, which is later than +// the support for <optional>, <any>, <string_view>, <variant>. So we use +// _MSC_VER to check whether we have VS 2017 RTM (when <optional>, <any>, +// <string_view>, <variant> is implemented) or higher. Also, `__cplusplus` is +// not correctly set by MSVC, so we use `_MSVC_LANG` to check the language +// version. +// TODO(zhangxy): fix tests before enabling aliasing for `std::any`. +#if defined(_MSC_VER) && _MSC_VER >= 1910 && \ + ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || __cplusplus > 201402) +// #define ABSL_HAVE_STD_ANY 1 +#define ABSL_HAVE_STD_OPTIONAL 1 +#define ABSL_HAVE_STD_VARIANT 1 +#define ABSL_HAVE_STD_STRING_VIEW 1 +#endif + +// ABSL_USES_STD_ANY +// +// Indicates whether absl::any is an alias for std::any. +#if !defined(ABSL_OPTION_USE_STD_ANY) +#error options.h is misconfigured. +#elif ABSL_OPTION_USE_STD_ANY == 0 || \ + (ABSL_OPTION_USE_STD_ANY == 2 && !defined(ABSL_HAVE_STD_ANY)) +#undef ABSL_USES_STD_ANY +#elif ABSL_OPTION_USE_STD_ANY == 1 || \ + (ABSL_OPTION_USE_STD_ANY == 2 && defined(ABSL_HAVE_STD_ANY)) +#define ABSL_USES_STD_ANY 1 +#else +#error options.h is misconfigured. +#endif + +// ABSL_USES_STD_OPTIONAL +// +// Indicates whether absl::optional is an alias for std::optional. +#if !defined(ABSL_OPTION_USE_STD_OPTIONAL) +#error options.h is misconfigured. +#elif ABSL_OPTION_USE_STD_OPTIONAL == 0 || \ + (ABSL_OPTION_USE_STD_OPTIONAL == 2 && !defined(ABSL_HAVE_STD_OPTIONAL)) +#undef ABSL_USES_STD_OPTIONAL +#elif ABSL_OPTION_USE_STD_OPTIONAL == 1 || \ + (ABSL_OPTION_USE_STD_OPTIONAL == 2 && defined(ABSL_HAVE_STD_OPTIONAL)) +#define ABSL_USES_STD_OPTIONAL 1 +#else +#error options.h is misconfigured. +#endif + +// ABSL_USES_STD_VARIANT +// +// Indicates whether absl::variant is an alias for std::variant. +#if !defined(ABSL_OPTION_USE_STD_VARIANT) +#error options.h is misconfigured. +#elif ABSL_OPTION_USE_STD_VARIANT == 0 || \ + (ABSL_OPTION_USE_STD_VARIANT == 2 && !defined(ABSL_HAVE_STD_VARIANT)) +#undef ABSL_USES_STD_VARIANT +#elif ABSL_OPTION_USE_STD_VARIANT == 1 || \ + (ABSL_OPTION_USE_STD_VARIANT == 2 && defined(ABSL_HAVE_STD_VARIANT)) +#define ABSL_USES_STD_VARIANT 1 +#else +#error options.h is misconfigured. 
+#endif + +// ABSL_USES_STD_STRING_VIEW +// +// Indicates whether absl::string_view is an alias for std::string_view. +#if !defined(ABSL_OPTION_USE_STD_STRING_VIEW) +#error options.h is misconfigured. +#elif ABSL_OPTION_USE_STD_STRING_VIEW == 0 || \ + (ABSL_OPTION_USE_STD_STRING_VIEW == 2 && \ + !defined(ABSL_HAVE_STD_STRING_VIEW)) +#undef ABSL_USES_STD_STRING_VIEW +#elif ABSL_OPTION_USE_STD_STRING_VIEW == 1 || \ + (ABSL_OPTION_USE_STD_STRING_VIEW == 2 && \ + defined(ABSL_HAVE_STD_STRING_VIEW)) +#define ABSL_USES_STD_STRING_VIEW 1 +#else +#error options.h is misconfigured. +#endif + +// In debug mode, MSVC 2017's std::variant throws a EXCEPTION_ACCESS_VIOLATION +// SEH exception from emplace for variant<SomeStruct> when constructing the +// struct can throw. This defeats some of variant_test and +// variant_exception_safety_test. +#if defined(_MSC_VER) && _MSC_VER >= 1700 && defined(_DEBUG) +#define ABSL_INTERNAL_MSVC_2017_DBG_MODE +#endif + +// ABSL_INTERNAL_MANGLED_NS +// ABSL_INTERNAL_MANGLED_BACKREFERENCE +// +// Internal macros for building up mangled names in our internal fork of CCTZ. +// This implementation detail is only needed and provided for the MSVC build. +// +// These macros both expand to string literals. ABSL_INTERNAL_MANGLED_NS is +// the mangled spelling of the `absl` namespace, and +// ABSL_INTERNAL_MANGLED_BACKREFERENCE is a back-reference integer representing +// the proper count to skip past the CCTZ fork namespace names. (This number +// is one larger when there is an inline namespace name to skip.) +#if defined(_MSC_VER) +#if ABSL_OPTION_USE_INLINE_NAMESPACE == 0 +#define ABSL_INTERNAL_MANGLED_NS "absl" +#define ABSL_INTERNAL_MANGLED_BACKREFERENCE "5" +#else +#define ABSL_INTERNAL_MANGLED_NS \ + ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME) "@absl" +#define ABSL_INTERNAL_MANGLED_BACKREFERENCE "6" +#endif +#endif + +#undef ABSL_INTERNAL_HAS_KEYWORD + +// ABSL_DLL +// +// When building Abseil as a DLL, this macro expands to `__declspec(dllexport)` +// so we can annotate symbols appropriately as being exported. When used in +// headers consuming a DLL, this macro expands to `__declspec(dllimport)` so +// that consumers know the symbol is defined inside the DLL. In all other cases, +// the macro expands to nothing. +#if defined(_MSC_VER) +#if defined(ABSL_BUILD_DLL) +#define ABSL_DLL __declspec(dllexport) +#elif defined(ABSL_CONSUME_DLL) +#define ABSL_DLL __declspec(dllimport) +#else +#define ABSL_DLL +#endif +#else +#define ABSL_DLL +#endif // defined(_MSC_VER) + +#endif // ABSL_BASE_CONFIG_H_ diff --git a/third_party/abseil_cpp/absl/base/config_test.cc b/third_party/abseil_cpp/absl/base/config_test.cc new file mode 100644 index 000000000000..7e0c033de59f --- /dev/null +++ b/third_party/abseil_cpp/absl/base/config_test.cc @@ -0,0 +1,60 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "absl/base/config.h" + +#include <cstdint> + +#include "gtest/gtest.h" +#include "absl/synchronization/internal/thread_pool.h" + +namespace { + +TEST(ConfigTest, Endianness) { + union { + uint32_t value; + uint8_t data[sizeof(uint32_t)]; + } number; + number.data[0] = 0x00; + number.data[1] = 0x01; + number.data[2] = 0x02; + number.data[3] = 0x03; +#if defined(ABSL_IS_LITTLE_ENDIAN) && defined(ABSL_IS_BIG_ENDIAN) +#error Both ABSL_IS_LITTLE_ENDIAN and ABSL_IS_BIG_ENDIAN are defined +#elif defined(ABSL_IS_LITTLE_ENDIAN) + EXPECT_EQ(UINT32_C(0x03020100), number.value); +#elif defined(ABSL_IS_BIG_ENDIAN) + EXPECT_EQ(UINT32_C(0x00010203), number.value); +#else +#error Unknown endianness +#endif +} + +#if defined(ABSL_HAVE_THREAD_LOCAL) +TEST(ConfigTest, ThreadLocal) { + static thread_local int mine_mine_mine = 16; + EXPECT_EQ(16, mine_mine_mine); + { + absl::synchronization_internal::ThreadPool pool(1); + pool.Schedule([&] { + EXPECT_EQ(16, mine_mine_mine); + mine_mine_mine = 32; + EXPECT_EQ(32, mine_mine_mine); + }); + } + EXPECT_EQ(16, mine_mine_mine); +} +#endif + +} // namespace diff --git a/third_party/abseil_cpp/absl/base/const_init.h b/third_party/abseil_cpp/absl/base/const_init.h new file mode 100644 index 000000000000..16520b61d95b --- /dev/null +++ b/third_party/abseil_cpp/absl/base/const_init.h @@ -0,0 +1,76 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// kConstInit +// ----------------------------------------------------------------------------- +// +// A constructor tag used to mark an object as safe for use as a global +// variable, avoiding the usual lifetime issues that can affect globals. + +#ifndef ABSL_BASE_CONST_INIT_H_ +#define ABSL_BASE_CONST_INIT_H_ + +#include "absl/base/config.h" + +// In general, objects with static storage duration (such as global variables) +// can trigger tricky object lifetime situations. Attempting to access them +// from the constructors or destructors of other global objects can result in +// undefined behavior, unless their constructors and destructors are designed +// with this issue in mind. +// +// The normal way to deal with this issue in C++11 is to use constant +// initialization and trivial destructors. +// +// Constant initialization is guaranteed to occur before any other code +// executes. Constructors that are declared 'constexpr' are eligible for +// constant initialization. You can annotate a variable declaration with the +// ABSL_CONST_INIT macro to express this intent. For compilers that support +// it, this annotation will cause a compilation error for declarations that +// aren't subject to constant initialization (perhaps because a runtime value +// was passed as a constructor argument). +// +// On program shutdown, lifetime issues can be avoided on global objects by +// ensuring that they contain trivial destructors. 
A class has a trivial +// destructor unless it has a user-defined destructor, a virtual method or base +// class, or a data member or base class with a non-trivial destructor of its +// own. Objects with static storage duration and a trivial destructor are not +// cleaned up on program shutdown, and are thus safe to access from other code +// running during shutdown. +// +// For a few core Abseil classes, we make a best effort to allow for safe global +// instances, even though these classes have non-trivial destructors. These +// objects can be created with the absl::kConstInit tag. For example: +// ABSL_CONST_INIT absl::Mutex global_mutex(absl::kConstInit); +// +// The line above declares a global variable of type absl::Mutex which can be +// accessed at any point during startup or shutdown. global_mutex's destructor +// will still run, but will not invalidate the object. Note that C++ specifies +// that accessing an object after its destructor has run results in undefined +// behavior, but this pattern works on the toolchains we support. +// +// The absl::kConstInit tag should only be used to define objects with static +// or thread_local storage duration. + +namespace absl { +ABSL_NAMESPACE_BEGIN + +enum ConstInitType { + kConstInit, +}; + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_CONST_INIT_H_ diff --git a/third_party/abseil_cpp/absl/base/dynamic_annotations.cc b/third_party/abseil_cpp/absl/base/dynamic_annotations.cc new file mode 100644 index 000000000000..21e822e53cfb --- /dev/null +++ b/third_party/abseil_cpp/absl/base/dynamic_annotations.cc @@ -0,0 +1,129 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include <stdlib.h> +#include <string.h> + +#include "absl/base/dynamic_annotations.h" + +#ifndef __has_feature +#define __has_feature(x) 0 +#endif + +/* Compiler-based ThreadSanitizer defines + DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL = 1 + and provides its own definitions of the functions. */ + +#ifndef DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL +# define DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL 0 +#endif + +/* Each function is empty and called (via a macro) only in debug mode. + The arguments are captured by dynamic tools at runtime. 
*/ + +#if DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 && !defined(__native_client__) + +#if __has_feature(memory_sanitizer) +#include <sanitizer/msan_interface.h> +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +void AnnotateRWLockCreate(const char *, int, + const volatile void *){} +void AnnotateRWLockDestroy(const char *, int, + const volatile void *){} +void AnnotateRWLockAcquired(const char *, int, + const volatile void *, long){} +void AnnotateRWLockReleased(const char *, int, + const volatile void *, long){} +void AnnotateBenignRace(const char *, int, + const volatile void *, + const char *){} +void AnnotateBenignRaceSized(const char *, int, + const volatile void *, + size_t, + const char *) {} +void AnnotateThreadName(const char *, int, + const char *){} +void AnnotateIgnoreReadsBegin(const char *, int){} +void AnnotateIgnoreReadsEnd(const char *, int){} +void AnnotateIgnoreWritesBegin(const char *, int){} +void AnnotateIgnoreWritesEnd(const char *, int){} +void AnnotateEnableRaceDetection(const char *, int, int){} +void AnnotateMemoryIsInitialized(const char *, int, + const volatile void *mem, size_t size) { +#if __has_feature(memory_sanitizer) + __msan_unpoison(mem, size); +#else + (void)mem; + (void)size; +#endif +} + +void AnnotateMemoryIsUninitialized(const char *, int, + const volatile void *mem, size_t size) { +#if __has_feature(memory_sanitizer) + __msan_allocated_memory(mem, size); +#else + (void)mem; + (void)size; +#endif +} + +static int GetRunningOnValgrind(void) { +#ifdef RUNNING_ON_VALGRIND + if (RUNNING_ON_VALGRIND) return 1; +#endif + char *running_on_valgrind_str = getenv("RUNNING_ON_VALGRIND"); + if (running_on_valgrind_str) { + return strcmp(running_on_valgrind_str, "0") != 0; + } + return 0; +} + +/* See the comments in dynamic_annotations.h */ +int RunningOnValgrind(void) { + static volatile int running_on_valgrind = -1; + int local_running_on_valgrind = running_on_valgrind; + /* C doesn't have thread-safe initialization of statics, and we + don't want to depend on pthread_once here, so hack it. */ + ANNOTATE_BENIGN_RACE(&running_on_valgrind, "safe hack"); + if (local_running_on_valgrind == -1) + running_on_valgrind = local_running_on_valgrind = GetRunningOnValgrind(); + return local_running_on_valgrind; +} + +/* See the comments in dynamic_annotations.h */ +double ValgrindSlowdown(void) { + /* Same initialization hack as in RunningOnValgrind(). */ + static volatile double slowdown = 0.0; + double local_slowdown = slowdown; + ANNOTATE_BENIGN_RACE(&slowdown, "safe hack"); + if (RunningOnValgrind() == 0) { + return 1.0; + } + if (local_slowdown == 0.0) { + char *env = getenv("VALGRIND_SLOWDOWN"); + slowdown = local_slowdown = env ? atof(env) : 50.0; + } + return local_slowdown; +} + +#ifdef __cplusplus +} // extern "C" +#endif +#endif /* DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */ diff --git a/third_party/abseil_cpp/absl/base/dynamic_annotations.h b/third_party/abseil_cpp/absl/base/dynamic_annotations.h new file mode 100644 index 000000000000..2d98526075eb --- /dev/null +++ b/third_party/abseil_cpp/absl/base/dynamic_annotations.h @@ -0,0 +1,386 @@ +/* + * Copyright 2017 The Abseil Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/* This file defines dynamic annotations for use with dynamic analysis + tool such as valgrind, PIN, etc. + + Dynamic annotation is a source code annotation that affects + the generated code (that is, the annotation is not a comment). + Each such annotation is attached to a particular + instruction and/or to a particular object (address) in the program. + + The annotations that should be used by users are macros in all upper-case + (e.g., ANNOTATE_THREAD_NAME). + + Actual implementation of these macros may differ depending on the + dynamic analysis tool being used. + + This file supports the following configurations: + - Dynamic Annotations enabled (with static thread-safety warnings disabled). + In this case, macros expand to functions implemented by Thread Sanitizer, + when building with TSan. When not provided an external implementation, + dynamic_annotations.cc provides no-op implementations. + + - Static Clang thread-safety warnings enabled. + When building with a Clang compiler that supports thread-safety warnings, + a subset of annotations can be statically-checked at compile-time. We + expand these macros to static-inline functions that can be analyzed for + thread-safety, but afterwards elided when building the final binary. + + - All annotations are disabled. + If neither Dynamic Annotations nor Clang thread-safety warnings are + enabled, then all annotation-macros expand to empty. */ + +#ifndef ABSL_BASE_DYNAMIC_ANNOTATIONS_H_ +#define ABSL_BASE_DYNAMIC_ANNOTATIONS_H_ + +#ifndef DYNAMIC_ANNOTATIONS_ENABLED +# define DYNAMIC_ANNOTATIONS_ENABLED 0 +#endif + +#if DYNAMIC_ANNOTATIONS_ENABLED != 0 + + /* ------------------------------------------------------------- + Annotations that suppress errors. It is usually better to express the + program's synchronization using the other annotations, but these can + be used when all else fails. */ + + /* Report that we may have a benign race at "pointer", with size + "sizeof(*(pointer))". "pointer" must be a non-void* pointer. Insert at the + point where "pointer" has been allocated, preferably close to the point + where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. */ + #define ANNOTATE_BENIGN_RACE(pointer, description) \ + AnnotateBenignRaceSized(__FILE__, __LINE__, pointer, \ + sizeof(*(pointer)), description) + + /* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to + the memory range [address, address+size). */ + #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ + AnnotateBenignRaceSized(__FILE__, __LINE__, address, size, description) + + /* Enable (enable!=0) or disable (enable==0) race detection for all threads. + This annotation could be useful if you want to skip expensive race analysis + during some period of program execution, e.g. during initialization. */ + #define ANNOTATE_ENABLE_RACE_DETECTION(enable) \ + AnnotateEnableRaceDetection(__FILE__, __LINE__, enable) + + /* ------------------------------------------------------------- + Annotations useful for debugging. */ + + /* Report the current thread name to a race detector. 
*/ + #define ANNOTATE_THREAD_NAME(name) \ + AnnotateThreadName(__FILE__, __LINE__, name) + + /* ------------------------------------------------------------- + Annotations useful when implementing locks. They are not + normally needed by modules that merely use locks. + The "lock" argument is a pointer to the lock object. */ + + /* Report that a lock has been created at address "lock". */ + #define ANNOTATE_RWLOCK_CREATE(lock) \ + AnnotateRWLockCreate(__FILE__, __LINE__, lock) + + /* Report that a linker initialized lock has been created at address "lock". + */ +#ifdef THREAD_SANITIZER + #define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ + AnnotateRWLockCreateStatic(__FILE__, __LINE__, lock) +#else + #define ANNOTATE_RWLOCK_CREATE_STATIC(lock) ANNOTATE_RWLOCK_CREATE(lock) +#endif + + /* Report that the lock at address "lock" is about to be destroyed. */ + #define ANNOTATE_RWLOCK_DESTROY(lock) \ + AnnotateRWLockDestroy(__FILE__, __LINE__, lock) + + /* Report that the lock at address "lock" has been acquired. + is_w=1 for writer lock, is_w=0 for reader lock. */ + #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ + AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w) + + /* Report that the lock at address "lock" is about to be released. */ + #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ + AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w) + +#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */ + + #define ANNOTATE_RWLOCK_CREATE(lock) /* empty */ + #define ANNOTATE_RWLOCK_CREATE_STATIC(lock) /* empty */ + #define ANNOTATE_RWLOCK_DESTROY(lock) /* empty */ + #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) /* empty */ + #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) /* empty */ + #define ANNOTATE_BENIGN_RACE(address, description) /* empty */ + #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) /* empty */ + #define ANNOTATE_THREAD_NAME(name) /* empty */ + #define ANNOTATE_ENABLE_RACE_DETECTION(enable) /* empty */ + +#endif /* DYNAMIC_ANNOTATIONS_ENABLED */ + +/* These annotations are also made available to LLVM's Memory Sanitizer */ +#if DYNAMIC_ANNOTATIONS_ENABLED == 1 || defined(MEMORY_SANITIZER) + #define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ + AnnotateMemoryIsInitialized(__FILE__, __LINE__, address, size) + + #define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ + AnnotateMemoryIsUninitialized(__FILE__, __LINE__, address, size) +#else + #define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) /* empty */ + #define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) /* empty */ +#endif /* DYNAMIC_ANNOTATIONS_ENABLED || MEMORY_SANITIZER */ + +#if defined(__clang__) && !defined(SWIG) + + #if DYNAMIC_ANNOTATIONS_ENABLED == 0 + #define ANNOTALYSIS_ENABLED + #endif + + /* When running in opt-mode, GCC will issue a warning, if these attributes are + compiled. Only include them when compiling using Clang. */ + #define ATTRIBUTE_IGNORE_READS_BEGIN \ + __attribute((exclusive_lock_function("*"))) + #define ATTRIBUTE_IGNORE_READS_END \ + __attribute((unlock_function("*"))) +#else + #define ATTRIBUTE_IGNORE_READS_BEGIN /* empty */ + #define ATTRIBUTE_IGNORE_READS_END /* empty */ +#endif /* defined(__clang__) && ... */ + +#if (DYNAMIC_ANNOTATIONS_ENABLED != 0) || defined(ANNOTALYSIS_ENABLED) + #define ANNOTATIONS_ENABLED +#endif + +#if (DYNAMIC_ANNOTATIONS_ENABLED != 0) + + /* Request the analysis tool to ignore all reads in the current thread + until ANNOTATE_IGNORE_READS_END is called. + Useful to ignore intentional racey reads, while still checking + other reads and all writes. 
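+     A typical (purely illustrative) pattern, mirroring the
+     ANNOTATE_UNPROTECTED_READ example later in this header:
+       ANNOTATE_IGNORE_READS_BEGIN();
+       ... = racy_value;     (an intentionally racy read)
+       ANNOTATE_IGNORE_READS_END();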
+ See also ANNOTATE_UNPROTECTED_READ. */ + #define ANNOTATE_IGNORE_READS_BEGIN() \ + AnnotateIgnoreReadsBegin(__FILE__, __LINE__) + + /* Stop ignoring reads. */ + #define ANNOTATE_IGNORE_READS_END() \ + AnnotateIgnoreReadsEnd(__FILE__, __LINE__) + + /* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead. */ + #define ANNOTATE_IGNORE_WRITES_BEGIN() \ + AnnotateIgnoreWritesBegin(__FILE__, __LINE__) + + /* Stop ignoring writes. */ + #define ANNOTATE_IGNORE_WRITES_END() \ + AnnotateIgnoreWritesEnd(__FILE__, __LINE__) + +/* Clang provides limited support for static thread-safety analysis + through a feature called Annotalysis. We configure macro-definitions + according to whether Annotalysis support is available. */ +#elif defined(ANNOTALYSIS_ENABLED) + + #define ANNOTATE_IGNORE_READS_BEGIN() \ + StaticAnnotateIgnoreReadsBegin(__FILE__, __LINE__) + + #define ANNOTATE_IGNORE_READS_END() \ + StaticAnnotateIgnoreReadsEnd(__FILE__, __LINE__) + + #define ANNOTATE_IGNORE_WRITES_BEGIN() \ + StaticAnnotateIgnoreWritesBegin(__FILE__, __LINE__) + + #define ANNOTATE_IGNORE_WRITES_END() \ + StaticAnnotateIgnoreWritesEnd(__FILE__, __LINE__) + +#else + #define ANNOTATE_IGNORE_READS_BEGIN() /* empty */ + #define ANNOTATE_IGNORE_READS_END() /* empty */ + #define ANNOTATE_IGNORE_WRITES_BEGIN() /* empty */ + #define ANNOTATE_IGNORE_WRITES_END() /* empty */ +#endif + +/* Implement the ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more + primitive annotations defined above. */ +#if defined(ANNOTATIONS_ENABLED) + + /* Start ignoring all memory accesses (both reads and writes). */ + #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ + do { \ + ANNOTATE_IGNORE_READS_BEGIN(); \ + ANNOTATE_IGNORE_WRITES_BEGIN(); \ + }while (0) + + /* Stop ignoring both reads and writes. */ + #define ANNOTATE_IGNORE_READS_AND_WRITES_END() \ + do { \ + ANNOTATE_IGNORE_WRITES_END(); \ + ANNOTATE_IGNORE_READS_END(); \ + }while (0) + +#else + #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() /* empty */ + #define ANNOTATE_IGNORE_READS_AND_WRITES_END() /* empty */ +#endif + +/* Use the macros above rather than using these functions directly. */ +#include <stddef.h> +#ifdef __cplusplus +extern "C" { +#endif +void AnnotateRWLockCreate(const char *file, int line, + const volatile void *lock); +void AnnotateRWLockCreateStatic(const char *file, int line, + const volatile void *lock); +void AnnotateRWLockDestroy(const char *file, int line, + const volatile void *lock); +void AnnotateRWLockAcquired(const char *file, int line, + const volatile void *lock, long is_w); /* NOLINT */ +void AnnotateRWLockReleased(const char *file, int line, + const volatile void *lock, long is_w); /* NOLINT */ +void AnnotateBenignRace(const char *file, int line, + const volatile void *address, + const char *description); +void AnnotateBenignRaceSized(const char *file, int line, + const volatile void *address, + size_t size, + const char *description); +void AnnotateThreadName(const char *file, int line, + const char *name); +void AnnotateEnableRaceDetection(const char *file, int line, int enable); +void AnnotateMemoryIsInitialized(const char *file, int line, + const volatile void *mem, size_t size); +void AnnotateMemoryIsUninitialized(const char *file, int line, + const volatile void *mem, size_t size); + +/* Annotations expand to these functions, when Dynamic Annotations are enabled. 
+ These functions are either implemented as no-op calls, if no Sanitizer is + attached, or provided with externally-linked implementations by a library + like ThreadSanitizer. */ +void AnnotateIgnoreReadsBegin(const char *file, int line) + ATTRIBUTE_IGNORE_READS_BEGIN; +void AnnotateIgnoreReadsEnd(const char *file, int line) + ATTRIBUTE_IGNORE_READS_END; +void AnnotateIgnoreWritesBegin(const char *file, int line); +void AnnotateIgnoreWritesEnd(const char *file, int line); + +#if defined(ANNOTALYSIS_ENABLED) +/* When Annotalysis is enabled without Dynamic Annotations, the use of + static-inline functions allows the annotations to be read at compile-time, + while still letting the compiler elide the functions from the final build. + + TODO(delesley) -- The exclusive lock here ignores writes as well, but + allows IGNORE_READS_AND_WRITES to work properly. */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-function" +static inline void StaticAnnotateIgnoreReadsBegin(const char *file, int line) + ATTRIBUTE_IGNORE_READS_BEGIN { (void)file; (void)line; } +static inline void StaticAnnotateIgnoreReadsEnd(const char *file, int line) + ATTRIBUTE_IGNORE_READS_END { (void)file; (void)line; } +static inline void StaticAnnotateIgnoreWritesBegin( + const char *file, int line) { (void)file; (void)line; } +static inline void StaticAnnotateIgnoreWritesEnd( + const char *file, int line) { (void)file; (void)line; } +#pragma GCC diagnostic pop +#endif + +/* Return non-zero value if running under valgrind. + + If "valgrind.h" is included into dynamic_annotations.cc, + the regular valgrind mechanism will be used. + See http://valgrind.org/docs/manual/manual-core-adv.html about + RUNNING_ON_VALGRIND and other valgrind "client requests". + The file "valgrind.h" may be obtained by doing + svn co svn://svn.valgrind.org/valgrind/trunk/include + + If for some reason you can't use "valgrind.h" or want to fake valgrind, + there are two ways to make this function return non-zero: + - Use environment variable: export RUNNING_ON_VALGRIND=1 + - Make your tool intercept the function RunningOnValgrind() and + change its return value. + */ +int RunningOnValgrind(void); + +/* ValgrindSlowdown returns: + * 1.0, if (RunningOnValgrind() == 0) + * 50.0, if (RunningOnValgrind() != 0 && getenv("VALGRIND_SLOWDOWN") == NULL) + * atof(getenv("VALGRIND_SLOWDOWN")) otherwise + This function can be used to scale timeout values: + EXAMPLE: + for (;;) { + DoExpensiveBackgroundTask(); + SleepForSeconds(5 * ValgrindSlowdown()); + } + */ +double ValgrindSlowdown(void); + +#ifdef __cplusplus +} +#endif + +/* ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads. + + Instead of doing + ANNOTATE_IGNORE_READS_BEGIN(); + ... = x; + ANNOTATE_IGNORE_READS_END(); + one can use + ... = ANNOTATE_UNPROTECTED_READ(x); */ +#if defined(__cplusplus) && defined(ANNOTATIONS_ENABLED) +template <typename T> +inline T ANNOTATE_UNPROTECTED_READ(const volatile T &x) { /* NOLINT */ + ANNOTATE_IGNORE_READS_BEGIN(); + T res = x; + ANNOTATE_IGNORE_READS_END(); + return res; + } +#else + #define ANNOTATE_UNPROTECTED_READ(x) (x) +#endif + +#if DYNAMIC_ANNOTATIONS_ENABLED != 0 && defined(__cplusplus) + /* Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable. 
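+     For example (an illustrative sketch; the variable name is hypothetical):
+       static int g_hit_counter;
+       ANNOTATE_BENIGN_RACE_STATIC(g_hit_counter, "hit counter, races are benign")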
*/ + #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ + namespace { \ + class static_var ## _annotator { \ + public: \ + static_var ## _annotator() { \ + ANNOTATE_BENIGN_RACE_SIZED(&static_var, \ + sizeof(static_var), \ + # static_var ": " description); \ + } \ + }; \ + static static_var ## _annotator the ## static_var ## _annotator;\ + } // namespace +#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */ + #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) /* empty */ +#endif /* DYNAMIC_ANNOTATIONS_ENABLED */ + +#ifdef ADDRESS_SANITIZER +/* Describe the current state of a contiguous container such as e.g. + * std::vector or std::string. For more details see + * sanitizer/common_interface_defs.h, which is provided by the compiler. */ +#include <sanitizer/common_interface_defs.h> +#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \ + __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid) +#define ADDRESS_SANITIZER_REDZONE(name) \ + struct { char x[8] __attribute__ ((aligned (8))); } name +#else +#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) +#define ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "") +#endif // ADDRESS_SANITIZER + +/* Undefine the macros intended only in this file. */ +#undef ANNOTALYSIS_ENABLED +#undef ANNOTATIONS_ENABLED +#undef ATTRIBUTE_IGNORE_READS_BEGIN +#undef ATTRIBUTE_IGNORE_READS_END + +#endif /* ABSL_BASE_DYNAMIC_ANNOTATIONS_H_ */ diff --git a/third_party/abseil_cpp/absl/base/exception_safety_testing_test.cc b/third_party/abseil_cpp/absl/base/exception_safety_testing_test.cc new file mode 100644 index 000000000000..a59be29e919c --- /dev/null +++ b/third_party/abseil_cpp/absl/base/exception_safety_testing_test.cc @@ -0,0 +1,956 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/exception_safety_testing.h" + +#ifdef ABSL_HAVE_EXCEPTIONS + +#include <cstddef> +#include <exception> +#include <iostream> +#include <list> +#include <type_traits> +#include <vector> + +#include "gtest/gtest-spi.h" +#include "gtest/gtest.h" +#include "absl/memory/memory.h" + +namespace testing { + +namespace { + +using ::testing::exceptions_internal::SetCountdown; +using ::testing::exceptions_internal::TestException; +using ::testing::exceptions_internal::UnsetCountdown; + +// EXPECT_NO_THROW can't inspect the thrown inspection in general. +template <typename F> +void ExpectNoThrow(const F& f) { + try { + f(); + } catch (const TestException& e) { + ADD_FAILURE() << "Unexpected exception thrown from " << e.what(); + } +} + +TEST(ThrowingValueTest, Throws) { + SetCountdown(); + EXPECT_THROW(ThrowingValue<> bomb, TestException); + + // It's not guaranteed that every operator only throws *once*. 
The default + // ctor only throws once, though, so use it to make sure we only throw when + // the countdown hits 0 + SetCountdown(2); + ExpectNoThrow([]() { ThrowingValue<> bomb; }); + ExpectNoThrow([]() { ThrowingValue<> bomb; }); + EXPECT_THROW(ThrowingValue<> bomb, TestException); + + UnsetCountdown(); +} + +// Tests that an operation throws when the countdown is at 0, doesn't throw when +// the countdown doesn't hit 0, and doesn't modify the state of the +// ThrowingValue if it throws +template <typename F> +void TestOp(const F& f) { + ExpectNoThrow(f); + + SetCountdown(); + EXPECT_THROW(f(), TestException); + UnsetCountdown(); +} + +TEST(ThrowingValueTest, ThrowingCtors) { + ThrowingValue<> bomb; + + TestOp([]() { ThrowingValue<> bomb(1); }); + TestOp([&]() { ThrowingValue<> bomb1 = bomb; }); + TestOp([&]() { ThrowingValue<> bomb1 = std::move(bomb); }); +} + +TEST(ThrowingValueTest, ThrowingAssignment) { + ThrowingValue<> bomb, bomb1; + + TestOp([&]() { bomb = bomb1; }); + TestOp([&]() { bomb = std::move(bomb1); }); + + // Test that when assignment throws, the assignment should fail (lhs != rhs) + // and strong guarantee fails (lhs != lhs_copy). + { + ThrowingValue<> lhs(39), rhs(42); + ThrowingValue<> lhs_copy(lhs); + SetCountdown(); + EXPECT_THROW(lhs = rhs, TestException); + UnsetCountdown(); + EXPECT_NE(lhs, rhs); + EXPECT_NE(lhs_copy, lhs); + } + { + ThrowingValue<> lhs(39), rhs(42); + ThrowingValue<> lhs_copy(lhs), rhs_copy(rhs); + SetCountdown(); + EXPECT_THROW(lhs = std::move(rhs), TestException); + UnsetCountdown(); + EXPECT_NE(lhs, rhs_copy); + EXPECT_NE(lhs_copy, lhs); + } +} + +TEST(ThrowingValueTest, ThrowingComparisons) { + ThrowingValue<> bomb1, bomb2; + TestOp([&]() { return bomb1 == bomb2; }); + TestOp([&]() { return bomb1 != bomb2; }); + TestOp([&]() { return bomb1 < bomb2; }); + TestOp([&]() { return bomb1 <= bomb2; }); + TestOp([&]() { return bomb1 > bomb2; }); + TestOp([&]() { return bomb1 >= bomb2; }); +} + +TEST(ThrowingValueTest, ThrowingArithmeticOps) { + ThrowingValue<> bomb1(1), bomb2(2); + + TestOp([&bomb1]() { +bomb1; }); + TestOp([&bomb1]() { -bomb1; }); + TestOp([&bomb1]() { ++bomb1; }); + TestOp([&bomb1]() { bomb1++; }); + TestOp([&bomb1]() { --bomb1; }); + TestOp([&bomb1]() { bomb1--; }); + + TestOp([&]() { bomb1 + bomb2; }); + TestOp([&]() { bomb1 - bomb2; }); + TestOp([&]() { bomb1* bomb2; }); + TestOp([&]() { bomb1 / bomb2; }); + TestOp([&]() { bomb1 << 1; }); + TestOp([&]() { bomb1 >> 1; }); +} + +TEST(ThrowingValueTest, ThrowingLogicalOps) { + ThrowingValue<> bomb1, bomb2; + + TestOp([&bomb1]() { !bomb1; }); + TestOp([&]() { bomb1&& bomb2; }); + TestOp([&]() { bomb1 || bomb2; }); +} + +TEST(ThrowingValueTest, ThrowingBitwiseOps) { + ThrowingValue<> bomb1, bomb2; + + TestOp([&bomb1]() { ~bomb1; }); + TestOp([&]() { bomb1& bomb2; }); + TestOp([&]() { bomb1 | bomb2; }); + TestOp([&]() { bomb1 ^ bomb2; }); +} + +TEST(ThrowingValueTest, ThrowingCompoundAssignmentOps) { + ThrowingValue<> bomb1(1), bomb2(2); + + TestOp([&]() { bomb1 += bomb2; }); + TestOp([&]() { bomb1 -= bomb2; }); + TestOp([&]() { bomb1 *= bomb2; }); + TestOp([&]() { bomb1 /= bomb2; }); + TestOp([&]() { bomb1 %= bomb2; }); + TestOp([&]() { bomb1 &= bomb2; }); + TestOp([&]() { bomb1 |= bomb2; }); + TestOp([&]() { bomb1 ^= bomb2; }); + TestOp([&]() { bomb1 *= bomb2; }); +} + +TEST(ThrowingValueTest, ThrowingStreamOps) { + ThrowingValue<> bomb; + + TestOp([&]() { + std::istringstream stream; + stream >> bomb; + }); + TestOp([&]() { + std::stringstream stream; + stream << bomb; + }); +} + 
+// Tests the operator<< of ThrowingValue by forcing ConstructorTracker to emit +// a nonfatal failure that contains the string representation of the Thrower +TEST(ThrowingValueTest, StreamOpsOutput) { + using ::testing::TypeSpec; + exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown); + + // Test default spec list (kEverythingThrows) + EXPECT_NONFATAL_FAILURE( + { + using Thrower = ThrowingValue<TypeSpec{}>; + auto thrower = Thrower(123); + thrower.~Thrower(); + }, + "ThrowingValue<>(123)"); + + // Test with one item in spec list (kNoThrowCopy) + EXPECT_NONFATAL_FAILURE( + { + using Thrower = ThrowingValue<TypeSpec::kNoThrowCopy>; + auto thrower = Thrower(234); + thrower.~Thrower(); + }, + "ThrowingValue<kNoThrowCopy>(234)"); + + // Test with multiple items in spec list (kNoThrowMove, kNoThrowNew) + EXPECT_NONFATAL_FAILURE( + { + using Thrower = + ThrowingValue<TypeSpec::kNoThrowMove | TypeSpec::kNoThrowNew>; + auto thrower = Thrower(345); + thrower.~Thrower(); + }, + "ThrowingValue<kNoThrowMove | kNoThrowNew>(345)"); + + // Test with all items in spec list (kNoThrowCopy, kNoThrowMove, kNoThrowNew) + EXPECT_NONFATAL_FAILURE( + { + using Thrower = ThrowingValue<static_cast<TypeSpec>(-1)>; + auto thrower = Thrower(456); + thrower.~Thrower(); + }, + "ThrowingValue<kNoThrowCopy | kNoThrowMove | kNoThrowNew>(456)"); +} + +template <typename F> +void TestAllocatingOp(const F& f) { + ExpectNoThrow(f); + + SetCountdown(); + EXPECT_THROW(f(), exceptions_internal::TestBadAllocException); + UnsetCountdown(); +} + +TEST(ThrowingValueTest, ThrowingAllocatingOps) { + // make_unique calls unqualified operator new, so these exercise the + // ThrowingValue overloads. + TestAllocatingOp([]() { return absl::make_unique<ThrowingValue<>>(1); }); + TestAllocatingOp([]() { return absl::make_unique<ThrowingValue<>[]>(2); }); +} + +TEST(ThrowingValueTest, NonThrowingMoveCtor) { + ThrowingValue<TypeSpec::kNoThrowMove> nothrow_ctor; + + SetCountdown(); + ExpectNoThrow([¬hrow_ctor]() { + ThrowingValue<TypeSpec::kNoThrowMove> nothrow1 = std::move(nothrow_ctor); + }); + UnsetCountdown(); +} + +TEST(ThrowingValueTest, NonThrowingMoveAssign) { + ThrowingValue<TypeSpec::kNoThrowMove> nothrow_assign1, nothrow_assign2; + + SetCountdown(); + ExpectNoThrow([¬hrow_assign1, ¬hrow_assign2]() { + nothrow_assign1 = std::move(nothrow_assign2); + }); + UnsetCountdown(); +} + +TEST(ThrowingValueTest, ThrowingCopyCtor) { + ThrowingValue<> tv; + + TestOp([&]() { ThrowingValue<> tv_copy(tv); }); +} + +TEST(ThrowingValueTest, ThrowingCopyAssign) { + ThrowingValue<> tv1, tv2; + + TestOp([&]() { tv1 = tv2; }); +} + +TEST(ThrowingValueTest, NonThrowingCopyCtor) { + ThrowingValue<TypeSpec::kNoThrowCopy> nothrow_ctor; + + SetCountdown(); + ExpectNoThrow([¬hrow_ctor]() { + ThrowingValue<TypeSpec::kNoThrowCopy> nothrow1(nothrow_ctor); + }); + UnsetCountdown(); +} + +TEST(ThrowingValueTest, NonThrowingCopyAssign) { + ThrowingValue<TypeSpec::kNoThrowCopy> nothrow_assign1, nothrow_assign2; + + SetCountdown(); + ExpectNoThrow([¬hrow_assign1, ¬hrow_assign2]() { + nothrow_assign1 = nothrow_assign2; + }); + UnsetCountdown(); +} + +TEST(ThrowingValueTest, ThrowingSwap) { + ThrowingValue<> bomb1, bomb2; + TestOp([&]() { std::swap(bomb1, bomb2); }); +} + +TEST(ThrowingValueTest, NonThrowingSwap) { + ThrowingValue<TypeSpec::kNoThrowMove> bomb1, bomb2; + ExpectNoThrow([&]() { std::swap(bomb1, bomb2); }); +} + +TEST(ThrowingValueTest, NonThrowingAllocation) { + ThrowingValue<TypeSpec::kNoThrowNew>* allocated; + 
ThrowingValue<TypeSpec::kNoThrowNew>* array; + + ExpectNoThrow([&allocated]() { + allocated = new ThrowingValue<TypeSpec::kNoThrowNew>(1); + delete allocated; + }); + ExpectNoThrow([&array]() { + array = new ThrowingValue<TypeSpec::kNoThrowNew>[2]; + delete[] array; + }); +} + +TEST(ThrowingValueTest, NonThrowingDelete) { + auto* allocated = new ThrowingValue<>(1); + auto* array = new ThrowingValue<>[2]; + + SetCountdown(); + ExpectNoThrow([allocated]() { delete allocated; }); + SetCountdown(); + ExpectNoThrow([array]() { delete[] array; }); + + UnsetCountdown(); +} + +TEST(ThrowingValueTest, NonThrowingPlacementDelete) { + constexpr int kArrayLen = 2; + // We intentionally create extra space to store the tag allocated by placement + // new[]. + constexpr int kStorageLen = 4; + + alignas(ThrowingValue<>) unsigned char buf[sizeof(ThrowingValue<>)]; + alignas(ThrowingValue<>) unsigned char + array_buf[sizeof(ThrowingValue<>[kStorageLen])]; + auto* placed = new (&buf) ThrowingValue<>(1); + auto placed_array = new (&array_buf) ThrowingValue<>[kArrayLen]; + + SetCountdown(); + ExpectNoThrow([placed, &buf]() { + placed->~ThrowingValue<>(); + ThrowingValue<>::operator delete(placed, &buf); + }); + + SetCountdown(); + ExpectNoThrow([&, placed_array]() { + for (int i = 0; i < kArrayLen; ++i) placed_array[i].~ThrowingValue<>(); + ThrowingValue<>::operator delete[](placed_array, &array_buf); + }); + + UnsetCountdown(); +} + +TEST(ThrowingValueTest, NonThrowingDestructor) { + auto* allocated = new ThrowingValue<>(); + + SetCountdown(); + ExpectNoThrow([allocated]() { delete allocated; }); + UnsetCountdown(); +} + +TEST(ThrowingBoolTest, ThrowingBool) { + ThrowingBool t = true; + + // Test that it's contextually convertible to bool + if (t) { // NOLINT(whitespace/empty_if_body) + } + EXPECT_TRUE(t); + + TestOp([&]() { (void)!t; }); +} + +TEST(ThrowingAllocatorTest, MemoryManagement) { + // Just exercise the memory management capabilities under LSan to make sure we + // don't leak. + ThrowingAllocator<int> int_alloc; + int* ip = int_alloc.allocate(1); + int_alloc.deallocate(ip, 1); + int* i_array = int_alloc.allocate(2); + int_alloc.deallocate(i_array, 2); + + ThrowingAllocator<ThrowingValue<>> tv_alloc; + ThrowingValue<>* ptr = tv_alloc.allocate(1); + tv_alloc.deallocate(ptr, 1); + ThrowingValue<>* tv_array = tv_alloc.allocate(2); + tv_alloc.deallocate(tv_array, 2); +} + +TEST(ThrowingAllocatorTest, CallsGlobalNew) { + ThrowingAllocator<ThrowingValue<>, AllocSpec::kNoThrowAllocate> nothrow_alloc; + ThrowingValue<>* ptr; + + SetCountdown(); + // This will only throw if ThrowingValue::new is called. 
+ ExpectNoThrow([&]() { ptr = nothrow_alloc.allocate(1); }); + nothrow_alloc.deallocate(ptr, 1); + + UnsetCountdown(); +} + +TEST(ThrowingAllocatorTest, ThrowingConstructors) { + ThrowingAllocator<int> int_alloc; + int* ip = nullptr; + + SetCountdown(); + EXPECT_THROW(ip = int_alloc.allocate(1), TestException); + ExpectNoThrow([&]() { ip = int_alloc.allocate(1); }); + + *ip = 1; + SetCountdown(); + EXPECT_THROW(int_alloc.construct(ip, 2), TestException); + EXPECT_EQ(*ip, 1); + int_alloc.deallocate(ip, 1); + + UnsetCountdown(); +} + +TEST(ThrowingAllocatorTest, NonThrowingConstruction) { + { + ThrowingAllocator<int, AllocSpec::kNoThrowAllocate> int_alloc; + int* ip = nullptr; + + SetCountdown(); + ExpectNoThrow([&]() { ip = int_alloc.allocate(1); }); + + SetCountdown(); + ExpectNoThrow([&]() { int_alloc.construct(ip, 2); }); + + EXPECT_EQ(*ip, 2); + int_alloc.deallocate(ip, 1); + + UnsetCountdown(); + } + + { + ThrowingAllocator<int> int_alloc; + int* ip = nullptr; + ExpectNoThrow([&]() { ip = int_alloc.allocate(1); }); + ExpectNoThrow([&]() { int_alloc.construct(ip, 2); }); + EXPECT_EQ(*ip, 2); + int_alloc.deallocate(ip, 1); + } + + { + ThrowingAllocator<ThrowingValue<>, AllocSpec::kNoThrowAllocate> + nothrow_alloc; + ThrowingValue<>* ptr; + + SetCountdown(); + ExpectNoThrow([&]() { ptr = nothrow_alloc.allocate(1); }); + + SetCountdown(); + ExpectNoThrow( + [&]() { nothrow_alloc.construct(ptr, 2, testing::nothrow_ctor); }); + + EXPECT_EQ(ptr->Get(), 2); + nothrow_alloc.destroy(ptr); + nothrow_alloc.deallocate(ptr, 1); + + UnsetCountdown(); + } + + { + ThrowingAllocator<int> a; + + SetCountdown(); + ExpectNoThrow([&]() { ThrowingAllocator<double> a1 = a; }); + + SetCountdown(); + ExpectNoThrow([&]() { ThrowingAllocator<double> a1 = std::move(a); }); + + UnsetCountdown(); + } +} + +TEST(ThrowingAllocatorTest, ThrowingAllocatorConstruction) { + ThrowingAllocator<int> a; + TestOp([]() { ThrowingAllocator<int> a; }); + TestOp([&]() { a.select_on_container_copy_construction(); }); +} + +TEST(ThrowingAllocatorTest, State) { + ThrowingAllocator<int> a1, a2; + EXPECT_NE(a1, a2); + + auto a3 = a1; + EXPECT_EQ(a3, a1); + int* ip = a1.allocate(1); + EXPECT_EQ(a3, a1); + a3.deallocate(ip, 1); + EXPECT_EQ(a3, a1); +} + +TEST(ThrowingAllocatorTest, InVector) { + std::vector<ThrowingValue<>, ThrowingAllocator<ThrowingValue<>>> v; + for (int i = 0; i < 20; ++i) v.push_back({}); + for (int i = 0; i < 20; ++i) v.pop_back(); +} + +TEST(ThrowingAllocatorTest, InList) { + std::list<ThrowingValue<>, ThrowingAllocator<ThrowingValue<>>> l; + for (int i = 0; i < 20; ++i) l.push_back({}); + for (int i = 0; i < 20; ++i) l.pop_back(); + for (int i = 0; i < 20; ++i) l.push_front({}); + for (int i = 0; i < 20; ++i) l.pop_front(); +} + +template <typename TesterInstance, typename = void> +struct NullaryTestValidator : public std::false_type {}; + +template <typename TesterInstance> +struct NullaryTestValidator< + TesterInstance, + absl::void_t<decltype(std::declval<TesterInstance>().Test())>> + : public std::true_type {}; + +template <typename TesterInstance> +bool HasNullaryTest(const TesterInstance&) { + return NullaryTestValidator<TesterInstance>::value; +} + +void DummyOp(void*) {} + +template <typename TesterInstance, typename = void> +struct UnaryTestValidator : public std::false_type {}; + +template <typename TesterInstance> +struct UnaryTestValidator< + TesterInstance, + absl::void_t<decltype(std::declval<TesterInstance>().Test(DummyOp))>> + : public std::true_type {}; + +template <typename TesterInstance> +bool 
HasUnaryTest(const TesterInstance&) { + return UnaryTestValidator<TesterInstance>::value; +} + +TEST(ExceptionSafetyTesterTest, IncompleteTypesAreNotTestable) { + using T = exceptions_internal::UninitializedT; + auto op = [](T* t) {}; + auto inv = [](T*) { return testing::AssertionSuccess(); }; + auto fac = []() { return absl::make_unique<T>(); }; + + // Test that providing operation and inveriants still does not allow for the + // the invocation of .Test() and .Test(op) because it lacks a factory + auto without_fac = + testing::MakeExceptionSafetyTester().WithOperation(op).WithContracts( + inv, testing::strong_guarantee); + EXPECT_FALSE(HasNullaryTest(without_fac)); + EXPECT_FALSE(HasUnaryTest(without_fac)); + + // Test that providing contracts and factory allows the invocation of + // .Test(op) but does not allow for .Test() because it lacks an operation + auto without_op = testing::MakeExceptionSafetyTester() + .WithContracts(inv, testing::strong_guarantee) + .WithFactory(fac); + EXPECT_FALSE(HasNullaryTest(without_op)); + EXPECT_TRUE(HasUnaryTest(without_op)); + + // Test that providing operation and factory still does not allow for the + // the invocation of .Test() and .Test(op) because it lacks contracts + auto without_inv = + testing::MakeExceptionSafetyTester().WithOperation(op).WithFactory(fac); + EXPECT_FALSE(HasNullaryTest(without_inv)); + EXPECT_FALSE(HasUnaryTest(without_inv)); +} + +struct ExampleStruct {}; + +std::unique_ptr<ExampleStruct> ExampleFunctionFactory() { + return absl::make_unique<ExampleStruct>(); +} + +void ExampleFunctionOperation(ExampleStruct*) {} + +testing::AssertionResult ExampleFunctionContract(ExampleStruct*) { + return testing::AssertionSuccess(); +} + +struct { + std::unique_ptr<ExampleStruct> operator()() const { + return ExampleFunctionFactory(); + } +} example_struct_factory; + +struct { + void operator()(ExampleStruct*) const {} +} example_struct_operation; + +struct { + testing::AssertionResult operator()(ExampleStruct* example_struct) const { + return ExampleFunctionContract(example_struct); + } +} example_struct_contract; + +auto example_lambda_factory = []() { return ExampleFunctionFactory(); }; + +auto example_lambda_operation = [](ExampleStruct*) {}; + +auto example_lambda_contract = [](ExampleStruct* example_struct) { + return ExampleFunctionContract(example_struct); +}; + +// Testing that function references, pointers, structs with operator() and +// lambdas can all be used with ExceptionSafetyTester +TEST(ExceptionSafetyTesterTest, MixedFunctionTypes) { + // function reference + EXPECT_TRUE(testing::MakeExceptionSafetyTester() + .WithFactory(ExampleFunctionFactory) + .WithOperation(ExampleFunctionOperation) + .WithContracts(ExampleFunctionContract) + .Test()); + + // function pointer + EXPECT_TRUE(testing::MakeExceptionSafetyTester() + .WithFactory(&ExampleFunctionFactory) + .WithOperation(&ExampleFunctionOperation) + .WithContracts(&ExampleFunctionContract) + .Test()); + + // struct + EXPECT_TRUE(testing::MakeExceptionSafetyTester() + .WithFactory(example_struct_factory) + .WithOperation(example_struct_operation) + .WithContracts(example_struct_contract) + .Test()); + + // lambda + EXPECT_TRUE(testing::MakeExceptionSafetyTester() + .WithFactory(example_lambda_factory) + .WithOperation(example_lambda_operation) + .WithContracts(example_lambda_contract) + .Test()); +} + +struct NonNegative { + bool operator==(const NonNegative& other) const { return i == other.i; } + int i; +}; + +testing::AssertionResult 
CheckNonNegativeInvariants(NonNegative* g) { + if (g->i >= 0) { + return testing::AssertionSuccess(); + } + return testing::AssertionFailure() + << "i should be non-negative but is " << g->i; +} + +struct { + template <typename T> + void operator()(T* t) const { + (*t)(); + } +} invoker; + +auto tester = + testing::MakeExceptionSafetyTester().WithOperation(invoker).WithContracts( + CheckNonNegativeInvariants); +auto strong_tester = tester.WithContracts(testing::strong_guarantee); + +struct FailsBasicGuarantee : public NonNegative { + void operator()() { + --i; + ThrowingValue<> bomb; + ++i; + } +}; + +TEST(ExceptionCheckTest, BasicGuaranteeFailure) { + EXPECT_FALSE(tester.WithInitialValue(FailsBasicGuarantee{}).Test()); +} + +struct FollowsBasicGuarantee : public NonNegative { + void operator()() { + ++i; + ThrowingValue<> bomb; + } +}; + +TEST(ExceptionCheckTest, BasicGuarantee) { + EXPECT_TRUE(tester.WithInitialValue(FollowsBasicGuarantee{}).Test()); +} + +TEST(ExceptionCheckTest, StrongGuaranteeFailure) { + EXPECT_FALSE(strong_tester.WithInitialValue(FailsBasicGuarantee{}).Test()); + EXPECT_FALSE(strong_tester.WithInitialValue(FollowsBasicGuarantee{}).Test()); +} + +struct BasicGuaranteeWithExtraContracts : public NonNegative { + // After operator(), i is incremented. If operator() throws, i is set to 9999 + void operator()() { + int old_i = i; + i = kExceptionSentinel; + ThrowingValue<> bomb; + i = ++old_i; + } + + static constexpr int kExceptionSentinel = 9999; +}; +constexpr int BasicGuaranteeWithExtraContracts::kExceptionSentinel; + +TEST(ExceptionCheckTest, BasicGuaranteeWithExtraContracts) { + auto tester_with_val = + tester.WithInitialValue(BasicGuaranteeWithExtraContracts{}); + EXPECT_TRUE(tester_with_val.Test()); + EXPECT_TRUE( + tester_with_val + .WithContracts([](BasicGuaranteeWithExtraContracts* o) { + if (o->i == BasicGuaranteeWithExtraContracts::kExceptionSentinel) { + return testing::AssertionSuccess(); + } + return testing::AssertionFailure() + << "i should be " + << BasicGuaranteeWithExtraContracts::kExceptionSentinel + << ", but is " << o->i; + }) + .Test()); +} + +struct FollowsStrongGuarantee : public NonNegative { + void operator()() { ThrowingValue<> bomb; } +}; + +TEST(ExceptionCheckTest, StrongGuarantee) { + EXPECT_TRUE(tester.WithInitialValue(FollowsStrongGuarantee{}).Test()); + EXPECT_TRUE(strong_tester.WithInitialValue(FollowsStrongGuarantee{}).Test()); +} + +struct HasReset : public NonNegative { + void operator()() { + i = -1; + ThrowingValue<> bomb; + i = 1; + } + + void reset() { i = 0; } +}; + +testing::AssertionResult CheckHasResetContracts(HasReset* h) { + h->reset(); + return testing::AssertionResult(h->i == 0); +} + +TEST(ExceptionCheckTest, ModifyingChecker) { + auto set_to_1000 = [](FollowsBasicGuarantee* g) { + g->i = 1000; + return testing::AssertionSuccess(); + }; + auto is_1000 = [](FollowsBasicGuarantee* g) { + return testing::AssertionResult(g->i == 1000); + }; + auto increment = [](FollowsStrongGuarantee* g) { + ++g->i; + return testing::AssertionSuccess(); + }; + + EXPECT_FALSE(tester.WithInitialValue(FollowsBasicGuarantee{}) + .WithContracts(set_to_1000, is_1000) + .Test()); + EXPECT_TRUE(strong_tester.WithInitialValue(FollowsStrongGuarantee{}) + .WithContracts(increment) + .Test()); + EXPECT_TRUE(testing::MakeExceptionSafetyTester() + .WithInitialValue(HasReset{}) + .WithContracts(CheckHasResetContracts) + .Test(invoker)); +} + +TEST(ExceptionSafetyTesterTest, ResetsCountdown) { + auto test = + testing::MakeExceptionSafetyTester() + 
.WithInitialValue(ThrowingValue<>()) + .WithContracts([](ThrowingValue<>*) { return AssertionSuccess(); }) + .WithOperation([](ThrowingValue<>*) {}); + ASSERT_TRUE(test.Test()); + // If the countdown isn't reset because there were no exceptions thrown, then + // this will fail with a termination from an unhandled exception + EXPECT_TRUE(test.Test()); +} + +struct NonCopyable : public NonNegative { + NonCopyable(const NonCopyable&) = delete; + NonCopyable() : NonNegative{0} {} + + void operator()() { ThrowingValue<> bomb; } +}; + +TEST(ExceptionCheckTest, NonCopyable) { + auto factory = []() { return absl::make_unique<NonCopyable>(); }; + EXPECT_TRUE(tester.WithFactory(factory).Test()); + EXPECT_TRUE(strong_tester.WithFactory(factory).Test()); +} + +struct NonEqualityComparable : public NonNegative { + void operator()() { ThrowingValue<> bomb; } + + void ModifyOnThrow() { + ++i; + ThrowingValue<> bomb; + static_cast<void>(bomb); + --i; + } +}; + +TEST(ExceptionCheckTest, NonEqualityComparable) { + auto nec_is_strong = [](NonEqualityComparable* nec) { + return testing::AssertionResult(nec->i == NonEqualityComparable().i); + }; + auto strong_nec_tester = tester.WithInitialValue(NonEqualityComparable{}) + .WithContracts(nec_is_strong); + + EXPECT_TRUE(strong_nec_tester.Test()); + EXPECT_FALSE(strong_nec_tester.Test( + [](NonEqualityComparable* n) { n->ModifyOnThrow(); })); +} + +template <typename T> +struct ExhaustivenessTester { + void operator()() { + successes |= 1; + T b1; + static_cast<void>(b1); + successes |= (1 << 1); + T b2; + static_cast<void>(b2); + successes |= (1 << 2); + T b3; + static_cast<void>(b3); + successes |= (1 << 3); + } + + bool operator==(const ExhaustivenessTester<ThrowingValue<>>&) const { + return true; + } + + static unsigned char successes; +}; + +struct { + template <typename T> + testing::AssertionResult operator()(ExhaustivenessTester<T>*) const { + return testing::AssertionSuccess(); + } +} CheckExhaustivenessTesterContracts; + +template <typename T> +unsigned char ExhaustivenessTester<T>::successes = 0; + +TEST(ExceptionCheckTest, Exhaustiveness) { + auto exhaust_tester = testing::MakeExceptionSafetyTester() + .WithContracts(CheckExhaustivenessTesterContracts) + .WithOperation(invoker); + + EXPECT_TRUE( + exhaust_tester.WithInitialValue(ExhaustivenessTester<int>{}).Test()); + EXPECT_EQ(ExhaustivenessTester<int>::successes, 0xF); + + EXPECT_TRUE( + exhaust_tester.WithInitialValue(ExhaustivenessTester<ThrowingValue<>>{}) + .WithContracts(testing::strong_guarantee) + .Test()); + EXPECT_EQ(ExhaustivenessTester<ThrowingValue<>>::successes, 0xF); +} + +struct LeaksIfCtorThrows : private exceptions_internal::TrackedObject { + LeaksIfCtorThrows() : TrackedObject(ABSL_PRETTY_FUNCTION) { + ++counter; + ThrowingValue<> v; + static_cast<void>(v); + --counter; + } + LeaksIfCtorThrows(const LeaksIfCtorThrows&) noexcept + : TrackedObject(ABSL_PRETTY_FUNCTION) {} + static int counter; +}; +int LeaksIfCtorThrows::counter = 0; + +TEST(ExceptionCheckTest, TestLeakyCtor) { + testing::TestThrowingCtor<LeaksIfCtorThrows>(); + EXPECT_EQ(LeaksIfCtorThrows::counter, 1); + LeaksIfCtorThrows::counter = 0; +} + +struct Tracked : private exceptions_internal::TrackedObject { + Tracked() : TrackedObject(ABSL_PRETTY_FUNCTION) {} +}; + +TEST(ConstructorTrackerTest, CreatedBefore) { + Tracked a, b, c; + exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown); +} + +TEST(ConstructorTrackerTest, CreatedAfter) { + exceptions_internal::ConstructorTracker 
ct(exceptions_internal::countdown); + Tracked a, b, c; +} + +TEST(ConstructorTrackerTest, NotDestroyedAfter) { + alignas(Tracked) unsigned char storage[sizeof(Tracked)]; + EXPECT_NONFATAL_FAILURE( + { + exceptions_internal::ConstructorTracker ct( + exceptions_internal::countdown); + new (&storage) Tracked(); + }, + "not destroyed"); +} + +TEST(ConstructorTrackerTest, DestroyedTwice) { + exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown); + EXPECT_NONFATAL_FAILURE( + { + Tracked t; + t.~Tracked(); + }, + "re-destroyed"); +} + +TEST(ConstructorTrackerTest, ConstructedTwice) { + exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown); + alignas(Tracked) unsigned char storage[sizeof(Tracked)]; + EXPECT_NONFATAL_FAILURE( + { + new (&storage) Tracked(); + new (&storage) Tracked(); + reinterpret_cast<Tracked*>(&storage)->~Tracked(); + }, + "re-constructed"); +} + +TEST(ThrowingValueTraitsTest, RelationalOperators) { + ThrowingValue<> a, b; + EXPECT_TRUE((std::is_convertible<decltype(a == b), bool>::value)); + EXPECT_TRUE((std::is_convertible<decltype(a != b), bool>::value)); + EXPECT_TRUE((std::is_convertible<decltype(a < b), bool>::value)); + EXPECT_TRUE((std::is_convertible<decltype(a <= b), bool>::value)); + EXPECT_TRUE((std::is_convertible<decltype(a > b), bool>::value)); + EXPECT_TRUE((std::is_convertible<decltype(a >= b), bool>::value)); +} + +TEST(ThrowingAllocatorTraitsTest, Assignablility) { + EXPECT_TRUE(absl::is_move_assignable<ThrowingAllocator<int>>::value); + EXPECT_TRUE(absl::is_copy_assignable<ThrowingAllocator<int>>::value); + EXPECT_TRUE(std::is_nothrow_move_assignable<ThrowingAllocator<int>>::value); + EXPECT_TRUE(std::is_nothrow_copy_assignable<ThrowingAllocator<int>>::value); +} + +} // namespace + +} // namespace testing + +#endif // ABSL_HAVE_EXCEPTIONS diff --git a/third_party/abseil_cpp/absl/base/inline_variable_test.cc b/third_party/abseil_cpp/absl/base/inline_variable_test.cc new file mode 100644 index 000000000000..37a40e1e40a6 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/inline_variable_test.cc @@ -0,0 +1,64 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include <type_traits> + +#include "absl/base/internal/inline_variable.h" +#include "absl/base/internal/inline_variable_testing.h" + +#include "gtest/gtest.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace inline_variable_testing_internal { +namespace { + +TEST(InlineVariableTest, Constexpr) { + static_assert(inline_variable_foo.value == 5, ""); + static_assert(other_inline_variable_foo.value == 5, ""); + static_assert(inline_variable_int == 5, ""); + static_assert(other_inline_variable_int == 5, ""); +} + +TEST(InlineVariableTest, DefaultConstructedIdentityEquality) { + EXPECT_EQ(get_foo_a().value, 5); + EXPECT_EQ(get_foo_b().value, 5); + EXPECT_EQ(&get_foo_a(), &get_foo_b()); +} + +TEST(InlineVariableTest, DefaultConstructedIdentityInequality) { + EXPECT_NE(&inline_variable_foo, &other_inline_variable_foo); +} + +TEST(InlineVariableTest, InitializedIdentityEquality) { + EXPECT_EQ(get_int_a(), 5); + EXPECT_EQ(get_int_b(), 5); + EXPECT_EQ(&get_int_a(), &get_int_b()); +} + +TEST(InlineVariableTest, InitializedIdentityInequality) { + EXPECT_NE(&inline_variable_int, &other_inline_variable_int); +} + +TEST(InlineVariableTest, FunPtrType) { + static_assert( + std::is_same<void(*)(), + std::decay<decltype(inline_variable_fun_ptr)>::type>::value, + ""); +} + +} // namespace +} // namespace inline_variable_testing_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/inline_variable_test_a.cc b/third_party/abseil_cpp/absl/base/inline_variable_test_a.cc new file mode 100644 index 000000000000..f96a58d9b422 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/inline_variable_test_a.cc @@ -0,0 +1,27 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/inline_variable_testing.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace inline_variable_testing_internal { + +const Foo& get_foo_a() { return inline_variable_foo; } + +const int& get_int_a() { return inline_variable_int; } + +} // namespace inline_variable_testing_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/inline_variable_test_b.cc b/third_party/abseil_cpp/absl/base/inline_variable_test_b.cc new file mode 100644 index 000000000000..038adc30a9fb --- /dev/null +++ b/third_party/abseil_cpp/absl/base/inline_variable_test_b.cc @@ -0,0 +1,27 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "absl/base/internal/inline_variable_testing.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace inline_variable_testing_internal { + +const Foo& get_foo_b() { return inline_variable_foo; } + +const int& get_int_b() { return inline_variable_int; } + +} // namespace inline_variable_testing_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/internal/atomic_hook.h b/third_party/abseil_cpp/absl/base/internal/atomic_hook.h new file mode 100644 index 000000000000..ae21cd7fe50a --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/atomic_hook.h @@ -0,0 +1,200 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_ +#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_ + +#include <atomic> +#include <cassert> +#include <cstdint> +#include <utility> + +#include "absl/base/attributes.h" +#include "absl/base/config.h" + +#if defined(_MSC_VER) && !defined(__clang__) +#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 0 +#else +#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 1 +#endif + +#if defined(_MSC_VER) +#define ABSL_HAVE_WORKING_ATOMIC_POINTER 0 +#else +#define ABSL_HAVE_WORKING_ATOMIC_POINTER 1 +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +template <typename T> +class AtomicHook; + +// To workaround AtomicHook not being constant-initializable on some platforms, +// prefer to annotate instances with `ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES` +// instead of `ABSL_CONST_INIT`. +#if ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT +#define ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_CONST_INIT +#else +#define ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES +#endif + +// `AtomicHook` is a helper class, templatized on a raw function pointer type, +// for implementing Abseil customization hooks. It is a callable object that +// dispatches to the registered hook. Objects of type `AtomicHook` must have +// static or thread storage duration. +// +// A default constructed object performs a no-op (and returns a default +// constructed object) if no hook has been registered. +// +// Hooks can be pre-registered via constant initialization, for example: +// +// ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static AtomicHook<void(*)()> +// my_hook(DefaultAction); +// +// and then changed at runtime via a call to `Store()`. +// +// Reads and writes guarantee memory_order_acquire/memory_order_release +// semantics. +template <typename ReturnType, typename... Args> +class AtomicHook<ReturnType (*)(Args...)> { + public: + using FnPtr = ReturnType (*)(Args...); + + // Constructs an object that by default performs a no-op (and + // returns a default constructed object) when no hook as been registered. + constexpr AtomicHook() : AtomicHook(DummyFunction) {} + + // Constructs an object that by default dispatches to/returns the + // pre-registered default_fn when no hook has been registered at runtime. 
+#if ABSL_HAVE_WORKING_ATOMIC_POINTER && ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT + explicit constexpr AtomicHook(FnPtr default_fn) + : hook_(default_fn), default_fn_(default_fn) {} +#elif ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT + explicit constexpr AtomicHook(FnPtr default_fn) + : hook_(kUninitialized), default_fn_(default_fn) {} +#else + // As of January 2020, on all known versions of MSVC this constructor runs in + // the global constructor sequence. If `Store()` is called by a dynamic + // initializer, we want to preserve the value, even if this constructor runs + // after the call to `Store()`. If not, `hook_` will be + // zero-initialized by the linker and we have no need to set it. + // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html + explicit constexpr AtomicHook(FnPtr default_fn) + : /* hook_(deliberately omitted), */ default_fn_(default_fn) { + static_assert(kUninitialized == 0, "here we rely on zero-initialization"); + } +#endif + + // Stores the provided function pointer as the value for this hook. + // + // This is intended to be called once. Multiple calls are legal only if the + // same function pointer is provided for each call. The store is implemented + // as a memory_order_release operation, and read accesses are implemented as + // memory_order_acquire. + void Store(FnPtr fn) { + bool success = DoStore(fn); + static_cast<void>(success); + assert(success); + } + + // Invokes the registered callback. If no callback has yet been registered, a + // default-constructed object of the appropriate type is returned instead. + template <typename... CallArgs> + ReturnType operator()(CallArgs&&... args) const { + return DoLoad()(std::forward<CallArgs>(args)...); + } + + // Returns the registered callback, or nullptr if none has been registered. + // Useful if client code needs to conditionalize behavior based on whether a + // callback was registered. + // + // Note that atomic_hook.Load()() and atomic_hook() have different semantics: + // operator()() will perform a no-op if no callback was registered, while + // Load()() will dereference a null function pointer. Prefer operator()() to + // Load()() unless you must conditionalize behavior on whether a hook was + // registered. + FnPtr Load() const { + FnPtr ptr = DoLoad(); + return (ptr == DummyFunction) ? nullptr : ptr; + } + + private: + static ReturnType DummyFunction(Args...) { + return ReturnType(); + } + + // Current versions of MSVC (as of September 2017) have a broken + // implementation of std::atomic<T*>: Its constructor attempts to do the + // equivalent of a reinterpret_cast in a constexpr context, which is not + // allowed. + // + // This causes an issue when building with LLVM under Windows. To avoid this, + // we use a less-efficient, intptr_t-based implementation on Windows. +#if ABSL_HAVE_WORKING_ATOMIC_POINTER + // Return the stored value, or DummyFunction if no value has been stored. + FnPtr DoLoad() const { return hook_.load(std::memory_order_acquire); } + + // Store the given value. Returns false if a different value was already + // stored to this object. 
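+  // (Implementation note: this is a single compare-exchange away from the
+  // pre-registered default; re-storing the pointer that is already stored is
+  // also reported as success.)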
+ bool DoStore(FnPtr fn) { + assert(fn); + FnPtr expected = default_fn_; + const bool store_succeeded = hook_.compare_exchange_strong( + expected, fn, std::memory_order_acq_rel, std::memory_order_acquire); + const bool same_value_already_stored = (expected == fn); + return store_succeeded || same_value_already_stored; + } + + std::atomic<FnPtr> hook_; +#else // !ABSL_HAVE_WORKING_ATOMIC_POINTER + // Use a sentinel value unlikely to be the address of an actual function. + static constexpr intptr_t kUninitialized = 0; + + static_assert(sizeof(intptr_t) >= sizeof(FnPtr), + "intptr_t can't contain a function pointer"); + + FnPtr DoLoad() const { + const intptr_t value = hook_.load(std::memory_order_acquire); + if (value == kUninitialized) { + return default_fn_; + } + return reinterpret_cast<FnPtr>(value); + } + + bool DoStore(FnPtr fn) { + assert(fn); + const auto value = reinterpret_cast<intptr_t>(fn); + intptr_t expected = kUninitialized; + const bool store_succeeded = hook_.compare_exchange_strong( + expected, value, std::memory_order_acq_rel, std::memory_order_acquire); + const bool same_value_already_stored = (expected == value); + return store_succeeded || same_value_already_stored; + } + + std::atomic<intptr_t> hook_; +#endif + + const FnPtr default_fn_; +}; + +#undef ABSL_HAVE_WORKING_ATOMIC_POINTER +#undef ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/atomic_hook_test.cc b/third_party/abseil_cpp/absl/base/internal/atomic_hook_test.cc new file mode 100644 index 000000000000..e577a8fd930b --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/atomic_hook_test.cc @@ -0,0 +1,97 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/atomic_hook.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/internal/atomic_hook_test_helper.h" + +namespace { + +using ::testing::Eq; + +int value = 0; +void TestHook(int x) { value = x; } + +TEST(AtomicHookTest, NoDefaultFunction) { + ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook< + void (*)(int)> + hook; + value = 0; + + // Test the default DummyFunction. + EXPECT_TRUE(hook.Load() == nullptr); + EXPECT_EQ(value, 0); + hook(1); + EXPECT_EQ(value, 0); + + // Test a stored hook. + hook.Store(TestHook); + EXPECT_TRUE(hook.Load() == TestHook); + EXPECT_EQ(value, 0); + hook(1); + EXPECT_EQ(value, 1); + + // Calling Store() with the same hook should not crash. + hook.Store(TestHook); + EXPECT_TRUE(hook.Load() == TestHook); + EXPECT_EQ(value, 1); + hook(2); + EXPECT_EQ(value, 2); +} + +TEST(AtomicHookTest, WithDefaultFunction) { + // Set the default value to TestHook at compile-time. 
+ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook< + void (*)(int)> + hook(TestHook); + value = 0; + + // Test the default value is TestHook. + EXPECT_TRUE(hook.Load() == TestHook); + EXPECT_EQ(value, 0); + hook(1); + EXPECT_EQ(value, 1); + + // Calling Store() with the same hook should not crash. + hook.Store(TestHook); + EXPECT_TRUE(hook.Load() == TestHook); + EXPECT_EQ(value, 1); + hook(2); + EXPECT_EQ(value, 2); +} + +ABSL_CONST_INIT int override_func_calls = 0; +void OverrideFunc() { override_func_calls++; } +static struct OverrideInstaller { + OverrideInstaller() { absl::atomic_hook_internal::func.Store(OverrideFunc); } +} override_installer; + +TEST(AtomicHookTest, DynamicInitFromAnotherTU) { + // MSVC 14.2 doesn't do constexpr static init correctly; in particular it + // tends to sequence static init (i.e. defaults) of `AtomicHook` objects + // after their dynamic init (i.e. overrides), overwriting whatever value was + // written during dynamic init. This regression test validates the fix. + // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html + EXPECT_THAT(absl::atomic_hook_internal::default_func_calls, Eq(0)); + EXPECT_THAT(override_func_calls, Eq(0)); + absl::atomic_hook_internal::func(); + EXPECT_THAT(absl::atomic_hook_internal::default_func_calls, Eq(0)); + EXPECT_THAT(override_func_calls, Eq(1)); + EXPECT_THAT(absl::atomic_hook_internal::func.Load(), Eq(OverrideFunc)); +} + +} // namespace diff --git a/third_party/abseil_cpp/absl/base/internal/atomic_hook_test_helper.cc b/third_party/abseil_cpp/absl/base/internal/atomic_hook_test_helper.cc new file mode 100644 index 000000000000..537d47cd2d2f --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/atomic_hook_test_helper.cc @@ -0,0 +1,32 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/atomic_hook_test_helper.h" + +#include "absl/base/attributes.h" +#include "absl/base/internal/atomic_hook.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace atomic_hook_internal { + +ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<VoidF> + func(DefaultFunc); +ABSL_CONST_INIT int default_func_calls = 0; +void DefaultFunc() { default_func_calls++; } +void RegisterFunc(VoidF f) { func.Store(f); } + +} // namespace atomic_hook_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/internal/atomic_hook_test_helper.h b/third_party/abseil_cpp/absl/base/internal/atomic_hook_test_helper.h new file mode 100644 index 000000000000..3e72b4977d2f --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/atomic_hook_test_helper.h @@ -0,0 +1,34 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_ +#define ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_ + +#include "absl/base/internal/atomic_hook.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace atomic_hook_internal { + +using VoidF = void (*)(); +extern absl::base_internal::AtomicHook<VoidF> func; +extern int default_func_calls; +void DefaultFunc(); +void RegisterFunc(VoidF func); + +} // namespace atomic_hook_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/bits.h b/third_party/abseil_cpp/absl/base/internal/bits.h new file mode 100644 index 000000000000..14c51d8b3013 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/bits.h @@ -0,0 +1,218 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_BITS_H_ +#define ABSL_BASE_INTERNAL_BITS_H_ + +// This file contains bitwise ops which are implementation details of various +// absl libraries. + +#include <cstdint> + +#include "absl/base/config.h" + +// Clang on Windows has __builtin_clzll; otherwise we need to use the +// windows intrinsic functions. +#if defined(_MSC_VER) && !defined(__clang__) +#include <intrin.h> +#if defined(_M_X64) +#pragma intrinsic(_BitScanReverse64) +#pragma intrinsic(_BitScanForward64) +#endif +#pragma intrinsic(_BitScanReverse) +#pragma intrinsic(_BitScanForward) +#endif + +#include "absl/base/attributes.h" + +#if defined(_MSC_VER) && !defined(__clang__) +// We can achieve something similar to attribute((always_inline)) with MSVC by +// using the __forceinline keyword, however this is not perfect. MSVC is +// much less aggressive about inlining, and even with the __forceinline keyword. +#define ABSL_BASE_INTERNAL_FORCEINLINE __forceinline +#else +// Use default attribute inline. +#define ABSL_BASE_INTERNAL_FORCEINLINE inline ABSL_ATTRIBUTE_ALWAYS_INLINE +#endif + + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64Slow(uint64_t n) { + int zeroes = 60; + if (n >> 32) { + zeroes -= 32; + n >>= 32; + } + if (n >> 16) { + zeroes -= 16; + n >>= 16; + } + if (n >> 8) { + zeroes -= 8; + n >>= 8; + } + if (n >> 4) { + zeroes -= 4; + n >>= 4; + } + return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes; +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64(uint64_t n) { +#if defined(_MSC_VER) && !defined(__clang__) && defined(_M_X64) + // MSVC does not have __buitin_clzll. Use _BitScanReverse64. 
+ unsigned long result = 0; // NOLINT(runtime/int) + if (_BitScanReverse64(&result, n)) { + return 63 - result; + } + return 64; +#elif defined(_MSC_VER) && !defined(__clang__) + // MSVC does not have __buitin_clzll. Compose two calls to _BitScanReverse + unsigned long result = 0; // NOLINT(runtime/int) + if ((n >> 32) && _BitScanReverse(&result, n >> 32)) { + return 31 - result; + } + if (_BitScanReverse(&result, n)) { + return 63 - result; + } + return 64; +#elif defined(__GNUC__) || defined(__clang__) + // Use __builtin_clzll, which uses the following instructions: + // x86: bsr + // ARM64: clz + // PPC: cntlzd + static_assert(sizeof(unsigned long long) == sizeof(n), // NOLINT(runtime/int) + "__builtin_clzll does not take 64-bit arg"); + + // Handle 0 as a special case because __builtin_clzll(0) is undefined. + if (n == 0) { + return 64; + } + return __builtin_clzll(n); +#else + return CountLeadingZeros64Slow(n); +#endif +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32Slow(uint64_t n) { + int zeroes = 28; + if (n >> 16) { + zeroes -= 16; + n >>= 16; + } + if (n >> 8) { + zeroes -= 8; + n >>= 8; + } + if (n >> 4) { + zeroes -= 4; + n >>= 4; + } + return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes; +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32(uint32_t n) { +#if defined(_MSC_VER) && !defined(__clang__) + unsigned long result = 0; // NOLINT(runtime/int) + if (_BitScanReverse(&result, n)) { + return 31 - result; + } + return 32; +#elif defined(__GNUC__) || defined(__clang__) + // Use __builtin_clz, which uses the following instructions: + // x86: bsr + // ARM64: clz + // PPC: cntlzd + static_assert(sizeof(int) == sizeof(n), + "__builtin_clz does not take 32-bit arg"); + + // Handle 0 as a special case because __builtin_clz(0) is undefined. 
+ if (n == 0) { + return 32; + } + return __builtin_clz(n); +#else + return CountLeadingZeros32Slow(n); +#endif +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64Slow(uint64_t n) { + int c = 63; + n &= ~n + 1; + if (n & 0x00000000FFFFFFFF) c -= 32; + if (n & 0x0000FFFF0000FFFF) c -= 16; + if (n & 0x00FF00FF00FF00FF) c -= 8; + if (n & 0x0F0F0F0F0F0F0F0F) c -= 4; + if (n & 0x3333333333333333) c -= 2; + if (n & 0x5555555555555555) c -= 1; + return c; +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64(uint64_t n) { +#if defined(_MSC_VER) && !defined(__clang__) && defined(_M_X64) + unsigned long result = 0; // NOLINT(runtime/int) + _BitScanForward64(&result, n); + return result; +#elif defined(_MSC_VER) && !defined(__clang__) + unsigned long result = 0; // NOLINT(runtime/int) + if (static_cast<uint32_t>(n) == 0) { + _BitScanForward(&result, n >> 32); + return result + 32; + } + _BitScanForward(&result, n); + return result; +#elif defined(__GNUC__) || defined(__clang__) + static_assert(sizeof(unsigned long long) == sizeof(n), // NOLINT(runtime/int) + "__builtin_ctzll does not take 64-bit arg"); + return __builtin_ctzll(n); +#else + return CountTrailingZerosNonZero64Slow(n); +#endif +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32Slow(uint32_t n) { + int c = 31; + n &= ~n + 1; + if (n & 0x0000FFFF) c -= 16; + if (n & 0x00FF00FF) c -= 8; + if (n & 0x0F0F0F0F) c -= 4; + if (n & 0x33333333) c -= 2; + if (n & 0x55555555) c -= 1; + return c; +} + +ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32(uint32_t n) { +#if defined(_MSC_VER) && !defined(__clang__) + unsigned long result = 0; // NOLINT(runtime/int) + _BitScanForward(&result, n); + return result; +#elif defined(__GNUC__) || defined(__clang__) + static_assert(sizeof(int) == sizeof(n), + "__builtin_ctz does not take 32-bit arg"); + return __builtin_ctz(n); +#else + return CountTrailingZerosNonZero32Slow(n); +#endif +} + +#undef ABSL_BASE_INTERNAL_FORCEINLINE + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_BITS_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/bits_test.cc b/third_party/abseil_cpp/absl/base/internal/bits_test.cc new file mode 100644 index 000000000000..7855fa629724 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/bits_test.cc @@ -0,0 +1,97 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
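(Editor's aside, not part of the vendored diff: the counting helpers in bits.h above are easiest to read against concrete inputs. The sketch below assumes only that the header is reachable via the same include path used throughout this tree; note the NonZero variants assume a non-zero argument, since the underlying __builtin_ctz/__builtin_ctzll calls are undefined for zero.)

#include <cstdint>
#include <cstdio>

#include "absl/base/internal/bits.h"

int main() {
  // 1 << 4 == 0x10: 59 leading zeros and 4 trailing zeros in a 64-bit word.
  const uint64_t x = uint64_t{1} << 4;
  std::printf("%d\n", absl::base_internal::CountLeadingZeros64(x));          // 59
  std::printf("%d\n", absl::base_internal::CountTrailingZerosNonZero64(x));  // 4

  // The 32-bit variants mirror the 64-bit ones for uint32_t inputs.
  const uint32_t y = uint32_t{1} << 31;
  std::printf("%d\n", absl::base_internal::CountLeadingZeros32(y));          // 0
  std::printf("%d\n", absl::base_internal::CountTrailingZerosNonZero32(y));  // 31
  return 0;
}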
+ +#include "absl/base/internal/bits.h" + +#include "gtest/gtest.h" + +namespace { + +int CLZ64(uint64_t n) { + int fast = absl::base_internal::CountLeadingZeros64(n); + int slow = absl::base_internal::CountLeadingZeros64Slow(n); + EXPECT_EQ(fast, slow) << n; + return fast; +} + +TEST(BitsTest, CountLeadingZeros64) { + EXPECT_EQ(64, CLZ64(uint64_t{})); + EXPECT_EQ(0, CLZ64(~uint64_t{})); + + for (int index = 0; index < 64; index++) { + uint64_t x = static_cast<uint64_t>(1) << index; + const auto cnt = 63 - index; + ASSERT_EQ(cnt, CLZ64(x)) << index; + ASSERT_EQ(cnt, CLZ64(x + x - 1)) << index; + } +} + +int CLZ32(uint32_t n) { + int fast = absl::base_internal::CountLeadingZeros32(n); + int slow = absl::base_internal::CountLeadingZeros32Slow(n); + EXPECT_EQ(fast, slow) << n; + return fast; +} + +TEST(BitsTest, CountLeadingZeros32) { + EXPECT_EQ(32, CLZ32(uint32_t{})); + EXPECT_EQ(0, CLZ32(~uint32_t{})); + + for (int index = 0; index < 32; index++) { + uint32_t x = static_cast<uint32_t>(1) << index; + const auto cnt = 31 - index; + ASSERT_EQ(cnt, CLZ32(x)) << index; + ASSERT_EQ(cnt, CLZ32(x + x - 1)) << index; + ASSERT_EQ(CLZ64(x), CLZ32(x) + 32); + } +} + +int CTZ64(uint64_t n) { + int fast = absl::base_internal::CountTrailingZerosNonZero64(n); + int slow = absl::base_internal::CountTrailingZerosNonZero64Slow(n); + EXPECT_EQ(fast, slow) << n; + return fast; +} + +TEST(BitsTest, CountTrailingZerosNonZero64) { + EXPECT_EQ(0, CTZ64(~uint64_t{})); + + for (int index = 0; index < 64; index++) { + uint64_t x = static_cast<uint64_t>(1) << index; + const auto cnt = index; + ASSERT_EQ(cnt, CTZ64(x)) << index; + ASSERT_EQ(cnt, CTZ64(~(x - 1))) << index; + } +} + +int CTZ32(uint32_t n) { + int fast = absl::base_internal::CountTrailingZerosNonZero32(n); + int slow = absl::base_internal::CountTrailingZerosNonZero32Slow(n); + EXPECT_EQ(fast, slow) << n; + return fast; +} + +TEST(BitsTest, CountTrailingZerosNonZero32) { + EXPECT_EQ(0, CTZ32(~uint32_t{})); + + for (int index = 0; index < 32; index++) { + uint32_t x = static_cast<uint32_t>(1) << index; + const auto cnt = index; + ASSERT_EQ(cnt, CTZ32(x)) << index; + ASSERT_EQ(cnt, CTZ32(~(x - 1))) << index; + } +} + + +} // namespace diff --git a/third_party/abseil_cpp/absl/base/internal/cmake_thread_test.cc b/third_party/abseil_cpp/absl/base/internal/cmake_thread_test.cc new file mode 100644 index 000000000000..f70bb24eb7b4 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/cmake_thread_test.cc @@ -0,0 +1,22 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include <iostream> +#include "absl/base/internal/thread_identity.h" + +int main() { + auto* tid = absl::base_internal::CurrentThreadIdentityIfPresent(); + // Make sure the above call can't be optimized out + std::cout << (void*)tid << std::endl; +} diff --git a/third_party/abseil_cpp/absl/base/internal/cycleclock.cc b/third_party/abseil_cpp/absl/base/internal/cycleclock.cc new file mode 100644 index 000000000000..0e65005b8914 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/cycleclock.cc @@ -0,0 +1,107 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The implementation of CycleClock::Frequency. +// +// NOTE: only i386 and x86_64 have been well tested. +// PPC, sparc, alpha, and ia64 are based on +// http://peter.kuscsik.com/wordpress/?p=14 +// with modifications by m3b. See also +// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h + +#include "absl/base/internal/cycleclock.h" + +#include <atomic> +#include <chrono> // NOLINT(build/c++11) + +#include "absl/base/internal/unscaledcycleclock.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +#if ABSL_USE_UNSCALED_CYCLECLOCK + +namespace { + +#ifdef NDEBUG +#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY +// Not debug mode and the UnscaledCycleClock frequency is the CPU +// frequency. Scale the CycleClock to prevent overflow if someone +// tries to represent the time as cycles since the Unix epoch. +static constexpr int32_t kShift = 1; +#else +// Not debug mode and the UnscaledCycleClock isn't operating at the +// raw CPU frequency. There is no need to do any scaling, so don't +// needlessly sacrifice precision. +static constexpr int32_t kShift = 0; +#endif +#else +// In debug mode use a different shift to discourage depending on a +// particular shift value. +static constexpr int32_t kShift = 2; +#endif + +static constexpr double kFrequencyScale = 1.0 / (1 << kShift); +static std::atomic<CycleClockSourceFunc> cycle_clock_source; + +CycleClockSourceFunc LoadCycleClockSource() { + // Optimize for the common case (no callback) by first doing a relaxed load; + // this is significantly faster on non-x86 platforms. + if (cycle_clock_source.load(std::memory_order_relaxed) == nullptr) { + return nullptr; + } + // This corresponds to the store(std::memory_order_release) in + // CycleClockSource::Register, and makes sure that any updates made prior to + // registering the callback are visible to this thread before the callback is + // invoked. 
+ return cycle_clock_source.load(std::memory_order_acquire); +} + +} // namespace + +int64_t CycleClock::Now() { + auto fn = LoadCycleClockSource(); + if (fn == nullptr) { + return base_internal::UnscaledCycleClock::Now() >> kShift; + } + return fn() >> kShift; +} + +double CycleClock::Frequency() { + return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency(); +} + +void CycleClockSource::Register(CycleClockSourceFunc source) { + // Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource. + cycle_clock_source.store(source, std::memory_order_release); +} + +#else + +int64_t CycleClock::Now() { + return std::chrono::duration_cast<std::chrono::nanoseconds>( + std::chrono::steady_clock::now().time_since_epoch()) + .count(); +} + +double CycleClock::Frequency() { + return 1e9; +} + +#endif + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/internal/cycleclock.h b/third_party/abseil_cpp/absl/base/internal/cycleclock.h new file mode 100644 index 000000000000..a18b58444560 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/cycleclock.h @@ -0,0 +1,94 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// ----------------------------------------------------------------------------- +// File: cycleclock.h +// ----------------------------------------------------------------------------- +// +// This header file defines a `CycleClock`, which yields the value and frequency +// of a cycle counter that increments at a rate that is approximately constant. +// +// NOTE: +// +// The cycle counter frequency is not necessarily related to the core clock +// frequency and should not be treated as such. That is, `CycleClock` cycles are +// not necessarily "CPU cycles" and code should not rely on that behavior, even +// if experimentally observed. +// +// An arbitrary offset may have been added to the counter at power on. +// +// On some platforms, the rate and offset of the counter may differ +// slightly when read from different CPUs of a multiprocessor. Usually, +// we try to ensure that the operating system adjusts values periodically +// so that values agree approximately. If you need stronger guarantees, +// consider using alternate interfaces. +// +// The CPU is not required to maintain the ordering of a cycle counter read +// with respect to surrounding instructions. + +#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_ +#define ABSL_BASE_INTERNAL_CYCLECLOCK_H_ + +#include <cstdint> + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// ----------------------------------------------------------------------------- +// CycleClock +// ----------------------------------------------------------------------------- +class CycleClock { + public: + // CycleClock::Now() + // + // Returns the value of a cycle counter that counts at a rate that is + // approximately constant. 
+ static int64_t Now(); + + // CycleClock::Frequency() + // + // Returns the amount by which `CycleClock::Now()` increases per second. Note + // that this value may not necessarily match the core CPU clock frequency. + static double Frequency(); + + private: + CycleClock() = delete; // no instances + CycleClock(const CycleClock&) = delete; + CycleClock& operator=(const CycleClock&) = delete; +}; + +using CycleClockSourceFunc = int64_t (*)(); + +class CycleClockSource { + private: + // CycleClockSource::Register() + // + // Register a function that provides an alternate source for the unscaled CPU + // cycle count value. The source function must be async signal safe, must not + // call CycleClock::Now(), and must have a frequency that matches that of the + // unscaled clock used by CycleClock. A nullptr value resets CycleClock to use + // the default source. + static void Register(CycleClockSourceFunc source); +}; + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_CYCLECLOCK_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/direct_mmap.h b/third_party/abseil_cpp/absl/base/internal/direct_mmap.h new file mode 100644 index 000000000000..16accf096604 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/direct_mmap.h @@ -0,0 +1,166 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Functions for directly invoking mmap() via syscall, avoiding the case where +// mmap() has been locally overridden. + +#ifndef ABSL_BASE_INTERNAL_DIRECT_MMAP_H_ +#define ABSL_BASE_INTERNAL_DIRECT_MMAP_H_ + +#include "absl/base/config.h" + +#if ABSL_HAVE_MMAP + +#include <sys/mman.h> + +#ifdef __linux__ + +#include <sys/types.h> +#ifdef __BIONIC__ +#include <sys/syscall.h> +#else +#include <syscall.h> +#endif + +#include <linux/unistd.h> +#include <unistd.h> +#include <cerrno> +#include <cstdarg> +#include <cstdint> + +#ifdef __mips__ +// Include definitions of the ABI currently in use. +#ifdef __BIONIC__ +// Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the +// definitions we need. +#include <asm/sgidefs.h> +#else +#include <sgidefs.h> +#endif // __BIONIC__ +#endif // __mips__ + +// SYS_mmap and SYS_munmap are not defined in Android. 
+#ifdef __BIONIC__ +extern "C" void* __mmap2(void*, size_t, int, int, int, size_t); +#if defined(__NR_mmap) && !defined(SYS_mmap) +#define SYS_mmap __NR_mmap +#endif +#ifndef SYS_munmap +#define SYS_munmap __NR_munmap +#endif +#endif // __BIONIC__ + +#if defined(__NR_mmap2) && !defined(SYS_mmap2) +#define SYS_mmap2 __NR_mmap2 +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// Platform specific logic extracted from +// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h +inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, + off64_t offset) noexcept { +#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \ + (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \ + (defined(__PPC__) && !defined(__PPC64__)) || \ + (defined(__riscv) && __riscv_xlen == 32) || \ + (defined(__s390__) && !defined(__s390x__)) + // On these architectures, implement mmap with mmap2. + static int pagesize = 0; + if (pagesize == 0) { +#if defined(__wasm__) || defined(__asmjs__) + pagesize = getpagesize(); +#else + pagesize = sysconf(_SC_PAGESIZE); +#endif + } + if (offset < 0 || offset % pagesize != 0) { + errno = EINVAL; + return MAP_FAILED; + } +#ifdef __BIONIC__ + // SYS_mmap2 has problems on Android API level <= 16. + // Workaround by invoking __mmap2() instead. + return __mmap2(start, length, prot, flags, fd, offset / pagesize); +#else + return reinterpret_cast<void*>( + syscall(SYS_mmap2, start, length, prot, flags, fd, + static_cast<off_t>(offset / pagesize))); +#endif +#elif defined(__s390x__) + // On s390x, mmap() arguments are passed in memory. + unsigned long buf[6] = {reinterpret_cast<unsigned long>(start), // NOLINT + static_cast<unsigned long>(length), // NOLINT + static_cast<unsigned long>(prot), // NOLINT + static_cast<unsigned long>(flags), // NOLINT + static_cast<unsigned long>(fd), // NOLINT + static_cast<unsigned long>(offset)}; // NOLINT + return reinterpret_cast<void*>(syscall(SYS_mmap, buf)); +#elif defined(__x86_64__) +// The x32 ABI has 32 bit longs, but the syscall interface is 64 bit. +// We need to explicitly cast to an unsigned 64 bit type to avoid implicit +// sign extension. We can't cast pointers directly because those are +// 32 bits, and gcc will dump ugly warnings about casting from a pointer +// to an integer of a different size. We also need to make sure __off64_t +// isn't truncated to 32-bits under x32. +#define MMAP_SYSCALL_ARG(x) ((uint64_t)(uintptr_t)(x)) + return reinterpret_cast<void*>( + syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length), + MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags), + MMAP_SYSCALL_ARG(fd), static_cast<uint64_t>(offset))); +#undef MMAP_SYSCALL_ARG +#else // Remaining 64-bit aritectures. + static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit"); + return reinterpret_cast<void*>( + syscall(SYS_mmap, start, length, prot, flags, fd, offset)); +#endif +} + +inline int DirectMunmap(void* start, size_t length) { + return static_cast<int>(syscall(SYS_munmap, start, length)); +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#else // !__linux__ + +// For non-linux platforms where we have mmap, just dispatch directly to the +// actual mmap()/munmap() methods. 
+ +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, + off_t offset) { + return mmap(start, length, prot, flags, fd, offset); +} + +inline int DirectMunmap(void* start, size_t length) { + return munmap(start, length); +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // __linux__ + +#endif // ABSL_HAVE_MMAP + +#endif // ABSL_BASE_INTERNAL_DIRECT_MMAP_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/endian.h b/third_party/abseil_cpp/absl/base/internal/endian.h new file mode 100644 index 000000000000..9677530e8de3 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/endian.h @@ -0,0 +1,266 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_ +#define ABSL_BASE_INTERNAL_ENDIAN_H_ + +// The following guarantees declaration of the byte swap functions +#ifdef _MSC_VER +#include <stdlib.h> // NOLINT(build/include) +#elif defined(__FreeBSD__) +#include <sys/endian.h> +#elif defined(__GLIBC__) +#include <byteswap.h> // IWYU pragma: export +#endif + +#include <cstdint> +#include "absl/base/config.h" +#include "absl/base/internal/unaligned_access.h" +#include "absl/base/port.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// Use compiler byte-swapping intrinsics if they are available. 32-bit +// and 64-bit versions are available in Clang and GCC as of GCC 4.3.0. +// The 16-bit version is available in Clang and GCC only as of GCC 4.8.0. +// For simplicity, we enable them all only for GCC 4.8.0 or later. +#if defined(__clang__) || \ + (defined(__GNUC__) && \ + ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5)) +inline uint64_t gbswap_64(uint64_t host_int) { + return __builtin_bswap64(host_int); +} +inline uint32_t gbswap_32(uint32_t host_int) { + return __builtin_bswap32(host_int); +} +inline uint16_t gbswap_16(uint16_t host_int) { + return __builtin_bswap16(host_int); +} + +#elif defined(_MSC_VER) +inline uint64_t gbswap_64(uint64_t host_int) { + return _byteswap_uint64(host_int); +} +inline uint32_t gbswap_32(uint32_t host_int) { + return _byteswap_ulong(host_int); +} +inline uint16_t gbswap_16(uint16_t host_int) { + return _byteswap_ushort(host_int); +} + +#else +inline uint64_t gbswap_64(uint64_t host_int) { +#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__) + // Adapted from /usr/include/byteswap.h. Not available on Mac. 
+ if (__builtin_constant_p(host_int)) { + return __bswap_constant_64(host_int); + } else { + uint64_t result; + __asm__("bswap %0" : "=r"(result) : "0"(host_int)); + return result; + } +#elif defined(__GLIBC__) + return bswap_64(host_int); +#else + return (((host_int & uint64_t{0xFF}) << 56) | + ((host_int & uint64_t{0xFF00}) << 40) | + ((host_int & uint64_t{0xFF0000}) << 24) | + ((host_int & uint64_t{0xFF000000}) << 8) | + ((host_int & uint64_t{0xFF00000000}) >> 8) | + ((host_int & uint64_t{0xFF0000000000}) >> 24) | + ((host_int & uint64_t{0xFF000000000000}) >> 40) | + ((host_int & uint64_t{0xFF00000000000000}) >> 56)); +#endif // bswap_64 +} + +inline uint32_t gbswap_32(uint32_t host_int) { +#if defined(__GLIBC__) + return bswap_32(host_int); +#else + return (((host_int & uint32_t{0xFF}) << 24) | + ((host_int & uint32_t{0xFF00}) << 8) | + ((host_int & uint32_t{0xFF0000}) >> 8) | + ((host_int & uint32_t{0xFF000000}) >> 24)); +#endif +} + +inline uint16_t gbswap_16(uint16_t host_int) { +#if defined(__GLIBC__) + return bswap_16(host_int); +#else + return (((host_int & uint16_t{0xFF}) << 8) | + ((host_int & uint16_t{0xFF00}) >> 8)); +#endif +} + +#endif // intrinsics available + +#ifdef ABSL_IS_LITTLE_ENDIAN + +// Definitions for ntohl etc. that don't require us to include +// netinet/in.h. We wrap gbswap_32 and gbswap_16 in functions rather +// than just #defining them because in debug mode, gcc doesn't +// correctly handle the (rather involved) definitions of bswap_32. +// gcc guarantees that inline functions are as fast as macros, so +// this isn't a performance hit. +inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); } +inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); } +inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); } + +#elif defined ABSL_IS_BIG_ENDIAN + +// These definitions are simpler on big-endian machines +// These are functions instead of macros to avoid self-assignment warnings +// on calls such as "i = ghtnol(i);". This also provides type checking. +inline uint16_t ghtons(uint16_t x) { return x; } +inline uint32_t ghtonl(uint32_t x) { return x; } +inline uint64_t ghtonll(uint64_t x) { return x; } + +#else +#error \ + "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \ + "ABSL_IS_LITTLE_ENDIAN must be defined" +#endif // byte order + +inline uint16_t gntohs(uint16_t x) { return ghtons(x); } +inline uint32_t gntohl(uint32_t x) { return ghtonl(x); } +inline uint64_t gntohll(uint64_t x) { return ghtonll(x); } + +// Utilities to convert numbers between the current hosts's native byte +// order and little-endian byte order +// +// Load/Store methods are alignment safe +namespace little_endian { +// Conversion functions. 
+#ifdef ABSL_IS_LITTLE_ENDIAN + +inline uint16_t FromHost16(uint16_t x) { return x; } +inline uint16_t ToHost16(uint16_t x) { return x; } + +inline uint32_t FromHost32(uint32_t x) { return x; } +inline uint32_t ToHost32(uint32_t x) { return x; } + +inline uint64_t FromHost64(uint64_t x) { return x; } +inline uint64_t ToHost64(uint64_t x) { return x; } + +inline constexpr bool IsLittleEndian() { return true; } + +#elif defined ABSL_IS_BIG_ENDIAN + +inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); } +inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); } + +inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); } +inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); } + +inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); } +inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); } + +inline constexpr bool IsLittleEndian() { return false; } + +#endif /* ENDIAN */ + +// Functions to do unaligned loads and stores in little-endian order. +inline uint16_t Load16(const void *p) { + return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); +} + +inline void Store16(void *p, uint16_t v) { + ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v)); +} + +inline uint32_t Load32(const void *p) { + return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); +} + +inline void Store32(void *p, uint32_t v) { + ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v)); +} + +inline uint64_t Load64(const void *p) { + return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); +} + +inline void Store64(void *p, uint64_t v) { + ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v)); +} + +} // namespace little_endian + +// Utilities to convert numbers between the current hosts's native byte +// order and big-endian byte order (same as network byte order) +// +// Load/Store methods are alignment safe +namespace big_endian { +#ifdef ABSL_IS_LITTLE_ENDIAN + +inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); } +inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); } + +inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); } +inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); } + +inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); } +inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); } + +inline constexpr bool IsLittleEndian() { return true; } + +#elif defined ABSL_IS_BIG_ENDIAN + +inline uint16_t FromHost16(uint16_t x) { return x; } +inline uint16_t ToHost16(uint16_t x) { return x; } + +inline uint32_t FromHost32(uint32_t x) { return x; } +inline uint32_t ToHost32(uint32_t x) { return x; } + +inline uint64_t FromHost64(uint64_t x) { return x; } +inline uint64_t ToHost64(uint64_t x) { return x; } + +inline constexpr bool IsLittleEndian() { return false; } + +#endif /* ENDIAN */ + +// Functions to do unaligned loads and stores in big-endian order. 
+inline uint16_t Load16(const void *p) { + return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); +} + +inline void Store16(void *p, uint16_t v) { + ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v)); +} + +inline uint32_t Load32(const void *p) { + return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); +} + +inline void Store32(void *p, uint32_t v) { + ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v)); +} + +inline uint64_t Load64(const void *p) { + return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); +} + +inline void Store64(void *p, uint64_t v) { + ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v)); +} + +} // namespace big_endian + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_ENDIAN_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/endian_test.cc b/third_party/abseil_cpp/absl/base/internal/endian_test.cc new file mode 100644 index 000000000000..a1691b1f82c0 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/endian_test.cc @@ -0,0 +1,263 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/endian.h" + +#include <algorithm> +#include <cstdint> +#include <limits> +#include <random> +#include <vector> + +#include "gtest/gtest.h" +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace { + +const uint64_t kInitialNumber{0x0123456789abcdef}; +const uint64_t k64Value{kInitialNumber}; +const uint32_t k32Value{0x01234567}; +const uint16_t k16Value{0x0123}; +const int kNumValuesToTest = 1000000; +const int kRandomSeed = 12345; + +#if defined(ABSL_IS_BIG_ENDIAN) +const uint64_t kInitialInNetworkOrder{kInitialNumber}; +const uint64_t k64ValueLE{0xefcdab8967452301}; +const uint32_t k32ValueLE{0x67452301}; +const uint16_t k16ValueLE{0x2301}; + +const uint64_t k64ValueBE{kInitialNumber}; +const uint32_t k32ValueBE{k32Value}; +const uint16_t k16ValueBE{k16Value}; +#elif defined(ABSL_IS_LITTLE_ENDIAN) +const uint64_t kInitialInNetworkOrder{0xefcdab8967452301}; +const uint64_t k64ValueLE{kInitialNumber}; +const uint32_t k32ValueLE{k32Value}; +const uint16_t k16ValueLE{k16Value}; + +const uint64_t k64ValueBE{0xefcdab8967452301}; +const uint32_t k32ValueBE{0x67452301}; +const uint16_t k16ValueBE{0x2301}; +#endif + +std::vector<uint16_t> GenerateAllUint16Values() { + std::vector<uint16_t> result; + result.reserve(size_t{1} << (sizeof(uint16_t) * 8)); + for (uint32_t i = std::numeric_limits<uint16_t>::min(); + i <= std::numeric_limits<uint16_t>::max(); ++i) { + result.push_back(static_cast<uint16_t>(i)); + } + return result; +} + +template<typename T> +std::vector<T> GenerateRandomIntegers(size_t num_values_to_test) { + std::vector<T> result; + result.reserve(num_values_to_test); + std::mt19937_64 rng(kRandomSeed); + for (size_t i = 0; i < num_values_to_test; ++i) { + result.push_back(rng()); + } + return result; +} + +void ManualByteSwap(char* bytes, int length) { + if (length == 1) + return; + + EXPECT_EQ(0, length % 2); + for (int i = 0; i < length / 2; ++i) { + 
int j = (length - 1) - i; + using std::swap; + swap(bytes[i], bytes[j]); + } +} + +template<typename T> +inline T UnalignedLoad(const char* p) { + static_assert( + sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8, + "Unexpected type size"); + + switch (sizeof(T)) { + case 1: return *reinterpret_cast<const T*>(p); + case 2: + return ABSL_INTERNAL_UNALIGNED_LOAD16(p); + case 4: + return ABSL_INTERNAL_UNALIGNED_LOAD32(p); + case 8: + return ABSL_INTERNAL_UNALIGNED_LOAD64(p); + default: + // Suppresses invalid "not all control paths return a value" on MSVC + return {}; + } +} + +template <typename T, typename ByteSwapper> +static void GBSwapHelper(const std::vector<T>& host_values_to_test, + const ByteSwapper& byte_swapper) { + // Test byte_swapper against a manual byte swap. + for (typename std::vector<T>::const_iterator it = host_values_to_test.begin(); + it != host_values_to_test.end(); ++it) { + T host_value = *it; + + char actual_value[sizeof(host_value)]; + memcpy(actual_value, &host_value, sizeof(host_value)); + byte_swapper(actual_value); + + char expected_value[sizeof(host_value)]; + memcpy(expected_value, &host_value, sizeof(host_value)); + ManualByteSwap(expected_value, sizeof(host_value)); + + ASSERT_EQ(0, memcmp(actual_value, expected_value, sizeof(host_value))) + << "Swap output for 0x" << std::hex << host_value << " does not match. " + << "Expected: 0x" << UnalignedLoad<T>(expected_value) << "; " + << "actual: 0x" << UnalignedLoad<T>(actual_value); + } +} + +void Swap16(char* bytes) { + ABSL_INTERNAL_UNALIGNED_STORE16( + bytes, gbswap_16(ABSL_INTERNAL_UNALIGNED_LOAD16(bytes))); +} + +void Swap32(char* bytes) { + ABSL_INTERNAL_UNALIGNED_STORE32( + bytes, gbswap_32(ABSL_INTERNAL_UNALIGNED_LOAD32(bytes))); +} + +void Swap64(char* bytes) { + ABSL_INTERNAL_UNALIGNED_STORE64( + bytes, gbswap_64(ABSL_INTERNAL_UNALIGNED_LOAD64(bytes))); +} + +TEST(EndianessTest, Uint16) { + GBSwapHelper(GenerateAllUint16Values(), &Swap16); +} + +TEST(EndianessTest, Uint32) { + GBSwapHelper(GenerateRandomIntegers<uint32_t>(kNumValuesToTest), &Swap32); +} + +TEST(EndianessTest, Uint64) { + GBSwapHelper(GenerateRandomIntegers<uint64_t>(kNumValuesToTest), &Swap64); +} + +TEST(EndianessTest, ghtonll_gntohll) { + // Test that absl::ghtonl compiles correctly + uint32_t test = 0x01234567; + EXPECT_EQ(absl::gntohl(absl::ghtonl(test)), test); + + uint64_t comp = absl::ghtonll(kInitialNumber); + EXPECT_EQ(comp, kInitialInNetworkOrder); + comp = absl::gntohll(kInitialInNetworkOrder); + EXPECT_EQ(comp, kInitialNumber); + + // Test that htonll and ntohll are each others' inverse functions on a + // somewhat assorted batch of numbers. 37 is chosen to not be anything + // particularly nice base 2. + uint64_t value = 1; + for (int i = 0; i < 100; ++i) { + comp = absl::ghtonll(absl::gntohll(value)); + EXPECT_EQ(value, comp); + comp = absl::gntohll(absl::ghtonll(value)); + EXPECT_EQ(value, comp); + value *= 37; + } +} + +TEST(EndianessTest, little_endian) { + // Check little_endian uint16_t. + uint64_t comp = little_endian::FromHost16(k16Value); + EXPECT_EQ(comp, k16ValueLE); + comp = little_endian::ToHost16(k16ValueLE); + EXPECT_EQ(comp, k16Value); + + // Check little_endian uint32_t. + comp = little_endian::FromHost32(k32Value); + EXPECT_EQ(comp, k32ValueLE); + comp = little_endian::ToHost32(k32ValueLE); + EXPECT_EQ(comp, k32Value); + + // Check little_endian uint64_t. 
+ comp = little_endian::FromHost64(k64Value); + EXPECT_EQ(comp, k64ValueLE); + comp = little_endian::ToHost64(k64ValueLE); + EXPECT_EQ(comp, k64Value); + + // Check little-endian Load and store functions. + uint16_t u16Buf; + uint32_t u32Buf; + uint64_t u64Buf; + + little_endian::Store16(&u16Buf, k16Value); + EXPECT_EQ(u16Buf, k16ValueLE); + comp = little_endian::Load16(&u16Buf); + EXPECT_EQ(comp, k16Value); + + little_endian::Store32(&u32Buf, k32Value); + EXPECT_EQ(u32Buf, k32ValueLE); + comp = little_endian::Load32(&u32Buf); + EXPECT_EQ(comp, k32Value); + + little_endian::Store64(&u64Buf, k64Value); + EXPECT_EQ(u64Buf, k64ValueLE); + comp = little_endian::Load64(&u64Buf); + EXPECT_EQ(comp, k64Value); +} + +TEST(EndianessTest, big_endian) { + // Check big-endian Load and store functions. + uint16_t u16Buf; + uint32_t u32Buf; + uint64_t u64Buf; + + unsigned char buffer[10]; + big_endian::Store16(&u16Buf, k16Value); + EXPECT_EQ(u16Buf, k16ValueBE); + uint64_t comp = big_endian::Load16(&u16Buf); + EXPECT_EQ(comp, k16Value); + + big_endian::Store32(&u32Buf, k32Value); + EXPECT_EQ(u32Buf, k32ValueBE); + comp = big_endian::Load32(&u32Buf); + EXPECT_EQ(comp, k32Value); + + big_endian::Store64(&u64Buf, k64Value); + EXPECT_EQ(u64Buf, k64ValueBE); + comp = big_endian::Load64(&u64Buf); + EXPECT_EQ(comp, k64Value); + + big_endian::Store16(buffer + 1, k16Value); + EXPECT_EQ(u16Buf, k16ValueBE); + comp = big_endian::Load16(buffer + 1); + EXPECT_EQ(comp, k16Value); + + big_endian::Store32(buffer + 1, k32Value); + EXPECT_EQ(u32Buf, k32ValueBE); + comp = big_endian::Load32(buffer + 1); + EXPECT_EQ(comp, k32Value); + + big_endian::Store64(buffer + 1, k64Value); + EXPECT_EQ(u64Buf, k64ValueBE); + comp = big_endian::Load64(buffer + 1); + EXPECT_EQ(comp, k64Value); +} + +} // namespace +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/internal/errno_saver.h b/third_party/abseil_cpp/absl/base/internal/errno_saver.h new file mode 100644 index 000000000000..251de510fc93 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/errno_saver.h @@ -0,0 +1,43 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_ERRNO_SAVER_H_ +#define ABSL_BASE_INTERNAL_ERRNO_SAVER_H_ + +#include <cerrno> + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// `ErrnoSaver` captures the value of `errno` upon construction and restores it +// upon deletion. It is used in low-level code and must be super fast. Do not +// add instrumentation, even in debug modes. 
+class ErrnoSaver { + public: + ErrnoSaver() : saved_errno_(errno) {} + ~ErrnoSaver() { errno = saved_errno_; } + int operator()() const { return saved_errno_; } + + private: + const int saved_errno_; +}; + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_ERRNO_SAVER_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/errno_saver_test.cc b/third_party/abseil_cpp/absl/base/internal/errno_saver_test.cc new file mode 100644 index 000000000000..e9b742c588b0 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/errno_saver_test.cc @@ -0,0 +1,45 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/errno_saver.h" + +#include <cerrno> + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/internal/strerror.h" + +namespace { +using ::testing::Eq; + +struct ErrnoPrinter { + int no; +}; +std::ostream &operator<<(std::ostream &os, ErrnoPrinter ep) { + return os << absl::base_internal::StrError(ep.no) << " [" << ep.no << "]"; +} +bool operator==(ErrnoPrinter one, ErrnoPrinter two) { return one.no == two.no; } + +TEST(ErrnoSaverTest, Works) { + errno = EDOM; + { + absl::base_internal::ErrnoSaver errno_saver; + EXPECT_THAT(ErrnoPrinter{errno}, Eq(ErrnoPrinter{EDOM})); + errno = ERANGE; + EXPECT_THAT(ErrnoPrinter{errno}, Eq(ErrnoPrinter{ERANGE})); + EXPECT_THAT(ErrnoPrinter{errno_saver()}, Eq(ErrnoPrinter{EDOM})); + } + EXPECT_THAT(ErrnoPrinter{errno}, Eq(ErrnoPrinter{EDOM})); +} +} // namespace diff --git a/third_party/abseil_cpp/absl/base/internal/exception_safety_testing.cc b/third_party/abseil_cpp/absl/base/internal/exception_safety_testing.cc new file mode 100644 index 000000000000..6ccac41864b1 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/exception_safety_testing.cc @@ -0,0 +1,79 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
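(Editor's aside, not part of the vendored diff: a minimal sketch of the intended ErrnoSaver pattern from the class above. The file path and function name are purely illustrative; the idea is to construct the saver at the top of any scope that may clobber errno, so the captured value is restored when the scope exits.)

#include <cerrno>
#include <cstdio>

#include "absl/base/internal/errno_saver.h"

void LogWithoutClobberingErrno() {
  absl::base_internal::ErrnoSaver saved;  // captures the current errno
  std::FILE* f = std::fopen("/path/that/does/not/exist", "r");  // likely sets errno
  if (f == nullptr) {
    // operator() returns the value captured at construction.
    std::printf("fopen failed; errno on entry was %d\n", saved());
  } else {
    std::fclose(f);
  }
}  // ~ErrnoSaver() restores the errno captured at construction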
+ +#include "absl/base/internal/exception_safety_testing.h" + +#ifdef ABSL_HAVE_EXCEPTIONS + +#include "gtest/gtest.h" +#include "absl/meta/type_traits.h" + +namespace testing { + +exceptions_internal::NoThrowTag nothrow_ctor; + +exceptions_internal::StrongGuaranteeTagType strong_guarantee; + +exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester() { + return {}; +} + +namespace exceptions_internal { + +int countdown = -1; + +ConstructorTracker* ConstructorTracker::current_tracker_instance_ = nullptr; + +void MaybeThrow(absl::string_view msg, bool throw_bad_alloc) { + if (countdown-- == 0) { + if (throw_bad_alloc) throw TestBadAllocException(msg); + throw TestException(msg); + } +} + +testing::AssertionResult FailureMessage(const TestException& e, + int countdown) noexcept { + return testing::AssertionFailure() << "Exception thrown from " << e.what(); +} + +std::string GetSpecString(TypeSpec spec) { + std::string out; + absl::string_view sep; + const auto append = [&](absl::string_view s) { + absl::StrAppend(&out, sep, s); + sep = " | "; + }; + if (static_cast<bool>(TypeSpec::kNoThrowCopy & spec)) { + append("kNoThrowCopy"); + } + if (static_cast<bool>(TypeSpec::kNoThrowMove & spec)) { + append("kNoThrowMove"); + } + if (static_cast<bool>(TypeSpec::kNoThrowNew & spec)) { + append("kNoThrowNew"); + } + return out; +} + +std::string GetSpecString(AllocSpec spec) { + return static_cast<bool>(AllocSpec::kNoThrowAllocate & spec) + ? "kNoThrowAllocate" + : ""; +} + +} // namespace exceptions_internal + +} // namespace testing + +#endif // ABSL_HAVE_EXCEPTIONS diff --git a/third_party/abseil_cpp/absl/base/internal/exception_safety_testing.h b/third_party/abseil_cpp/absl/base/internal/exception_safety_testing.h new file mode 100644 index 000000000000..6ba89d05dfca --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/exception_safety_testing.h @@ -0,0 +1,1101 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
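(Editor's aside, not part of the vendored diff: the countdown defined in the .cc above, together with SetCountdown()/UnsetCountdown() declared in the header that follows, drives MaybeThrow(). Arming the countdown with n makes the (n+1)-th MaybeThrow() call throw a TestException. The sketch below assumes a build with ABSL_HAVE_EXCEPTIONS defined.)

#include <cstdio>

#include "absl/base/internal/exception_safety_testing.h"

void CountdownDemo() {
  // With a countdown of 1, the first call passes and the second call throws.
  testing::exceptions_internal::SetCountdown(1);
  testing::exceptions_internal::MaybeThrow("call one");   // countdown 1 -> 0, no throw
  try {
    testing::exceptions_internal::MaybeThrow("call two");  // countdown hits 0, throws
  } catch (const testing::exceptions_internal::TestException& e) {
    std::printf("caught: %s\n", e.what());  // prints the message of the throwing call: "call two"
  }
  testing::exceptions_internal::UnsetCountdown();  // reset to the terminal value -1
}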
+ +// Utilities for testing exception-safety + +#ifndef ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_ +#define ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_ + +#include "absl/base/config.h" + +#ifdef ABSL_HAVE_EXCEPTIONS + +#include <cstddef> +#include <cstdint> +#include <functional> +#include <initializer_list> +#include <iosfwd> +#include <string> +#include <tuple> +#include <unordered_map> + +#include "gtest/gtest.h" +#include "absl/base/internal/pretty_function.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/string_view.h" +#include "absl/strings/substitute.h" +#include "absl/utility/utility.h" + +namespace testing { + +enum class TypeSpec; +enum class AllocSpec; + +constexpr TypeSpec operator|(TypeSpec a, TypeSpec b) { + using T = absl::underlying_type_t<TypeSpec>; + return static_cast<TypeSpec>(static_cast<T>(a) | static_cast<T>(b)); +} + +constexpr TypeSpec operator&(TypeSpec a, TypeSpec b) { + using T = absl::underlying_type_t<TypeSpec>; + return static_cast<TypeSpec>(static_cast<T>(a) & static_cast<T>(b)); +} + +constexpr AllocSpec operator|(AllocSpec a, AllocSpec b) { + using T = absl::underlying_type_t<AllocSpec>; + return static_cast<AllocSpec>(static_cast<T>(a) | static_cast<T>(b)); +} + +constexpr AllocSpec operator&(AllocSpec a, AllocSpec b) { + using T = absl::underlying_type_t<AllocSpec>; + return static_cast<AllocSpec>(static_cast<T>(a) & static_cast<T>(b)); +} + +namespace exceptions_internal { + +std::string GetSpecString(TypeSpec); +std::string GetSpecString(AllocSpec); + +struct NoThrowTag {}; +struct StrongGuaranteeTagType {}; + +// A simple exception class. We throw this so that test code can catch +// exceptions specifically thrown by ThrowingValue. +class TestException { + public: + explicit TestException(absl::string_view msg) : msg_(msg) {} + virtual ~TestException() {} + virtual const char* what() const noexcept { return msg_.c_str(); } + + private: + std::string msg_; +}; + +// TestBadAllocException exists because allocation functions must throw an +// exception which can be caught by a handler of std::bad_alloc. We use a child +// class of std::bad_alloc so we can customise the error message, and also +// derive from TestException so we don't accidentally end up catching an actual +// bad_alloc exception in TestExceptionSafety. +class TestBadAllocException : public std::bad_alloc, public TestException { + public: + explicit TestBadAllocException(absl::string_view msg) : TestException(msg) {} + using TestException::what; +}; + +extern int countdown; + +// Allows the countdown variable to be set manually (defaulting to the initial +// value of 0) +inline void SetCountdown(int i = 0) { countdown = i; } +// Sets the countdown to the terminal value -1 +inline void UnsetCountdown() { SetCountdown(-1); } + +void MaybeThrow(absl::string_view msg, bool throw_bad_alloc = false); + +testing::AssertionResult FailureMessage(const TestException& e, + int countdown) noexcept; + +struct TrackedAddress { + bool is_alive; + std::string description; +}; + +// Inspects the constructions and destructions of anything inheriting from +// TrackedObject. This allows us to safely "leak" TrackedObjects, as +// ConstructorTracker will destroy everything left over in its destructor. 
+class ConstructorTracker { + public: + explicit ConstructorTracker(int count) : countdown_(count) { + assert(current_tracker_instance_ == nullptr); + current_tracker_instance_ = this; + } + + ~ConstructorTracker() { + assert(current_tracker_instance_ == this); + current_tracker_instance_ = nullptr; + + for (auto& it : address_map_) { + void* address = it.first; + TrackedAddress& tracked_address = it.second; + if (tracked_address.is_alive) { + ADD_FAILURE() << ErrorMessage(address, tracked_address.description, + countdown_, "Object was not destroyed."); + } + } + } + + static void ObjectConstructed(void* address, std::string description) { + if (!CurrentlyTracking()) return; + + TrackedAddress& tracked_address = + current_tracker_instance_->address_map_[address]; + if (tracked_address.is_alive) { + ADD_FAILURE() << ErrorMessage( + address, tracked_address.description, + current_tracker_instance_->countdown_, + "Object was re-constructed. Current object was constructed by " + + description); + } + tracked_address = {true, std::move(description)}; + } + + static void ObjectDestructed(void* address) { + if (!CurrentlyTracking()) return; + + auto it = current_tracker_instance_->address_map_.find(address); + // Not tracked. Ignore. + if (it == current_tracker_instance_->address_map_.end()) return; + + TrackedAddress& tracked_address = it->second; + if (!tracked_address.is_alive) { + ADD_FAILURE() << ErrorMessage(address, tracked_address.description, + current_tracker_instance_->countdown_, + "Object was re-destroyed."); + } + tracked_address.is_alive = false; + } + + private: + static bool CurrentlyTracking() { + return current_tracker_instance_ != nullptr; + } + + static std::string ErrorMessage(void* address, + const std::string& address_description, + int countdown, + const std::string& error_description) { + return absl::Substitute( + "With coundtown at $0:\n" + " $1\n" + " Object originally constructed by $2\n" + " Object address: $3\n", + countdown, error_description, address_description, address); + } + + std::unordered_map<void*, TrackedAddress> address_map_; + int countdown_; + + static ConstructorTracker* current_tracker_instance_; +}; + +class TrackedObject { + public: + TrackedObject(const TrackedObject&) = delete; + TrackedObject(TrackedObject&&) = delete; + + protected: + explicit TrackedObject(std::string description) { + ConstructorTracker::ObjectConstructed(this, std::move(description)); + } + + ~TrackedObject() noexcept { ConstructorTracker::ObjectDestructed(this); } +}; +} // namespace exceptions_internal + +extern exceptions_internal::NoThrowTag nothrow_ctor; + +extern exceptions_internal::StrongGuaranteeTagType strong_guarantee; + +// A test class which is convertible to bool. The conversion can be +// instrumented to throw at a controlled time. +class ThrowingBool { + public: + ThrowingBool(bool b) noexcept : b_(b) {} // NOLINT(runtime/explicit) + operator bool() const { // NOLINT + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return b_; + } + + private: + bool b_; +}; + +/* + * Configuration enum for the ThrowingValue type that defines behavior for the + * lifetime of the instance. Use testing::nothrow_ctor to prevent the integer + * constructor from throwing. 
+ * + * kEverythingThrows: Every operation can throw an exception + * kNoThrowCopy: Copy construction and copy assignment will not throw + * kNoThrowMove: Move construction and move assignment will not throw + * kNoThrowNew: Overloaded operators new and new[] will not throw + */ +enum class TypeSpec { + kEverythingThrows = 0, + kNoThrowCopy = 1, + kNoThrowMove = 1 << 1, + kNoThrowNew = 1 << 2, +}; + +/* + * A testing class instrumented to throw an exception at a controlled time. + * + * ThrowingValue implements a slightly relaxed version of the Regular concept -- + * that is it's a value type with the expected semantics. It also implements + * arithmetic operations. It doesn't implement member and pointer operators + * like operator-> or operator[]. + * + * ThrowingValue can be instrumented to have certain operations be noexcept by + * using compile-time bitfield template arguments. That is, to make an + * ThrowingValue which has noexcept move construction/assignment and noexcept + * copy construction/assignment, use the following: + * ThrowingValue<testing::kNoThrowMove | testing::kNoThrowCopy> my_thrwr{val}; + */ +template <TypeSpec Spec = TypeSpec::kEverythingThrows> +class ThrowingValue : private exceptions_internal::TrackedObject { + static constexpr bool IsSpecified(TypeSpec spec) { + return static_cast<bool>(Spec & spec); + } + + static constexpr int kDefaultValue = 0; + static constexpr int kBadValue = 938550620; + + public: + ThrowingValue() : TrackedObject(GetInstanceString(kDefaultValue)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ = kDefaultValue; + } + + ThrowingValue(const ThrowingValue& other) noexcept( + IsSpecified(TypeSpec::kNoThrowCopy)) + : TrackedObject(GetInstanceString(other.dummy_)) { + if (!IsSpecified(TypeSpec::kNoThrowCopy)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + } + + ThrowingValue(ThrowingValue&& other) noexcept( + IsSpecified(TypeSpec::kNoThrowMove)) + : TrackedObject(GetInstanceString(other.dummy_)) { + if (!IsSpecified(TypeSpec::kNoThrowMove)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + } + + explicit ThrowingValue(int i) : TrackedObject(GetInstanceString(i)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ = i; + } + + ThrowingValue(int i, exceptions_internal::NoThrowTag) noexcept + : TrackedObject(GetInstanceString(i)), dummy_(i) {} + + // absl expects nothrow destructors + ~ThrowingValue() noexcept = default; + + ThrowingValue& operator=(const ThrowingValue& other) noexcept( + IsSpecified(TypeSpec::kNoThrowCopy)) { + dummy_ = kBadValue; + if (!IsSpecified(TypeSpec::kNoThrowCopy)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + return *this; + } + + ThrowingValue& operator=(ThrowingValue&& other) noexcept( + IsSpecified(TypeSpec::kNoThrowMove)) { + dummy_ = kBadValue; + if (!IsSpecified(TypeSpec::kNoThrowMove)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + return *this; + } + + // Arithmetic Operators + ThrowingValue operator+(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ + other.dummy_, nothrow_ctor); + } + + ThrowingValue operator+() const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_, nothrow_ctor); + } + + ThrowingValue operator-(const ThrowingValue& other) const { + 
exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ - other.dummy_, nothrow_ctor); + } + + ThrowingValue operator-() const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(-dummy_, nothrow_ctor); + } + + ThrowingValue& operator++() { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + ++dummy_; + return *this; + } + + ThrowingValue operator++(int) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + auto out = ThrowingValue(dummy_, nothrow_ctor); + ++dummy_; + return out; + } + + ThrowingValue& operator--() { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + --dummy_; + return *this; + } + + ThrowingValue operator--(int) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + auto out = ThrowingValue(dummy_, nothrow_ctor); + --dummy_; + return out; + } + + ThrowingValue operator*(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ * other.dummy_, nothrow_ctor); + } + + ThrowingValue operator/(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ / other.dummy_, nothrow_ctor); + } + + ThrowingValue operator%(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ % other.dummy_, nothrow_ctor); + } + + ThrowingValue operator<<(int shift) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ << shift, nothrow_ctor); + } + + ThrowingValue operator>>(int shift) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ >> shift, nothrow_ctor); + } + + // Comparison Operators + // NOTE: We use `ThrowingBool` instead of `bool` because most STL + // types/containers requires T to be convertible to bool. 
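Since every noexcept specification on ThrowingValue's special members is driven by the Spec template argument, the behavior described in the class comment can be observed with the standard type traits. A minimal sketch, assuming the header is included from its usual path and that exceptions and GoogleTest (which the header depends on) are available in the build:

#include <type_traits>

#include "absl/base/internal/exception_safety_testing.h"

// kNoThrowMove turns the move constructor into noexcept(true); the default
// kEverythingThrows configuration leaves it noexcept(false).
static_assert(
    std::is_nothrow_move_constructible<
        testing::ThrowingValue<testing::TypeSpec::kNoThrowMove>>::value,
    "kNoThrowMove should make move construction noexcept");
static_assert(
    !std::is_nothrow_move_constructible<testing::ThrowingValue<>>::value,
    "the default spec is allowed to throw on move");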
+ friend ThrowingBool operator==(const ThrowingValue& a, + const ThrowingValue& b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ == b.dummy_; + } + friend ThrowingBool operator!=(const ThrowingValue& a, + const ThrowingValue& b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ != b.dummy_; + } + friend ThrowingBool operator<(const ThrowingValue& a, + const ThrowingValue& b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ < b.dummy_; + } + friend ThrowingBool operator<=(const ThrowingValue& a, + const ThrowingValue& b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ <= b.dummy_; + } + friend ThrowingBool operator>(const ThrowingValue& a, + const ThrowingValue& b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ > b.dummy_; + } + friend ThrowingBool operator>=(const ThrowingValue& a, + const ThrowingValue& b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ >= b.dummy_; + } + + // Logical Operators + ThrowingBool operator!() const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return !dummy_; + } + + ThrowingBool operator&&(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return dummy_ && other.dummy_; + } + + ThrowingBool operator||(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return dummy_ || other.dummy_; + } + + // Bitwise Logical Operators + ThrowingValue operator~() const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(~dummy_, nothrow_ctor); + } + + ThrowingValue operator&(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ & other.dummy_, nothrow_ctor); + } + + ThrowingValue operator|(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ | other.dummy_, nothrow_ctor); + } + + ThrowingValue operator^(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ ^ other.dummy_, nothrow_ctor); + } + + // Compound Assignment operators + ThrowingValue& operator+=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ += other.dummy_; + return *this; + } + + ThrowingValue& operator-=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ -= other.dummy_; + return *this; + } + + ThrowingValue& operator*=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ *= other.dummy_; + return *this; + } + + ThrowingValue& operator/=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ /= other.dummy_; + return *this; + } + + ThrowingValue& operator%=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ %= other.dummy_; + return *this; + } + + ThrowingValue& operator&=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ &= other.dummy_; + return *this; + } + + ThrowingValue& operator|=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ |= other.dummy_; + return *this; + } + + ThrowingValue& operator^=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ ^= 
other.dummy_; + return *this; + } + + ThrowingValue& operator<<=(int shift) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ <<= shift; + return *this; + } + + ThrowingValue& operator>>=(int shift) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ >>= shift; + return *this; + } + + // Pointer operators + void operator&() const = delete; // NOLINT(runtime/operator) + + // Stream operators + friend std::ostream& operator<<(std::ostream& os, const ThrowingValue& tv) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return os << GetInstanceString(tv.dummy_); + } + + friend std::istream& operator>>(std::istream& is, const ThrowingValue&) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return is; + } + + // Memory management operators + // Args.. allows us to overload regular and placement new in one shot + template <typename... Args> + static void* operator new(size_t s, Args&&... args) noexcept( + IsSpecified(TypeSpec::kNoThrowNew)) { + if (!IsSpecified(TypeSpec::kNoThrowNew)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new(s, std::forward<Args>(args)...); + } + + template <typename... Args> + static void* operator new[](size_t s, Args&&... args) noexcept( + IsSpecified(TypeSpec::kNoThrowNew)) { + if (!IsSpecified(TypeSpec::kNoThrowNew)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new[](s, std::forward<Args>(args)...); + } + + // Abseil doesn't support throwing overloaded operator delete. These are + // provided so a throwing operator-new can clean up after itself. + // + // We provide both regular and templated operator delete because if only the + // templated version is provided as we did with operator new, the compiler has + // no way of knowing which overload of operator delete to call. See + // https://en.cppreference.com/w/cpp/memory/new/operator_delete and + // https://en.cppreference.com/w/cpp/language/delete for the gory details. + void operator delete(void* p) noexcept { ::operator delete(p); } + + template <typename... Args> + void operator delete(void* p, Args&&... args) noexcept { + ::operator delete(p, std::forward<Args>(args)...); + } + + void operator delete[](void* p) noexcept { return ::operator delete[](p); } + + template <typename... Args> + void operator delete[](void* p, Args&&... args) noexcept { + return ::operator delete[](p, std::forward<Args>(args)...); + } + + // Non-standard access to the actual contained value. No need for this to + // throw. + int& Get() noexcept { return dummy_; } + const int& Get() const noexcept { return dummy_; } + + private: + static std::string GetInstanceString(int dummy) { + return absl::StrCat("ThrowingValue<", + exceptions_internal::GetSpecString(Spec), ">(", dummy, + ")"); + } + + int dummy_; +}; +// While not having to do with exceptions, explicitly delete comma operator, to +// make sure we don't use it on user-supplied types. +template <TypeSpec Spec, typename T> +void operator,(const ThrowingValue<Spec>&, T&&) = delete; +template <TypeSpec Spec, typename T> +void operator,(T&&, const ThrowingValue<Spec>&) = delete; + +/* + * Configuration enum for the ThrowingAllocator type that defines behavior for + * the lifetime of the instance. 
+ * + * kEverythingThrows: Calls to the member functions may throw + * kNoThrowAllocate: Calls to the member functions will not throw + */ +enum class AllocSpec { + kEverythingThrows = 0, + kNoThrowAllocate = 1, +}; + +/* + * An allocator type which is instrumented to throw at a controlled time, or not + * to throw, using AllocSpec. The supported settings are the default of every + * function which is allowed to throw in a conforming allocator possibly + * throwing, or nothing throws, in line with the ABSL_ALLOCATOR_THROWS + * configuration macro. + */ +template <typename T, AllocSpec Spec = AllocSpec::kEverythingThrows> +class ThrowingAllocator : private exceptions_internal::TrackedObject { + static constexpr bool IsSpecified(AllocSpec spec) { + return static_cast<bool>(Spec & spec); + } + + public: + using pointer = T*; + using const_pointer = const T*; + using reference = T&; + using const_reference = const T&; + using void_pointer = void*; + using const_void_pointer = const void*; + using value_type = T; + using size_type = size_t; + using difference_type = ptrdiff_t; + + using is_nothrow = + std::integral_constant<bool, Spec == AllocSpec::kNoThrowAllocate>; + using propagate_on_container_copy_assignment = std::true_type; + using propagate_on_container_move_assignment = std::true_type; + using propagate_on_container_swap = std::true_type; + using is_always_equal = std::false_type; + + ThrowingAllocator() : TrackedObject(GetInstanceString(next_id_)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ = std::make_shared<const int>(next_id_++); + } + + template <typename U> + ThrowingAllocator(const ThrowingAllocator<U, Spec>& other) noexcept // NOLINT + : TrackedObject(GetInstanceString(*other.State())), + dummy_(other.State()) {} + + // According to C++11 standard [17.6.3.5], Table 28, the move/copy ctors of + // allocator shall not exit via an exception, thus they are marked noexcept. + ThrowingAllocator(const ThrowingAllocator& other) noexcept + : TrackedObject(GetInstanceString(*other.State())), + dummy_(other.State()) {} + + template <typename U> + ThrowingAllocator(ThrowingAllocator<U, Spec>&& other) noexcept // NOLINT + : TrackedObject(GetInstanceString(*other.State())), + dummy_(std::move(other.State())) {} + + ThrowingAllocator(ThrowingAllocator&& other) noexcept + : TrackedObject(GetInstanceString(*other.State())), + dummy_(std::move(other.State())) {} + + ~ThrowingAllocator() noexcept = default; + + ThrowingAllocator& operator=(const ThrowingAllocator& other) noexcept { + dummy_ = other.State(); + return *this; + } + + template <typename U> + ThrowingAllocator& operator=( + const ThrowingAllocator<U, Spec>& other) noexcept { + dummy_ = other.State(); + return *this; + } + + template <typename U> + ThrowingAllocator& operator=(ThrowingAllocator<U, Spec>&& other) noexcept { + dummy_ = std::move(other.State()); + return *this; + } + + template <typename U> + struct rebind { + using other = ThrowingAllocator<U, Spec>; + }; + + pointer allocate(size_type n) noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate)) { + ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); + return static_cast<pointer>(::operator new(n * sizeof(T))); + } + + pointer allocate(size_type n, const_void_pointer) noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate)) { + return allocate(n); + } + + void deallocate(pointer ptr, size_type) noexcept { + ReadState(); + ::operator delete(static_cast<void*>(ptr)); + } + + template <typename U, typename... Args> + void construct(U* ptr, Args&&... 
args) noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate)) { + ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); + ::new (static_cast<void*>(ptr)) U(std::forward<Args>(args)...); + } + + template <typename U> + void destroy(U* p) noexcept { + ReadState(); + p->~U(); + } + + size_type max_size() const noexcept { + return (std::numeric_limits<difference_type>::max)() / sizeof(value_type); + } + + ThrowingAllocator select_on_container_copy_construction() noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate)) { + auto& out = *this; + ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); + return out; + } + + template <typename U> + bool operator==(const ThrowingAllocator<U, Spec>& other) const noexcept { + return dummy_ == other.dummy_; + } + + template <typename U> + bool operator!=(const ThrowingAllocator<U, Spec>& other) const noexcept { + return dummy_ != other.dummy_; + } + + template <typename, AllocSpec> + friend class ThrowingAllocator; + + private: + static std::string GetInstanceString(int dummy) { + return absl::StrCat("ThrowingAllocator<", + exceptions_internal::GetSpecString(Spec), ">(", dummy, + ")"); + } + + const std::shared_ptr<const int>& State() const { return dummy_; } + std::shared_ptr<const int>& State() { return dummy_; } + + void ReadState() { + // we know that this will never be true, but the compiler doesn't, so this + // should safely force a read of the value. + if (*dummy_ < 0) std::abort(); + } + + void ReadStateAndMaybeThrow(absl::string_view msg) const { + if (!IsSpecified(AllocSpec::kNoThrowAllocate)) { + exceptions_internal::MaybeThrow( + absl::Substitute("Allocator id $0 threw from $1", *dummy_, msg)); + } + } + + static int next_id_; + std::shared_ptr<const int> dummy_; +}; + +template <typename T, AllocSpec Spec> +int ThrowingAllocator<T, Spec>::next_id_ = 0; + +// Tests for resource leaks by attempting to construct a T using args repeatedly +// until successful, using the countdown method. Side effects can then be +// tested for resource leaks. +template <typename T, typename... Args> +void TestThrowingCtor(Args&&... args) { + struct Cleanup { + ~Cleanup() { exceptions_internal::UnsetCountdown(); } + } c; + for (int count = 0;; ++count) { + exceptions_internal::ConstructorTracker ct(count); + exceptions_internal::SetCountdown(count); + try { + T temp(std::forward<Args>(args)...); + static_cast<void>(temp); + break; + } catch (const exceptions_internal::TestException&) { + } + } +} + +// Tests the nothrow guarantee of the provided nullary operation. If the an +// exception is thrown, the result will be AssertionFailure(). Otherwise, it +// will be AssertionSuccess(). +template <typename Operation> +testing::AssertionResult TestNothrowOp(const Operation& operation) { + struct Cleanup { + Cleanup() { exceptions_internal::SetCountdown(); } + ~Cleanup() { exceptions_internal::UnsetCountdown(); } + } c; + try { + operation(); + return testing::AssertionSuccess(); + } catch (const exceptions_internal::TestException&) { + return testing::AssertionFailure() + << "TestException thrown during call to operation() when nothrow " + "guarantee was expected."; + } catch (...) { + return testing::AssertionFailure() + << "Unknown exception thrown during call to operation() when " + "nothrow guarantee was expected."; + } +} + +namespace exceptions_internal { + +// Dummy struct for ExceptionSafetyTestBuilder<> partial state. 
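TestThrowingCtor() and TestNothrowOp() above are meant to be called directly from test code. A rough usage sketch, assuming GoogleTest and the exception-safety testing library are linked in; the test names and values are illustrative only:

#include <utility>

#include "gtest/gtest.h"
#include "absl/base/internal/exception_safety_testing.h"

// Constructs ThrowingValue<>(42) repeatedly, advancing the throw countdown by
// one step per attempt; the ConstructorTracker reports any object that leaks
// along the way.
TEST(ExceptionSafetyHelpersExample, IntCtorDoesNotLeak) {
  testing::TestThrowingCtor<testing::ThrowingValue<>>(42);
}

// With kNoThrowMove the move constructor is noexcept, so the operation below
// must succeed even though TestNothrowOp arms the countdown before calling it.
TEST(ExceptionSafetyHelpersExample, MoveIsNothrow) {
  testing::ThrowingValue<testing::TypeSpec::kNoThrowMove> v(
      7, testing::nothrow_ctor);
  EXPECT_TRUE(testing::TestNothrowOp([&v] {
    auto moved = std::move(v);
    static_cast<void>(moved);
  }));
}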
+struct UninitializedT {}; + +template <typename T> +class DefaultFactory { + public: + explicit DefaultFactory(const T& t) : t_(t) {} + std::unique_ptr<T> operator()() const { return absl::make_unique<T>(t_); } + + private: + T t_; +}; + +template <size_t LazyContractsCount, typename LazyFactory, + typename LazyOperation> +using EnableIfTestable = typename absl::enable_if_t< + LazyContractsCount != 0 && + !std::is_same<LazyFactory, UninitializedT>::value && + !std::is_same<LazyOperation, UninitializedT>::value>; + +template <typename Factory = UninitializedT, + typename Operation = UninitializedT, typename... Contracts> +class ExceptionSafetyTestBuilder; + +} // namespace exceptions_internal + +/* + * Constructs an empty ExceptionSafetyTestBuilder. All + * ExceptionSafetyTestBuilder objects are immutable and all With[thing] mutation + * methods return new instances of ExceptionSafetyTestBuilder. + * + * In order to test a T for exception safety, a factory for that T, a testable + * operation, and at least one contract callback returning an assertion + * result must be applied using the respective methods. + */ +exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester(); + +namespace exceptions_internal { +template <typename T> +struct IsUniquePtr : std::false_type {}; + +template <typename T, typename D> +struct IsUniquePtr<std::unique_ptr<T, D>> : std::true_type {}; + +template <typename Factory> +struct FactoryPtrTypeHelper { + using type = decltype(std::declval<const Factory&>()()); + + static_assert(IsUniquePtr<type>::value, "Factories must return a unique_ptr"); +}; + +template <typename Factory> +using FactoryPtrType = typename FactoryPtrTypeHelper<Factory>::type; + +template <typename Factory> +using FactoryElementType = typename FactoryPtrType<Factory>::element_type; + +template <typename T> +class ExceptionSafetyTest { + using Factory = std::function<std::unique_ptr<T>()>; + using Operation = std::function<void(T*)>; + using Contract = std::function<AssertionResult(T*)>; + + public: + template <typename... Contracts> + explicit ExceptionSafetyTest(const Factory& f, const Operation& op, + const Contracts&... contracts) + : factory_(f), operation_(op), contracts_{WrapContract(contracts)...} {} + + AssertionResult Test() const { + for (int count = 0;; ++count) { + exceptions_internal::ConstructorTracker ct(count); + + for (const auto& contract : contracts_) { + auto t_ptr = factory_(); + try { + SetCountdown(count); + operation_(t_ptr.get()); + // Unset for the case that the operation throws no exceptions, which + // would leave the countdown set and break the *next* exception safety + // test after this one. + UnsetCountdown(); + return AssertionSuccess(); + } catch (const exceptions_internal::TestException& e) { + if (!contract(t_ptr.get())) { + return AssertionFailure() << e.what() << " failed contract check"; + } + } + } + } + } + + private: + template <typename ContractFn> + Contract WrapContract(const ContractFn& contract) { + return [contract](T* t_ptr) { return AssertionResult(contract(t_ptr)); }; + } + + Contract WrapContract(StrongGuaranteeTagType) { + return [this](T* t_ptr) { return AssertionResult(*factory_() == *t_ptr); }; + } + + Factory factory_; + Operation operation_; + std::vector<Contract> contracts_; +}; + +/* + * Builds a tester object that tests if performing a operation on a T follows + * exception safety guarantees. Verification is done via contract assertion + * callbacks applied to T instances post-throw. 
+ * + * Template parameters for ExceptionSafetyTestBuilder: + * + * - Factory: The factory object (passed in via tester.WithFactory(...) or + * tester.WithInitialValue(...)) must be invocable with the signature + * `std::unique_ptr<T> operator()() const` where T is the type being tested. + * It is used for reliably creating identical T instances to test on. + * + * - Operation: The operation object (passsed in via tester.WithOperation(...) + * or tester.Test(...)) must be invocable with the signature + * `void operator()(T*) const` where T is the type being tested. It is used + * for performing steps on a T instance that may throw and that need to be + * checked for exception safety. Each call to the operation will receive a + * fresh T instance so it's free to modify and destroy the T instances as it + * pleases. + * + * - Contracts...: The contract assertion callback objects (passed in via + * tester.WithContracts(...)) must be invocable with the signature + * `testing::AssertionResult operator()(T*) const` where T is the type being + * tested. Contract assertion callbacks are provided T instances post-throw. + * They must return testing::AssertionSuccess when the type contracts of the + * provided T instance hold. If the type contracts of the T instance do not + * hold, they must return testing::AssertionFailure. Execution order of + * Contracts... is unspecified. They will each individually get a fresh T + * instance so they are free to modify and destroy the T instances as they + * please. + */ +template <typename Factory, typename Operation, typename... Contracts> +class ExceptionSafetyTestBuilder { + public: + /* + * Returns a new ExceptionSafetyTestBuilder with an included T factory based + * on the provided T instance. The existing factory will not be included in + * the newly created tester instance. The created factory returns a new T + * instance by copy-constructing the provided const T& t. + * + * Preconditions for tester.WithInitialValue(const T& t): + * + * - The const T& t object must be copy-constructible where T is the type + * being tested. For non-copy-constructible objects, use the method + * tester.WithFactory(...). + */ + template <typename T> + ExceptionSafetyTestBuilder<DefaultFactory<T>, Operation, Contracts...> + WithInitialValue(const T& t) const { + return WithFactory(DefaultFactory<T>(t)); + } + + /* + * Returns a new ExceptionSafetyTestBuilder with the provided T factory + * included. The existing factory will not be included in the newly-created + * tester instance. This method is intended for use with types lacking a copy + * constructor. Types that can be copy-constructed should instead use the + * method tester.WithInitialValue(...). + */ + template <typename NewFactory> + ExceptionSafetyTestBuilder<absl::decay_t<NewFactory>, Operation, Contracts...> + WithFactory(const NewFactory& new_factory) const { + return {new_factory, operation_, contracts_}; + } + + /* + * Returns a new ExceptionSafetyTestBuilder with the provided testable + * operation included. The existing operation will not be included in the + * newly created tester. + */ + template <typename NewOperation> + ExceptionSafetyTestBuilder<Factory, absl::decay_t<NewOperation>, Contracts...> + WithOperation(const NewOperation& new_operation) const { + return {factory_, new_operation, contracts_}; + } + + /* + * Returns a new ExceptionSafetyTestBuilder with the provided MoreContracts... + * combined with the Contracts... that were already included in the instance + * on which the method was called. 
Contracts... cannot be removed or replaced + * once added to an ExceptionSafetyTestBuilder instance. A fresh object must + * be created in order to get an empty Contracts... list. + * + * In addition to passing in custom contract assertion callbacks, this method + * accepts `testing::strong_guarantee` as an argument which checks T instances + * post-throw against freshly created T instances via operator== to verify + * that any state changes made during the execution of the operation were + * properly rolled back. + */ + template <typename... MoreContracts> + ExceptionSafetyTestBuilder<Factory, Operation, Contracts..., + absl::decay_t<MoreContracts>...> + WithContracts(const MoreContracts&... more_contracts) const { + return { + factory_, operation_, + std::tuple_cat(contracts_, std::tuple<absl::decay_t<MoreContracts>...>( + more_contracts...))}; + } + + /* + * Returns a testing::AssertionResult that is the reduced result of the + * exception safety algorithm. The algorithm short circuits and returns + * AssertionFailure after the first contract callback returns an + * AssertionFailure. Otherwise, if all contract callbacks return an + * AssertionSuccess, the reduced result is AssertionSuccess. + * + * The passed-in testable operation will not be saved in a new tester instance + * nor will it modify/replace the existing tester instance. This is useful + * when each operation being tested is unique and does not need to be reused. + * + * Preconditions for tester.Test(const NewOperation& new_operation): + * + * - May only be called after at least one contract assertion callback and a + * factory or initial value have been provided. + */ + template < + typename NewOperation, + typename = EnableIfTestable<sizeof...(Contracts), Factory, NewOperation>> + testing::AssertionResult Test(const NewOperation& new_operation) const { + return TestImpl(new_operation, absl::index_sequence_for<Contracts...>()); + } + + /* + * Returns a testing::AssertionResult that is the reduced result of the + * exception safety algorithm. The algorithm short circuits and returns + * AssertionFailure after the first contract callback returns an + * AssertionFailure. Otherwise, if all contract callbacks return an + * AssertionSuccess, the reduced result is AssertionSuccess. + * + * Preconditions for tester.Test(): + * + * - May only be called after at least one contract assertion callback, a + * factory or initial value and a testable operation have been provided. + */ + template < + typename LazyOperation = Operation, + typename = EnableIfTestable<sizeof...(Contracts), Factory, LazyOperation>> + testing::AssertionResult Test() const { + return Test(operation_); + } + + private: + template <typename, typename, typename...> + friend class ExceptionSafetyTestBuilder; + + friend ExceptionSafetyTestBuilder<> testing::MakeExceptionSafetyTester(); + + ExceptionSafetyTestBuilder() {} + + ExceptionSafetyTestBuilder(const Factory& f, const Operation& o, + const std::tuple<Contracts...>& i) + : factory_(f), operation_(o), contracts_(i) {} + + template <typename SelectedOperation, size_t... Indices> + testing::AssertionResult TestImpl(SelectedOperation selected_operation, + absl::index_sequence<Indices...>) const { + return ExceptionSafetyTest<FactoryElementType<Factory>>( + factory_, selected_operation, std::get<Indices>(contracts_)...) 
+ .Test(); + } + + Factory factory_; + Operation operation_; + std::tuple<Contracts...> contracts_; +}; + +} // namespace exceptions_internal + +} // namespace testing + +#endif // ABSL_HAVE_EXCEPTIONS + +#endif // ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/exception_testing.h b/third_party/abseil_cpp/absl/base/internal/exception_testing.h new file mode 100644 index 000000000000..01b546557117 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/exception_testing.h @@ -0,0 +1,42 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Testing utilities for ABSL types which throw exceptions. + +#ifndef ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_ +#define ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_ + +#include "gtest/gtest.h" +#include "absl/base/config.h" + +// ABSL_BASE_INTERNAL_EXPECT_FAIL tests either for a specified thrown exception +// if exceptions are enabled, or for death with a specified text in the error +// message +#ifdef ABSL_HAVE_EXCEPTIONS + +#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ + EXPECT_THROW(expr, exception_t) + +#elif defined(__ANDROID__) +// Android asserts do not log anywhere that gtest can currently inspect. +// So we expect exit, but cannot match the message. +#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ + EXPECT_DEATH(expr, ".*") +#else +#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ + EXPECT_DEATH_IF_SUPPORTED(expr, text) + +#endif + +#endif // ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/exponential_biased.cc b/third_party/abseil_cpp/absl/base/internal/exponential_biased.cc new file mode 100644 index 000000000000..1b30c061e3bd --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/exponential_biased.cc @@ -0,0 +1,93 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/exponential_biased.h" + +#include <stdint.h> + +#include <algorithm> +#include <atomic> +#include <cmath> +#include <limits> + +#include "absl/base/attributes.h" +#include "absl/base/optimization.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// The algorithm generates a random number between 0 and 1 and applies the +// inverse cumulative distribution function for an exponential. 
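Putting the pieces of exception_safety_testing.h together, here is a hypothetical end-to-end sketch of the builder API documented above. The type Foo, the alias NoThrowMoveValue, the contracts, and the test name are all made up for illustration, and the sketch assumes GoogleTest plus this library are linked in:

#include <utility>

#include "gtest/gtest.h"
#include "absl/base/internal/exception_safety_testing.h"

namespace {

// Because the member uses kNoThrowMove, the move assignment in the tested
// operation is noexcept; the only step that can throw (constructing the
// temporary) happens before *foo is modified, so the strong guarantee holds.
using NoThrowMoveValue =
    testing::ThrowingValue<testing::TypeSpec::kNoThrowMove>;

struct Foo {
  NoThrowMoveValue value{0, testing::nothrow_ctor};
  bool operator==(const Foo& other) const {
    return value.Get() == other.value.Get();
  }
};

TEST(ExceptionSafetyBuilderExample, ResetIsStronglyExceptionSafe) {
  auto tester = testing::MakeExceptionSafetyTester()
                    .WithInitialValue(Foo{})
                    .WithContracts(
                        // Custom contract: the stored value never goes negative.
                        [](Foo* foo) {
                          return testing::AssertionResult(foo->value.Get() >= 0);
                        },
                        // Built-in contract: state is rolled back on throw.
                        testing::strong_guarantee);
  EXPECT_TRUE(tester.Test([](Foo* foo) {
    foo->value = NoThrowMoveValue(1);  // throws, if at all, before mutating *foo
  }));
}

}  // namespace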
Specifically: +// Let m be the inverse of the sample period, then the probability +// distribution function is m*exp(-mx) so the CDF is +// p = 1 - exp(-mx), so +// q = 1 - p = exp(-mx) +// log_e(q) = -mx +// -log_e(q)/m = x +// log_2(q) * (-log_e(2) * 1/m) = x +// In the code, q is actually in the range 1 to 2**26, hence the -26 below +int64_t ExponentialBiased::GetSkipCount(int64_t mean) { + if (ABSL_PREDICT_FALSE(!initialized_)) { + Initialize(); + } + + uint64_t rng = NextRandom(rng_); + rng_ = rng; + + // Take the top 26 bits as the random number + // (This plus the 1<<58 sampling bound give a max possible step of + // 5194297183973780480 bytes.) + // The uint32_t cast is to prevent a (hard-to-reproduce) NAN + // under piii debug for some binaries. + double q = static_cast<uint32_t>(rng >> (kPrngNumBits - 26)) + 1.0; + // Put the computed p-value through the CDF of a geometric. + double interval = bias_ + (std::log2(q) - 26) * (-std::log(2.0) * mean); + // Very large values of interval overflow int64_t. To avoid that, we will + // cheat and clamp any huge values to (int64_t max)/2. This is a potential + // source of bias, but the mean would need to be such a large value that it's + // not likely to come up. For example, with a mean of 1e18, the probability of + // hitting this condition is about 1/1000. For a mean of 1e17, standard + // calculators claim that this event won't happen. + if (interval > static_cast<double>(std::numeric_limits<int64_t>::max() / 2)) { + // Assume huge values are bias neutral, retain bias for next call. + return std::numeric_limits<int64_t>::max() / 2; + } + double value = std::round(interval); + bias_ = interval - value; + return value; +} + +int64_t ExponentialBiased::GetStride(int64_t mean) { + return GetSkipCount(mean - 1) + 1; +} + +void ExponentialBiased::Initialize() { + // We don't get well distributed numbers from `this` so we call NextRandom() a + // bunch to mush the bits around. We use a global_rand to handle the case + // where the same thread (by memory address) gets created and destroyed + // repeatedly. + ABSL_CONST_INIT static std::atomic<uint32_t> global_rand(0); + uint64_t r = reinterpret_cast<uint64_t>(this) + + global_rand.fetch_add(1, std::memory_order_relaxed); + for (int i = 0; i < 20; ++i) { + r = NextRandom(r); + } + rng_ = r; + initialized_ = true; +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/internal/exponential_biased.h b/third_party/abseil_cpp/absl/base/internal/exponential_biased.h new file mode 100644 index 000000000000..94f79a3378db --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/exponential_biased.h @@ -0,0 +1,130 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
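To see the generator implemented in exponential_biased.cc above in action, a throwaway sketch (not part of the library; the constants and output format are illustrative) that draws many strides with mean 100 and prints the empirical average, which should land close to 100:

#include <cstdint>
#include <cstdio>

#include "absl/base/internal/exponential_biased.h"

int main() {
  absl::base_internal::ExponentialBiased eb;
  constexpr int kDraws = 1000000;
  int64_t total = 0;
  for (int i = 0; i < kDraws; ++i) {
    total += eb.GetStride(100);  // rounded exponential sample, mean ~100
  }
  std::printf("average stride = %.3f (expected ~100)\n",
              static_cast<double>(total) / kDraws);
  return 0;
}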
+ +#ifndef ABSL_BASE_INTERNAL_EXPONENTIAL_BIASED_H_ +#define ABSL_BASE_INTERNAL_EXPONENTIAL_BIASED_H_ + +#include <stdint.h> + +#include "absl/base/config.h" +#include "absl/base/macros.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// ExponentialBiased provides a small and fast random number generator for a +// rounded exponential distribution. This generator manages very little state, +// and imposes no synchronization overhead. This makes it useful in specialized +// scenarios requiring minimum overhead, such as stride based periodic sampling. +// +// ExponentialBiased provides two closely related functions, GetSkipCount() and +// GetStride(), both returning a rounded integer defining a number of events +// required before some event with a given mean probability occurs. +// +// The distribution is useful to generate a random wait time or some periodic +// event with a given mean probability. For example, if an action is supposed to +// happen on average once every 'N' events, then we can get a random 'stride' +// counting down how long before the event to happen. For example, if we'd want +// to sample one in every 1000 'Frobber' calls, our code could look like this: +// +// Frobber::Frobber() { +// stride_ = exponential_biased_.GetStride(1000); +// } +// +// void Frobber::Frob(int arg) { +// if (--stride == 0) { +// SampleFrob(arg); +// stride_ = exponential_biased_.GetStride(1000); +// } +// ... +// } +// +// The rounding of the return value creates a bias, especially for smaller means +// where the distribution of the fraction is not evenly distributed. We correct +// this bias by tracking the fraction we rounded up or down on each iteration, +// effectively tracking the distance between the cumulative value, and the +// rounded cumulative value. For example, given a mean of 2: +// +// raw = 1.63076, cumulative = 1.63076, rounded = 2, bias = -0.36923 +// raw = 0.14624, cumulative = 1.77701, rounded = 2, bias = 0.14624 +// raw = 4.93194, cumulative = 6.70895, rounded = 7, bias = -0.06805 +// raw = 0.24206, cumulative = 6.95101, rounded = 7, bias = 0.24206 +// etc... +// +// Adjusting with rounding bias is relatively trivial: +// +// double value = bias_ + exponential_distribution(mean)(); +// double rounded_value = std::round(value); +// bias_ = value - rounded_value; +// return rounded_value; +// +// This class is thread-compatible. +class ExponentialBiased { + public: + // The number of bits set by NextRandom. + static constexpr int kPrngNumBits = 48; + + // `GetSkipCount()` returns the number of events to skip before some chosen + // event happens. For example, randomly tossing a coin, we will on average + // throw heads once before we get tails. We can simulate random coin tosses + // using GetSkipCount() as: + // + // ExponentialBiased eb; + // for (...) { + // int number_of_heads_before_tail = eb.GetSkipCount(1); + // for (int flips = 0; flips < number_of_heads_before_tail; ++flips) { + // printf("head..."); + // } + // printf("tail\n"); + // } + // + int64_t GetSkipCount(int64_t mean); + + // GetStride() returns the number of events required for a specific event to + // happen. See the class comments for a usage example. `GetStride()` is + // equivalent to `GetSkipCount(mean - 1) + 1`. When to use `GetStride()` or + // `GetSkipCount()` depends mostly on what best fits the use case. + int64_t GetStride(int64_t mean); + + // Computes a random number in the range [0, 1<<(kPrngNumBits+1) - 1] + // + // This is public to enable testing. 
+ static uint64_t NextRandom(uint64_t rnd); + + private: + void Initialize(); + + uint64_t rng_{0}; + double bias_{0}; + bool initialized_{false}; +}; + +// Returns the next prng value. +// pRNG is: aX+b mod c with a = 0x5DEECE66D, b = 0xB, c = 1<<48 +// This is the lrand64 generator. +inline uint64_t ExponentialBiased::NextRandom(uint64_t rnd) { + const uint64_t prng_mult = uint64_t{0x5DEECE66D}; + const uint64_t prng_add = 0xB; + const uint64_t prng_mod_power = 48; + const uint64_t prng_mod_mask = + ~((~static_cast<uint64_t>(0)) << prng_mod_power); + return (prng_mult * rnd + prng_add) & prng_mod_mask; +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_EXPONENTIAL_BIASED_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/exponential_biased_test.cc b/third_party/abseil_cpp/absl/base/internal/exponential_biased_test.cc new file mode 100644 index 000000000000..90a482d2a9d5 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/exponential_biased_test.cc @@ -0,0 +1,199 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/exponential_biased.h" + +#include <stddef.h> + +#include <cmath> +#include <cstdint> +#include <vector> + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/strings/str_cat.h" + +using ::testing::Ge; + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +MATCHER_P2(IsBetween, a, b, + absl::StrCat(std::string(negation ? "isn't" : "is"), " between ", a, + " and ", b)) { + return a <= arg && arg <= b; +} + +// Tests of the quality of the random numbers generated +// This uses the Anderson Darling test for uniformity. +// See "Evaluating the Anderson-Darling Distribution" by Marsaglia +// for details. + +// Short cut version of ADinf(z), z>0 (from Marsaglia) +// This returns the p-value for Anderson Darling statistic in +// the limit as n-> infinity. For finite n, apply the error fix below. 
+double AndersonDarlingInf(double z) { + if (z < 2) { + return exp(-1.2337141 / z) / sqrt(z) * + (2.00012 + + (0.247105 - + (0.0649821 - (0.0347962 - (0.011672 - 0.00168691 * z) * z) * z) * + z) * + z); + } + return exp( + -exp(1.0776 - + (2.30695 - + (0.43424 - (0.082433 - (0.008056 - 0.0003146 * z) * z) * z) * z) * + z)); +} + +// Corrects the approximation error in AndersonDarlingInf for small values of n +// Add this to AndersonDarlingInf to get a better approximation +// (from Marsaglia) +double AndersonDarlingErrFix(int n, double x) { + if (x > 0.8) { + return (-130.2137 + + (745.2337 - + (1705.091 - (1950.646 - (1116.360 - 255.7844 * x) * x) * x) * x) * + x) / + n; + } + double cutoff = 0.01265 + 0.1757 / n; + if (x < cutoff) { + double t = x / cutoff; + t = sqrt(t) * (1 - t) * (49 * t - 102); + return t * (0.0037 / (n * n) + 0.00078 / n + 0.00006) / n; + } else { + double t = (x - cutoff) / (0.8 - cutoff); + t = -0.00022633 + + (6.54034 - (14.6538 - (14.458 - (8.259 - 1.91864 * t) * t) * t) * t) * + t; + return t * (0.04213 + 0.01365 / n) / n; + } +} + +// Returns the AndersonDarling p-value given n and the value of the statistic +double AndersonDarlingPValue(int n, double z) { + double ad = AndersonDarlingInf(z); + double errfix = AndersonDarlingErrFix(n, ad); + return ad + errfix; +} + +double AndersonDarlingStatistic(const std::vector<double>& random_sample) { + int n = random_sample.size(); + double ad_sum = 0; + for (int i = 0; i < n; i++) { + ad_sum += (2 * i + 1) * + std::log(random_sample[i] * (1 - random_sample[n - 1 - i])); + } + double ad_statistic = -n - 1 / static_cast<double>(n) * ad_sum; + return ad_statistic; +} + +// Tests if the array of doubles is uniformly distributed. +// Returns the p-value of the Anderson Darling Statistic +// for the given set of sorted random doubles +// See "Evaluating the Anderson-Darling Distribution" by +// Marsaglia and Marsaglia for details. +double AndersonDarlingTest(const std::vector<double>& random_sample) { + double ad_statistic = AndersonDarlingStatistic(random_sample); + double p = AndersonDarlingPValue(random_sample.size(), ad_statistic); + return p; +} + +TEST(ExponentialBiasedTest, CoinTossDemoWithGetSkipCount) { + ExponentialBiased eb; + for (int runs = 0; runs < 10; ++runs) { + for (int flips = eb.GetSkipCount(1); flips > 0; --flips) { + printf("head..."); + } + printf("tail\n"); + } + int heads = 0; + for (int i = 0; i < 10000000; i += 1 + eb.GetSkipCount(1)) { + ++heads; + } + printf("Heads = %d (%f%%)\n", heads, 100.0 * heads / 10000000); +} + +TEST(ExponentialBiasedTest, SampleDemoWithStride) { + ExponentialBiased eb; + int stride = eb.GetStride(10); + int samples = 0; + for (int i = 0; i < 10000000; ++i) { + if (--stride == 0) { + ++samples; + stride = eb.GetStride(10); + } + } + printf("Samples = %d (%f%%)\n", samples, 100.0 * samples / 10000000); +} + + +// Testing that NextRandom generates uniform random numbers. Applies the +// Anderson-Darling test for uniformity +TEST(ExponentialBiasedTest, TestNextRandom) { + for (auto n : std::vector<int>({ + 10, // Check short-range correlation + 100, 1000, + 10000 // Make sure there's no systemic error + })) { + uint64_t x = 1; + // This assumes that the prng returns 48 bit numbers + uint64_t max_prng_value = static_cast<uint64_t>(1) << 48; + // Initialize. 
+ for (int i = 1; i <= 20; i++) { + x = ExponentialBiased::NextRandom(x); + } + std::vector<uint64_t> int_random_sample(n); + // Collect samples + for (int i = 0; i < n; i++) { + int_random_sample[i] = x; + x = ExponentialBiased::NextRandom(x); + } + // First sort them... + std::sort(int_random_sample.begin(), int_random_sample.end()); + std::vector<double> random_sample(n); + // Convert them to uniform randoms (in the range [0,1]) + for (int i = 0; i < n; i++) { + random_sample[i] = + static_cast<double>(int_random_sample[i]) / max_prng_value; + } + // Now compute the Anderson-Darling statistic + double ad_pvalue = AndersonDarlingTest(random_sample); + EXPECT_GT(std::min(ad_pvalue, 1 - ad_pvalue), 0.0001) + << "prng is not uniform: n = " << n << " p = " << ad_pvalue; + } +} + +// The generator needs to be available as a thread_local and as a static +// variable. +TEST(ExponentialBiasedTest, InitializationModes) { + ABSL_CONST_INIT static ExponentialBiased eb_static; + EXPECT_THAT(eb_static.GetSkipCount(2), Ge(0)); + +#if ABSL_HAVE_THREAD_LOCAL + thread_local ExponentialBiased eb_thread; + EXPECT_THAT(eb_thread.GetSkipCount(2), Ge(0)); +#endif + + ExponentialBiased eb_stack; + EXPECT_THAT(eb_stack.GetSkipCount(2), Ge(0)); +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/internal/fast_type_id.h b/third_party/abseil_cpp/absl/base/internal/fast_type_id.h new file mode 100644 index 000000000000..3db59e83745b --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/fast_type_id.h @@ -0,0 +1,48 @@ +// +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_ +#define ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_ + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +template <typename Type> +struct FastTypeTag { + constexpr static char dummy_var = 0; +}; + +template <typename Type> +constexpr char FastTypeTag<Type>::dummy_var; + +// FastTypeId<Type>() evaluates at compile/link-time to a unique pointer for the +// passed-in type. These are meant to be good match for keys into maps or +// straight up comparisons. +using FastTypeIdType = const void*; + +template <typename Type> +constexpr inline FastTypeIdType FastTypeId() { + return &FastTypeTag<Type>::dummy_var; +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/fast_type_id_test.cc b/third_party/abseil_cpp/absl/base/internal/fast_type_id_test.cc new file mode 100644 index 000000000000..16f3c1458bdf --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/fast_type_id_test.cc @@ -0,0 +1,123 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
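As a usage illustration for fast_type_id.h above, FastTypeId() values can serve as cheap, RTTI-free keys; the registry below is a made-up sketch (its names and structure are not part of Abseil):

#include <cstdio>
#include <map>
#include <string>

#include "absl/base/internal/fast_type_id.h"

namespace bi = absl::base_internal;

int main() {
  // Each distinct type yields a distinct pointer, so the ids work as ordinary
  // ordered-map keys.
  std::map<bi::FastTypeIdType, std::string> names;
  names[bi::FastTypeId<int>()] = "int";
  names[bi::FastTypeId<double>()] = "double";
  std::printf("%zu entries; int maps to %s\n", names.size(),
              names[bi::FastTypeId<int>()].c_str());
  return 0;
}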
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/fast_type_id.h" + +#include <cstdint> +#include <map> +#include <vector> + +#include "gtest/gtest.h" + +namespace { +namespace bi = absl::base_internal; + +// NOLINTNEXTLINE +#define PRIM_TYPES(A) \ + A(bool) \ + A(short) \ + A(unsigned short) \ + A(int) \ + A(unsigned int) \ + A(long) \ + A(unsigned long) \ + A(long long) \ + A(unsigned long long) \ + A(float) \ + A(double) \ + A(long double) + +TEST(FastTypeIdTest, PrimitiveTypes) { + bi::FastTypeIdType type_ids[] = { +#define A(T) bi::FastTypeId<T>(), + PRIM_TYPES(A) +#undef A +#define A(T) bi::FastTypeId<const T>(), + PRIM_TYPES(A) +#undef A +#define A(T) bi::FastTypeId<volatile T>(), + PRIM_TYPES(A) +#undef A +#define A(T) bi::FastTypeId<const volatile T>(), + PRIM_TYPES(A) +#undef A + }; + size_t total_type_ids = sizeof(type_ids) / sizeof(bi::FastTypeIdType); + + for (int i = 0; i < total_type_ids; ++i) { + EXPECT_EQ(type_ids[i], type_ids[i]); + for (int j = 0; j < i; ++j) { + EXPECT_NE(type_ids[i], type_ids[j]); + } + } +} + +#define FIXED_WIDTH_TYPES(A) \ + A(int8_t) \ + A(uint8_t) \ + A(int16_t) \ + A(uint16_t) \ + A(int32_t) \ + A(uint32_t) \ + A(int64_t) \ + A(uint64_t) + +TEST(FastTypeIdTest, FixedWidthTypes) { + bi::FastTypeIdType type_ids[] = { +#define A(T) bi::FastTypeId<T>(), + FIXED_WIDTH_TYPES(A) +#undef A +#define A(T) bi::FastTypeId<const T>(), + FIXED_WIDTH_TYPES(A) +#undef A +#define A(T) bi::FastTypeId<volatile T>(), + FIXED_WIDTH_TYPES(A) +#undef A +#define A(T) bi::FastTypeId<const volatile T>(), + FIXED_WIDTH_TYPES(A) +#undef A + }; + size_t total_type_ids = sizeof(type_ids) / sizeof(bi::FastTypeIdType); + + for (int i = 0; i < total_type_ids; ++i) { + EXPECT_EQ(type_ids[i], type_ids[i]); + for (int j = 0; j < i; ++j) { + EXPECT_NE(type_ids[i], type_ids[j]); + } + } +} + +TEST(FastTypeIdTest, AliasTypes) { + using int_alias = int; + EXPECT_EQ(bi::FastTypeId<int_alias>(), bi::FastTypeId<int>()); +} + +TEST(FastTypeIdTest, TemplateSpecializations) { + EXPECT_NE(bi::FastTypeId<std::vector<int>>(), + bi::FastTypeId<std::vector<long>>()); + + EXPECT_NE((bi::FastTypeId<std::map<int, float>>()), + (bi::FastTypeId<std::map<int, double>>())); +} + +struct Base {}; +struct Derived : Base {}; +struct PDerived : private Base {}; + +TEST(FastTypeIdTest, Inheritance) { + EXPECT_NE(bi::FastTypeId<Base>(), bi::FastTypeId<Derived>()); + EXPECT_NE(bi::FastTypeId<Base>(), bi::FastTypeId<PDerived>()); +} + +} // namespace diff --git a/third_party/abseil_cpp/absl/base/internal/hide_ptr.h b/third_party/abseil_cpp/absl/base/internal/hide_ptr.h new file mode 100644 index 000000000000..1dba80909a9b --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/hide_ptr.h @@ -0,0 +1,51 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_HIDE_PTR_H_ +#define ABSL_BASE_INTERNAL_HIDE_PTR_H_ + +#include <cstdint> + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// Arbitrary value with high bits set. Xor'ing with it is unlikely +// to map one valid pointer to another valid pointer. +constexpr uintptr_t HideMask() { + return (uintptr_t{0xF03A5F7BU} << (sizeof(uintptr_t) - 4) * 8) | 0xF03A5F7BU; +} + +// Hide a pointer from the leak checker. For internal use only. +// Differs from absl::IgnoreLeak(ptr) in that absl::IgnoreLeak(ptr) causes ptr +// and all objects reachable from ptr to be ignored by the leak checker. +template <class T> +inline uintptr_t HidePtr(T* ptr) { + return reinterpret_cast<uintptr_t>(ptr) ^ HideMask(); +} + +// Return a pointer that has been hidden from the leak checker. +// For internal use only. +template <class T> +inline T* UnhidePtr(uintptr_t hidden) { + return reinterpret_cast<T*>(hidden ^ HideMask()); +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_HIDE_PTR_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/identity.h b/third_party/abseil_cpp/absl/base/internal/identity.h new file mode 100644 index 000000000000..a3154ed7bc59 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/identity.h @@ -0,0 +1,37 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_IDENTITY_H_ +#define ABSL_BASE_INTERNAL_IDENTITY_H_ + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace internal { + +template <typename T> +struct identity { + typedef T type; +}; + +template <typename T> +using identity_t = typename identity<T>::type; + +} // namespace internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_IDENTITY_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/inline_variable.h b/third_party/abseil_cpp/absl/base/internal/inline_variable.h new file mode 100644 index 000000000000..130d8c2476dd --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/inline_variable.h @@ -0,0 +1,107 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
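A quick standalone sketch of the HidePtr()/UnhidePtr() round trip described in hide_ptr.h above (illustrative only):

#include <cassert>
#include <cstdint>

#include "absl/base/internal/hide_ptr.h"

int main() {
  int object = 42;
  // The hidden representation differs from the raw pointer bits (it is XOR'd
  // with HideMask()), so a leak checker scanning for pointer values misses it...
  uintptr_t hidden = absl::base_internal::HidePtr(&object);
  assert(hidden != reinterpret_cast<uintptr_t>(&object));
  // ...but unhiding recovers exactly the original pointer.
  int* recovered = absl::base_internal::UnhidePtr<int>(hidden);
  assert(recovered == &object);
  return *recovered == 42 ? 0 : 1;
}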
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_ +#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_ + +#include <type_traits> + +#include "absl/base/internal/identity.h" + +// File: +// This file define a macro that allows the creation of or emulation of C++17 +// inline variables based on whether or not the feature is supported. + +//////////////////////////////////////////////////////////////////////////////// +// Macro: ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) +// +// Description: +// Expands to the equivalent of an inline constexpr instance of the specified +// `type` and `name`, initialized to the value `init`. If the compiler being +// used is detected as supporting actual inline variables as a language +// feature, then the macro expands to an actual inline variable definition. +// +// Requires: +// `type` is a type that is usable in an extern variable declaration. +// +// Requires: `name` is a valid identifier +// +// Requires: +// `init` is an expression that can be used in the following definition: +// constexpr type name = init; +// +// Usage: +// +// // Equivalent to: `inline constexpr size_t variant_npos = -1;` +// ABSL_INTERNAL_INLINE_CONSTEXPR(size_t, variant_npos, -1); +// +// Differences in implementation: +// For a direct, language-level inline variable, decltype(name) will be the +// type that was specified along with const qualification, whereas for +// emulated inline variables, decltype(name) may be different (in practice +// it will likely be a reference type). +//////////////////////////////////////////////////////////////////////////////// + +#ifdef __cpp_inline_variables + +// Clang's -Wmissing-variable-declarations option erroneously warned that +// inline constexpr objects need to be pre-declared. This has now been fixed, +// but we will need to support this workaround for people building with older +// versions of clang. +// +// Bug: https://bugs.llvm.org/show_bug.cgi?id=35862 +// +// Note: +// identity_t is used here so that the const and name are in the +// appropriate place for pointer types, reference types, function pointer +// types, etc.. +#if defined(__clang__) +#define ABSL_INTERNAL_EXTERN_DECL(type, name) \ + extern const ::absl::internal::identity_t<type> name; +#else // Otherwise, just define the macro to do nothing. +#define ABSL_INTERNAL_EXTERN_DECL(type, name) +#endif // defined(__clang__) + +// See above comment at top of file for details. +#define ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) \ + ABSL_INTERNAL_EXTERN_DECL(type, name) \ + inline constexpr ::absl::internal::identity_t<type> name = init + +#else + +// See above comment at top of file for details. +// +// Note: +// identity_t is used here so that the const and name are in the +// appropriate place for pointer types, reference types, function pointer +// types, etc.. 
+#define ABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init) \ + template <class /*AbslInternalDummy*/ = void> \ + struct AbslInternalInlineVariableHolder##name { \ + static constexpr ::absl::internal::identity_t<var_type> kInstance = init; \ + }; \ + \ + template <class AbslInternalDummy> \ + constexpr ::absl::internal::identity_t<var_type> \ + AbslInternalInlineVariableHolder##name<AbslInternalDummy>::kInstance; \ + \ + static constexpr const ::absl::internal::identity_t<var_type>& \ + name = /* NOLINT */ \ + AbslInternalInlineVariableHolder##name<>::kInstance; \ + static_assert(sizeof(void (*)(decltype(name))) != 0, \ + "Silence unused variable warnings.") + +#endif // __cpp_inline_variables + +#endif // ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/inline_variable_testing.h b/third_party/abseil_cpp/absl/base/internal/inline_variable_testing.h new file mode 100644 index 000000000000..3856b9f80f20 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/inline_variable_testing.h @@ -0,0 +1,46 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INLINE_VARIABLE_TESTING_H_ +#define ABSL_BASE_INLINE_VARIABLE_TESTING_H_ + +#include "absl/base/internal/inline_variable.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace inline_variable_testing_internal { + +struct Foo { + int value = 5; +}; + +ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, inline_variable_foo, {}); +ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, other_inline_variable_foo, {}); + +ABSL_INTERNAL_INLINE_CONSTEXPR(int, inline_variable_int, 5); +ABSL_INTERNAL_INLINE_CONSTEXPR(int, other_inline_variable_int, 5); + +ABSL_INTERNAL_INLINE_CONSTEXPR(void(*)(), inline_variable_fun_ptr, nullptr); + +const Foo& get_foo_a(); +const Foo& get_foo_b(); + +const int& get_int_a(); +const int& get_int_b(); + +} // namespace inline_variable_testing_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INLINE_VARIABLE_TESTING_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/invoke.h b/third_party/abseil_cpp/absl/base/internal/invoke.h new file mode 100644 index 000000000000..c4eceebd7cda --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/invoke.h @@ -0,0 +1,187 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// absl::base_internal::Invoke(f, args...) is an implementation of +// INVOKE(f, args...) from section [func.require] of the C++ standard. 
+// +// [func.require] +// Define INVOKE (f, t1, t2, ..., tN) as follows: +// 1. (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T +// and t1 is an object of type T or a reference to an object of type T or a +// reference to an object of a type derived from T; +// 2. ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a +// class T and t1 is not one of the types described in the previous item; +// 3. t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is +// an object of type T or a reference to an object of type T or a reference +// to an object of a type derived from T; +// 4. (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1 +// is not one of the types described in the previous item; +// 5. f(t1, t2, ..., tN) in all other cases. +// +// The implementation is SFINAE-friendly: substitution failure within Invoke() +// isn't an error. + +#ifndef ABSL_BASE_INTERNAL_INVOKE_H_ +#define ABSL_BASE_INTERNAL_INVOKE_H_ + +#include <algorithm> +#include <type_traits> +#include <utility> + +#include "absl/meta/type_traits.h" + +// The following code is internal implementation detail. See the comment at the +// top of this file for the API documentation. + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// The five classes below each implement one of the clauses from the definition +// of INVOKE. The inner class template Accept<F, Args...> checks whether the +// clause is applicable; static function template Invoke(f, args...) does the +// invocation. +// +// By separating the clause selection logic from invocation we make sure that +// Invoke() does exactly what the standard says. + +template <typename Derived> +struct StrippedAccept { + template <typename... Args> + struct Accept : Derived::template AcceptImpl<typename std::remove_cv< + typename std::remove_reference<Args>::type>::type...> {}; +}; + +// (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T +// and t1 is an object of type T or a reference to an object of type T or a +// reference to an object of a type derived from T. +struct MemFunAndRef : StrippedAccept<MemFunAndRef> { + template <typename... Args> + struct AcceptImpl : std::false_type {}; + + template <typename MemFunType, typename C, typename Obj, typename... Args> + struct AcceptImpl<MemFunType C::*, Obj, Args...> + : std::integral_constant<bool, std::is_base_of<C, Obj>::value && + absl::is_function<MemFunType>::value> { + }; + + template <typename MemFun, typename Obj, typename... Args> + static decltype((std::declval<Obj>().* + std::declval<MemFun>())(std::declval<Args>()...)) + Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) { + return (std::forward<Obj>(obj).* + std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...); + } +}; + +// ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a +// class T and t1 is not one of the types described in the previous item. +struct MemFunAndPtr : StrippedAccept<MemFunAndPtr> { + template <typename... Args> + struct AcceptImpl : std::false_type {}; + + template <typename MemFunType, typename C, typename Ptr, typename... Args> + struct AcceptImpl<MemFunType C::*, Ptr, Args...> + : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value && + absl::is_function<MemFunType>::value> { + }; + + template <typename MemFun, typename Ptr, typename... Args> + static decltype(((*std::declval<Ptr>()).* + std::declval<MemFun>())(std::declval<Args>()...)) + Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... 
args) { + return ((*std::forward<Ptr>(ptr)).* + std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...); + } +}; + +// t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is +// an object of type T or a reference to an object of type T or a reference +// to an object of a type derived from T. +struct DataMemAndRef : StrippedAccept<DataMemAndRef> { + template <typename... Args> + struct AcceptImpl : std::false_type {}; + + template <typename R, typename C, typename Obj> + struct AcceptImpl<R C::*, Obj> + : std::integral_constant<bool, std::is_base_of<C, Obj>::value && + !absl::is_function<R>::value> {}; + + template <typename DataMem, typename Ref> + static decltype(std::declval<Ref>().*std::declval<DataMem>()) Invoke( + DataMem&& data_mem, Ref&& ref) { + return std::forward<Ref>(ref).*std::forward<DataMem>(data_mem); + } +}; + +// (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1 +// is not one of the types described in the previous item. +struct DataMemAndPtr : StrippedAccept<DataMemAndPtr> { + template <typename... Args> + struct AcceptImpl : std::false_type {}; + + template <typename R, typename C, typename Ptr> + struct AcceptImpl<R C::*, Ptr> + : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value && + !absl::is_function<R>::value> {}; + + template <typename DataMem, typename Ptr> + static decltype((*std::declval<Ptr>()).*std::declval<DataMem>()) Invoke( + DataMem&& data_mem, Ptr&& ptr) { + return (*std::forward<Ptr>(ptr)).*std::forward<DataMem>(data_mem); + } +}; + +// f(t1, t2, ..., tN) in all other cases. +struct Callable { + // Callable doesn't have Accept because it's the last clause that gets picked + // when none of the previous clauses are applicable. + template <typename F, typename... Args> + static decltype(std::declval<F>()(std::declval<Args>()...)) Invoke( + F&& f, Args&&... args) { + return std::forward<F>(f)(std::forward<Args>(args)...); + } +}; + +// Resolves to the first matching clause. +template <typename... Args> +struct Invoker { + typedef typename std::conditional< + MemFunAndRef::Accept<Args...>::value, MemFunAndRef, + typename std::conditional< + MemFunAndPtr::Accept<Args...>::value, MemFunAndPtr, + typename std::conditional< + DataMemAndRef::Accept<Args...>::value, DataMemAndRef, + typename std::conditional<DataMemAndPtr::Accept<Args...>::value, + DataMemAndPtr, Callable>::type>::type>:: + type>::type type; +}; + +// The result type of Invoke<F, Args...>. +template <typename F, typename... Args> +using InvokeT = decltype(Invoker<F, Args...>::type::Invoke( + std::declval<F>(), std::declval<Args>()...)); + +// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section +// [func.require] of the C++ standard. +template <typename F, typename... Args> +InvokeT<F, Args...> Invoke(F&& f, Args&&... args) { + return Invoker<F, Args...>::type::Invoke(std::forward<F>(f), + std::forward<Args>(args)...); +} +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_INVOKE_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/low_level_alloc.cc b/third_party/abseil_cpp/absl/base/internal/low_level_alloc.cc new file mode 100644 index 000000000000..1bf94438d6d0 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/low_level_alloc.cc @@ -0,0 +1,620 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A low-level allocator that can be used by other low-level +// modules without introducing dependency cycles. +// This allocator is slow and wasteful of memory; +// it should not be used when performance is key. + +#include "absl/base/internal/low_level_alloc.h" + +#include <type_traits> + +#include "absl/base/call_once.h" +#include "absl/base/config.h" +#include "absl/base/internal/direct_mmap.h" +#include "absl/base/internal/scheduling_mode.h" +#include "absl/base/macros.h" +#include "absl/base/thread_annotations.h" + +// LowLevelAlloc requires that the platform support low-level +// allocation of virtual memory. Platforms lacking this cannot use +// LowLevelAlloc. +#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING + +#ifndef _WIN32 +#include <pthread.h> +#include <signal.h> +#include <sys/mman.h> +#include <unistd.h> +#else +#include <windows.h> +#endif + +#include <string.h> +#include <algorithm> +#include <atomic> +#include <cerrno> +#include <cstddef> +#include <new> // for placement-new + +#include "absl/base/dynamic_annotations.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/internal/spinlock.h" + +// MAP_ANONYMOUS +#if defined(__APPLE__) +// For mmap, Linux defines both MAP_ANONYMOUS and MAP_ANON and says MAP_ANON is +// deprecated. In Darwin, MAP_ANON is all there is. +#if !defined MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif // !MAP_ANONYMOUS +#endif // __APPLE__ + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// A first-fit allocator with amortized logarithmic free() time. + +// --------------------------------------------------------------------------- +static const int kMaxLevel = 30; + +namespace { +// This struct describes one allocated block, or one free block. +struct AllocList { + struct Header { + // Size of entire region, including this field. Must be + // first. Valid in both allocated and unallocated blocks. + uintptr_t size; + + // kMagicAllocated or kMagicUnallocated xor this. + uintptr_t magic; + + // Pointer to parent arena. + LowLevelAlloc::Arena *arena; + + // Aligns regions to 0 mod 2*sizeof(void*). + void *dummy_for_alignment; + } header; + + // Next two fields: in unallocated blocks: freelist skiplist data + // in allocated blocks: overlaps with client data + + // Levels in skiplist used. + int levels; + + // Actually has levels elements. The AllocList node may not have room + // for all kMaxLevel entries. See max_fit in LLA_SkiplistLevels(). + AllocList *next[kMaxLevel]; +}; +} // namespace + +// --------------------------------------------------------------------------- +// A trivial skiplist implementation. This is used to keep the freelist +// in address order while taking only logarithmic time per insert and delete. + +// An integer approximation of log2(size/base) +// Requires size >= base. 
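+// For example, IntLog2(4096, 16) returns 8, since floor(4096 / 2**8) == 16.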
+static int IntLog2(size_t size, size_t base) { + int result = 0; + for (size_t i = size; i > base; i >>= 1) { // i == floor(size/2**result) + result++; + } + // floor(size / 2**result) <= base < floor(size / 2**(result-1)) + // => log2(size/(base+1)) <= result < 1+log2(size/base) + // => result ~= log2(size/base) + return result; +} + +// Return a random integer n: p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1. +static int Random(uint32_t *state) { + uint32_t r = *state; + int result = 1; + while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) { + result++; + } + *state = r; + return result; +} + +// Return a number of skiplist levels for a node of size bytes, where +// base is the minimum node size. Compute level=log2(size / base)+n +// where n is 1 if random is false and otherwise a random number generated with +// the standard distribution for a skiplist: See Random() above. +// Bigger nodes tend to have more skiplist levels due to the log2(size / base) +// term, so first-fit searches touch fewer nodes. "level" is clipped so +// level<kMaxLevel and next[level-1] will fit in the node. +// 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel +static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) { + // max_fit is the maximum number of levels that will fit in a node for the + // given size. We can't return more than max_fit, no matter what the + // random number generator says. + size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *); + int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1); + if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit); + if (level > kMaxLevel-1) level = kMaxLevel - 1; + ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level"); + return level; +} + +// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e. +// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater +// points to the last element at level i in the AllocList less than *e, or is +// head if no such element exists. +static AllocList *LLA_SkiplistSearch(AllocList *head, + AllocList *e, AllocList **prev) { + AllocList *p = head; + for (int level = head->levels - 1; level >= 0; level--) { + for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) { + } + prev[level] = p; + } + return (head->levels == 0) ? nullptr : prev[0]->next[0]; +} + +// Insert element *e into AllocList *head. Set prev[] as LLA_SkiplistSearch. +// Requires that e->levels be previously set by the caller (using +// LLA_SkiplistLevels()) +static void LLA_SkiplistInsert(AllocList *head, AllocList *e, + AllocList **prev) { + LLA_SkiplistSearch(head, e, prev); + for (; head->levels < e->levels; head->levels++) { // extend prev pointers + prev[head->levels] = head; // to all *e's levels + } + for (int i = 0; i != e->levels; i++) { // add element to list + e->next[i] = prev[i]->next[i]; + prev[i]->next[i] = e; + } +} + +// Remove element *e from AllocList *head. Set prev[] as LLA_SkiplistSearch(). 
+// Requires that e->levels be previous set by the caller (using +// LLA_SkiplistLevels()) +static void LLA_SkiplistDelete(AllocList *head, AllocList *e, + AllocList **prev) { + AllocList *found = LLA_SkiplistSearch(head, e, prev); + ABSL_RAW_CHECK(e == found, "element not in freelist"); + for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) { + prev[i]->next[i] = e->next[i]; + } + while (head->levels > 0 && head->next[head->levels - 1] == nullptr) { + head->levels--; // reduce head->levels if level unused + } +} + +// --------------------------------------------------------------------------- +// Arena implementation + +// Metadata for an LowLevelAlloc arena instance. +struct LowLevelAlloc::Arena { + // Constructs an arena with the given LowLevelAlloc flags. + explicit Arena(uint32_t flags_value); + + base_internal::SpinLock mu; + // Head of free list, sorted by address + AllocList freelist ABSL_GUARDED_BY(mu); + // Count of allocated blocks + int32_t allocation_count ABSL_GUARDED_BY(mu); + // flags passed to NewArena + const uint32_t flags; + // Result of sysconf(_SC_PAGESIZE) + const size_t pagesize; + // Lowest power of two >= max(16, sizeof(AllocList)) + const size_t round_up; + // Smallest allocation block size + const size_t min_size; + // PRNG state + uint32_t random ABSL_GUARDED_BY(mu); +}; + +namespace { +// Static storage space for the lazily-constructed, default global arena +// instances. We require this space because the whole point of LowLevelAlloc +// is to avoid relying on malloc/new. +alignas(LowLevelAlloc::Arena) unsigned char default_arena_storage[sizeof( + LowLevelAlloc::Arena)]; +alignas(LowLevelAlloc::Arena) unsigned char unhooked_arena_storage[sizeof( + LowLevelAlloc::Arena)]; +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING +alignas( + LowLevelAlloc::Arena) unsigned char unhooked_async_sig_safe_arena_storage + [sizeof(LowLevelAlloc::Arena)]; +#endif + +// We must use LowLevelCallOnce here to construct the global arenas, rather than +// using function-level statics, to avoid recursively invoking the scheduler. +absl::once_flag create_globals_once; + +void CreateGlobalArenas() { + new (&default_arena_storage) + LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook); + new (&unhooked_arena_storage) LowLevelAlloc::Arena(0); +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + new (&unhooked_async_sig_safe_arena_storage) + LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe); +#endif +} + +// Returns a global arena that does not call into hooks. Used by NewArena() +// when kCallMallocHook is not set. +LowLevelAlloc::Arena* UnhookedArena() { + base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas); + return reinterpret_cast<LowLevelAlloc::Arena*>(&unhooked_arena_storage); +} + +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING +// Returns a global arena that is async-signal safe. Used by NewArena() when +// kAsyncSignalSafe is set. +LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() { + base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas); + return reinterpret_cast<LowLevelAlloc::Arena *>( + &unhooked_async_sig_safe_arena_storage); +} +#endif + +} // namespace + +// Returns the default arena, as used by LowLevelAlloc::Alloc() and friends. 
+LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() { + base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas); + return reinterpret_cast<LowLevelAlloc::Arena*>(&default_arena_storage); +} + +// magic numbers to identify allocated and unallocated blocks +static const uintptr_t kMagicAllocated = 0x4c833e95U; +static const uintptr_t kMagicUnallocated = ~kMagicAllocated; + +namespace { +class ABSL_SCOPED_LOCKABLE ArenaLock { + public: + explicit ArenaLock(LowLevelAlloc::Arena *arena) + ABSL_EXCLUSIVE_LOCK_FUNCTION(arena->mu) + : arena_(arena) { +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) { + sigset_t all; + sigfillset(&all); + mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0; + } +#endif + arena_->mu.Lock(); + } + ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); } + void Leave() ABSL_UNLOCK_FUNCTION() { + arena_->mu.Unlock(); +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + if (mask_valid_) { + const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr); + if (err != 0) { + ABSL_RAW_LOG(FATAL, "pthread_sigmask failed: %d", err); + } + } +#endif + left_ = true; + } + + private: + bool left_ = false; // whether left region +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + bool mask_valid_ = false; + sigset_t mask_; // old mask of blocked signals +#endif + LowLevelAlloc::Arena *arena_; + ArenaLock(const ArenaLock &) = delete; + ArenaLock &operator=(const ArenaLock &) = delete; +}; +} // namespace + +// create an appropriate magic number for an object at "ptr" +// "magic" should be kMagicAllocated or kMagicUnallocated +inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) { + return magic ^ reinterpret_cast<uintptr_t>(ptr); +} + +namespace { +size_t GetPageSize() { +#ifdef _WIN32 + SYSTEM_INFO system_info; + GetSystemInfo(&system_info); + return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity); +#elif defined(__wasm__) || defined(__asmjs__) + return getpagesize(); +#else + return sysconf(_SC_PAGESIZE); +#endif +} + +size_t RoundedUpBlockSize() { + // Round up block sizes to a power of two close to the header size. 
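+  // (On a typical 64-bit build sizeof(AllocList::Header) is 32 bytes, so the
+  // loop below settles on 32.)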
+ size_t round_up = 16; + while (round_up < sizeof(AllocList::Header)) { + round_up += round_up; + } + return round_up; +} + +} // namespace + +LowLevelAlloc::Arena::Arena(uint32_t flags_value) + : mu(base_internal::SCHEDULE_KERNEL_ONLY), + allocation_count(0), + flags(flags_value), + pagesize(GetPageSize()), + round_up(RoundedUpBlockSize()), + min_size(2 * round_up), + random(0) { + freelist.header.size = 0; + freelist.header.magic = + Magic(kMagicUnallocated, &freelist.header); + freelist.header.arena = this; + freelist.levels = 0; + memset(freelist.next, 0, sizeof(freelist.next)); +} + +// L < meta_data_arena->mu +LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32_t flags) { + Arena *meta_data_arena = DefaultArena(); +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) { + meta_data_arena = UnhookedAsyncSigSafeArena(); + } else // NOLINT(readability/braces) +#endif + if ((flags & LowLevelAlloc::kCallMallocHook) == 0) { + meta_data_arena = UnhookedArena(); + } + Arena *result = + new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(flags); + return result; +} + +// L < arena->mu, L < arena->arena->mu +bool LowLevelAlloc::DeleteArena(Arena *arena) { + ABSL_RAW_CHECK( + arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(), + "may not delete default arena"); + ArenaLock section(arena); + if (arena->allocation_count != 0) { + section.Leave(); + return false; + } + while (arena->freelist.next[0] != nullptr) { + AllocList *region = arena->freelist.next[0]; + size_t size = region->header.size; + arena->freelist.next[0] = region->next[0]; + ABSL_RAW_CHECK( + region->header.magic == Magic(kMagicUnallocated, ®ion->header), + "bad magic number in DeleteArena()"); + ABSL_RAW_CHECK(region->header.arena == arena, + "bad arena pointer in DeleteArena()"); + ABSL_RAW_CHECK(size % arena->pagesize == 0, + "empty arena has non-page-aligned block size"); + ABSL_RAW_CHECK(reinterpret_cast<uintptr_t>(region) % arena->pagesize == 0, + "empty arena has non-page-aligned block"); + int munmap_result; +#ifdef _WIN32 + munmap_result = VirtualFree(region, 0, MEM_RELEASE); + ABSL_RAW_CHECK(munmap_result != 0, + "LowLevelAlloc::DeleteArena: VitualFree failed"); +#else +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) { + munmap_result = munmap(region, size); + } else { + munmap_result = base_internal::DirectMunmap(region, size); + } +#else + munmap_result = munmap(region, size); +#endif // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + if (munmap_result != 0) { + ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d", + errno); + } +#endif // _WIN32 + } + section.Leave(); + arena->~Arena(); + Free(arena); + return true; +} + +// --------------------------------------------------------------------------- + +// Addition, checking for overflow. The intent is to die if an external client +// manages to push through a request that would cause arithmetic to fail. +static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) { + uintptr_t sum = a + b; + ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow"); + return sum; +} + +// Return value rounded up to next multiple of align. +// align must be a power of two. 
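+// For example, RoundUp(39, 16) == 48 and RoundUp(48, 16) == 48.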
+static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) { + return CheckedAdd(addr, align - 1) & ~(align - 1); +} + +// Equivalent to "return prev->next[i]" but with sanity checking +// that the freelist is in the correct order, that it +// consists of regions marked "unallocated", and that no two regions +// are adjacent in memory (they should have been coalesced). +// L >= arena->mu +static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) { + ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()"); + AllocList *next = prev->next[i]; + if (next != nullptr) { + ABSL_RAW_CHECK( + next->header.magic == Magic(kMagicUnallocated, &next->header), + "bad magic number in Next()"); + ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()"); + if (prev != &arena->freelist) { + ABSL_RAW_CHECK(prev < next, "unordered freelist"); + ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size < + reinterpret_cast<char *>(next), + "malformed freelist"); + } + } + return next; +} + +// Coalesce list item "a" with its successor if they are adjacent. +static void Coalesce(AllocList *a) { + AllocList *n = a->next[0]; + if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size == + reinterpret_cast<char *>(n)) { + LowLevelAlloc::Arena *arena = a->header.arena; + a->header.size += n->header.size; + n->header.magic = 0; + n->header.arena = nullptr; + AllocList *prev[kMaxLevel]; + LLA_SkiplistDelete(&arena->freelist, n, prev); + LLA_SkiplistDelete(&arena->freelist, a, prev); + a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size, + &arena->random); + LLA_SkiplistInsert(&arena->freelist, a, prev); + } +} + +// Adds block at location "v" to the free list +// L >= arena->mu +static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) { + AllocList *f = reinterpret_cast<AllocList *>( + reinterpret_cast<char *>(v) - sizeof (f->header)); + ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header), + "bad magic number in AddToFreelist()"); + ABSL_RAW_CHECK(f->header.arena == arena, + "bad arena pointer in AddToFreelist()"); + f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size, + &arena->random); + AllocList *prev[kMaxLevel]; + LLA_SkiplistInsert(&arena->freelist, f, prev); + f->header.magic = Magic(kMagicUnallocated, &f->header); + Coalesce(f); // maybe coalesce with successor + Coalesce(prev[0]); // maybe coalesce with predecessor +} + +// Frees storage allocated by LowLevelAlloc::Alloc(). 
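+// The owning arena is recovered from the block header, so no arena argument
+// is needed.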
+// L < arena->mu +void LowLevelAlloc::Free(void *v) { + if (v != nullptr) { + AllocList *f = reinterpret_cast<AllocList *>( + reinterpret_cast<char *>(v) - sizeof (f->header)); + LowLevelAlloc::Arena *arena = f->header.arena; + ArenaLock section(arena); + AddToFreelist(v, arena); + ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free"); + arena->allocation_count--; + section.Leave(); + } +} + +// allocates and returns a block of size bytes, to be freed with Free() +// L < arena->mu +static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) { + void *result = nullptr; + if (request != 0) { + AllocList *s; // will point to region that satisfies request + ArenaLock section(arena); + // round up with header + size_t req_rnd = RoundUp(CheckedAdd(request, sizeof (s->header)), + arena->round_up); + for (;;) { // loop until we find a suitable region + // find the minimum levels that a block of this size must have + int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1; + if (i < arena->freelist.levels) { // potential blocks exist + AllocList *before = &arena->freelist; // predecessor of s + while ((s = Next(i, before, arena)) != nullptr && + s->header.size < req_rnd) { + before = s; + } + if (s != nullptr) { // we found a region + break; + } + } + // we unlock before mmap() both because mmap() may call a callback hook, + // and because it may be slow. + arena->mu.Unlock(); + // mmap generous 64K chunks to decrease + // the chances/impact of fragmentation: + size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16); + void *new_pages; +#ifdef _WIN32 + new_pages = VirtualAlloc(0, new_pages_size, + MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); + ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed"); +#else +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) { + new_pages = base_internal::DirectMmap(nullptr, new_pages_size, + PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); + } else { + new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + } +#else + new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); +#endif // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + if (new_pages == MAP_FAILED) { + ABSL_RAW_LOG(FATAL, "mmap error: %d", errno); + } + +#endif // _WIN32 + arena->mu.Lock(); + s = reinterpret_cast<AllocList *>(new_pages); + s->header.size = new_pages_size; + // Pretend the block is allocated; call AddToFreelist() to free it. 
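+      // (AddToFreelist() verifies the kMagicAllocated marker, re-marks the
+      // block as unallocated, and coalesces it with adjacent free blocks.)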
+ s->header.magic = Magic(kMagicAllocated, &s->header); + s->header.arena = arena; + AddToFreelist(&s->levels, arena); // insert new region into free list + } + AllocList *prev[kMaxLevel]; + LLA_SkiplistDelete(&arena->freelist, s, prev); // remove from free list + // s points to the first free region that's big enough + if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) { + // big enough to split + AllocList *n = reinterpret_cast<AllocList *> + (req_rnd + reinterpret_cast<char *>(s)); + n->header.size = s->header.size - req_rnd; + n->header.magic = Magic(kMagicAllocated, &n->header); + n->header.arena = arena; + s->header.size = req_rnd; + AddToFreelist(&n->levels, arena); + } + s->header.magic = Magic(kMagicAllocated, &s->header); + ABSL_RAW_CHECK(s->header.arena == arena, ""); + arena->allocation_count++; + section.Leave(); + result = &s->levels; + } + ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request); + return result; +} + +void *LowLevelAlloc::Alloc(size_t request) { + void *result = DoAllocWithArena(request, DefaultArena()); + return result; +} + +void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) { + ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena"); + void *result = DoAllocWithArena(request, arena); + return result; +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_LOW_LEVEL_ALLOC_MISSING diff --git a/third_party/abseil_cpp/absl/base/internal/low_level_alloc.h b/third_party/abseil_cpp/absl/base/internal/low_level_alloc.h new file mode 100644 index 000000000000..db91951c825f --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/low_level_alloc.h @@ -0,0 +1,126 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_ +#define ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_ + +// A simple thread-safe memory allocator that does not depend on +// mutexes or thread-specific data. It is intended to be used +// sparingly, and only when malloc() would introduce an unwanted +// dependency, such as inside the heap-checker, or the Mutex +// implementation. + +// IWYU pragma: private, include "base/low_level_alloc.h" + +#include <sys/types.h> + +#include <cstdint> + +#include "absl/base/attributes.h" +#include "absl/base/config.h" + +// LowLevelAlloc requires that the platform support low-level +// allocation of virtual memory. Platforms lacking this cannot use +// LowLevelAlloc. +#ifdef ABSL_LOW_LEVEL_ALLOC_MISSING +#error ABSL_LOW_LEVEL_ALLOC_MISSING cannot be directly set +#elif !defined(ABSL_HAVE_MMAP) && !defined(_WIN32) +#define ABSL_LOW_LEVEL_ALLOC_MISSING 1 +#endif + +// Using LowLevelAlloc with kAsyncSignalSafe isn't supported on Windows or +// asm.js / WebAssembly. +// See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html +// for more information. 
+#ifdef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING +#error ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING cannot be directly set +#elif defined(_WIN32) || defined(__asmjs__) || defined(__wasm__) +#define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1 +#endif + +#include <cstddef> + +#include "absl/base/port.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +class LowLevelAlloc { + public: + struct Arena; // an arena from which memory may be allocated + + // Returns a pointer to a block of at least "request" bytes + // that have been newly allocated from the specific arena. + // for Alloc() call the DefaultArena() is used. + // Returns 0 if passed request==0. + // Does not return 0 under other circumstances; it crashes if memory + // is not available. + static void *Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook); + static void *AllocWithArena(size_t request, Arena *arena) + ABSL_ATTRIBUTE_SECTION(malloc_hook); + + // Deallocates a region of memory that was previously allocated with + // Alloc(). Does nothing if passed 0. "s" must be either 0, + // or must have been returned from a call to Alloc() and not yet passed to + // Free() since that call to Alloc(). The space is returned to the arena + // from which it was allocated. + static void Free(void *s) ABSL_ATTRIBUTE_SECTION(malloc_hook); + + // ABSL_ATTRIBUTE_SECTION(malloc_hook) for Alloc* and Free + // are to put all callers of MallocHook::Invoke* in this module + // into special section, + // so that MallocHook::GetCallerStackTrace can function accurately. + + // Create a new arena. + // The root metadata for the new arena is allocated in the + // meta_data_arena; the DefaultArena() can be passed for meta_data_arena. + // These values may be ored into flags: + enum { + // Report calls to Alloc() and Free() via the MallocHook interface. + // Set in the DefaultArena. + kCallMallocHook = 0x0001, + +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + // Make calls to Alloc(), Free() be async-signal-safe. Not set in + // DefaultArena(). Not supported on all platforms. + kAsyncSignalSafe = 0x0002, +#endif + }; + // Construct a new arena. The allocation of the underlying metadata honors + // the provided flags. For example, the call NewArena(kAsyncSignalSafe) + // is itself async-signal-safe, as well as generatating an arena that provides + // async-signal-safe Alloc/Free. + static Arena *NewArena(int32_t flags); + + // Destroys an arena allocated by NewArena and returns true, + // provided no allocated blocks remain in the arena. + // If allocated blocks remain in the arena, does nothing and + // returns false. + // It is illegal to attempt to destroy the DefaultArena(). + static bool DeleteArena(Arena *arena); + + // The default arena that always exists. + static Arena *DefaultArena(); + + private: + LowLevelAlloc(); // no instances +}; + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/low_level_alloc_test.cc b/third_party/abseil_cpp/absl/base/internal/low_level_alloc_test.cc new file mode 100644 index 000000000000..2f2eaffa032e --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/low_level_alloc_test.cc @@ -0,0 +1,162 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/low_level_alloc.h" + +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <thread> // NOLINT(build/c++11) +#include <unordered_map> +#include <utility> + +#include "absl/container/node_hash_map.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { +namespace { + +// This test doesn't use gtest since it needs to test that everything +// works before main(). +#define TEST_ASSERT(x) \ + if (!(x)) { \ + printf("TEST_ASSERT(%s) FAILED ON LINE %d\n", #x, __LINE__); \ + abort(); \ + } + +// a block of memory obtained from the allocator +struct BlockDesc { + char *ptr; // pointer to memory + int len; // number of bytes + int fill; // filled with data starting with this +}; + +// Check that the pattern placed in the block d +// by RandomizeBlockDesc is still there. +static void CheckBlockDesc(const BlockDesc &d) { + for (int i = 0; i != d.len; i++) { + TEST_ASSERT((d.ptr[i] & 0xff) == ((d.fill + i) & 0xff)); + } +} + +// Fill the block "*d" with a pattern +// starting with a random byte. +static void RandomizeBlockDesc(BlockDesc *d) { + d->fill = rand() & 0xff; + for (int i = 0; i != d->len; i++) { + d->ptr[i] = (d->fill + i) & 0xff; + } +} + +// Use to indicate to the malloc hooks that +// this calls is from LowLevelAlloc. +static bool using_low_level_alloc = false; + +// n times, toss a coin, and based on the outcome +// either allocate a new block or deallocate an old block. +// New blocks are placed in a std::unordered_map with a random key +// and initialized with RandomizeBlockDesc(). +// If keys conflict, the older block is freed. +// Old blocks are always checked with CheckBlockDesc() +// before being freed. At the end of the run, +// all remaining allocated blocks are freed. +// If use_new_arena is true, use a fresh arena, and then delete it. +// If call_malloc_hook is true and user_arena is true, +// allocations and deallocations are reported via the MallocHook +// interface. +static void Test(bool use_new_arena, bool call_malloc_hook, int n) { + typedef absl::node_hash_map<int, BlockDesc> AllocMap; + AllocMap allocated; + AllocMap::iterator it; + BlockDesc block_desc; + int rnd; + LowLevelAlloc::Arena *arena = 0; + if (use_new_arena) { + int32_t flags = call_malloc_hook ? LowLevelAlloc::kCallMallocHook : 0; + arena = LowLevelAlloc::NewArena(flags); + } + for (int i = 0; i != n; i++) { + if (i != 0 && i % 10000 == 0) { + printf("."); + fflush(stdout); + } + + switch (rand() & 1) { // toss a coin + case 0: // coin came up heads: add a block + using_low_level_alloc = true; + block_desc.len = rand() & 0x3fff; + block_desc.ptr = + reinterpret_cast<char *>( + arena == 0 + ? 
LowLevelAlloc::Alloc(block_desc.len) + : LowLevelAlloc::AllocWithArena(block_desc.len, arena)); + using_low_level_alloc = false; + RandomizeBlockDesc(&block_desc); + rnd = rand(); + it = allocated.find(rnd); + if (it != allocated.end()) { + CheckBlockDesc(it->second); + using_low_level_alloc = true; + LowLevelAlloc::Free(it->second.ptr); + using_low_level_alloc = false; + it->second = block_desc; + } else { + allocated[rnd] = block_desc; + } + break; + case 1: // coin came up tails: remove a block + it = allocated.begin(); + if (it != allocated.end()) { + CheckBlockDesc(it->second); + using_low_level_alloc = true; + LowLevelAlloc::Free(it->second.ptr); + using_low_level_alloc = false; + allocated.erase(it); + } + break; + } + } + // remove all remaining blocks + while ((it = allocated.begin()) != allocated.end()) { + CheckBlockDesc(it->second); + using_low_level_alloc = true; + LowLevelAlloc::Free(it->second.ptr); + using_low_level_alloc = false; + allocated.erase(it); + } + if (use_new_arena) { + TEST_ASSERT(LowLevelAlloc::DeleteArena(arena)); + } +} + +// LowLevelAlloc is designed to be safe to call before main(). +static struct BeforeMain { + BeforeMain() { + Test(false, false, 50000); + Test(true, false, 50000); + Test(true, true, 50000); + } +} before_main; + +} // namespace +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +int main(int argc, char *argv[]) { + // The actual test runs in the global constructor of `before_main`. + printf("PASS\n"); + return 0; +} diff --git a/third_party/abseil_cpp/absl/base/internal/low_level_scheduling.h b/third_party/abseil_cpp/absl/base/internal/low_level_scheduling.h new file mode 100644 index 000000000000..961cc981b864 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/low_level_scheduling.h @@ -0,0 +1,107 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Core interfaces and definitions used by by low-level interfaces such as +// SpinLock. + +#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_ +#define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_ + +#include "absl/base/internal/scheduling_mode.h" +#include "absl/base/macros.h" + +// The following two declarations exist so SchedulingGuard may friend them with +// the appropriate language linkage. These callbacks allow libc internals, such +// as function level statics, to schedule cooperatively when locking. +extern "C" bool __google_disable_rescheduling(void); +extern "C" void __google_enable_rescheduling(bool disable_result); + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +class SchedulingHelper; // To allow use of SchedulingGuard. +class SpinLock; // To allow use of SchedulingGuard. + +// SchedulingGuard +// Provides guard semantics that may be used to disable cooperative rescheduling +// of the calling thread within specific program blocks. This is used to +// protect resources (e.g. low-level SpinLocks or Domain code) that cooperative +// scheduling depends on. 
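+//
+// Illustrative use by one of the friended classes (a sketch only; the method
+// name is hypothetical):
+//
+//   void SpinLock::SlowLock() {
+//     SchedulingGuard::ScopedDisable disable;  // rescheduling disabled here
+//     // ... spin / park the thread ...
+//   }  // rescheduling re-enabled when `disable` goes out of scope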
+// +// Domain implementations capable of rescheduling in reaction to involuntary +// kernel thread actions (e.g blocking due to a pagefault or syscall) must +// guarantee that an annotated thread is not allowed to (cooperatively) +// reschedule until the annotated region is complete. +// +// It is an error to attempt to use a cooperatively scheduled resource (e.g. +// Mutex) within a rescheduling-disabled region. +// +// All methods are async-signal safe. +class SchedulingGuard { + public: + // Returns true iff the calling thread may be cooperatively rescheduled. + static bool ReschedulingIsAllowed(); + + private: + // Disable cooperative rescheduling of the calling thread. It may still + // initiate scheduling operations (e.g. wake-ups), however, it may not itself + // reschedule. Nestable. The returned result is opaque, clients should not + // attempt to interpret it. + // REQUIRES: Result must be passed to a pairing EnableScheduling(). + static bool DisableRescheduling(); + + // Marks the end of a rescheduling disabled region, previously started by + // DisableRescheduling(). + // REQUIRES: Pairs with innermost call (and result) of DisableRescheduling(). + static void EnableRescheduling(bool disable_result); + + // A scoped helper for {Disable, Enable}Rescheduling(). + // REQUIRES: destructor must run in same thread as constructor. + struct ScopedDisable { + ScopedDisable() { disabled = SchedulingGuard::DisableRescheduling(); } + ~ScopedDisable() { SchedulingGuard::EnableRescheduling(disabled); } + + bool disabled; + }; + + // Access to SchedulingGuard is explicitly white-listed. + friend class SchedulingHelper; + friend class SpinLock; + + SchedulingGuard(const SchedulingGuard&) = delete; + SchedulingGuard& operator=(const SchedulingGuard&) = delete; +}; + +//------------------------------------------------------------------------------ +// End of public interfaces. +//------------------------------------------------------------------------------ + +inline bool SchedulingGuard::ReschedulingIsAllowed() { + return false; +} + +inline bool SchedulingGuard::DisableRescheduling() { + return false; +} + +inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) { + return; +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/per_thread_tls.h b/third_party/abseil_cpp/absl/base/internal/per_thread_tls.h new file mode 100644 index 000000000000..cf5e97a0470e --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/per_thread_tls.h @@ -0,0 +1,52 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_ +#define ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_ + +// This header defines two macros: +// +// If the platform supports thread-local storage: +// +// * ABSL_PER_THREAD_TLS_KEYWORD is the C keyword needed to declare a +// thread-local variable +// * ABSL_PER_THREAD_TLS is 1 +// +// Otherwise: +// +// * ABSL_PER_THREAD_TLS_KEYWORD is empty +// * ABSL_PER_THREAD_TLS is 0 +// +// Microsoft C supports thread-local storage. +// GCC supports it if the appropriate version of glibc is available, +// which the programmer can indicate by defining ABSL_HAVE_TLS + +#include "absl/base/port.h" // For ABSL_HAVE_TLS + +#if defined(ABSL_PER_THREAD_TLS) +#error ABSL_PER_THREAD_TLS cannot be directly set +#elif defined(ABSL_PER_THREAD_TLS_KEYWORD) +#error ABSL_PER_THREAD_TLS_KEYWORD cannot be directly set +#elif defined(ABSL_HAVE_TLS) +#define ABSL_PER_THREAD_TLS_KEYWORD __thread +#define ABSL_PER_THREAD_TLS 1 +#elif defined(_MSC_VER) +#define ABSL_PER_THREAD_TLS_KEYWORD __declspec(thread) +#define ABSL_PER_THREAD_TLS 1 +#else +#define ABSL_PER_THREAD_TLS_KEYWORD +#define ABSL_PER_THREAD_TLS 0 +#endif + +#endif // ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/periodic_sampler.cc b/third_party/abseil_cpp/absl/base/internal/periodic_sampler.cc new file mode 100644 index 000000000000..520dabbaa006 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/periodic_sampler.cc @@ -0,0 +1,53 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/periodic_sampler.h" + +#include <atomic> + +#include "absl/base/internal/exponential_biased.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +int64_t PeriodicSamplerBase::GetExponentialBiased(int period) noexcept { + return rng_.GetStride(period); +} + +bool PeriodicSamplerBase::SubtleConfirmSample() noexcept { + int current_period = period(); + + // Deal with period case 0 (always off) and 1 (always on) + if (ABSL_PREDICT_FALSE(current_period < 2)) { + stride_ = 0; + return current_period == 1; + } + + // Check if this is the first call to Sample() + if (ABSL_PREDICT_FALSE(stride_ == 1)) { + stride_ = static_cast<uint64_t>(-GetExponentialBiased(current_period)); + if (static_cast<int64_t>(stride_) < -1) { + ++stride_; + return false; + } + } + + stride_ = static_cast<uint64_t>(-GetExponentialBiased(current_period)); + return true; +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/internal/periodic_sampler.h b/third_party/abseil_cpp/absl/base/internal/periodic_sampler.h new file mode 100644 index 000000000000..f8a86796b117 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/periodic_sampler.h @@ -0,0 +1,211 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_PERIODIC_SAMPLER_H_ +#define ABSL_BASE_INTERNAL_PERIODIC_SAMPLER_H_ + +#include <stdint.h> + +#include <atomic> + +#include "absl/base/internal/exponential_biased.h" +#include "absl/base/optimization.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// PeriodicSamplerBase provides the basic period sampler implementation. +// +// This is the base class for the templated PeriodicSampler class, which holds +// a global std::atomic value identified by a user defined tag, such that +// each specific PeriodSampler implementation holds its own global period. +// +// PeriodicSamplerBase is thread-compatible except where stated otherwise. +class PeriodicSamplerBase { + public: + // PeriodicSamplerBase is trivial / copyable / movable / destructible. + PeriodicSamplerBase() = default; + PeriodicSamplerBase(PeriodicSamplerBase&&) = default; + PeriodicSamplerBase(const PeriodicSamplerBase&) = default; + + // Returns true roughly once every `period` calls. This is established by a + // randomly picked `stride` that is counted down on each call to `Sample`. + // This stride is picked such that the probability of `Sample()` returning + // true is 1 in `period`. + inline bool Sample() noexcept; + + // The below methods are intended for optimized use cases where the + // size of the inlined fast path code is highly important. Applications + // should use the `Sample()` method unless they have proof that their + // specific use case requires the optimizations offered by these methods. + // + // An example of such a use case is SwissTable sampling. All sampling checks + // are in inlined SwissTable methods, and the number of call sites is huge. + // In this case, the inlined code size added to each translation unit calling + // SwissTable methods is non-trivial. + // + // The `SubtleMaybeSample()` function spuriously returns true even if the + // function should not be sampled, applications MUST match each call to + // 'SubtleMaybeSample()' returning true with a `SubtleConfirmSample()` call, + // and use the result of the latter as the sampling decision. + // In other words: the code should logically be equivalent to: + // + // if (SubtleMaybeSample() && SubtleConfirmSample()) { + // // Sample this call + // } + // + // In the 'inline-size' optimized case, the `SubtleConfirmSample()` call can + // be placed out of line, for example, the typical use case looks as follows: + // + // // --- frobber.h ----------- + // void FrobberSampled(); + // + // inline void FrobberImpl() { + // // ... + // } + // + // inline void Frobber() { + // if (ABSL_PREDICT_FALSE(sampler.SubtleMaybeSample())) { + // FrobberSampled(); + // } else { + // FrobberImpl(); + // } + // } + // + // // --- frobber.cc ----------- + // void FrobberSampled() { + // if (!sampler.SubtleConfirmSample())) { + // // Spurious false positive + // FrobberImpl(); + // return; + // } + // + // // Sampled execution + // // ... 
+ // } + inline bool SubtleMaybeSample() noexcept; + bool SubtleConfirmSample() noexcept; + + protected: + // We explicitly don't use a virtual destructor as this class is never + // virtually destroyed, and it keeps the class trivial, which avoids TLS + // prologue and epilogue code for our TLS instances. + ~PeriodicSamplerBase() = default; + + // Returns the next stride for our sampler. + // This function is virtual for testing purposes only. + virtual int64_t GetExponentialBiased(int period) noexcept; + + private: + // Returns the current period of this sampler. Thread-safe. + virtual int period() const noexcept = 0; + + // Keep and decrement stride_ as an unsigned integer, but compare the value + // to zero casted as a signed int. clang and msvc do not create optimum code + // if we use signed for the combined decrement and sign comparison. + // + // Below 3 alternative options, all compiles generate the best code + // using the unsigned increment <---> signed int comparison option. + // + // Option 1: + // int64_t stride_; + // if (ABSL_PREDICT_TRUE(++stride_ < 0)) { ... } + // + // GCC x64 (OK) : https://gcc.godbolt.org/z/R5MzzA + // GCC ppc (OK) : https://gcc.godbolt.org/z/z7NZAt + // Clang x64 (BAD): https://gcc.godbolt.org/z/t4gPsd + // ICC x64 (OK) : https://gcc.godbolt.org/z/rE6s8W + // MSVC x64 (OK) : https://gcc.godbolt.org/z/ARMXqS + // + // Option 2: + // int64_t stride_ = 0; + // if (ABSL_PREDICT_TRUE(--stride_ >= 0)) { ... } + // + // GCC x64 (OK) : https://gcc.godbolt.org/z/jSQxYK + // GCC ppc (OK) : https://gcc.godbolt.org/z/VJdYaA + // Clang x64 (BAD): https://gcc.godbolt.org/z/Xm4NjX + // ICC x64 (OK) : https://gcc.godbolt.org/z/4snaFd + // MSVC x64 (BAD): https://gcc.godbolt.org/z/BgnEKE + // + // Option 3: + // uint64_t stride_; + // if (ABSL_PREDICT_TRUE(static_cast<int64_t>(++stride_) < 0)) { ... } + // + // GCC x64 (OK) : https://gcc.godbolt.org/z/bFbfPy + // GCC ppc (OK) : https://gcc.godbolt.org/z/S9KkUE + // Clang x64 (OK) : https://gcc.godbolt.org/z/UYzRb4 + // ICC x64 (OK) : https://gcc.godbolt.org/z/ptTNfD + // MSVC x64 (OK) : https://gcc.godbolt.org/z/76j4-5 + uint64_t stride_ = 0; + ExponentialBiased rng_; +}; + +inline bool PeriodicSamplerBase::SubtleMaybeSample() noexcept { + // See comments on `stride_` for the unsigned increment / signed compare. + if (ABSL_PREDICT_TRUE(static_cast<int64_t>(++stride_) < 0)) { + return false; + } + return true; +} + +inline bool PeriodicSamplerBase::Sample() noexcept { + return ABSL_PREDICT_FALSE(SubtleMaybeSample()) ? SubtleConfirmSample() + : false; +} + +// PeriodicSampler is a concreted periodic sampler implementation. +// The user provided Tag identifies the implementation, and is required to +// isolate the global state of this instance from other instances. +// +// Typical use case: +// +// struct HashTablezTag {}; +// thread_local PeriodicSampler sampler; +// +// void HashTableSamplingLogic(...) { +// if (sampler.Sample()) { +// HashTableSlowSamplePath(...); +// } +// } +// +template <typename Tag, int default_period = 0> +class PeriodicSampler final : public PeriodicSamplerBase { + public: + ~PeriodicSampler() = default; + + int period() const noexcept final { + return period_.load(std::memory_order_relaxed); + } + + // Sets the global period for this sampler. Thread-safe. + // Setting a period of 0 disables the sampler, i.e., every call to Sample() + // will return false. Setting a period of 1 puts the sampler in 'always on' + // mode, i.e., every call to Sample() returns true. 
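+  //
+  // For example (with a hypothetical tag):
+  //
+  //   struct HashTablezTag {};
+  //   PeriodicSampler<HashTablezTag, 128>::SetGlobalPeriod(1);  // always on
+  //
+  // The new period applies to every instance of the same
+  // PeriodicSampler<Tag, default_period> specialization.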
+ static void SetGlobalPeriod(int period) { + period_.store(period, std::memory_order_relaxed); + } + + private: + static std::atomic<int> period_; +}; + +template <typename Tag, int default_period> +std::atomic<int> PeriodicSampler<Tag, default_period>::period_(default_period); + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_PERIODIC_SAMPLER_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/periodic_sampler_benchmark.cc b/third_party/abseil_cpp/absl/base/internal/periodic_sampler_benchmark.cc new file mode 100644 index 000000000000..5ad469ce79c2 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/periodic_sampler_benchmark.cc @@ -0,0 +1,79 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "benchmark/benchmark.h" +#include "absl/base/internal/periodic_sampler.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { +namespace { + +template <typename Sampler> +void BM_Sample(Sampler* sampler, benchmark::State& state) { + for (auto _ : state) { + benchmark::DoNotOptimize(sampler); + benchmark::DoNotOptimize(sampler->Sample()); + } +} + +template <typename Sampler> +void BM_SampleMinunumInlined(Sampler* sampler, benchmark::State& state) { + for (auto _ : state) { + benchmark::DoNotOptimize(sampler); + if (ABSL_PREDICT_FALSE(sampler->SubtleMaybeSample())) { + benchmark::DoNotOptimize(sampler->SubtleConfirmSample()); + } + } +} + +void BM_PeriodicSampler_TinySample(benchmark::State& state) { + struct Tag {}; + PeriodicSampler<Tag, 10> sampler; + BM_Sample(&sampler, state); +} +BENCHMARK(BM_PeriodicSampler_TinySample); + +void BM_PeriodicSampler_ShortSample(benchmark::State& state) { + struct Tag {}; + PeriodicSampler<Tag, 1024> sampler; + BM_Sample(&sampler, state); +} +BENCHMARK(BM_PeriodicSampler_ShortSample); + +void BM_PeriodicSampler_LongSample(benchmark::State& state) { + struct Tag {}; + PeriodicSampler<Tag, 1024 * 1024> sampler; + BM_Sample(&sampler, state); +} +BENCHMARK(BM_PeriodicSampler_LongSample); + +void BM_PeriodicSampler_LongSampleMinunumInlined(benchmark::State& state) { + struct Tag {}; + PeriodicSampler<Tag, 1024 * 1024> sampler; + BM_SampleMinunumInlined(&sampler, state); +} +BENCHMARK(BM_PeriodicSampler_LongSampleMinunumInlined); + +void BM_PeriodicSampler_Disabled(benchmark::State& state) { + struct Tag {}; + PeriodicSampler<Tag, 0> sampler; + BM_Sample(&sampler, state); +} +BENCHMARK(BM_PeriodicSampler_Disabled); + +} // namespace +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/internal/periodic_sampler_test.cc b/third_party/abseil_cpp/absl/base/internal/periodic_sampler_test.cc new file mode 100644 index 000000000000..3b301e37ab98 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/periodic_sampler_test.cc @@ -0,0 +1,177 @@ +// Copyright 2019 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/periodic_sampler.h" + +#include <thread> // NOLINT(build/c++11) + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/macros.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { +namespace { + +using testing::Eq; +using testing::Return; +using testing::StrictMock; + +class MockPeriodicSampler : public PeriodicSamplerBase { + public: + virtual ~MockPeriodicSampler() = default; + + MOCK_METHOD(int, period, (), (const, noexcept)); + MOCK_METHOD(int64_t, GetExponentialBiased, (int), (noexcept)); +}; + +TEST(PeriodicSamplerBaseTest, Sample) { + StrictMock<MockPeriodicSampler> sampler; + + EXPECT_CALL(sampler, period()).Times(3).WillRepeatedly(Return(16)); + EXPECT_CALL(sampler, GetExponentialBiased(16)) + .WillOnce(Return(2)) + .WillOnce(Return(3)) + .WillOnce(Return(4)); + + EXPECT_FALSE(sampler.Sample()); + EXPECT_TRUE(sampler.Sample()); + + EXPECT_FALSE(sampler.Sample()); + EXPECT_FALSE(sampler.Sample()); + EXPECT_TRUE(sampler.Sample()); + + EXPECT_FALSE(sampler.Sample()); + EXPECT_FALSE(sampler.Sample()); + EXPECT_FALSE(sampler.Sample()); +} + +TEST(PeriodicSamplerBaseTest, ImmediatelySample) { + StrictMock<MockPeriodicSampler> sampler; + + EXPECT_CALL(sampler, period()).Times(2).WillRepeatedly(Return(16)); + EXPECT_CALL(sampler, GetExponentialBiased(16)) + .WillOnce(Return(1)) + .WillOnce(Return(2)) + .WillOnce(Return(3)); + + EXPECT_TRUE(sampler.Sample()); + + EXPECT_FALSE(sampler.Sample()); + EXPECT_TRUE(sampler.Sample()); + + EXPECT_FALSE(sampler.Sample()); + EXPECT_FALSE(sampler.Sample()); +} + +TEST(PeriodicSamplerBaseTest, Disabled) { + StrictMock<MockPeriodicSampler> sampler; + + EXPECT_CALL(sampler, period()).Times(3).WillRepeatedly(Return(0)); + + EXPECT_FALSE(sampler.Sample()); + EXPECT_FALSE(sampler.Sample()); + EXPECT_FALSE(sampler.Sample()); +} + +TEST(PeriodicSamplerBaseTest, AlwaysOn) { + StrictMock<MockPeriodicSampler> sampler; + + EXPECT_CALL(sampler, period()).Times(3).WillRepeatedly(Return(1)); + + EXPECT_TRUE(sampler.Sample()); + EXPECT_TRUE(sampler.Sample()); + EXPECT_TRUE(sampler.Sample()); +} + +TEST(PeriodicSamplerBaseTest, Disable) { + StrictMock<MockPeriodicSampler> sampler; + + EXPECT_CALL(sampler, period()).WillOnce(Return(16)); + EXPECT_CALL(sampler, GetExponentialBiased(16)).WillOnce(Return(3)); + EXPECT_FALSE(sampler.Sample()); + EXPECT_FALSE(sampler.Sample()); + + EXPECT_CALL(sampler, period()).Times(2).WillRepeatedly(Return(0)); + + EXPECT_FALSE(sampler.Sample()); + EXPECT_FALSE(sampler.Sample()); +} + +TEST(PeriodicSamplerBaseTest, Enable) { + StrictMock<MockPeriodicSampler> sampler; + + EXPECT_CALL(sampler, period()).WillOnce(Return(0)); + EXPECT_FALSE(sampler.Sample()); + + EXPECT_CALL(sampler, period()).Times(2).WillRepeatedly(Return(16)); + EXPECT_CALL(sampler, GetExponentialBiased(16)) + .Times(2) + .WillRepeatedly(Return(3)); + + EXPECT_FALSE(sampler.Sample()); + 
EXPECT_FALSE(sampler.Sample()); + EXPECT_TRUE(sampler.Sample()); + + EXPECT_FALSE(sampler.Sample()); + EXPECT_FALSE(sampler.Sample()); +} + +TEST(PeriodicSamplerTest, ConstructConstInit) { + struct Tag {}; + ABSL_CONST_INIT static PeriodicSampler<Tag> sampler; + (void)sampler; +} + +TEST(PeriodicSamplerTest, DefaultPeriod0) { + struct Tag {}; + PeriodicSampler<Tag> sampler; + EXPECT_THAT(sampler.period(), Eq(0)); +} + +TEST(PeriodicSamplerTest, DefaultPeriod) { + struct Tag {}; + PeriodicSampler<Tag, 100> sampler; + EXPECT_THAT(sampler.period(), Eq(100)); +} + +TEST(PeriodicSamplerTest, SetGlobalPeriod) { + struct Tag1 {}; + struct Tag2 {}; + PeriodicSampler<Tag1, 25> sampler1; + PeriodicSampler<Tag2, 50> sampler2; + + EXPECT_THAT(sampler1.period(), Eq(25)); + EXPECT_THAT(sampler2.period(), Eq(50)); + + std::thread thread([] { + PeriodicSampler<Tag1, 25> sampler1; + PeriodicSampler<Tag2, 50> sampler2; + EXPECT_THAT(sampler1.period(), Eq(25)); + EXPECT_THAT(sampler2.period(), Eq(50)); + sampler1.SetGlobalPeriod(10); + sampler2.SetGlobalPeriod(20); + }); + thread.join(); + + EXPECT_THAT(sampler1.period(), Eq(10)); + EXPECT_THAT(sampler2.period(), Eq(20)); +} + +} // namespace +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/internal/pretty_function.h b/third_party/abseil_cpp/absl/base/internal/pretty_function.h new file mode 100644 index 000000000000..35d51676dc2e --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/pretty_function.h @@ -0,0 +1,33 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_ +#define ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_ + +// ABSL_PRETTY_FUNCTION +// +// In C++11, __func__ gives the undecorated name of the current function. That +// is, "main", not "int main()". Various compilers give extra macros to get the +// decorated function name, including return type and arguments, to +// differentiate between overload sets. ABSL_PRETTY_FUNCTION is a portable +// version of these macros which forwards to the correct macro on each compiler. +#if defined(_MSC_VER) +#define ABSL_PRETTY_FUNCTION __FUNCSIG__ +#elif defined(__GNUC__) +#define ABSL_PRETTY_FUNCTION __PRETTY_FUNCTION__ +#else +#error "Unsupported compiler" +#endif + +#endif // ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/raw_logging.cc b/third_party/abseil_cpp/absl/base/internal/raw_logging.cc new file mode 100644 index 000000000000..40cea5506172 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/raw_logging.cc @@ -0,0 +1,240 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/raw_logging.h" + +#include <stddef.h> +#include <cstdarg> +#include <cstdio> +#include <cstdlib> +#include <cstring> + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/internal/atomic_hook.h" +#include "absl/base/log_severity.h" + +// We know how to perform low-level writes to stderr in POSIX and Windows. For +// these platforms, we define the token ABSL_LOW_LEVEL_WRITE_SUPPORTED. +// Much of raw_logging.cc becomes a no-op when we can't output messages, +// although a FATAL ABSL_RAW_LOG message will still abort the process. + +// ABSL_HAVE_POSIX_WRITE is defined when the platform provides posix write() +// (as from unistd.h) +// +// This preprocessor token is also defined in raw_io.cc. If you need to copy +// this, consider moving both to config.h instead. +#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ + defined(__Fuchsia__) || defined(__native_client__) || \ + defined(__EMSCRIPTEN__) || defined(__ASYLO__) + +#include <unistd.h> + +#define ABSL_HAVE_POSIX_WRITE 1 +#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1 +#else +#undef ABSL_HAVE_POSIX_WRITE +#endif + +// ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall +// syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len); +// for low level operations that want to avoid libc. +#if (defined(__linux__) || defined(__FreeBSD__)) && !defined(__ANDROID__) +#include <sys/syscall.h> +#define ABSL_HAVE_SYSCALL_WRITE 1 +#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1 +#else +#undef ABSL_HAVE_SYSCALL_WRITE +#endif + +#ifdef _WIN32 +#include <io.h> + +#define ABSL_HAVE_RAW_IO 1 +#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1 +#else +#undef ABSL_HAVE_RAW_IO +#endif + +// TODO(gfalcon): We want raw-logging to work on as many platforms as possible. +// Explicitly #error out when not ABSL_LOW_LEVEL_WRITE_SUPPORTED, except for a +// whitelisted set of platforms for which we expect not to be able to raw log. + +ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook< + absl::raw_logging_internal::LogPrefixHook> + log_prefix_hook; +ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook< + absl::raw_logging_internal::AbortHook> + abort_hook; + +#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED +static const char kTruncated[] = " ... (message truncated)\n"; + +// sprintf the format to the buffer, adjusting *buf and *size to reflect the +// consumed bytes, and return whether the message fit without truncation. If +// truncation occurred, if possible leave room in the buffer for the message +// kTruncated[]. 
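+//
+// For example, the intended call pattern (a sketch mirroring RawLogVA below;
+// `format` and `ap` stand for the caller's printf-style arguments):
+//
+//   char buffer[kLogBufSize];
+//   char* buf = buffer;
+//   int size = sizeof(buffer);
+//   if (!VADoRawLog(&buf, &size, format, ap)) {
+//     // Truncation occurred; if possible, room was left to append kTruncated.
+//   }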
+inline static bool VADoRawLog(char** buf, int* size, const char* format, + va_list ap) ABSL_PRINTF_ATTRIBUTE(3, 0); +inline static bool VADoRawLog(char** buf, int* size, + const char* format, va_list ap) { + int n = vsnprintf(*buf, *size, format, ap); + bool result = true; + if (n < 0 || n > *size) { + result = false; + if (static_cast<size_t>(*size) > sizeof(kTruncated)) { + n = *size - sizeof(kTruncated); // room for truncation message + } else { + n = 0; // no room for truncation message + } + } + *size -= n; + *buf += n; + return result; +} +#endif // ABSL_LOW_LEVEL_WRITE_SUPPORTED + +static constexpr int kLogBufSize = 3000; + +namespace { + +// CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths +// that invoke malloc() and getenv() that might acquire some locks. + +// Helper for RawLog below. +// *DoRawLog writes to *buf of *size and move them past the written portion. +// It returns true iff there was no overflow or error. +bool DoRawLog(char** buf, int* size, const char* format, ...) + ABSL_PRINTF_ATTRIBUTE(3, 4); +bool DoRawLog(char** buf, int* size, const char* format, ...) { + va_list ap; + va_start(ap, format); + int n = vsnprintf(*buf, *size, format, ap); + va_end(ap); + if (n < 0 || n > *size) return false; + *size -= n; + *buf += n; + return true; +} + +void RawLogVA(absl::LogSeverity severity, const char* file, int line, + const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(4, 0); +void RawLogVA(absl::LogSeverity severity, const char* file, int line, + const char* format, va_list ap) { + char buffer[kLogBufSize]; + char* buf = buffer; + int size = sizeof(buffer); +#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED + bool enabled = true; +#else + bool enabled = false; +#endif + +#ifdef ABSL_MIN_LOG_LEVEL + if (severity < static_cast<absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) && + severity < absl::LogSeverity::kFatal) { + enabled = false; + } +#endif + + auto log_prefix_hook_ptr = log_prefix_hook.Load(); + if (log_prefix_hook_ptr) { + enabled = log_prefix_hook_ptr(severity, file, line, &buf, &size); + } else { + if (enabled) { + DoRawLog(&buf, &size, "[%s : %d] RAW: ", file, line); + } + } + const char* const prefix_end = buf; + +#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED + if (enabled) { + bool no_chop = VADoRawLog(&buf, &size, format, ap); + if (no_chop) { + DoRawLog(&buf, &size, "\n"); + } else { + DoRawLog(&buf, &size, "%s", kTruncated); + } + absl::raw_logging_internal::SafeWriteToStderr(buffer, strlen(buffer)); + } +#else + static_cast<void>(format); + static_cast<void>(ap); +#endif + + // Abort the process after logging a FATAL message, even if the output itself + // was suppressed. + if (severity == absl::LogSeverity::kFatal) { + abort_hook(file, line, buffer, prefix_end, buffer + kLogBufSize); + abort(); + } +} + +} // namespace + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace raw_logging_internal { +void SafeWriteToStderr(const char *s, size_t len) { +#if defined(ABSL_HAVE_SYSCALL_WRITE) + syscall(SYS_write, STDERR_FILENO, s, len); +#elif defined(ABSL_HAVE_POSIX_WRITE) + write(STDERR_FILENO, s, len); +#elif defined(ABSL_HAVE_RAW_IO) + _write(/* stderr */ 2, s, len); +#else + // stderr logging unsupported on this platform + (void) s; + (void) len; +#endif +} + +void RawLog(absl::LogSeverity severity, const char* file, int line, + const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5); +void RawLog(absl::LogSeverity severity, const char* file, int line, + const char* format, ...) 
{ + va_list ap; + va_start(ap, format); + RawLogVA(severity, file, line, format, ap); + va_end(ap); +} + +// Non-formatting version of RawLog(). +// +// TODO(gfalcon): When string_view no longer depends on base, change this +// interface to take its message as a string_view instead. +static void DefaultInternalLog(absl::LogSeverity severity, const char* file, + int line, const std::string& message) { + RawLog(severity, file, line, "%s", message.c_str()); +} + +bool RawLoggingFullySupported() { +#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED + return true; +#else // !ABSL_LOW_LEVEL_WRITE_SUPPORTED + return false; +#endif // !ABSL_LOW_LEVEL_WRITE_SUPPORTED +} + +ABSL_DLL ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES + absl::base_internal::AtomicHook<InternalLogFunction> + internal_log_function(DefaultInternalLog); + +void RegisterInternalLogFunction(InternalLogFunction func) { + internal_log_function.Store(func); +} + +} // namespace raw_logging_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/internal/raw_logging.h b/third_party/abseil_cpp/absl/base/internal/raw_logging.h new file mode 100644 index 000000000000..418d6c856feb --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/raw_logging.h @@ -0,0 +1,183 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Thread-safe logging routines that do not allocate any memory or +// acquire any locks, and can therefore be used by low-level memory +// allocation, synchronization, and signal-handling code. + +#ifndef ABSL_BASE_INTERNAL_RAW_LOGGING_H_ +#define ABSL_BASE_INTERNAL_RAW_LOGGING_H_ + +#include <string> + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/internal/atomic_hook.h" +#include "absl/base/log_severity.h" +#include "absl/base/macros.h" +#include "absl/base/optimization.h" +#include "absl/base/port.h" + +// This is similar to LOG(severity) << format..., but +// * it is to be used ONLY by low-level modules that can't use normal LOG() +// * it is designed to be a low-level logger that does not allocate any +// memory and does not need any locks, hence: +// * it logs straight and ONLY to STDERR w/o buffering +// * it uses an explicit printf-format and arguments list +// * it will silently chop off really long message strings +// Usage example: +// ABSL_RAW_LOG(ERROR, "Failed foo with %i: %s", status, error); +// This will print an almost standard log line like this to stderr only: +// E0821 211317 file.cc:123] RAW: Failed foo with 22: bad_file + +#define ABSL_RAW_LOG(severity, ...) 
\ + do { \ + constexpr const char* absl_raw_logging_internal_basename = \ + ::absl::raw_logging_internal::Basename(__FILE__, \ + sizeof(__FILE__) - 1); \ + ::absl::raw_logging_internal::RawLog(ABSL_RAW_LOGGING_INTERNAL_##severity, \ + absl_raw_logging_internal_basename, \ + __LINE__, __VA_ARGS__); \ + } while (0) + +// Similar to CHECK(condition) << message, but for low-level modules: +// we use only ABSL_RAW_LOG that does not allocate memory. +// We do not want to provide args list here to encourage this usage: +// if (!cond) ABSL_RAW_LOG(FATAL, "foo ...", hard_to_compute_args); +// so that the args are not computed when not needed. +#define ABSL_RAW_CHECK(condition, message) \ + do { \ + if (ABSL_PREDICT_FALSE(!(condition))) { \ + ABSL_RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \ + } \ + } while (0) + +// ABSL_INTERNAL_LOG and ABSL_INTERNAL_CHECK work like the RAW variants above, +// except that if the richer log library is linked into the binary, we dispatch +// to that instead. This is potentially useful for internal logging and +// assertions, where we are using RAW_LOG neither for its async-signal-safety +// nor for its non-allocating nature, but rather because raw logging has very +// few other dependencies. +// +// The API is a subset of the above: each macro only takes two arguments. Use +// StrCat if you need to build a richer message. +#define ABSL_INTERNAL_LOG(severity, message) \ + do { \ + ::absl::raw_logging_internal::internal_log_function( \ + ABSL_RAW_LOGGING_INTERNAL_##severity, __FILE__, __LINE__, message); \ + } while (0) + +#define ABSL_INTERNAL_CHECK(condition, message) \ + do { \ + if (ABSL_PREDICT_FALSE(!(condition))) { \ + std::string death_message = "Check " #condition " failed: "; \ + death_message += std::string(message); \ + ABSL_INTERNAL_LOG(FATAL, death_message); \ + } \ + } while (0) + +#define ABSL_RAW_LOGGING_INTERNAL_INFO ::absl::LogSeverity::kInfo +#define ABSL_RAW_LOGGING_INTERNAL_WARNING ::absl::LogSeverity::kWarning +#define ABSL_RAW_LOGGING_INTERNAL_ERROR ::absl::LogSeverity::kError +#define ABSL_RAW_LOGGING_INTERNAL_FATAL ::absl::LogSeverity::kFatal +#define ABSL_RAW_LOGGING_INTERNAL_LEVEL(severity) \ + ::absl::NormalizeLogSeverity(severity) + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace raw_logging_internal { + +// Helper function to implement ABSL_RAW_LOG +// Logs format... at "severity" level, reporting it +// as called from file:line. +// This does not allocate memory or acquire locks. +void RawLog(absl::LogSeverity severity, const char* file, int line, + const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5); + +// Writes the provided buffer directly to stderr, in a safe, low-level manner. +// +// In POSIX this means calling write(), which is async-signal safe and does +// not malloc. If the platform supports the SYS_write syscall, we invoke that +// directly to side-step any libc interception. +void SafeWriteToStderr(const char *s, size_t len); + +// compile-time function to get the "base" filename, that is, the part of +// a filename after the last "/" or "\" path separator. The search starts at +// the end of the string; the second parameter is the length of the string. +constexpr const char* Basename(const char* fname, int offset) { + return offset == 0 || fname[offset - 1] == '/' || fname[offset - 1] == '\\' + ? fname + offset + : Basename(fname, offset - 1); +} + +// For testing only. +// Returns true if raw logging is fully supported. 
When it is not +// fully supported, no messages will be emitted, but a log at FATAL +// severity will cause an abort. +// +// TODO(gfalcon): Come up with a better name for this method. +bool RawLoggingFullySupported(); + +// Function type for a raw_logging customization hook for suppressing messages +// by severity, and for writing custom prefixes on non-suppressed messages. +// +// The installed hook is called for every raw log invocation. The message will +// be logged to stderr only if the hook returns true. FATAL errors will cause +// the process to abort, even if writing to stderr is suppressed. The hook is +// also provided with an output buffer, where it can write a custom log message +// prefix. +// +// The raw_logging system does not allocate memory or grab locks. User-provided +// hooks must avoid these operations, and must not throw exceptions. +// +// 'severity' is the severity level of the message being written. +// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro +// was located. +// 'buffer' and 'buf_size' are pointers to the buffer and buffer size. If the +// hook writes a prefix, it must increment *buffer and decrement *buf_size +// accordingly. +using LogPrefixHook = bool (*)(absl::LogSeverity severity, const char* file, + int line, char** buffer, int* buf_size); + +// Function type for a raw_logging customization hook called to abort a process +// when a FATAL message is logged. If the provided AbortHook() returns, the +// logging system will call abort(). +// +// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro +// was located. +// The NUL-terminated logged message lives in the buffer between 'buf_start' +// and 'buf_end'. 'prefix_end' points to the first non-prefix character of the +// buffer (as written by the LogPrefixHook.) +using AbortHook = void (*)(const char* file, int line, const char* buf_start, + const char* prefix_end, const char* buf_end); + +// Internal logging function for ABSL_INTERNAL_LOG to dispatch to. +// +// TODO(gfalcon): When string_view no longer depends on base, change this +// interface to take its message as a string_view instead. +using InternalLogFunction = void (*)(absl::LogSeverity severity, + const char* file, int line, + const std::string& message); + +ABSL_DLL ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES extern base_internal::AtomicHook< + InternalLogFunction> + internal_log_function; + +void RegisterInternalLogFunction(InternalLogFunction func); + +} // namespace raw_logging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_RAW_LOGGING_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/scheduling_mode.h b/third_party/abseil_cpp/absl/base/internal/scheduling_mode.h new file mode 100644 index 000000000000..8be5ab6dd3c0 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/scheduling_mode.h @@ -0,0 +1,58 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+// Core interfaces and definitions used by low-level interfaces such as
+// SpinLock.
+
+#ifndef ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+#define ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Used to describe how a thread may be scheduled. Typically associated with
+// the declaration of a resource supporting synchronized access.
+//
+// SCHEDULE_COOPERATIVE_AND_KERNEL:
+// Specifies that when waiting, a cooperative thread (e.g. a Fiber) may
+// reschedule (using base::scheduling semantics), allowing other cooperative
+// threads to proceed.
+//
+// SCHEDULE_KERNEL_ONLY: (Also described as "non-cooperative")
+// Specifies that no cooperative scheduling semantics may be used, even if the
+// current thread is itself cooperatively scheduled. This means that
+// cooperative threads will NOT allow other cooperative threads to execute in
+// their place while waiting for a resource of this type. Host operating system
+// semantics (e.g. a futex) may still be used.
+//
+// When optional, clients should strongly prefer SCHEDULE_COOPERATIVE_AND_KERNEL
+// by default. SCHEDULE_KERNEL_ONLY should only be used for resources on which
+// base::scheduling (e.g. the implementation of a Scheduler) may depend.
+//
+// NOTE: Cooperative resources may not be nested below non-cooperative ones.
+// This means that it is invalid to acquire a SCHEDULE_COOPERATIVE_AND_KERNEL
+// resource if a SCHEDULE_KERNEL_ONLY resource is already held.
+enum SchedulingMode {
+  SCHEDULE_KERNEL_ONLY = 0, // Allow scheduling only by the host OS.
+  SCHEDULE_COOPERATIVE_AND_KERNEL, // Also allow cooperative scheduling.
+};
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/scoped_set_env.cc b/third_party/abseil_cpp/absl/base/internal/scoped_set_env.cc
new file mode 100644
index 000000000000..8a934cb51191
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/scoped_set_env.cc
@@ -0,0 +1,81 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
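+//
+// ScopedSetEnv (declared in scoped_set_env.h) sets an environment variable
+// for the lifetime of a scope and restores the previous value, or the unset
+// state, when it is destroyed. A minimal usage sketch (the variable name is
+// illustrative):
+//
+//   {
+//     absl::base_internal::ScopedSetEnv set_env("MY_TEST_VAR", "value");
+//     // getenv("MY_TEST_VAR") now returns "value".
+//   }
+//   // The prior value of MY_TEST_VAR (or its absence) is restored here.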
+ +#include "absl/base/internal/scoped_set_env.h" + +#ifdef _WIN32 +#include <windows.h> +#endif + +#include <cstdlib> + +#include "absl/base/internal/raw_logging.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +namespace { + +#ifdef _WIN32 +const int kMaxEnvVarValueSize = 1024; +#endif + +void SetEnvVar(const char* name, const char* value) { +#ifdef _WIN32 + SetEnvironmentVariableA(name, value); +#else + if (value == nullptr) { + ::unsetenv(name); + } else { + ::setenv(name, value, 1); + } +#endif +} + +} // namespace + +ScopedSetEnv::ScopedSetEnv(const char* var_name, const char* new_value) + : var_name_(var_name), was_unset_(false) { +#ifdef _WIN32 + char buf[kMaxEnvVarValueSize]; + auto get_res = GetEnvironmentVariableA(var_name_.c_str(), buf, sizeof(buf)); + ABSL_INTERNAL_CHECK(get_res < sizeof(buf), "value exceeds buffer size"); + + if (get_res == 0) { + was_unset_ = (GetLastError() == ERROR_ENVVAR_NOT_FOUND); + } else { + old_value_.assign(buf, get_res); + } + + SetEnvironmentVariableA(var_name_.c_str(), new_value); +#else + const char* val = ::getenv(var_name_.c_str()); + if (val == nullptr) { + was_unset_ = true; + } else { + old_value_ = val; + } +#endif + + SetEnvVar(var_name_.c_str(), new_value); +} + +ScopedSetEnv::~ScopedSetEnv() { + SetEnvVar(var_name_.c_str(), was_unset_ ? nullptr : old_value_.c_str()); +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/internal/scoped_set_env.h b/third_party/abseil_cpp/absl/base/internal/scoped_set_env.h new file mode 100644 index 000000000000..19ec7b5d8a04 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/scoped_set_env.h @@ -0,0 +1,45 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_ +#define ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_ + +#include <string> + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +class ScopedSetEnv { + public: + ScopedSetEnv(const char* var_name, const char* new_value); + ~ScopedSetEnv(); + + private: + std::string var_name_; + std::string old_value_; + + // True if the environment variable was initially not set. + bool was_unset_; +}; + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/scoped_set_env_test.cc b/third_party/abseil_cpp/absl/base/internal/scoped_set_env_test.cc new file mode 100644 index 000000000000..5cbad246c647 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/scoped_set_env_test.cc @@ -0,0 +1,99 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifdef _WIN32 +#include <windows.h> +#endif + +#include "gtest/gtest.h" +#include "absl/base/internal/scoped_set_env.h" + +namespace { + +using absl::base_internal::ScopedSetEnv; + +std::string GetEnvVar(const char* name) { +#ifdef _WIN32 + char buf[1024]; + auto get_res = GetEnvironmentVariableA(name, buf, sizeof(buf)); + if (get_res >= sizeof(buf)) { + return "TOO_BIG"; + } + + if (get_res == 0) { + return "UNSET"; + } + + return std::string(buf, get_res); +#else + const char* val = ::getenv(name); + if (val == nullptr) { + return "UNSET"; + } + + return val; +#endif +} + +TEST(ScopedSetEnvTest, SetNonExistingVarToString) { + EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET"); + + { + ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value"); + + EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value"); + } + + EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET"); +} + +TEST(ScopedSetEnvTest, SetNonExistingVarToNull) { + EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET"); + + { + ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", nullptr); + + EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET"); + } + + EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET"); +} + +TEST(ScopedSetEnvTest, SetExistingVarToString) { + ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value"); + EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value"); + + { + ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "new_value"); + + EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "new_value"); + } + + EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value"); +} + +TEST(ScopedSetEnvTest, SetExistingVarToNull) { + ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value"); + EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value"); + + { + ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", nullptr); + + EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET"); + } + + EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value"); +} + +} // namespace diff --git a/third_party/abseil_cpp/absl/base/internal/spinlock.cc b/third_party/abseil_cpp/absl/base/internal/spinlock.cc new file mode 100644 index 000000000000..a7d44f3eb091 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/spinlock.cc @@ -0,0 +1,220 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "absl/base/internal/spinlock.h" + +#include <algorithm> +#include <atomic> +#include <limits> + +#include "absl/base/attributes.h" +#include "absl/base/internal/atomic_hook.h" +#include "absl/base/internal/cycleclock.h" +#include "absl/base/internal/spinlock_wait.h" +#include "absl/base/internal/sysinfo.h" /* For NumCPUs() */ +#include "absl/base/call_once.h" + +// Description of lock-word: +// 31..00: [............................3][2][1][0] +// +// [0]: kSpinLockHeld +// [1]: kSpinLockCooperative +// [2]: kSpinLockDisabledScheduling +// [31..3]: ONLY kSpinLockSleeper OR +// Wait time in cycles >> PROFILE_TIMESTAMP_SHIFT +// +// Detailed descriptions: +// +// Bit [0]: The lock is considered held iff kSpinLockHeld is set. +// +// Bit [1]: Eligible waiters (e.g. Fibers) may co-operatively reschedule when +// contended iff kSpinLockCooperative is set. +// +// Bit [2]: This bit is exclusive from bit [1]. It is used only by a +// non-cooperative lock. When set, indicates that scheduling was +// successfully disabled when the lock was acquired. May be unset, +// even if non-cooperative, if a ThreadIdentity did not yet exist at +// time of acquisition. +// +// Bit [3]: If this is the only upper bit ([31..3]) set then this lock was +// acquired without contention, however, at least one waiter exists. +// +// Otherwise, bits [31..3] represent the time spent by the current lock +// holder to acquire the lock. There may be outstanding waiter(s). + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static base_internal::AtomicHook<void (*)( + const void *lock, int64_t wait_cycles)> + submit_profile_data; + +void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock, + int64_t wait_cycles)) { + submit_profile_data.Store(fn); +} + +// Static member variable definitions. +constexpr uint32_t SpinLock::kSpinLockHeld; +constexpr uint32_t SpinLock::kSpinLockCooperative; +constexpr uint32_t SpinLock::kSpinLockDisabledScheduling; +constexpr uint32_t SpinLock::kSpinLockSleeper; +constexpr uint32_t SpinLock::kWaitTimeMask; + +// Uncommon constructors. +SpinLock::SpinLock(base_internal::SchedulingMode mode) + : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) { + ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static); +} + +// Monitor the lock to see if its value changes within some time period +// (adaptive_spin_count loop iterations). The last value read from the lock +// is returned from the method. +uint32_t SpinLock::SpinLoop() { + // We are already in the slow path of SpinLock, initialize the + // adaptive_spin_count here. + ABSL_CONST_INIT static absl::once_flag init_adaptive_spin_count; + ABSL_CONST_INIT static int adaptive_spin_count = 0; + base_internal::LowLevelCallOnce(&init_adaptive_spin_count, []() { + adaptive_spin_count = base_internal::NumCPUs() > 1 ? 
1000 : 1; + }); + + int c = adaptive_spin_count; + uint32_t lock_value; + do { + lock_value = lockword_.load(std::memory_order_relaxed); + } while ((lock_value & kSpinLockHeld) != 0 && --c > 0); + return lock_value; +} + +void SpinLock::SlowLock() { + uint32_t lock_value = SpinLoop(); + lock_value = TryLockInternal(lock_value, 0); + if ((lock_value & kSpinLockHeld) == 0) { + return; + } + + base_internal::SchedulingMode scheduling_mode; + if ((lock_value & kSpinLockCooperative) != 0) { + scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL; + } else { + scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY; + } + + // The lock was not obtained initially, so this thread needs to wait for + // it. Record the current timestamp in the local variable wait_start_time + // so the total wait time can be stored in the lockword once this thread + // obtains the lock. + int64_t wait_start_time = CycleClock::Now(); + uint32_t wait_cycles = 0; + int lock_wait_call_count = 0; + while ((lock_value & kSpinLockHeld) != 0) { + // If the lock is currently held, but not marked as having a sleeper, mark + // it as having a sleeper. + if ((lock_value & kWaitTimeMask) == 0) { + // Here, just "mark" that the thread is going to sleep. Don't store the + // lock wait time in the lock as that will cause the current lock + // owner to think it experienced contention. + if (lockword_.compare_exchange_strong( + lock_value, lock_value | kSpinLockSleeper, + std::memory_order_relaxed, std::memory_order_relaxed)) { + // Successfully transitioned to kSpinLockSleeper. Pass + // kSpinLockSleeper to the SpinLockWait routine to properly indicate + // the last lock_value observed. + lock_value |= kSpinLockSleeper; + } else if ((lock_value & kSpinLockHeld) == 0) { + // Lock is free again, so try and acquire it before sleeping. The + // new lock state will be the number of cycles this thread waited if + // this thread obtains the lock. + lock_value = TryLockInternal(lock_value, wait_cycles); + continue; // Skip the delay at the end of the loop. + } + } + + // SpinLockDelay() calls into fiber scheduler, we need to see + // synchronization there to avoid false positives. + ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0); + // Wait for an OS specific delay. + base_internal::SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count, + scheduling_mode); + ABSL_TSAN_MUTEX_POST_DIVERT(this, 0); + // Spin again after returning from the wait routine to give this thread + // some chance of obtaining the lock. + lock_value = SpinLoop(); + wait_cycles = EncodeWaitCycles(wait_start_time, CycleClock::Now()); + lock_value = TryLockInternal(lock_value, wait_cycles); + } +} + +void SpinLock::SlowUnlock(uint32_t lock_value) { + base_internal::SpinLockWake(&lockword_, + false); // wake waiter if necessary + + // If our acquisition was contended, collect contentionz profile info. We + // reserve a unitary wait time to represent that a waiter exists without our + // own acquisition having been contended. + if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) { + const uint64_t wait_cycles = DecodeWaitCycles(lock_value); + ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0); + submit_profile_data(this, wait_cycles); + ABSL_TSAN_MUTEX_POST_DIVERT(this, 0); + } +} + +// We use the upper 29 bits of the lock word to store the time spent waiting to +// acquire this lock. This is reported by contentionz profiling. 
Since the +// lower bits of the cycle counter wrap very quickly on high-frequency +// processors we divide to reduce the granularity to 2^kProfileTimestampShift +// sized units. On a 4Ghz machine this will lose track of wait times greater +// than (2^29/4 Ghz)*128 =~ 17.2 seconds. Such waits should be extremely rare. +static constexpr int kProfileTimestampShift = 7; + +// We currently reserve the lower 3 bits. +static constexpr int kLockwordReservedShift = 3; + +uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time, + int64_t wait_end_time) { + static const int64_t kMaxWaitTime = + std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift; + int64_t scaled_wait_time = + (wait_end_time - wait_start_time) >> kProfileTimestampShift; + + // Return a representation of the time spent waiting that can be stored in + // the lock word's upper bits. + uint32_t clamped = static_cast<uint32_t>( + std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift); + + if (clamped == 0) { + return kSpinLockSleeper; // Just wake waiters, but don't record contention. + } + // Bump up value if necessary to avoid returning kSpinLockSleeper. + const uint32_t kMinWaitTime = + kSpinLockSleeper + (1 << kLockwordReservedShift); + if (clamped == kSpinLockSleeper) { + return kMinWaitTime; + } + return clamped; +} + +uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) { + // Cast to uint32_t first to ensure bits [63:32] are cleared. + const uint64_t scaled_wait_time = + static_cast<uint32_t>(lock_value & kWaitTimeMask); + return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift); +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/internal/spinlock.h b/third_party/abseil_cpp/absl/base/internal/spinlock.h new file mode 100644 index 000000000000..2222398b16b4 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/spinlock.h @@ -0,0 +1,230 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Most users requiring mutual exclusion should use Mutex. +// SpinLock is provided for use in three situations: +// - for use in code that Mutex itself depends on +// - to get a faster fast-path release under low contention (without an +// atomic read-modify-write) In return, SpinLock has worse behaviour under +// contention, which is why Mutex is preferred in most situations. +// - for async signal safety (see below) + +// SpinLock is async signal safe. If a spinlock is used within a signal +// handler, all code that acquires the lock must ensure that the signal cannot +// arrive while they are holding the lock. Typically, this is done by blocking +// the signal. 
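+//
+// A minimal usage sketch for code that cannot use Mutex (the variable and
+// function names below are illustrative):
+//
+//   ABSL_CONST_INIT static absl::base_internal::SpinLock counters_lock(
+//       absl::kConstInit, absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
+//   static int64_t counter ABSL_GUARDED_BY(counters_lock) = 0;
+//
+//   void IncrementCounter() {
+//     absl::base_internal::SpinLockHolder l(&counters_lock);
+//     ++counter;
+//   }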
+ +#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_ +#define ABSL_BASE_INTERNAL_SPINLOCK_H_ + +#include <stdint.h> +#include <sys/types.h> + +#include <atomic> + +#include "absl/base/attributes.h" +#include "absl/base/const_init.h" +#include "absl/base/dynamic_annotations.h" +#include "absl/base/internal/low_level_scheduling.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/internal/scheduling_mode.h" +#include "absl/base/internal/tsan_mutex_interface.h" +#include "absl/base/macros.h" +#include "absl/base/port.h" +#include "absl/base/thread_annotations.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +class ABSL_LOCKABLE SpinLock { + public: + SpinLock() : lockword_(kSpinLockCooperative) { + ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static); + } + + // Constructors that allow non-cooperative spinlocks to be created for use + // inside thread schedulers. Normal clients should not use these. + explicit SpinLock(base_internal::SchedulingMode mode); + + // Constructor for global SpinLock instances. See absl/base/const_init.h. + constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode) + : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {} + + ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); } + + // Acquire this SpinLock. + inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { + ABSL_TSAN_MUTEX_PRE_LOCK(this, 0); + if (!TryLockImpl()) { + SlowLock(); + } + ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0); + } + + // Try to acquire this SpinLock without blocking and return true if the + // acquisition was successful. If the lock was not acquired, false is + // returned. If this SpinLock is free at the time of the call, TryLock + // will return true with high probability. + inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) { + ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock); + bool res = TryLockImpl(); + ABSL_TSAN_MUTEX_POST_LOCK( + this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed), + 0); + return res; + } + + // Release this SpinLock, which must be held by the calling thread. + inline void Unlock() ABSL_UNLOCK_FUNCTION() { + ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0); + uint32_t lock_value = lockword_.load(std::memory_order_relaxed); + lock_value = lockword_.exchange(lock_value & kSpinLockCooperative, + std::memory_order_release); + + if ((lock_value & kSpinLockDisabledScheduling) != 0) { + base_internal::SchedulingGuard::EnableRescheduling(true); + } + if ((lock_value & kWaitTimeMask) != 0) { + // Collect contentionz profile info, and speed the wakeup of any waiter. + // The wait_cycles value indicates how long this thread spent waiting + // for the lock. + SlowUnlock(lock_value); + } + ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0); + } + + // Determine if the lock is held. When the lock is held by the invoking + // thread, true will always be returned. Intended to be used as + // CHECK(lock.IsHeld()). + inline bool IsHeld() const { + return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0; + } + + protected: + // These should not be exported except for testing. + + // Store number of cycles between wait_start_time and wait_end_time in a + // lock value. + static uint32_t EncodeWaitCycles(int64_t wait_start_time, + int64_t wait_end_time); + + // Extract number of wait cycles in a lock value. + static uint64_t DecodeWaitCycles(uint32_t lock_value); + + // Provide access to protected method above. Use for testing only. 
+ friend struct SpinLockTest; + + private: + // lockword_ is used to store the following: + // + // bit[0] encodes whether a lock is being held. + // bit[1] encodes whether a lock uses cooperative scheduling. + // bit[2] encodes whether a lock disables scheduling. + // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int. + static constexpr uint32_t kSpinLockHeld = 1; + static constexpr uint32_t kSpinLockCooperative = 2; + static constexpr uint32_t kSpinLockDisabledScheduling = 4; + static constexpr uint32_t kSpinLockSleeper = 8; + // Includes kSpinLockSleeper. + static constexpr uint32_t kWaitTimeMask = + ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling); + + // Returns true if the provided scheduling mode is cooperative. + static constexpr bool IsCooperative( + base_internal::SchedulingMode scheduling_mode) { + return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL; + } + + uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles); + void SlowLock() ABSL_ATTRIBUTE_COLD; + void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD; + uint32_t SpinLoop(); + + inline bool TryLockImpl() { + uint32_t lock_value = lockword_.load(std::memory_order_relaxed); + return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0; + } + + std::atomic<uint32_t> lockword_; + + SpinLock(const SpinLock&) = delete; + SpinLock& operator=(const SpinLock&) = delete; +}; + +// Corresponding locker object that arranges to acquire a spinlock for +// the duration of a C++ scope. +class ABSL_SCOPED_LOCKABLE SpinLockHolder { + public: + inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l) + : lock_(l) { + l->Lock(); + } + inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); } + + SpinLockHolder(const SpinLockHolder&) = delete; + SpinLockHolder& operator=(const SpinLockHolder&) = delete; + + private: + SpinLock* lock_; +}; + +// Register a hook for profiling support. +// +// The function pointer registered here will be called whenever a spinlock is +// contended. The callback is given an opaque handle to the contended spinlock +// and the number of wait cycles. This is thread-safe, but only a single +// profiler can be registered. It is an error to call this function multiple +// times with different arguments. +void RegisterSpinLockProfiler(void (*fn)(const void* lock, + int64_t wait_cycles)); + +//------------------------------------------------------------------------------ +// Public interface ends here. +//------------------------------------------------------------------------------ + +// If (result & kSpinLockHeld) == 0, then *this was successfully locked. +// Otherwise, returns last observed value for lockword_. +inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value, + uint32_t wait_cycles) { + if ((lock_value & kSpinLockHeld) != 0) { + return lock_value; + } + + uint32_t sched_disabled_bit = 0; + if ((lock_value & kSpinLockCooperative) == 0) { + // For non-cooperative locks we must make sure we mark ourselves as + // non-reschedulable before we attempt to CompareAndSwap. 
+    if (base_internal::SchedulingGuard::DisableRescheduling()) {
+      sched_disabled_bit = kSpinLockDisabledScheduling;
+    }
+  }
+
+  if (!lockword_.compare_exchange_strong(
+          lock_value,
+          kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
+          std::memory_order_acquire, std::memory_order_relaxed)) {
+    base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
+  }
+
+  return lock_value;
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_SPINLOCK_H_
diff --git a/third_party/abseil_cpp/absl/base/internal/spinlock_akaros.inc b/third_party/abseil_cpp/absl/base/internal/spinlock_akaros.inc
new file mode 100644
index 000000000000..bc468940fc5d
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/spinlock_akaros.inc
@@ -0,0 +1,35 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is an Akaros-specific part of spinlock_wait.cc
+
+#include <atomic>
+
+#include "absl/base/internal/scheduling_mode.h"
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */,
+    int /* loop */, absl::base_internal::SchedulingMode /* mode */) {
+  // In Akaros, one must take care not to call anything that could cause a
+  // malloc(), a blocking system call, or a uthread_yield() while holding a
+  // spinlock. Our callers assume we will not call into libraries or other
+  // arbitrary code.
+}
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+} // extern "C"
diff --git a/third_party/abseil_cpp/absl/base/internal/spinlock_benchmark.cc b/third_party/abseil_cpp/absl/base/internal/spinlock_benchmark.cc
new file mode 100644
index 000000000000..0451c65f9559
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/internal/spinlock_benchmark.cc
@@ -0,0 +1,52 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// See also //absl/synchronization:mutex_benchmark for a comparison of SpinLock
+// and Mutex performance under varying levels of contention.
+ +#include "absl/base/internal/raw_logging.h" +#include "absl/base/internal/scheduling_mode.h" +#include "absl/base/internal/spinlock.h" +#include "absl/synchronization/internal/create_thread_identity.h" +#include "benchmark/benchmark.h" + +namespace { + +template <absl::base_internal::SchedulingMode scheduling_mode> +static void BM_SpinLock(benchmark::State& state) { + // Ensure a ThreadIdentity is installed. + ABSL_INTERNAL_CHECK( + absl::synchronization_internal::GetOrCreateCurrentThreadIdentity() != + nullptr, + "GetOrCreateCurrentThreadIdentity() failed"); + + static auto* spinlock = new absl::base_internal::SpinLock(scheduling_mode); + for (auto _ : state) { + absl::base_internal::SpinLockHolder holder(spinlock); + } +} + +BENCHMARK_TEMPLATE(BM_SpinLock, + absl::base_internal::SCHEDULE_KERNEL_ONLY) + ->UseRealTime() + ->Threads(1) + ->ThreadPerCpu(); + +BENCHMARK_TEMPLATE(BM_SpinLock, + absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL) + ->UseRealTime() + ->Threads(1) + ->ThreadPerCpu(); + +} // namespace diff --git a/third_party/abseil_cpp/absl/base/internal/spinlock_linux.inc b/third_party/abseil_cpp/absl/base/internal/spinlock_linux.inc new file mode 100644 index 000000000000..e31c6ed477c0 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/spinlock_linux.inc @@ -0,0 +1,74 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is a Linux-specific part of spinlock_wait.cc + +#include <linux/futex.h> +#include <sys/syscall.h> +#include <unistd.h> + +#include <atomic> +#include <climits> +#include <cstdint> +#include <ctime> + +#include "absl/base/attributes.h" +#include "absl/base/internal/errno_saver.h" + +// The SpinLock lockword is `std::atomic<uint32_t>`. Here we assert that +// `std::atomic<uint32_t>` is bitwise equivalent of the `int` expected +// by SYS_futex. We also assume that reads/writes done to the lockword +// by SYS_futex have rational semantics with regard to the +// std::atomic<> API. C++ provides no guarantees of these assumptions, +// but they are believed to hold in practice. +static_assert(sizeof(std::atomic<uint32_t>) == sizeof(int), + "SpinLock lockword has the wrong size for a futex"); + +// Some Android headers are missing these definitions even though they +// support these futex operations. 
+#ifdef __BIONIC__ +#ifndef SYS_futex +#define SYS_futex __NR_futex +#endif +#ifndef FUTEX_PRIVATE_FLAG +#define FUTEX_PRIVATE_FLAG 128 +#endif +#endif + +#if defined(__NR_futex_time64) && !defined(SYS_futex_time64) +#define SYS_futex_time64 __NR_futex_time64 +#endif + +#if defined(SYS_futex_time64) && !defined(SYS_futex) +#define SYS_futex SYS_futex_time64 +#endif + +extern "C" { + +ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay( + std::atomic<uint32_t> *w, uint32_t value, int loop, + absl::base_internal::SchedulingMode) { + absl::base_internal::ErrnoSaver errno_saver; + struct timespec tm; + tm.tv_sec = 0; + tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop); + syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm); +} + +ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(std::atomic<uint32_t> *w, + bool all) { + syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ? INT_MAX : 1, 0); +} + +} // extern "C" diff --git a/third_party/abseil_cpp/absl/base/internal/spinlock_posix.inc b/third_party/abseil_cpp/absl/base/internal/spinlock_posix.inc new file mode 100644 index 000000000000..fcd21b151b6e --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/spinlock_posix.inc @@ -0,0 +1,46 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is a Posix-specific part of spinlock_wait.cc + +#include <sched.h> + +#include <atomic> +#include <ctime> + +#include "absl/base/internal/errno_saver.h" +#include "absl/base/internal/scheduling_mode.h" +#include "absl/base/port.h" + +extern "C" { + +ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay( + std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop, + absl::base_internal::SchedulingMode /* mode */) { + absl::base_internal::ErrnoSaver errno_saver; + if (loop == 0) { + } else if (loop == 1) { + sched_yield(); + } else { + struct timespec tm; + tm.tv_sec = 0; + tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop); + nanosleep(&tm, nullptr); + } +} + +ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake( + std::atomic<uint32_t>* /* lock_word */, bool /* all */) {} + +} // extern "C" diff --git a/third_party/abseil_cpp/absl/base/internal/spinlock_wait.cc b/third_party/abseil_cpp/absl/base/internal/spinlock_wait.cc new file mode 100644 index 000000000000..fa824be1c008 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/spinlock_wait.cc @@ -0,0 +1,81 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// The OS-specific header included below must provide two calls: +// AbslInternalSpinLockDelay() and AbslInternalSpinLockWake(). +// See spinlock_wait.h for the specs. + +#include <atomic> +#include <cstdint> + +#include "absl/base/internal/spinlock_wait.h" + +#if defined(_WIN32) +#include "absl/base/internal/spinlock_win32.inc" +#elif defined(__linux__) +#include "absl/base/internal/spinlock_linux.inc" +#elif defined(__akaros__) +#include "absl/base/internal/spinlock_akaros.inc" +#else +#include "absl/base/internal/spinlock_posix.inc" +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// See spinlock_wait.h for spec. +uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n, + const SpinLockWaitTransition trans[], + base_internal::SchedulingMode scheduling_mode) { + int loop = 0; + for (;;) { + uint32_t v = w->load(std::memory_order_acquire); + int i; + for (i = 0; i != n && v != trans[i].from; i++) { + } + if (i == n) { + SpinLockDelay(w, v, ++loop, scheduling_mode); // no matching transition + } else if (trans[i].to == v || // null transition + w->compare_exchange_strong(v, trans[i].to, + std::memory_order_acquire, + std::memory_order_relaxed)) { + if (trans[i].done) return v; + } + } +} + +static std::atomic<uint64_t> delay_rand; + +// Return a suggested delay in nanoseconds for iteration number "loop" +int SpinLockSuggestedDelayNS(int loop) { + // Weak pseudo-random number generator to get some spread between threads + // when many are spinning. + uint64_t r = delay_rand.load(std::memory_order_relaxed); + r = 0x5deece66dLL * r + 0xb; // numbers from nrand48() + delay_rand.store(r, std::memory_order_relaxed); + + if (loop < 0 || loop > 32) { // limit loop to 0..32 + loop = 32; + } + const int kMinDelay = 128 << 10; // 128us + // Double delay every 8 iterations, up to 16x (2ms). + int delay = kMinDelay << (loop / 8); + // Randomize in delay..2*delay range, for resulting 128us..4ms range. + return delay | ((delay - 1) & static_cast<int>(r)); +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/internal/spinlock_wait.h b/third_party/abseil_cpp/absl/base/internal/spinlock_wait.h new file mode 100644 index 000000000000..169bc749fbc5 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/spinlock_wait.h @@ -0,0 +1,93 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_ +#define ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_ + +// Operations to make atomic transitions on a word, and to allow +// waiting for those transitions to become possible. + +#include <stdint.h> +#include <atomic> + +#include "absl/base/internal/scheduling_mode.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// SpinLockWait() waits until it can perform one of several transitions from +// "from" to "to". 
It returns when it performs a transition where done==true. +struct SpinLockWaitTransition { + uint32_t from; + uint32_t to; + bool done; +}; + +// Wait until *w can transition from trans[i].from to trans[i].to for some i +// satisfying 0<=i<n && trans[i].done, atomically make the transition, +// then return the old value of *w. Make any other atomic transitions +// where !trans[i].done, but continue waiting. +uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n, + const SpinLockWaitTransition trans[], + SchedulingMode scheduling_mode); + +// If possible, wake some thread that has called SpinLockDelay(w, ...). If +// "all" is true, wake all such threads. This call is a hint, and on some +// systems it may be a no-op; threads calling SpinLockDelay() will always wake +// eventually even if SpinLockWake() is never called. +void SpinLockWake(std::atomic<uint32_t> *w, bool all); + +// Wait for an appropriate spin delay on iteration "loop" of a +// spin loop on location *w, whose previously observed value was "value". +// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick, +// or may wait for a delay that can be truncated by a call to SpinLockWake(w). +// In all cases, it must return in bounded time even if SpinLockWake() is not +// called. +void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop, + base_internal::SchedulingMode scheduling_mode); + +// Helper used by AbslInternalSpinLockDelay. +// Returns a suggested delay in nanoseconds for iteration number "loop". +int SpinLockSuggestedDelayNS(int loop); + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +// In some build configurations we pass --detect-odr-violations to the +// gold linker. This causes it to flag weak symbol overrides as ODR +// violations. Because ODR only applies to C++ and not C, +// --detect-odr-violations ignores symbols not mangled with C++ names. +// By changing our extension points to be extern "C", we dodge this +// check. +extern "C" { +void AbslInternalSpinLockWake(std::atomic<uint32_t> *w, bool all); +void AbslInternalSpinLockDelay( + std::atomic<uint32_t> *w, uint32_t value, int loop, + absl::base_internal::SchedulingMode scheduling_mode); +} + +inline void absl::base_internal::SpinLockWake(std::atomic<uint32_t> *w, + bool all) { + AbslInternalSpinLockWake(w, all); +} + +inline void absl::base_internal::SpinLockDelay( + std::atomic<uint32_t> *w, uint32_t value, int loop, + absl::base_internal::SchedulingMode scheduling_mode) { + AbslInternalSpinLockDelay(w, value, loop, scheduling_mode); +} + +#endif // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/spinlock_win32.inc b/third_party/abseil_cpp/absl/base/internal/spinlock_win32.inc new file mode 100644 index 000000000000..78654b5b5966 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/spinlock_win32.inc @@ -0,0 +1,37 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
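As a rough illustration of how this waiting primitive composes (the latch below is invented for the sketch and is not part of the Abseil sources; only the SpinLockWait()/SpinLockWake() API and SCHEDULE_KERNEL_ONLY come from the header above), a minimal open/closed latch could look like:

#include <atomic>
#include <cstdint>

#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/spinlock_wait.h"

namespace example {

// 0 = closed, 1 = open.
std::atomic<uint32_t> latch_word{0};

void WaitUntilOpen() {
  // A single "null" transition: once the word is observed to be 1, the
  // done==true entry makes SpinLockWait() return without modifying the word;
  // until then the platform-specific SpinLockDelay() is used to back off.
  static constexpr absl::base_internal::SpinLockWaitTransition kTrans[] = {
      {1, 1, true}};
  absl::base_internal::SpinLockWait(&latch_word, 1, kTrans,
                                    absl::base_internal::SCHEDULE_KERNEL_ONLY);
}

void Open() {
  latch_word.store(1, std::memory_order_release);
  // A hint to wake any thread parked in SpinLockDelay(); waiters still make
  // progress eventually even if this is a no-op on the platform.
  absl::base_internal::SpinLockWake(&latch_word, /*all=*/true);
}

}  // namespace example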
+// +// This file is a Win32-specific part of spinlock_wait.cc + +#include <windows.h> +#include <atomic> +#include "absl/base/internal/scheduling_mode.h" + +extern "C" { + +void AbslInternalSpinLockDelay(std::atomic<uint32_t>* /* lock_word */, + uint32_t /* value */, int loop, + absl::base_internal::SchedulingMode /* mode */) { + if (loop == 0) { + } else if (loop == 1) { + Sleep(0); + } else { + Sleep(absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000); + } +} + +void AbslInternalSpinLockWake(std::atomic<uint32_t>* /* lock_word */, + bool /* all */) {} + +} // extern "C" diff --git a/third_party/abseil_cpp/absl/base/internal/strerror.cc b/third_party/abseil_cpp/absl/base/internal/strerror.cc new file mode 100644 index 000000000000..af181513cde9 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/strerror.cc @@ -0,0 +1,75 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/strerror.h" + +#include <cerrno> +#include <cstddef> +#include <cstdio> +#include <cstring> +#include <string> +#include <type_traits> + +#include "absl/base/attributes.h" +#include "absl/base/internal/errno_saver.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { +namespace { +const char* StrErrorAdaptor(int errnum, char* buf, size_t buflen) { +#if defined(_WIN32) + int rc = strerror_s(buf, buflen, errnum); + buf[buflen - 1] = '\0'; // guarantee NUL termination + if (rc == 0 && strncmp(buf, "Unknown error", buflen) == 0) *buf = '\0'; + return buf; +#else +#if defined(__GLIBC__) || defined(__APPLE__) + // Use the BSD sys_errlist API provided by GNU glibc and others to + // avoid any need to copy the message into the local buffer first. + if (0 <= errnum && errnum < sys_nerr) { + if (const char* p = sys_errlist[errnum]) { + return p; + } + } +#endif + // The type of `ret` is platform-specific; both of these branches must compile + // either way but only one will execute on any given platform: + auto ret = strerror_r(errnum, buf, buflen); + if (std::is_same<decltype(ret), int>::value) { + // XSI `strerror_r`; `ret` is `int`: + if (ret) *buf = '\0'; + return buf; + } else { + // GNU `strerror_r`; `ret` is `char *`: + return reinterpret_cast<const char*>(ret); + } +#endif +} +} // namespace + +std::string StrError(int errnum) { + absl::base_internal::ErrnoSaver errno_saver; + char buf[100]; + const char* str = StrErrorAdaptor(errnum, buf, sizeof buf); + if (*str == '\0') { + snprintf(buf, sizeof buf, "Unknown error %d", errnum); + str = buf; + } + return str; +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/internal/strerror.h b/third_party/abseil_cpp/absl/base/internal/strerror.h new file mode 100644 index 000000000000..350097366eed --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/strerror.h @@ -0,0 +1,39 @@ +// Copyright 2020 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_STRERROR_H_ +#define ABSL_BASE_INTERNAL_STRERROR_H_ + +#include <string> + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// A portable and thread-safe alternative to C89's `strerror`. +// +// The C89 specification of `strerror` is not suitable for use in a +// multi-threaded application as the returned string may be changed by calls to +// `strerror` from another thread. The many non-stdlib alternatives differ +// enough in their names, availability, and semantics to justify this wrapper +// around them. `errno` will not be modified by a call to `absl::StrError`. +std::string StrError(int errnum); + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_STRERROR_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/strerror_benchmark.cc b/third_party/abseil_cpp/absl/base/internal/strerror_benchmark.cc new file mode 100644 index 000000000000..d8ca86b95beb --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/strerror_benchmark.cc @@ -0,0 +1,38 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include <cerrno> +#include <cstdio> +#include <string> + +#include "absl/base/internal/strerror.h" +#include "benchmark/benchmark.h" + +namespace { +#if defined(__GLIBC__) || defined(__APPLE__) +void BM_SysErrList(benchmark::State& state) { + for (auto _ : state) { + benchmark::DoNotOptimize(std::string(sys_errlist[ERANGE])); + } +} +BENCHMARK(BM_SysErrList); +#endif + +void BM_AbslStrError(benchmark::State& state) { + for (auto _ : state) { + benchmark::DoNotOptimize(absl::base_internal::StrError(ERANGE)); + } +} +BENCHMARK(BM_AbslStrError); +} // namespace diff --git a/third_party/abseil_cpp/absl/base/internal/strerror_test.cc b/third_party/abseil_cpp/absl/base/internal/strerror_test.cc new file mode 100644 index 000000000000..a53da97f92c9 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/strerror_test.cc @@ -0,0 +1,86 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/strerror.h" + +#include <atomic> +#include <cerrno> +#include <cstdio> +#include <cstring> +#include <string> +#include <thread> // NOLINT(build/c++11) +#include <vector> + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/strings/match.h" + +namespace { +using ::testing::AnyOf; +using ::testing::Eq; + +TEST(StrErrorTest, ValidErrorCode) { + errno = ERANGE; + EXPECT_THAT(absl::base_internal::StrError(EDOM), Eq(strerror(EDOM))); + EXPECT_THAT(errno, Eq(ERANGE)); +} + +TEST(StrErrorTest, InvalidErrorCode) { + errno = ERANGE; + EXPECT_THAT(absl::base_internal::StrError(-1), + AnyOf(Eq("No error information"), Eq("Unknown error -1"))); + EXPECT_THAT(errno, Eq(ERANGE)); +} + +TEST(StrErrorTest, MultipleThreads) { + // In this test, we will start up 2 threads and have each one call + // StrError 1000 times, each time with a different errnum. We + // expect that StrError(errnum) will return a string equal to the + // one returned by strerror(errnum), if the code is known. Since + // strerror is known to be thread-hostile, collect all the expected + // strings up front. + const int kNumCodes = 1000; + std::vector<std::string> expected_strings(kNumCodes); + for (int i = 0; i < kNumCodes; ++i) { + expected_strings[i] = strerror(i); + } + + std::atomic_int counter(0); + auto thread_fun = [&]() { + for (int i = 0; i < kNumCodes; ++i) { + ++counter; + errno = ERANGE; + const std::string value = absl::base_internal::StrError(i); + // Only the GNU implementation is guaranteed to provide the + // string "Unknown error nnn". POSIX doesn't say anything. + if (!absl::StartsWith(value, "Unknown error ")) { + EXPECT_THAT(absl::base_internal::StrError(i), Eq(expected_strings[i])); + } + EXPECT_THAT(errno, Eq(ERANGE)); + } + }; + + const int kNumThreads = 100; + std::vector<std::thread> threads; + for (int i = 0; i < kNumThreads; ++i) { + threads.push_back(std::thread(thread_fun)); + } + for (auto& thread : threads) { + thread.join(); + } + + EXPECT_THAT(counter, Eq(kNumThreads * kNumCodes)); +} + +} // namespace diff --git a/third_party/abseil_cpp/absl/base/internal/sysinfo.cc b/third_party/abseil_cpp/absl/base/internal/sysinfo.cc new file mode 100644 index 000000000000..6c69683faf67 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/sysinfo.cc @@ -0,0 +1,425 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
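A brief usage sketch for the StrError() helper shown above (the wrapper function here is hypothetical): unlike a bare strerror(), StrError() is safe to call from any number of threads and leaves errno untouched.

#include <cerrno>
#include <string>

#include "absl/base/internal/strerror.h"

// Hypothetical helper: render the current errno for a log message.
std::string DescribeLastErrno() {
  return "last error: " + absl::base_internal::StrError(errno);
}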
+ +#include "absl/base/internal/sysinfo.h" + +#include "absl/base/attributes.h" + +#ifdef _WIN32 +#include <windows.h> +#else +#include <fcntl.h> +#include <pthread.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <unistd.h> +#endif + +#ifdef __linux__ +#include <sys/syscall.h> +#endif + +#if defined(__APPLE__) || defined(__FreeBSD__) +#include <sys/sysctl.h> +#endif + +#if defined(__myriad2__) +#include <rtems.h> +#endif + +#include <string.h> +#include <cassert> +#include <cstdint> +#include <cstdio> +#include <cstdlib> +#include <ctime> +#include <limits> +#include <thread> // NOLINT(build/c++11) +#include <utility> +#include <vector> + +#include "absl/base/call_once.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/internal/spinlock.h" +#include "absl/base/internal/unscaledcycleclock.h" +#include "absl/base/thread_annotations.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +static int GetNumCPUs() { +#if defined(__myriad2__) + return 1; +#else + // Other possibilities: + // - Read /sys/devices/system/cpu/online and use cpumask_parse() + // - sysconf(_SC_NPROCESSORS_ONLN) + return std::thread::hardware_concurrency(); +#endif +} + +#if defined(_WIN32) + +static double GetNominalCPUFrequency() { +#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \ + !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) + // UWP apps don't have access to the registry and currently don't provide an + // API informing about CPU nominal frequency. + return 1.0; +#else +#pragma comment(lib, "advapi32.lib") // For Reg* functions. + HKEY key; + // Use the Reg* functions rather than the SH functions because shlwapi.dll + // pulls in gdi32.dll which makes process destruction much more costly. + if (RegOpenKeyExA(HKEY_LOCAL_MACHINE, + "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", 0, + KEY_READ, &key) == ERROR_SUCCESS) { + DWORD type = 0; + DWORD data = 0; + DWORD data_size = sizeof(data); + auto result = RegQueryValueExA(key, "~MHz", 0, &type, + reinterpret_cast<LPBYTE>(&data), &data_size); + RegCloseKey(key); + if (result == ERROR_SUCCESS && type == REG_DWORD && + data_size == sizeof(data)) { + return data * 1e6; // Value is MHz. + } + } + return 1.0; +#endif // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP +} + +#elif defined(CTL_HW) && defined(HW_CPU_FREQ) + +static double GetNominalCPUFrequency() { + unsigned freq; + size_t size = sizeof(freq); + int mib[2] = {CTL_HW, HW_CPU_FREQ}; + if (sysctl(mib, 2, &freq, &size, nullptr, 0) == 0) { + return static_cast<double>(freq); + } + return 1.0; +} + +#else + +// Helper function for reading a long from a file. Returns true if successful +// and the memory location pointed to by value is set to the value read. +static bool ReadLongFromFile(const char *file, long *value) { + bool ret = false; + int fd = open(file, O_RDONLY); + if (fd != -1) { + char line[1024]; + char *err; + memset(line, '\0', sizeof(line)); + int len = read(fd, line, sizeof(line) - 1); + if (len <= 0) { + ret = false; + } else { + const long temp_value = strtol(line, &err, 10); + if (line[0] != '\0' && (*err == '\n' || *err == '\0')) { + *value = temp_value; + ret = true; + } + } + close(fd); + } + return ret; +} + +#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY) + +// Reads a monotonic time source and returns a value in +// nanoseconds. The returned value uses an arbitrary epoch, not the +// Unix epoch. 
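// CLOCK_MONOTONIC_RAW is preferred where available because, unlike
// CLOCK_MONOTONIC, it is not slewed by NTP frequency adjustments, which
// matters when the reading is used below to calibrate the TSC frequency.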
+static int64_t ReadMonotonicClockNanos() { + struct timespec t; +#ifdef CLOCK_MONOTONIC_RAW + int rc = clock_gettime(CLOCK_MONOTONIC_RAW, &t); +#else + int rc = clock_gettime(CLOCK_MONOTONIC, &t); +#endif + if (rc != 0) { + perror("clock_gettime() failed"); + abort(); + } + return int64_t{t.tv_sec} * 1000000000 + t.tv_nsec; +} + +class UnscaledCycleClockWrapperForInitializeFrequency { + public: + static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); } +}; + +struct TimeTscPair { + int64_t time; // From ReadMonotonicClockNanos(). + int64_t tsc; // From UnscaledCycleClock::Now(). +}; + +// Returns a pair of values (monotonic kernel time, TSC ticks) that +// approximately correspond to each other. This is accomplished by +// doing several reads and picking the reading with the lowest +// latency. This approach is used to minimize the probability that +// our thread was preempted between clock reads. +static TimeTscPair GetTimeTscPair() { + int64_t best_latency = std::numeric_limits<int64_t>::max(); + TimeTscPair best; + for (int i = 0; i < 10; ++i) { + int64_t t0 = ReadMonotonicClockNanos(); + int64_t tsc = UnscaledCycleClockWrapperForInitializeFrequency::Now(); + int64_t t1 = ReadMonotonicClockNanos(); + int64_t latency = t1 - t0; + if (latency < best_latency) { + best_latency = latency; + best.time = t0; + best.tsc = tsc; + } + } + return best; +} + +// Measures and returns the TSC frequency by taking a pair of +// measurements approximately `sleep_nanoseconds` apart. +static double MeasureTscFrequencyWithSleep(int sleep_nanoseconds) { + auto t0 = GetTimeTscPair(); + struct timespec ts; + ts.tv_sec = 0; + ts.tv_nsec = sleep_nanoseconds; + while (nanosleep(&ts, &ts) != 0 && errno == EINTR) {} + auto t1 = GetTimeTscPair(); + double elapsed_ticks = t1.tsc - t0.tsc; + double elapsed_time = (t1.time - t0.time) * 1e-9; + return elapsed_ticks / elapsed_time; +} + +// Measures and returns the TSC frequency by calling +// MeasureTscFrequencyWithSleep(), doubling the sleep interval until the +// frequency measurement stabilizes. +static double MeasureTscFrequency() { + double last_measurement = -1.0; + int sleep_nanoseconds = 1000000; // 1 millisecond. + for (int i = 0; i < 8; ++i) { + double measurement = MeasureTscFrequencyWithSleep(sleep_nanoseconds); + if (measurement * 0.99 < last_measurement && + last_measurement < measurement * 1.01) { + // Use the current measurement if it is within 1% of the + // previous measurement. + return measurement; + } + last_measurement = measurement; + sleep_nanoseconds *= 2; + } + return last_measurement; +} + +#endif // ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY + +static double GetNominalCPUFrequency() { + long freq = 0; + + // Google's production kernel has a patch to export the TSC + // frequency through sysfs. If the kernel is exporting the TSC + // frequency use that. There are issues where cpuinfo_max_freq + // cannot be relied on because the BIOS may be exporting an invalid + // p-state (on x86) or p-states may be used to put the processor in + // a new mode (turbo mode). Essentially, those frequencies cannot + // always be relied upon. The same reasons apply to /proc/cpuinfo as + // well. + if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)) { + return freq * 1e3; // Value is kHz. + } + +#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY) + // On these platforms, the TSC frequency is the nominal CPU + // frequency. 
But without having the kernel export it directly + // though /sys/devices/system/cpu/cpu0/tsc_freq_khz, there is no + // other way to reliably get the TSC frequency, so we have to + // measure it ourselves. Some CPUs abuse cpuinfo_max_freq by + // exporting "fake" frequencies for implementing new features. For + // example, Intel's turbo mode is enabled by exposing a p-state + // value with a higher frequency than that of the real TSC + // rate. Because of this, we prefer to measure the TSC rate + // ourselves on i386 and x86-64. + return MeasureTscFrequency(); +#else + + // If CPU scaling is in effect, we want to use the *maximum* + // frequency, not whatever CPU speed some random processor happens + // to be using now. + if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", + &freq)) { + return freq * 1e3; // Value is kHz. + } + + return 1.0; +#endif // !ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY +} + +#endif + +ABSL_CONST_INIT static once_flag init_num_cpus_once; +ABSL_CONST_INIT static int num_cpus = 0; + +// NumCPUs() may be called before main() and before malloc is properly +// initialized, therefore this must not allocate memory. +int NumCPUs() { + base_internal::LowLevelCallOnce( + &init_num_cpus_once, []() { num_cpus = GetNumCPUs(); }); + return num_cpus; +} + +// A default frequency of 0.0 might be dangerous if it is used in division. +ABSL_CONST_INIT static once_flag init_nominal_cpu_frequency_once; +ABSL_CONST_INIT static double nominal_cpu_frequency = 1.0; + +// NominalCPUFrequency() may be called before main() and before malloc is +// properly initialized, therefore this must not allocate memory. +double NominalCPUFrequency() { + base_internal::LowLevelCallOnce( + &init_nominal_cpu_frequency_once, + []() { nominal_cpu_frequency = GetNominalCPUFrequency(); }); + return nominal_cpu_frequency; +} + +#if defined(_WIN32) + +pid_t GetTID() { + return pid_t{GetCurrentThreadId()}; +} + +#elif defined(__linux__) + +#ifndef SYS_gettid +#define SYS_gettid __NR_gettid +#endif + +pid_t GetTID() { + return syscall(SYS_gettid); +} + +#elif defined(__akaros__) + +pid_t GetTID() { + // Akaros has a concept of "vcore context", which is the state the program + // is forced into when we need to make a user-level scheduling decision, or + // run a signal handler. This is analogous to the interrupt context that a + // CPU might enter if it encounters some kind of exception. + // + // There is no current thread context in vcore context, but we need to give + // a reasonable answer if asked for a thread ID (e.g., in a signal handler). + // Thread 0 always exists, so if we are in vcore context, we return that. + // + // Otherwise, we know (since we are using pthreads) that the uthread struct + // current_uthread is pointing to is the first element of a + // struct pthread_tcb, so we extract and return the thread ID from that. + // + // TODO(dcross): Akaros anticipates moving the thread ID to the uthread + // structure at some point. We should modify this code to remove the cast + // when that happens. + if (in_vcore_context()) + return 0; + return reinterpret_cast<struct pthread_tcb *>(current_uthread)->id; +} + +#elif defined(__myriad2__) + +pid_t GetTID() { + uint32_t tid; + rtems_task_ident(RTEMS_SELF, 0, &tid); + return tid; +} + +#else + +// Fallback implementation of GetTID using pthread_getspecific. 
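// Small integer IDs are handed out from a bit vector: ID n occupies word
// n / kBitsPerWord, bit n % kBitsPerWord of *tid_array (e.g. ID 37 maps to
// word 1, bit 5). FreeTID() runs as the pthread key destructor and clears
// that bit on thread exit, so the ID can be reused by a later thread.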
+ABSL_CONST_INIT static once_flag tid_once; +ABSL_CONST_INIT static pthread_key_t tid_key; +ABSL_CONST_INIT static absl::base_internal::SpinLock tid_lock( + absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY); + +// We set a bit per thread in this array to indicate that an ID is in +// use. ID 0 is unused because it is the default value returned by +// pthread_getspecific(). +ABSL_CONST_INIT static std::vector<uint32_t> *tid_array + ABSL_GUARDED_BY(tid_lock) = nullptr; +static constexpr int kBitsPerWord = 32; // tid_array is uint32_t. + +// Returns the TID to tid_array. +static void FreeTID(void *v) { + intptr_t tid = reinterpret_cast<intptr_t>(v); + int word = tid / kBitsPerWord; + uint32_t mask = ~(1u << (tid % kBitsPerWord)); + absl::base_internal::SpinLockHolder lock(&tid_lock); + assert(0 <= word && static_cast<size_t>(word) < tid_array->size()); + (*tid_array)[word] &= mask; +} + +static void InitGetTID() { + if (pthread_key_create(&tid_key, FreeTID) != 0) { + // The logging system calls GetTID() so it can't be used here. + perror("pthread_key_create failed"); + abort(); + } + + // Initialize tid_array. + absl::base_internal::SpinLockHolder lock(&tid_lock); + tid_array = new std::vector<uint32_t>(1); + (*tid_array)[0] = 1; // ID 0 is never-allocated. +} + +// Return a per-thread small integer ID from pthread's thread-specific data. +pid_t GetTID() { + absl::call_once(tid_once, InitGetTID); + + intptr_t tid = reinterpret_cast<intptr_t>(pthread_getspecific(tid_key)); + if (tid != 0) { + return tid; + } + + int bit; // tid_array[word] = 1u << bit; + size_t word; + { + // Search for the first unused ID. + absl::base_internal::SpinLockHolder lock(&tid_lock); + // First search for a word in the array that is not all ones. + word = 0; + while (word < tid_array->size() && ~(*tid_array)[word] == 0) { + ++word; + } + if (word == tid_array->size()) { + tid_array->push_back(0); // No space left, add kBitsPerWord more IDs. + } + // Search for a zero bit in the word. + bit = 0; + while (bit < kBitsPerWord && (((*tid_array)[word] >> bit) & 1) != 0) { + ++bit; + } + tid = (word * kBitsPerWord) + bit; + (*tid_array)[word] |= 1u << bit; // Mark the TID as allocated. + } + + if (pthread_setspecific(tid_key, reinterpret_cast<void *>(tid)) != 0) { + perror("pthread_setspecific failed"); + abort(); + } + + return static_cast<pid_t>(tid); +} + +#endif + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/internal/sysinfo.h b/third_party/abseil_cpp/absl/base/internal/sysinfo.h new file mode 100644 index 000000000000..7246d5dd95c3 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/sysinfo.h @@ -0,0 +1,66 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file includes routines to find out characteristics +// of the machine a program is running on. It is undoubtedly +// system-dependent. 
+ +// Functions listed here that accept a pid_t as an argument act on the +// current process if the pid_t argument is 0 +// All functions here are thread-hostile due to file caching unless +// commented otherwise. + +#ifndef ABSL_BASE_INTERNAL_SYSINFO_H_ +#define ABSL_BASE_INTERNAL_SYSINFO_H_ + +#ifndef _WIN32 +#include <sys/types.h> +#endif + +#include <cstdint> + +#include "absl/base/port.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// Nominal core processor cycles per second of each processor. This is _not_ +// necessarily the frequency of the CycleClock counter (see cycleclock.h) +// Thread-safe. +double NominalCPUFrequency(); + +// Number of logical processors (hyperthreads) in system. Thread-safe. +int NumCPUs(); + +// Return the thread id of the current thread, as told by the system. +// No two currently-live threads implemented by the OS shall have the same ID. +// Thread ids of exited threads may be reused. Multiple user-level threads +// may have the same thread ID if multiplexed on the same OS thread. +// +// On Linux, you may send a signal to the resulting ID with kill(). However, +// it is recommended for portability that you use pthread_kill() instead. +#ifdef _WIN32 +// On Windows, process id and thread id are of the same type according to the +// return types of GetProcessId() and GetThreadId() are both DWORD, an unsigned +// 32-bit type. +using pid_t = uint32_t; +#endif +pid_t GetTID(); + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_SYSINFO_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/sysinfo_test.cc b/third_party/abseil_cpp/absl/base/internal/sysinfo_test.cc new file mode 100644 index 000000000000..fa8b88b1dc07 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/sysinfo_test.cc @@ -0,0 +1,100 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/sysinfo.h" + +#ifndef _WIN32 +#include <sys/types.h> +#include <unistd.h> +#endif + +#include <thread> // NOLINT(build/c++11) +#include <unordered_set> +#include <vector> + +#include "gtest/gtest.h" +#include "absl/synchronization/barrier.h" +#include "absl/synchronization/mutex.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { +namespace { + +TEST(SysinfoTest, NumCPUs) { + EXPECT_NE(NumCPUs(), 0) + << "NumCPUs() should not have the default value of 0"; +} + +TEST(SysinfoTest, NominalCPUFrequency) { +#if !(defined(__aarch64__) && defined(__linux__)) && !defined(__EMSCRIPTEN__) + EXPECT_GE(NominalCPUFrequency(), 1000.0) + << "NominalCPUFrequency() did not return a reasonable value"; +#else + // Aarch64 cannot read the CPU frequency from sysfs, so we get back 1.0. + // Emscripten does not have a sysfs to read from at all. + EXPECT_EQ(NominalCPUFrequency(), 1.0) + << "CPU frequency detection was fixed! 
Please update unittest."; +#endif +} + +TEST(SysinfoTest, GetTID) { + EXPECT_EQ(GetTID(), GetTID()); // Basic compile and equality test. +#ifdef __native_client__ + // Native Client has a race condition bug that leads to memory + // exaustion when repeatedly creating and joining threads. + // https://bugs.chromium.org/p/nativeclient/issues/detail?id=1027 + return; +#endif + // Test that TIDs are unique to each thread. + // Uses a few loops to exercise implementations that reallocate IDs. + for (int i = 0; i < 10; ++i) { + constexpr int kNumThreads = 10; + Barrier all_threads_done(kNumThreads); + std::vector<std::thread> threads; + + Mutex mutex; + std::unordered_set<pid_t> tids; + + for (int j = 0; j < kNumThreads; ++j) { + threads.push_back(std::thread([&]() { + pid_t id = GetTID(); + { + MutexLock lock(&mutex); + ASSERT_TRUE(tids.find(id) == tids.end()); + tids.insert(id); + } + // We can't simply join the threads here. The threads need to + // be alive otherwise the TID might have been reallocated to + // another live thread. + all_threads_done.Block(); + })); + } + for (auto& thread : threads) { + thread.join(); + } + } +} + +#ifdef __linux__ +TEST(SysinfoTest, LinuxGetTID) { + // On Linux, for the main thread, GetTID()==getpid() is guaranteed by the API. + EXPECT_EQ(GetTID(), getpid()); +} +#endif + +} // namespace +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/internal/thread_annotations.h b/third_party/abseil_cpp/absl/base/internal/thread_annotations.h new file mode 100644 index 000000000000..4dab6a9c150a --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/thread_annotations.h @@ -0,0 +1,271 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: thread_annotations.h +// ----------------------------------------------------------------------------- +// +// WARNING: This is a backwards compatible header and it will be removed after +// the migration to prefixed thread annotations is finished; please include +// "absl/base/thread_annotations.h". +// +// This header file contains macro definitions for thread safety annotations +// that allow developers to document the locking policies of multi-threaded +// code. The annotations can also help program analysis tools to identify +// potential thread safety issues. +// +// These annotations are implemented using compiler attributes. Using the macros +// defined here instead of raw attributes allow for portability and future +// compatibility. +// +// When referring to mutexes in the arguments of the attributes, you should +// use variable names or more complex expressions (e.g. my_object->mutex_) +// that evaluate to a concrete mutex object whenever possible. If the mutex +// you want to refer to is not in scope, you may use a member pointer +// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object. 
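As a rough sketch of how the macros defined below combine on one class (the class is invented for illustration; per the warning above, new code should include "absl/base/thread_annotations.h" and use the ABSL_-prefixed spellings instead):

#include "absl/base/internal/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class BankAccount {
 public:
  // Acquires mu_ internally, so callers must not already hold it.
  void Deposit(int amount) LOCKS_EXCLUDED(mu_) {
    absl::MutexLock lock(&mu_);
    DepositLocked(amount);
  }

  // Callers must already hold mu_ for writing.
  void DepositLocked(int amount) EXCLUSIVE_LOCKS_REQUIRED(mu_) {
    balance_ += amount;
  }

 private:
  absl::Mutex mu_;
  int balance_ GUARDED_BY(mu_) = 0;
};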
+ +#ifndef ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_ +#define ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_ + +#if defined(__clang__) +#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) +#else +#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op +#endif + +// GUARDED_BY() +// +// Documents if a shared field or global variable needs to be protected by a +// mutex. GUARDED_BY() allows the user to specify a particular mutex that +// should be held when accessing the annotated variable. +// +// Although this annotation (and PT_GUARDED_BY, below) cannot be applied to +// local variables, a local variable and its associated mutex can often be +// combined into a small class or struct, thereby allowing the annotation. +// +// Example: +// +// class Foo { +// Mutex mu_; +// int p1_ GUARDED_BY(mu_); +// ... +// }; +#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) + +// PT_GUARDED_BY() +// +// Documents if the memory location pointed to by a pointer should be guarded +// by a mutex when dereferencing the pointer. +// +// Example: +// class Foo { +// Mutex mu_; +// int *p1_ PT_GUARDED_BY(mu_); +// ... +// }; +// +// Note that a pointer variable to a shared memory location could itself be a +// shared variable. +// +// Example: +// +// // `q_`, guarded by `mu1_`, points to a shared memory location that is +// // guarded by `mu2_`: +// int *q_ GUARDED_BY(mu1_) PT_GUARDED_BY(mu2_); +#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x)) + +// ACQUIRED_AFTER() / ACQUIRED_BEFORE() +// +// Documents the acquisition order between locks that can be held +// simultaneously by a thread. For any two locks that need to be annotated +// to establish an acquisition order, only one of them needs the annotation. +// (i.e. You don't have to annotate both locks with both ACQUIRED_AFTER +// and ACQUIRED_BEFORE.) +// +// As with GUARDED_BY, this is only applicable to mutexes that are shared +// fields or global variables. +// +// Example: +// +// Mutex m1_; +// Mutex m2_ ACQUIRED_AFTER(m1_); +#define ACQUIRED_AFTER(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) + +#define ACQUIRED_BEFORE(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) + +// EXCLUSIVE_LOCKS_REQUIRED() / SHARED_LOCKS_REQUIRED() +// +// Documents a function that expects a mutex to be held prior to entry. +// The mutex is expected to be held both on entry to, and exit from, the +// function. +// +// An exclusive lock allows read-write access to the guarded data member(s), and +// only one thread can acquire a lock exclusively at any one time. A shared lock +// allows read-only access, and any number of threads can acquire a shared lock +// concurrently. +// +// Generally, non-const methods should be annotated with +// EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with +// SHARED_LOCKS_REQUIRED. +// +// Example: +// +// Mutex mu1, mu2; +// int a GUARDED_BY(mu1); +// int b GUARDED_BY(mu2); +// +// void foo() EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... } +// void bar() const SHARED_LOCKS_REQUIRED(mu1, mu2) { ... } +#define EXCLUSIVE_LOCKS_REQUIRED(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__)) + +#define SHARED_LOCKS_REQUIRED(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__)) + +// LOCKS_EXCLUDED() +// +// Documents the locks acquired in the body of the function. These locks +// cannot be held when calling this function (as Abseil's `Mutex` locks are +// non-reentrant). +#define LOCKS_EXCLUDED(...) 
\ + THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) + +// LOCK_RETURNED() +// +// Documents a function that returns a mutex without acquiring it. For example, +// a public getter method that returns a pointer to a private mutex should +// be annotated with LOCK_RETURNED. +#define LOCK_RETURNED(x) \ + THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) + +// LOCKABLE +// +// Documents if a class/type is a lockable type (such as the `Mutex` class). +#define LOCKABLE \ + THREAD_ANNOTATION_ATTRIBUTE__(lockable) + +// SCOPED_LOCKABLE +// +// Documents if a class does RAII locking (such as the `MutexLock` class). +// The constructor should use `LOCK_FUNCTION()` to specify the mutex that is +// acquired, and the destructor should use `UNLOCK_FUNCTION()` with no +// arguments; the analysis will assume that the destructor unlocks whatever the +// constructor locked. +#define SCOPED_LOCKABLE \ + THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) + +// EXCLUSIVE_LOCK_FUNCTION() +// +// Documents functions that acquire a lock in the body of a function, and do +// not release it. +#define EXCLUSIVE_LOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__)) + +// SHARED_LOCK_FUNCTION() +// +// Documents functions that acquire a shared (reader) lock in the body of a +// function, and do not release it. +#define SHARED_LOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__)) + +// UNLOCK_FUNCTION() +// +// Documents functions that expect a lock to be held on entry to the function, +// and release it in the body of the function. +#define UNLOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__)) + +// EXCLUSIVE_TRYLOCK_FUNCTION() / SHARED_TRYLOCK_FUNCTION() +// +// Documents functions that try to acquire a lock, and return success or failure +// (or a non-boolean value that can be interpreted as a boolean). +// The first argument should be `true` for functions that return `true` on +// success, or `false` for functions that return `false` on success. The second +// argument specifies the mutex that is locked on success. If unspecified, this +// mutex is assumed to be `this`. +#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__)) + +#define SHARED_TRYLOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__)) + +// ASSERT_EXCLUSIVE_LOCK() / ASSERT_SHARED_LOCK() +// +// Documents functions that dynamically check to see if a lock is held, and fail +// if it is not held. +#define ASSERT_EXCLUSIVE_LOCK(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__)) + +#define ASSERT_SHARED_LOCK(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__)) + +// NO_THREAD_SAFETY_ANALYSIS +// +// Turns off thread safety checking within the body of a particular function. +// This annotation is used to mark functions that are known to be correct, but +// the locking behavior is more complicated than the analyzer can handle. +#define NO_THREAD_SAFETY_ANALYSIS \ + THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) + +//------------------------------------------------------------------------------ +// Tool-Supplied Annotations +//------------------------------------------------------------------------------ + +// TS_UNCHECKED should be placed around lock expressions that are not valid +// C++ syntax, but which are present for documentation purposes. These +// annotations will be ignored by the analysis. 
+#define TS_UNCHECKED(x) "" + +// TS_FIXME is used to mark lock expressions that are not valid C++ syntax. +// It is used by automated tools to mark and disable invalid expressions. +// The annotation should either be fixed, or changed to TS_UNCHECKED. +#define TS_FIXME(x) "" + +// Like NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body of +// a particular function. However, this attribute is used to mark functions +// that are incorrect and need to be fixed. It is used by automated tools to +// avoid breaking the build when the analysis is updated. +// Code owners are expected to eventually fix the routine. +#define NO_THREAD_SAFETY_ANALYSIS_FIXME NO_THREAD_SAFETY_ANALYSIS + +// Similar to NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a GUARDED_BY +// annotation that needs to be fixed, because it is producing thread safety +// warning. It disables the GUARDED_BY. +#define GUARDED_BY_FIXME(x) + +// Disables warnings for a single read operation. This can be used to avoid +// warnings when it is known that the read is not actually involved in a race, +// but the compiler cannot confirm that. +#define TS_UNCHECKED_READ(x) thread_safety_analysis::ts_unchecked_read(x) + + +namespace thread_safety_analysis { + +// Takes a reference to a guarded data member, and returns an unguarded +// reference. +template <typename T> +inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS { + return v; +} + +template <typename T> +inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS { + return v; +} + +} // namespace thread_safety_analysis + +#endif // ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/thread_identity.cc b/third_party/abseil_cpp/absl/base/internal/thread_identity.cc new file mode 100644 index 000000000000..d63a04ae91d5 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/thread_identity.cc @@ -0,0 +1,152 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
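A hypothetical illustration of the TS_UNCHECKED_READ helper defined above (the struct and function are invented for the sketch): it documents, and silences the analysis for, a single deliberately racy read such as a best-effort statistics dump.

#include <cstdint>

#include "absl/base/internal/thread_annotations.h"
#include "absl/synchronization/mutex.h"

struct Stats {
  absl::Mutex mu;
  int64_t hits GUARDED_BY(mu) = 0;
};

// Reads `hits` without holding `mu`. The read may race with writers, which is
// acceptable for an approximate log line; TS_UNCHECKED_READ makes that intent
// explicit and keeps the thread-safety analysis quiet for this one read.
int64_t ApproximateHits(const Stats& s) {
  return TS_UNCHECKED_READ(s.hits);
}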
+ +#include "absl/base/internal/thread_identity.h" + +#ifndef _WIN32 +#include <pthread.h> +#include <signal.h> +#endif + +#include <atomic> +#include <cassert> +#include <memory> + +#include "absl/base/call_once.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/internal/spinlock.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +#if ABSL_THREAD_IDENTITY_MODE != ABSL_THREAD_IDENTITY_MODE_USE_CPP11 +namespace { +// Used to co-ordinate one-time creation of our pthread_key +absl::once_flag init_thread_identity_key_once; +pthread_key_t thread_identity_pthread_key; +std::atomic<bool> pthread_key_initialized(false); + +void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) { + pthread_key_create(&thread_identity_pthread_key, reclaimer); + pthread_key_initialized.store(true, std::memory_order_release); +} +} // namespace +#endif + +#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \ + ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11 +// The actual TLS storage for a thread's currently associated ThreadIdentity. +// This is referenced by inline accessors in the header. +// "protected" visibility ensures that if multiple instances of Abseil code +// exist within a process (via dlopen() or similar), references to +// thread_identity_ptr from each instance of the code will refer to +// *different* instances of this ptr. +#ifdef __GNUC__ +__attribute__((visibility("protected"))) +#endif // __GNUC__ +#if ABSL_PER_THREAD_TLS +// Prefer __thread to thread_local as benchmarks indicate it is a bit faster. +ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr; +#elif defined(ABSL_HAVE_THREAD_LOCAL) +thread_local ThreadIdentity* thread_identity_ptr = nullptr; +#endif // ABSL_PER_THREAD_TLS +#endif // TLS or CPP11 + +void SetCurrentThreadIdentity( + ThreadIdentity* identity, ThreadIdentityReclaimerFunction reclaimer) { + assert(CurrentThreadIdentityIfPresent() == nullptr); + // Associate our destructor. + // NOTE: This call to pthread_setspecific is currently the only immovable + // barrier to CurrentThreadIdentity() always being async signal safe. +#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC + // NOTE: Not async-safe. But can be open-coded. + absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey, + reclaimer); + +#if defined(__EMSCRIPTEN__) || defined(__MINGW32__) + // Emscripten and MinGW pthread implementations does not support signals. + // See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html + // for more information. + pthread_setspecific(thread_identity_pthread_key, + reinterpret_cast<void*>(identity)); +#else + // We must mask signals around the call to setspecific as with current glibc, + // a concurrent getspecific (needed for GetCurrentThreadIdentityIfPresent()) + // may zero our value. + // + // While not officially async-signal safe, getspecific within a signal handler + // is otherwise OK. + sigset_t all_signals; + sigset_t curr_signals; + sigfillset(&all_signals); + pthread_sigmask(SIG_SETMASK, &all_signals, &curr_signals); + pthread_setspecific(thread_identity_pthread_key, + reinterpret_cast<void*>(identity)); + pthread_sigmask(SIG_SETMASK, &curr_signals, nullptr); +#endif // !__EMSCRIPTEN__ && !__MINGW32__ + +#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS + // NOTE: Not async-safe. But can be open-coded. 
+ absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey, + reclaimer); + pthread_setspecific(thread_identity_pthread_key, + reinterpret_cast<void*>(identity)); + thread_identity_ptr = identity; +#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11 + thread_local std::unique_ptr<ThreadIdentity, ThreadIdentityReclaimerFunction> + holder(identity, reclaimer); + thread_identity_ptr = identity; +#else +#error Unimplemented ABSL_THREAD_IDENTITY_MODE +#endif +} + +#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \ + ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11 + +// Please see the comment on `CurrentThreadIdentityIfPresent` in +// thread_identity.h. Because DLLs cannot expose thread_local variables in +// headers, we opt for the correct-but-slower option of placing the definition +// of this function only in a translation unit inside DLL. +#if defined(ABSL_BUILD_DLL) || defined(ABSL_CONSUME_DLL) +ThreadIdentity* CurrentThreadIdentityIfPresent() { return thread_identity_ptr; } +#endif +#endif + +void ClearCurrentThreadIdentity() { +#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \ + ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11 + thread_identity_ptr = nullptr; +#elif ABSL_THREAD_IDENTITY_MODE == \ + ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC + // pthread_setspecific expected to clear value on destruction + assert(CurrentThreadIdentityIfPresent() == nullptr); +#endif +} + +#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC +ThreadIdentity* CurrentThreadIdentityIfPresent() { + bool initialized = pthread_key_initialized.load(std::memory_order_acquire); + if (!initialized) { + return nullptr; + } + return reinterpret_cast<ThreadIdentity*>( + pthread_getspecific(thread_identity_pthread_key)); +} +#endif + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/internal/thread_identity.h b/third_party/abseil_cpp/absl/base/internal/thread_identity.h new file mode 100644 index 000000000000..ceb109b41c6a --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/thread_identity.h @@ -0,0 +1,259 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Each active thread has an ThreadIdentity that may represent the thread in +// various level interfaces. ThreadIdentity objects are never deallocated. +// When a thread terminates, its ThreadIdentity object may be reused for a +// thread created later. + +#ifndef ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_ +#define ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_ + +#ifndef _WIN32 +#include <pthread.h> +// Defines __GOOGLE_GRTE_VERSION__ (via glibc-specific features.h) when +// supported. 
+#include <unistd.h> +#endif + +#include <atomic> +#include <cstdint> + +#include "absl/base/config.h" +#include "absl/base/internal/per_thread_tls.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +struct SynchLocksHeld; +struct SynchWaitParams; + +namespace base_internal { + +class SpinLock; +struct ThreadIdentity; + +// Used by the implementation of absl::Mutex and absl::CondVar. +struct PerThreadSynch { + // The internal representation of absl::Mutex and absl::CondVar rely + // on the alignment of PerThreadSynch. Both store the address of the + // PerThreadSynch in the high-order bits of their internal state, + // which means the low kLowZeroBits of the address of PerThreadSynch + // must be zero. + static constexpr int kLowZeroBits = 8; + static constexpr int kAlignment = 1 << kLowZeroBits; + + // Returns the associated ThreadIdentity. + // This can be implemented as a cast because we guarantee + // PerThreadSynch is the first element of ThreadIdentity. + ThreadIdentity* thread_identity() { + return reinterpret_cast<ThreadIdentity*>(this); + } + + PerThreadSynch *next; // Circular waiter queue; initialized to 0. + PerThreadSynch *skip; // If non-zero, all entries in Mutex queue + // up to and including "skip" have same + // condition as this, and will be woken later + bool may_skip; // if false while on mutex queue, a mutex unlocker + // is using this PerThreadSynch as a terminator. Its + // skip field must not be filled in because the loop + // might then skip over the terminator. + + // The wait parameters of the current wait. waitp is null if the + // thread is not waiting. Transitions from null to non-null must + // occur before the enqueue commit point (state = kQueued in + // Enqueue() and CondVarEnqueue()). Transitions from non-null to + // null must occur after the wait is finished (state = kAvailable in + // Mutex::Block() and CondVar::WaitCommon()). This field may be + // changed only by the thread that describes this PerThreadSynch. A + // special case is Fer(), which calls Enqueue() on another thread, + // but with an identical SynchWaitParams pointer, thus leaving the + // pointer unchanged. + SynchWaitParams *waitp; + + bool suppress_fatal_errors; // If true, try to proceed even in the face of + // broken invariants. This is used within fatal + // signal handlers to improve the chances of + // debug logging information being output + // successfully. + + intptr_t readers; // Number of readers in mutex. + int priority; // Priority of thread (updated every so often). + + // When priority will next be read (cycles). + int64_t next_priority_read_cycles; + + // State values: + // kAvailable: This PerThreadSynch is available. + // kQueued: This PerThreadSynch is unavailable, it's currently queued on a + // Mutex or CondVar waistlist. + // + // Transitions from kQueued to kAvailable require a release + // barrier. This is needed as a waiter may use "state" to + // independently observe that it's no longer queued. + // + // Transitions from kAvailable to kQueued require no barrier, they + // are externally ordered by the Mutex. + enum State { + kAvailable, + kQueued + }; + std::atomic<State> state; + + bool maybe_unlocking; // Valid at head of Mutex waiter queue; + // true if UnlockSlow could be searching + // for a waiter to wake. Used for an optimization + // in Enqueue(). true is always a valid value. + // Can be reset to false when the unlocker or any + // writer releases the lock, or a reader fully releases + // the lock. 
It may not be set to false by a reader + // that decrements the count to non-zero. + // protected by mutex spinlock + + bool wake; // This thread is to be woken from a Mutex. + + // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the + // waiter is waiting on the mutex as part of a CV Wait or Mutex Await. + // + // The value of "x->cond_waiter" is meaningless if "x" is not on a + // Mutex waiter list. + bool cond_waiter; + + // Locks held; used during deadlock detection. + // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity(). + SynchLocksHeld *all_locks; +}; + +struct ThreadIdentity { + // Must be the first member. The Mutex implementation requires that + // the PerThreadSynch object associated with each thread is + // PerThreadSynch::kAlignment aligned. We provide this alignment on + // ThreadIdentity itself. + PerThreadSynch per_thread_synch; + + // Private: Reserved for absl::synchronization_internal::Waiter. + struct WaiterState { + char data[128]; + } waiter_state; + + // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter(). + std::atomic<int>* blocked_count_ptr; + + // The following variables are mostly read/written just by the + // thread itself. The only exception is that these are read by + // a ticker thread as a hint. + std::atomic<int> ticker; // Tick counter, incremented once per second. + std::atomic<int> wait_start; // Ticker value when thread started waiting. + std::atomic<bool> is_idle; // Has thread become idle yet? + + ThreadIdentity* next; +}; + +// Returns the ThreadIdentity object representing the calling thread; guaranteed +// to be unique for its lifetime. The returned object will remain valid for the +// program's lifetime; although it may be re-assigned to a subsequent thread. +// If one does not exist, return nullptr instead. +// +// Does not malloc(*), and is async-signal safe. +// [*] Technically pthread_setspecific() does malloc on first use; however this +// is handled internally within tcmalloc's initialization already. +// +// New ThreadIdentity objects can be constructed and associated with a thread +// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h. +ThreadIdentity* CurrentThreadIdentityIfPresent(); + +using ThreadIdentityReclaimerFunction = void (*)(void*); + +// Sets the current thread identity to the given value. 'reclaimer' is a +// pointer to the global function for cleaning up instances on thread +// destruction. +void SetCurrentThreadIdentity(ThreadIdentity* identity, + ThreadIdentityReclaimerFunction reclaimer); + +// Removes the currently associated ThreadIdentity from the running thread. +// This must be called from inside the ThreadIdentityReclaimerFunction, and only +// from that function. 
+void ClearCurrentThreadIdentity(); + +// May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE=<mode +// index> +#ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC +#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be direcly set +#else +#define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0 +#endif + +#ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS +#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be direcly set +#else +#define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1 +#endif + +#ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11 +#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be direcly set +#else +#define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2 +#endif + +#ifdef ABSL_THREAD_IDENTITY_MODE +#error ABSL_THREAD_IDENTITY_MODE cannot be direcly set +#elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE) +#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE +#elif defined(_WIN32) && !defined(__MINGW32__) +#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11 +#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \ + (__GOOGLE_GRTE_VERSION__ >= 20140228L) +// Support for async-safe TLS was specifically added in GRTEv4. It's not +// present in the upstream eglibc. +// Note: Current default for production systems. +#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS +#else +#define ABSL_THREAD_IDENTITY_MODE \ + ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC +#endif + +#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \ + ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11 + +#if ABSL_PER_THREAD_TLS +ABSL_CONST_INIT extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* + thread_identity_ptr; +#elif defined(ABSL_HAVE_THREAD_LOCAL) +ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr; +#else +#error Thread-local storage not detected on this platform +#endif + +// thread_local variables cannot be in headers exposed by DLLs. However, it is +// important for performance reasons in general that +// `CurrentThreadIdentityIfPresent` be inlined. This is not possible across a +// DLL boundary so, with DLLs, we opt to have the function not be inlined. Note +// that `CurrentThreadIdentityIfPresent` is declared above so we can exclude +// this entire inline definition when compiling as a DLL. +#if !defined(ABSL_BUILD_DLL) && !defined(ABSL_CONSUME_DLL) +inline ThreadIdentity* CurrentThreadIdentityIfPresent() { + return thread_identity_ptr; +} +#endif + +#elif ABSL_THREAD_IDENTITY_MODE != \ + ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC +#error Unknown ABSL_THREAD_IDENTITY_MODE +#endif + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/thread_identity_benchmark.cc b/third_party/abseil_cpp/absl/base/internal/thread_identity_benchmark.cc new file mode 100644 index 000000000000..0ae10f2b1e8a --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/thread_identity_benchmark.cc @@ -0,0 +1,38 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "benchmark/benchmark.h" +#include "absl/base/internal/thread_identity.h" +#include "absl/synchronization/internal/create_thread_identity.h" +#include "absl/synchronization/internal/per_thread_sem.h" + +namespace { + +void BM_SafeCurrentThreadIdentity(benchmark::State& state) { + for (auto _ : state) { + benchmark::DoNotOptimize( + absl::synchronization_internal::GetOrCreateCurrentThreadIdentity()); + } +} +BENCHMARK(BM_SafeCurrentThreadIdentity); + +void BM_UnsafeCurrentThreadIdentity(benchmark::State& state) { + for (auto _ : state) { + benchmark::DoNotOptimize( + absl::base_internal::CurrentThreadIdentityIfPresent()); + } +} +BENCHMARK(BM_UnsafeCurrentThreadIdentity); + +} // namespace diff --git a/third_party/abseil_cpp/absl/base/internal/thread_identity_test.cc b/third_party/abseil_cpp/absl/base/internal/thread_identity_test.cc new file mode 100644 index 000000000000..624d5b96b6ee --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/thread_identity_test.cc @@ -0,0 +1,129 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/thread_identity.h" + +#include <thread> // NOLINT(build/c++11) +#include <vector> + +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/internal/spinlock.h" +#include "absl/base/macros.h" +#include "absl/base/thread_annotations.h" +#include "absl/synchronization/internal/per_thread_sem.h" +#include "absl/synchronization/mutex.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { +namespace { + +ABSL_CONST_INIT static absl::base_internal::SpinLock map_lock( + absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY); +ABSL_CONST_INIT static int num_identities_reused ABSL_GUARDED_BY(map_lock); + +static const void* const kCheckNoIdentity = reinterpret_cast<void*>(1); + +static void TestThreadIdentityCurrent(const void* assert_no_identity) { + ThreadIdentity* identity; + + // We have to test this conditionally, because if the test framework relies + // on Abseil, then some previous action may have already allocated an + // identity. + if (assert_no_identity == kCheckNoIdentity) { + identity = CurrentThreadIdentityIfPresent(); + EXPECT_TRUE(identity == nullptr); + } + + identity = synchronization_internal::GetOrCreateCurrentThreadIdentity(); + EXPECT_TRUE(identity != nullptr); + ThreadIdentity* identity_no_init; + identity_no_init = CurrentThreadIdentityIfPresent(); + EXPECT_TRUE(identity == identity_no_init); + + // Check that per_thread_synch is correctly aligned. 
+ EXPECT_EQ(0, reinterpret_cast<intptr_t>(&identity->per_thread_synch) % + PerThreadSynch::kAlignment); + EXPECT_EQ(identity, identity->per_thread_synch.thread_identity()); + + absl::base_internal::SpinLockHolder l(&map_lock); + num_identities_reused++; +} + +TEST(ThreadIdentityTest, BasicIdentityWorks) { + // This tests for the main() thread. + TestThreadIdentityCurrent(nullptr); +} + +TEST(ThreadIdentityTest, BasicIdentityWorksThreaded) { + // Now try the same basic test with multiple threads being created and + // destroyed. This makes sure that: + // - New threads are created without a ThreadIdentity. + // - We re-allocate ThreadIdentity objects from the free-list. + // - If a thread implementation chooses to recycle threads, that + // correct re-initialization occurs. + static const int kNumLoops = 3; + static const int kNumThreads = 400; + for (int iter = 0; iter < kNumLoops; iter++) { + std::vector<std::thread> threads; + for (int i = 0; i < kNumThreads; ++i) { + threads.push_back( + std::thread(TestThreadIdentityCurrent, kCheckNoIdentity)); + } + for (auto& thread : threads) { + thread.join(); + } + } + + // We should have recycled ThreadIdentity objects above; while (external) + // library threads allocating their own identities may preclude some + // reuse, we should have sufficient repetitions to exclude this. + absl::base_internal::SpinLockHolder l(&map_lock); + EXPECT_LT(kNumThreads, num_identities_reused); +} + +TEST(ThreadIdentityTest, ReusedThreadIdentityMutexTest) { + // This test repeatly creates and joins a series of threads, each of + // which acquires and releases shared Mutex locks. This verifies + // Mutex operations work correctly under a reused + // ThreadIdentity. Note that the most likely failure mode of this + // test is a crash or deadlock. + static const int kNumLoops = 10; + static const int kNumThreads = 12; + static const int kNumMutexes = 3; + static const int kNumLockLoops = 5; + + Mutex mutexes[kNumMutexes]; + for (int iter = 0; iter < kNumLoops; ++iter) { + std::vector<std::thread> threads; + for (int thread = 0; thread < kNumThreads; ++thread) { + threads.push_back(std::thread([&]() { + for (int l = 0; l < kNumLockLoops; ++l) { + for (int m = 0; m < kNumMutexes; ++m) { + MutexLock lock(&mutexes[m]); + } + } + })); + } + for (auto& thread : threads) { + thread.join(); + } + } +} + +} // namespace +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/internal/throw_delegate.cc b/third_party/abseil_cpp/absl/base/internal/throw_delegate.cc new file mode 100644 index 000000000000..c055f75d9d28 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/throw_delegate.cc @@ -0,0 +1,108 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
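The alignment check in thread_identity_test.cc above exercises the contract documented in thread_identity.h: because ThreadIdentity begins with a PerThreadSynch and is aligned to PerThreadSynch::kAlignment (256 bytes), the low 8 bits of the pointer are always zero and can carry flag bits packed into the same word. A rough illustration of that kind of packing, using hypothetical names rather than Abseil's actual Mutex representation:

#include <cassert>
#include <cstdint>

// 256-byte alignment, like PerThreadSynch::kAlignment, guarantees the low
// 8 bits of any Waiter* are zero, so they can carry small flags.
struct alignas(256) Waiter { int dummy; };

constexpr std::uintptr_t kFlagMask = 0xFF;

std::uintptr_t Pack(Waiter* w, std::uintptr_t flags) {
  auto bits = reinterpret_cast<std::uintptr_t>(w);
  assert((bits & kFlagMask) == 0);  // holds because of the alignment
  return bits | (flags & kFlagMask);
}

Waiter* UnpackPointer(std::uintptr_t state) {
  return reinterpret_cast<Waiter*>(state & ~kFlagMask);
}

int main() {
  static Waiter w;
  std::uintptr_t state = Pack(&w, 0x3);
  assert(UnpackPointer(state) == &w);
  assert((state & kFlagMask) == 0x3);
}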
+ +#include "absl/base/internal/throw_delegate.h" + +#include <cstdlib> +#include <functional> +#include <new> +#include <stdexcept> +#include "absl/base/config.h" +#include "absl/base/internal/raw_logging.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +namespace { +template <typename T> +[[noreturn]] void Throw(const T& error) { +#ifdef ABSL_HAVE_EXCEPTIONS + throw error; +#else + ABSL_RAW_LOG(FATAL, "%s", error.what()); + std::abort(); +#endif +} +} // namespace + +void ThrowStdLogicError(const std::string& what_arg) { + Throw(std::logic_error(what_arg)); +} +void ThrowStdLogicError(const char* what_arg) { + Throw(std::logic_error(what_arg)); +} +void ThrowStdInvalidArgument(const std::string& what_arg) { + Throw(std::invalid_argument(what_arg)); +} +void ThrowStdInvalidArgument(const char* what_arg) { + Throw(std::invalid_argument(what_arg)); +} + +void ThrowStdDomainError(const std::string& what_arg) { + Throw(std::domain_error(what_arg)); +} +void ThrowStdDomainError(const char* what_arg) { + Throw(std::domain_error(what_arg)); +} + +void ThrowStdLengthError(const std::string& what_arg) { + Throw(std::length_error(what_arg)); +} +void ThrowStdLengthError(const char* what_arg) { + Throw(std::length_error(what_arg)); +} + +void ThrowStdOutOfRange(const std::string& what_arg) { + Throw(std::out_of_range(what_arg)); +} +void ThrowStdOutOfRange(const char* what_arg) { + Throw(std::out_of_range(what_arg)); +} + +void ThrowStdRuntimeError(const std::string& what_arg) { + Throw(std::runtime_error(what_arg)); +} +void ThrowStdRuntimeError(const char* what_arg) { + Throw(std::runtime_error(what_arg)); +} + +void ThrowStdRangeError(const std::string& what_arg) { + Throw(std::range_error(what_arg)); +} +void ThrowStdRangeError(const char* what_arg) { + Throw(std::range_error(what_arg)); +} + +void ThrowStdOverflowError(const std::string& what_arg) { + Throw(std::overflow_error(what_arg)); +} +void ThrowStdOverflowError(const char* what_arg) { + Throw(std::overflow_error(what_arg)); +} + +void ThrowStdUnderflowError(const std::string& what_arg) { + Throw(std::underflow_error(what_arg)); +} +void ThrowStdUnderflowError(const char* what_arg) { + Throw(std::underflow_error(what_arg)); +} + +void ThrowStdBadFunctionCall() { Throw(std::bad_function_call()); } + +void ThrowStdBadAlloc() { Throw(std::bad_alloc()); } + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/internal/throw_delegate.h b/third_party/abseil_cpp/absl/base/internal/throw_delegate.h new file mode 100644 index 000000000000..075f5272543a --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/throw_delegate.h @@ -0,0 +1,75 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
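throw_delegate.cc above defines the ThrowStd* helpers, and the header that follows explains that they exist so header-only code (the comments name map<>::at() as the motivating case) can report errors consistently whether or not a particular caller builds with exceptions. A usage sketch under that assumption; FixedBuffer is a hypothetical type, not part of Abseil:

#include <cstddef>
#include <vector>

#include "absl/base/internal/throw_delegate.h"

// Hypothetical header-only container; not an Abseil type.
template <typename T>
class FixedBuffer {
 public:
  explicit FixedBuffer(std::size_t n) : data_(n) {}
  T& at(std::size_t i) {
    if (i >= data_.size()) {
      // Throws std::out_of_range when throw_delegate.cc was built with
      // exceptions; otherwise logs fatally and aborts, as shown above.
      absl::base_internal::ThrowStdOutOfRange("FixedBuffer::at");
    }
    return data_[i];
  }

 private:
  std::vector<T> data_;
};

int main() {
  FixedBuffer<int> buf(3);
  buf.at(1) = 7;
  return buf.at(1) == 7 ? 0 : 1;
}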
+// + +#ifndef ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ +#define ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ + +#include <string> + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// Helper functions that allow throwing exceptions consistently from anywhere. +// The main use case is for header-based libraries (eg templates), as they will +// be built by many different targets with their own compiler options. +// In particular, this will allow a safe way to throw exceptions even if the +// caller is compiled with -fno-exceptions. This is intended for implementing +// things like map<>::at(), which the standard documents as throwing an +// exception on error. +// +// Using other techniques like #if tricks could lead to ODR violations. +// +// You shouldn't use it unless you're writing code that you know will be built +// both with and without exceptions and you need to conform to an interface +// that uses exceptions. + +[[noreturn]] void ThrowStdLogicError(const std::string& what_arg); +[[noreturn]] void ThrowStdLogicError(const char* what_arg); +[[noreturn]] void ThrowStdInvalidArgument(const std::string& what_arg); +[[noreturn]] void ThrowStdInvalidArgument(const char* what_arg); +[[noreturn]] void ThrowStdDomainError(const std::string& what_arg); +[[noreturn]] void ThrowStdDomainError(const char* what_arg); +[[noreturn]] void ThrowStdLengthError(const std::string& what_arg); +[[noreturn]] void ThrowStdLengthError(const char* what_arg); +[[noreturn]] void ThrowStdOutOfRange(const std::string& what_arg); +[[noreturn]] void ThrowStdOutOfRange(const char* what_arg); +[[noreturn]] void ThrowStdRuntimeError(const std::string& what_arg); +[[noreturn]] void ThrowStdRuntimeError(const char* what_arg); +[[noreturn]] void ThrowStdRangeError(const std::string& what_arg); +[[noreturn]] void ThrowStdRangeError(const char* what_arg); +[[noreturn]] void ThrowStdOverflowError(const std::string& what_arg); +[[noreturn]] void ThrowStdOverflowError(const char* what_arg); +[[noreturn]] void ThrowStdUnderflowError(const std::string& what_arg); +[[noreturn]] void ThrowStdUnderflowError(const char* what_arg); + +[[noreturn]] void ThrowStdBadFunctionCall(); +[[noreturn]] void ThrowStdBadAlloc(); + +// ThrowStdBadArrayNewLength() cannot be consistently supported because +// std::bad_array_new_length is missing in libstdc++ until 4.9.0. +// https://gcc.gnu.org/onlinedocs/gcc-4.8.3/libstdc++/api/a01379_source.html +// https://gcc.gnu.org/onlinedocs/gcc-4.9.0/libstdc++/api/a01327_source.html +// libcxx (as of 3.2) and msvc (as of 2015) both have it. +// [[noreturn]] void ThrowStdBadArrayNewLength(); + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/tsan_mutex_interface.h b/third_party/abseil_cpp/absl/base/internal/tsan_mutex_interface.h new file mode 100644 index 000000000000..2a510603bc8c --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/tsan_mutex_interface.h @@ -0,0 +1,66 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is intended solely for spinlock.h. +// It provides ThreadSanitizer annotations for custom mutexes. +// See <sanitizer/tsan_interface.h> for meaning of these annotations. + +#ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_ +#define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_ + +// ABSL_INTERNAL_HAVE_TSAN_INTERFACE +// Macro intended only for internal use. +// +// Checks whether LLVM Thread Sanitizer interfaces are available. +// First made available in LLVM 5.0 (Sep 2017). +#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE +#error "ABSL_INTERNAL_HAVE_TSAN_INTERFACE cannot be directly set." +#endif + +#if defined(THREAD_SANITIZER) && defined(__has_include) +#if __has_include(<sanitizer/tsan_interface.h>) +#define ABSL_INTERNAL_HAVE_TSAN_INTERFACE 1 +#endif +#endif + +#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE +#include <sanitizer/tsan_interface.h> + +#define ABSL_TSAN_MUTEX_CREATE __tsan_mutex_create +#define ABSL_TSAN_MUTEX_DESTROY __tsan_mutex_destroy +#define ABSL_TSAN_MUTEX_PRE_LOCK __tsan_mutex_pre_lock +#define ABSL_TSAN_MUTEX_POST_LOCK __tsan_mutex_post_lock +#define ABSL_TSAN_MUTEX_PRE_UNLOCK __tsan_mutex_pre_unlock +#define ABSL_TSAN_MUTEX_POST_UNLOCK __tsan_mutex_post_unlock +#define ABSL_TSAN_MUTEX_PRE_SIGNAL __tsan_mutex_pre_signal +#define ABSL_TSAN_MUTEX_POST_SIGNAL __tsan_mutex_post_signal +#define ABSL_TSAN_MUTEX_PRE_DIVERT __tsan_mutex_pre_divert +#define ABSL_TSAN_MUTEX_POST_DIVERT __tsan_mutex_post_divert + +#else + +#define ABSL_TSAN_MUTEX_CREATE(...) +#define ABSL_TSAN_MUTEX_DESTROY(...) +#define ABSL_TSAN_MUTEX_PRE_LOCK(...) +#define ABSL_TSAN_MUTEX_POST_LOCK(...) +#define ABSL_TSAN_MUTEX_PRE_UNLOCK(...) +#define ABSL_TSAN_MUTEX_POST_UNLOCK(...) +#define ABSL_TSAN_MUTEX_PRE_SIGNAL(...) +#define ABSL_TSAN_MUTEX_POST_SIGNAL(...) +#define ABSL_TSAN_MUTEX_PRE_DIVERT(...) +#define ABSL_TSAN_MUTEX_POST_DIVERT(...) + +#endif + +#endif // ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/unaligned_access.h b/third_party/abseil_cpp/absl/base/internal/unaligned_access.h new file mode 100644 index 000000000000..6be56c865b3d --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/unaligned_access.h @@ -0,0 +1,158 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#ifndef ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_ +#define ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_ + +#include <string.h> + +#include <cstdint> + +#include "absl/base/attributes.h" +#include "absl/base/config.h" + +// unaligned APIs + +// Portable handling of unaligned loads, stores, and copies. + +// The unaligned API is C++ only. The declarations use C++ features +// (namespaces, inline) which are absent or incompatible in C. +#if defined(__cplusplus) + +#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) ||\ + defined(MEMORY_SANITIZER) +// Consider we have an unaligned load/store of 4 bytes from address 0x...05. +// AddressSanitizer will treat it as a 3-byte access to the range 05:07 and +// will miss a bug if 08 is the first unaddressable byte. +// ThreadSanitizer will also treat this as a 3-byte access to 05:07 and will +// miss a race between this access and some other accesses to 08. +// MemorySanitizer will correctly propagate the shadow on unaligned stores +// and correctly report bugs on unaligned loads, but it may not properly +// update and report the origin of the uninitialized memory. +// For all three tools, replacing an unaligned access with a tool-specific +// callback solves the problem. + +// Make sure uint16_t/uint32_t/uint64_t are defined. +#include <stdint.h> + +extern "C" { +uint16_t __sanitizer_unaligned_load16(const void *p); +uint32_t __sanitizer_unaligned_load32(const void *p); +uint64_t __sanitizer_unaligned_load64(const void *p); +void __sanitizer_unaligned_store16(void *p, uint16_t v); +void __sanitizer_unaligned_store32(void *p, uint32_t v); +void __sanitizer_unaligned_store64(void *p, uint64_t v); +} // extern "C" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +inline uint16_t UnalignedLoad16(const void *p) { + return __sanitizer_unaligned_load16(p); +} + +inline uint32_t UnalignedLoad32(const void *p) { + return __sanitizer_unaligned_load32(p); +} + +inline uint64_t UnalignedLoad64(const void *p) { + return __sanitizer_unaligned_load64(p); +} + +inline void UnalignedStore16(void *p, uint16_t v) { + __sanitizer_unaligned_store16(p, v); +} + +inline void UnalignedStore32(void *p, uint32_t v) { + __sanitizer_unaligned_store32(p, v); +} + +inline void UnalignedStore64(void *p, uint64_t v) { + __sanitizer_unaligned_store64(p, v); +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \ + (absl::base_internal::UnalignedLoad16(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \ + (absl::base_internal::UnalignedLoad32(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \ + (absl::base_internal::UnalignedLoad64(_p)) + +#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \ + (absl::base_internal::UnalignedStore16(_p, _val)) +#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \ + (absl::base_internal::UnalignedStore32(_p, _val)) +#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ + (absl::base_internal::UnalignedStore64(_p, _val)) + +#else + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +inline uint16_t UnalignedLoad16(const void *p) { + uint16_t t; + memcpy(&t, p, sizeof t); + return t; +} + +inline uint32_t UnalignedLoad32(const void *p) { + uint32_t t; + memcpy(&t, p, sizeof t); + return t; +} + +inline uint64_t UnalignedLoad64(const void *p) { + uint64_t t; + memcpy(&t, p, sizeof t); + return t; +} + +inline void UnalignedStore16(void *p, uint16_t v) { memcpy(p, &v, sizeof v); } + +inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, 
sizeof v); } + +inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); } + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \ + (absl::base_internal::UnalignedLoad16(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \ + (absl::base_internal::UnalignedLoad32(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \ + (absl::base_internal::UnalignedLoad64(_p)) + +#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \ + (absl::base_internal::UnalignedStore16(_p, _val)) +#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \ + (absl::base_internal::UnalignedStore32(_p, _val)) +#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ + (absl::base_internal::UnalignedStore64(_p, _val)) + +#endif + +#endif // defined(__cplusplus), end of unaligned API + +#endif // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_ diff --git a/third_party/abseil_cpp/absl/base/internal/unique_small_name_test.cc b/third_party/abseil_cpp/absl/base/internal/unique_small_name_test.cc new file mode 100644 index 000000000000..ff8c2b3fb427 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/unique_small_name_test.cc @@ -0,0 +1,77 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "gtest/gtest.h" +#include "absl/base/optimization.h" +#include "absl/strings/string_view.h" + +// This test by itself does not do anything fancy, but it serves as binary I can +// query in shell test. 
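The memcpy-based UnalignedLoad/UnalignedStore fallbacks above are the portable way to access multi-byte values at addresses that may be misaligned; compilers usually lower the memcpy to a single load or store where the hardware allows it. A small usage sketch; the buffer contents and offsets are illustrative, and the printed value assumes a little-endian host:

#include <cstdint>
#include <cstdio>
#include <cstring>

uint32_t LoadU32(const void* p) {  // same shape as UnalignedLoad32 above
  uint32_t v;
  std::memcpy(&v, p, sizeof v);
  return v;
}

int main() {
  unsigned char buf[8] = {0x00, 0x78, 0x56, 0x34, 0x12, 0x00, 0x00, 0x00};
  // &buf[1] is typically not 4-byte aligned; memcpy keeps the read
  // well-defined regardless of alignment.
  std::printf("0x%08x\n", LoadU32(&buf[1]));  // 0x12345678 on little-endian
}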
+ +namespace { + +template <class T> +void DoNotOptimize(const T& var) { +#ifdef __GNUC__ + asm volatile("" : "+m"(const_cast<T&>(var))); +#else + std::cout << (void*)&var; +#endif +} + +int very_long_int_variable_name ABSL_INTERNAL_UNIQUE_SMALL_NAME() = 0; +char very_long_str_variable_name[] ABSL_INTERNAL_UNIQUE_SMALL_NAME() = "abc"; + +TEST(UniqueSmallName, NonAutomaticVar) { + EXPECT_EQ(very_long_int_variable_name, 0); + EXPECT_EQ(absl::string_view(very_long_str_variable_name), "abc"); +} + +int VeryLongFreeFunctionName() ABSL_INTERNAL_UNIQUE_SMALL_NAME(); + +TEST(UniqueSmallName, FreeFunction) { + DoNotOptimize(&VeryLongFreeFunctionName); + + EXPECT_EQ(VeryLongFreeFunctionName(), 456); +} + +int VeryLongFreeFunctionName() { return 456; } + +struct VeryLongStructName { + explicit VeryLongStructName(int i); + + int VeryLongMethodName() ABSL_INTERNAL_UNIQUE_SMALL_NAME(); + + static int VeryLongStaticMethodName() ABSL_INTERNAL_UNIQUE_SMALL_NAME(); + + private: + int fld; +}; + +TEST(UniqueSmallName, Struct) { + VeryLongStructName var(10); + + DoNotOptimize(var); + DoNotOptimize(&VeryLongStructName::VeryLongMethodName); + DoNotOptimize(&VeryLongStructName::VeryLongStaticMethodName); + + EXPECT_EQ(var.VeryLongMethodName(), 10); + EXPECT_EQ(VeryLongStructName::VeryLongStaticMethodName(), 123); +} + +VeryLongStructName::VeryLongStructName(int i) : fld(i) {} +int VeryLongStructName::VeryLongMethodName() { return fld; } +int VeryLongStructName::VeryLongStaticMethodName() { return 123; } + +} // namespace diff --git a/third_party/abseil_cpp/absl/base/internal/unscaledcycleclock.cc b/third_party/abseil_cpp/absl/base/internal/unscaledcycleclock.cc new file mode 100644 index 000000000000..f1e7bbef844a --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/unscaledcycleclock.cc @@ -0,0 +1,140 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
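The DoNotOptimize helper in unique_small_name_test.cc above uses an empty asm statement with a "+m" operand, which tells GCC/Clang that the variable may be read and modified by the asm, so the work that produced it cannot be folded away. A compact sketch of the same idiom in a timing-style loop; KeepAlive and the constants are illustrative:

#include <cstdint>
#include <cstdio>

template <class T>
inline void KeepAlive(const T& var) {  // same idiom as DoNotOptimize above
#if defined(__GNUC__)
  // The empty asm claims it may read and write `var` in memory, so the
  // compiler must actually materialize the value each iteration.
  asm volatile("" : "+m"(const_cast<T&>(var)));
#else
  (void)var;  // no equivalent guarantee on other compilers
#endif
}

int main() {
  uint64_t total = 0;
  for (int i = 0; i < 1000000; ++i) {
    uint64_t x = static_cast<uint64_t>(i) * 2654435761u;  // work under test
    KeepAlive(x);  // keeps the multiply from being folded away
    total += x;
  }
  std::printf("%llu\n", static_cast<unsigned long long>(total));
}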
+ +#include "absl/base/internal/unscaledcycleclock.h" + +#if ABSL_USE_UNSCALED_CYCLECLOCK + +#if defined(_WIN32) +#include <intrin.h> +#endif + +#if defined(__powerpc__) || defined(__ppc__) +#ifdef __GLIBC__ +#include <sys/platform/ppc.h> +#elif defined(__FreeBSD__) +#include <sys/sysctl.h> +#include <sys/types.h> +#endif +#endif + +#include "absl/base/internal/sysinfo.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +#if defined(__i386__) + +int64_t UnscaledCycleClock::Now() { + int64_t ret; + __asm__ volatile("rdtsc" : "=A"(ret)); + return ret; +} + +double UnscaledCycleClock::Frequency() { + return base_internal::NominalCPUFrequency(); +} + +#elif defined(__x86_64__) + +int64_t UnscaledCycleClock::Now() { + uint64_t low, high; + __asm__ volatile("rdtsc" : "=a"(low), "=d"(high)); + return (high << 32) | low; +} + +double UnscaledCycleClock::Frequency() { + return base_internal::NominalCPUFrequency(); +} + +#elif defined(__powerpc__) || defined(__ppc__) + +int64_t UnscaledCycleClock::Now() { +#ifdef __GLIBC__ + return __ppc_get_timebase(); +#else +#ifdef __powerpc64__ + int64_t tbr; + asm volatile("mfspr %0, 268" : "=r"(tbr)); + return tbr; +#else + int32_t tbu, tbl, tmp; + asm volatile( + "0:\n" + "mftbu %[hi32]\n" + "mftb %[lo32]\n" + "mftbu %[tmp]\n" + "cmpw %[tmp],%[hi32]\n" + "bne 0b\n" + : [ hi32 ] "=r"(tbu), [ lo32 ] "=r"(tbl), [ tmp ] "=r"(tmp)); + return (static_cast<int64_t>(tbu) << 32) | tbl; +#endif +#endif +} + +double UnscaledCycleClock::Frequency() { +#ifdef __GLIBC__ + return __ppc_get_timebase_freq(); +#elif defined(__FreeBSD__) + static once_flag init_timebase_frequency_once; + static double timebase_frequency = 0.0; + base_internal::LowLevelCallOnce(&init_timebase_frequency_once, [&]() { + size_t length = sizeof(timebase_frequency); + sysctlbyname("kern.timecounter.tc.timebase.frequency", &timebase_frequency, + &length, nullptr, 0); + }); + return timebase_frequency; +#else +#error Must implement UnscaledCycleClock::Frequency() +#endif +} + +#elif defined(__aarch64__) + +// System timer of ARMv8 runs at a different frequency than the CPU's. +// The frequency is fixed, typically in the range 1-50MHz. It can be +// read at CNTFRQ special register. We assume the OS has set up +// the virtual timer properly. +int64_t UnscaledCycleClock::Now() { + int64_t virtual_timer_value; + asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value)); + return virtual_timer_value; +} + +double UnscaledCycleClock::Frequency() { + uint64_t aarch64_timer_frequency; + asm volatile("mrs %0, cntfrq_el0" : "=r"(aarch64_timer_frequency)); + return aarch64_timer_frequency; +} + +#elif defined(_M_IX86) || defined(_M_X64) + +#pragma intrinsic(__rdtsc) + +int64_t UnscaledCycleClock::Now() { + return __rdtsc(); +} + +double UnscaledCycleClock::Frequency() { + return base_internal::NominalCPUFrequency(); +} + +#endif + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_USE_UNSCALED_CYCLECLOCK diff --git a/third_party/abseil_cpp/absl/base/internal/unscaledcycleclock.h b/third_party/abseil_cpp/absl/base/internal/unscaledcycleclock.h new file mode 100644 index 000000000000..cdce9bf8a83c --- /dev/null +++ b/third_party/abseil_cpp/absl/base/internal/unscaledcycleclock.h @@ -0,0 +1,124 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// UnscaledCycleClock +// An UnscaledCycleClock yields the value and frequency of a cycle counter +// that increments at a rate that is approximately constant. +// This class is for internal / whitelisted use only, you should consider +// using CycleClock instead. +// +// Notes: +// The cycle counter frequency is not necessarily the core clock frequency. +// That is, CycleCounter cycles are not necessarily "CPU cycles". +// +// An arbitrary offset may have been added to the counter at power on. +// +// On some platforms, the rate and offset of the counter may differ +// slightly when read from different CPUs of a multiprocessor. Usually, +// we try to ensure that the operating system adjusts values periodically +// so that values agree approximately. If you need stronger guarantees, +// consider using alternate interfaces. +// +// The CPU is not required to maintain the ordering of a cycle counter read +// with respect to surrounding instructions. + +#ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_ +#define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_ + +#include <cstdint> + +#if defined(__APPLE__) +#include <TargetConditionals.h> +#endif + +#include "absl/base/port.h" + +// The following platforms have an implementation of a hardware counter. +#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \ + defined(__powerpc__) || defined(__ppc__) || \ + defined(_M_IX86) || defined(_M_X64) +#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1 +#else +#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0 +#endif + +// The following platforms often disable access to the hardware +// counter (through a sandbox) even if the underlying hardware has a +// usable counter. The CycleTimer interface also requires a *scaled* +// CycleClock that runs at atleast 1 MHz. We've found some Android +// ARM64 devices where this is not the case, so we disable it by +// default on Android ARM64. +#if defined(__native_client__) || \ + (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || \ + (defined(__ANDROID__) && defined(__aarch64__)) +#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0 +#else +#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 1 +#endif + +// UnscaledCycleClock is an optional internal feature. +// Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence. +// Can be overridden at compile-time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1 +#if !defined(ABSL_USE_UNSCALED_CYCLECLOCK) +#define ABSL_USE_UNSCALED_CYCLECLOCK \ + (ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \ + ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT) +#endif + +#if ABSL_USE_UNSCALED_CYCLECLOCK + +// This macro can be used to test if UnscaledCycleClock::Frequency() +// is NominalCPUFrequency() on a particular platform. 
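unscaledcycleclock.cc above reads raw hardware counters (rdtsc, the PowerPC time base, or cntvct_el0), and this header stresses that the counter frequency is not necessarily the CPU clock. Converting a raw delta into time means dividing by whatever Frequency() reports. A rough x86-64-only sketch using the same rdtsc sequence as the Now() implementation above, with an assumed rather than measured frequency:

#include <cstdint>
#include <cstdio>

int64_t ReadTsc() {  // same instruction sequence as the x86-64 Now() above
  uint64_t low, high;
  __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
  return static_cast<int64_t>((high << 32) | low);
}

int main() {
  const double kAssumedHz = 3.0e9;  // placeholder only, not a measured value
  int64_t start = ReadTsc();
  volatile double sink = 0;
  for (int i = 0; i < 1000000; ++i) sink += i * 0.5;  // arbitrary work
  int64_t stop = ReadTsc();
  std::printf("elapsed ~%.6f s at an assumed %.1f GHz\n",
              (stop - start) / kAssumedHz, kAssumedHz / 1e9);
}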
+#if (defined(__i386__) || defined(__x86_64__) || \ + defined(_M_IX86) || defined(_M_X64)) +#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace time_internal { +class UnscaledCycleClockWrapperForGetCurrentTime; +} // namespace time_internal + +namespace base_internal { +class CycleClock; +class UnscaledCycleClockWrapperForInitializeFrequency; + +class UnscaledCycleClock { + private: + UnscaledCycleClock() = delete; + + // Return the value of a cycle counter that counts at a rate that is + // approximately constant. + static int64_t Now(); + + // Return the how much UnscaledCycleClock::Now() increases per second. + // This is not necessarily the core CPU clock frequency. + // It may be the nominal value report by the kernel, rather than a measured + // value. + static double Frequency(); + + // Whitelisted friends. + friend class base_internal::CycleClock; + friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime; + friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency; +}; + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_USE_UNSCALED_CYCLECLOCK + +#endif // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_ diff --git a/third_party/abseil_cpp/absl/base/invoke_test.cc b/third_party/abseil_cpp/absl/base/invoke_test.cc new file mode 100644 index 000000000000..6aa613c9136c --- /dev/null +++ b/third_party/abseil_cpp/absl/base/invoke_test.cc @@ -0,0 +1,223 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/invoke.h" + +#include <functional> +#include <memory> +#include <string> +#include <utility> + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/memory/memory.h" +#include "absl/strings/str_cat.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { +namespace { + +int Function(int a, int b) { return a - b; } + +int Sink(std::unique_ptr<int> p) { + return *p; +} + +std::unique_ptr<int> Factory(int n) { + return make_unique<int>(n); +} + +void NoOp() {} + +struct ConstFunctor { + int operator()(int a, int b) const { return a - b; } +}; + +struct MutableFunctor { + int operator()(int a, int b) { return a - b; } +}; + +struct EphemeralFunctor { + int operator()(int a, int b) && { return a - b; } +}; + +struct OverloadedFunctor { + template <typename... Args> + std::string operator()(const Args&... args) & { + return StrCat("&", args...); + } + template <typename... Args> + std::string operator()(const Args&... args) const& { + return StrCat("const&", args...); + } + template <typename... Args> + std::string operator()(const Args&... 
args) && { + return StrCat("&&", args...); + } +}; + +struct Class { + int Method(int a, int b) { return a - b; } + int ConstMethod(int a, int b) const { return a - b; } + int RefMethod(int a, int b) & { return a - b; } + int RefRefMethod(int a, int b) && { return a - b; } + int NoExceptMethod(int a, int b) noexcept { return a - b; } + int VolatileMethod(int a, int b) volatile { return a - b; } + + int member; +}; + +struct FlipFlop { + int ConstMethod() const { return member; } + FlipFlop operator*() const { return {-member}; } + + int member; +}; + +// CallMaybeWithArg(f) resolves either to Invoke(f) or Invoke(f, 42), depending +// on which one is valid. +template <typename F> +decltype(Invoke(std::declval<const F&>())) CallMaybeWithArg(const F& f) { + return Invoke(f); +} + +template <typename F> +decltype(Invoke(std::declval<const F&>(), 42)) CallMaybeWithArg(const F& f) { + return Invoke(f, 42); +} + +TEST(InvokeTest, Function) { + EXPECT_EQ(1, Invoke(Function, 3, 2)); + EXPECT_EQ(1, Invoke(&Function, 3, 2)); +} + +TEST(InvokeTest, NonCopyableArgument) { + EXPECT_EQ(42, Invoke(Sink, make_unique<int>(42))); +} + +TEST(InvokeTest, NonCopyableResult) { + EXPECT_THAT(Invoke(Factory, 42), ::testing::Pointee(42)); +} + +TEST(InvokeTest, VoidResult) { + Invoke(NoOp); +} + +TEST(InvokeTest, ConstFunctor) { + EXPECT_EQ(1, Invoke(ConstFunctor(), 3, 2)); +} + +TEST(InvokeTest, MutableFunctor) { + MutableFunctor f; + EXPECT_EQ(1, Invoke(f, 3, 2)); + EXPECT_EQ(1, Invoke(MutableFunctor(), 3, 2)); +} + +TEST(InvokeTest, EphemeralFunctor) { + EphemeralFunctor f; + EXPECT_EQ(1, Invoke(std::move(f), 3, 2)); + EXPECT_EQ(1, Invoke(EphemeralFunctor(), 3, 2)); +} + +TEST(InvokeTest, OverloadedFunctor) { + OverloadedFunctor f; + const OverloadedFunctor& cf = f; + + EXPECT_EQ("&", Invoke(f)); + EXPECT_EQ("& 42", Invoke(f, " 42")); + + EXPECT_EQ("const&", Invoke(cf)); + EXPECT_EQ("const& 42", Invoke(cf, " 42")); + + EXPECT_EQ("&&", Invoke(std::move(f))); + EXPECT_EQ("&& 42", Invoke(std::move(f), " 42")); +} + +TEST(InvokeTest, ReferenceWrapper) { + ConstFunctor cf; + MutableFunctor mf; + EXPECT_EQ(1, Invoke(std::cref(cf), 3, 2)); + EXPECT_EQ(1, Invoke(std::ref(cf), 3, 2)); + EXPECT_EQ(1, Invoke(std::ref(mf), 3, 2)); +} + +TEST(InvokeTest, MemberFunction) { + std::unique_ptr<Class> p(new Class); + std::unique_ptr<const Class> cp(new Class); + std::unique_ptr<volatile Class> vp(new Class); + + EXPECT_EQ(1, Invoke(&Class::Method, p, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::Method, p.get(), 3, 2)); + EXPECT_EQ(1, Invoke(&Class::Method, *p, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::RefMethod, p, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::RefMethod, p.get(), 3, 2)); + EXPECT_EQ(1, Invoke(&Class::RefMethod, *p, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::RefRefMethod, std::move(*p), 3, 2)); // NOLINT + EXPECT_EQ(1, Invoke(&Class::NoExceptMethod, p, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::NoExceptMethod, p.get(), 3, 2)); + EXPECT_EQ(1, Invoke(&Class::NoExceptMethod, *p, 3, 2)); + + EXPECT_EQ(1, Invoke(&Class::ConstMethod, p, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::ConstMethod, p.get(), 3, 2)); + EXPECT_EQ(1, Invoke(&Class::ConstMethod, *p, 3, 2)); + + EXPECT_EQ(1, Invoke(&Class::ConstMethod, cp, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::ConstMethod, cp.get(), 3, 2)); + EXPECT_EQ(1, Invoke(&Class::ConstMethod, *cp, 3, 2)); + + EXPECT_EQ(1, Invoke(&Class::VolatileMethod, p, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::VolatileMethod, p.get(), 3, 2)); + EXPECT_EQ(1, Invoke(&Class::VolatileMethod, *p, 3, 2)); + EXPECT_EQ(1, 
Invoke(&Class::VolatileMethod, vp, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::VolatileMethod, vp.get(), 3, 2)); + EXPECT_EQ(1, Invoke(&Class::VolatileMethod, *vp, 3, 2)); + + EXPECT_EQ(1, Invoke(&Class::Method, make_unique<Class>(), 3, 2)); + EXPECT_EQ(1, Invoke(&Class::ConstMethod, make_unique<Class>(), 3, 2)); + EXPECT_EQ(1, Invoke(&Class::ConstMethod, make_unique<const Class>(), 3, 2)); +} + +TEST(InvokeTest, DataMember) { + std::unique_ptr<Class> p(new Class{42}); + std::unique_ptr<const Class> cp(new Class{42}); + EXPECT_EQ(42, Invoke(&Class::member, p)); + EXPECT_EQ(42, Invoke(&Class::member, *p)); + EXPECT_EQ(42, Invoke(&Class::member, p.get())); + + Invoke(&Class::member, p) = 42; + Invoke(&Class::member, p.get()) = 42; + + EXPECT_EQ(42, Invoke(&Class::member, cp)); + EXPECT_EQ(42, Invoke(&Class::member, *cp)); + EXPECT_EQ(42, Invoke(&Class::member, cp.get())); +} + +TEST(InvokeTest, FlipFlop) { + FlipFlop obj = {42}; + // This call could resolve to (obj.*&FlipFlop::ConstMethod)() or + // ((*obj).*&FlipFlop::ConstMethod)(). We verify that it's the former. + EXPECT_EQ(42, Invoke(&FlipFlop::ConstMethod, obj)); + EXPECT_EQ(42, Invoke(&FlipFlop::member, obj)); +} + +TEST(InvokeTest, SfinaeFriendly) { + CallMaybeWithArg(NoOp); + EXPECT_THAT(CallMaybeWithArg(Factory), ::testing::Pointee(42)); +} + +} // namespace +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/log_severity.cc b/third_party/abseil_cpp/absl/base/log_severity.cc new file mode 100644 index 000000000000..72312afd367e --- /dev/null +++ b/third_party/abseil_cpp/absl/base/log_severity.cc @@ -0,0 +1,27 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/log_severity.h" + +#include <ostream> + +namespace absl { +ABSL_NAMESPACE_BEGIN + +std::ostream& operator<<(std::ostream& os, absl::LogSeverity s) { + if (s == absl::NormalizeLogSeverity(s)) return os << absl::LogSeverityName(s); + return os << "absl::LogSeverity(" << static_cast<int>(s) << ")"; +} +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/log_severity.h b/third_party/abseil_cpp/absl/base/log_severity.h new file mode 100644 index 000000000000..65a3b1667270 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/log_severity.h @@ -0,0 +1,121 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
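invoke_test.cc above exercises base_internal::Invoke with member functions, data members, raw pointers, reference wrappers, and smart pointers; these are essentially the INVOKE semantics that C++17 later standardized as std::invoke. A quick illustration using the standard facility (this is standard-library code, not the internal API, and requires C++17):

#include <cassert>
#include <functional>
#include <memory>

struct Class {
  int Method(int a, int b) { return a - b; }
  int member;
};

int main() {
  Class c{7};
  auto p = std::make_unique<Class>();

  assert(std::invoke(&Class::Method, c, 3, 2) == 1);       // object
  assert(std::invoke(&Class::Method, &c, 3, 2) == 1);      // raw pointer
  assert(std::invoke(&Class::Method, p, 3, 2) == 1);       // smart pointer
  assert(std::invoke(&Class::member, std::cref(c)) == 7);  // data member
}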
+ +#ifndef ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ +#define ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ + +#include <array> +#include <ostream> + +#include "absl/base/attributes.h" +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// absl::LogSeverity +// +// Four severity levels are defined. Logging APIs should terminate the program +// when a message is logged at severity `kFatal`; the other levels have no +// special semantics. +// +// Values other than the four defined levels (e.g. produced by `static_cast`) +// are valid, but their semantics when passed to a function, macro, or flag +// depend on the function, macro, or flag. The usual behavior is to normalize +// such values to a defined severity level, however in some cases values other +// than the defined levels are useful for comparison. +// +// Exmaple: +// +// // Effectively disables all logging: +// SetMinLogLevel(static_cast<absl::LogSeverity>(100)); +// +// Abseil flags may be defined with type `LogSeverity`. Dependency layering +// constraints require that the `AbslParseFlag()` overload be declared and +// defined in the flags library itself rather than here. The `AbslUnparseFlag()` +// overload is defined there as well for consistency. +// +// absl::LogSeverity Flag String Representation +// +// An `absl::LogSeverity` has a string representation used for parsing +// command-line flags based on the enumerator name (e.g. `kFatal`) or +// its unprefixed name (without the `k`) in any case-insensitive form. (E.g. +// "FATAL", "fatal" or "Fatal" are all valid.) Unparsing such flags produces an +// unprefixed string representation in all caps (e.g. "FATAL") or an integer. +// +// Additionally, the parser accepts arbitrary integers (as if the type were +// `int`). +// +// Examples: +// +// --my_log_level=kInfo +// --my_log_level=INFO +// --my_log_level=info +// --my_log_level=0 +// +// Unparsing a flag produces the same result as `absl::LogSeverityName()` for +// the standard levels and a base-ten integer otherwise. +enum class LogSeverity : int { + kInfo = 0, + kWarning = 1, + kError = 2, + kFatal = 3, +}; + +// LogSeverities() +// +// Returns an iterable of all standard `absl::LogSeverity` values, ordered from +// least to most severe. +constexpr std::array<absl::LogSeverity, 4> LogSeverities() { + return {{absl::LogSeverity::kInfo, absl::LogSeverity::kWarning, + absl::LogSeverity::kError, absl::LogSeverity::kFatal}}; +} + +// LogSeverityName() +// +// Returns the all-caps string representation (e.g. "INFO") of the specified +// severity level if it is one of the standard levels and "UNKNOWN" otherwise. +constexpr const char* LogSeverityName(absl::LogSeverity s) { + return s == absl::LogSeverity::kInfo + ? "INFO" + : s == absl::LogSeverity::kWarning + ? "WARNING" + : s == absl::LogSeverity::kError + ? "ERROR" + : s == absl::LogSeverity::kFatal ? "FATAL" : "UNKNOWN"; +} + +// NormalizeLogSeverity() +// +// Values less than `kInfo` normalize to `kInfo`; values greater than `kFatal` +// normalize to `kError` (**NOT** `kFatal`). +constexpr absl::LogSeverity NormalizeLogSeverity(absl::LogSeverity s) { + return s < absl::LogSeverity::kInfo + ? absl::LogSeverity::kInfo + : s > absl::LogSeverity::kFatal ? absl::LogSeverity::kError : s; +} +constexpr absl::LogSeverity NormalizeLogSeverity(int s) { + return absl::NormalizeLogSeverity(static_cast<absl::LogSeverity>(s)); +} + +// operator<< +// +// The exact representation of a streamed `absl::LogSeverity` is deliberately +// unspecified; do not rely on it. 
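The declarations above document that non-standard LogSeverity values are legal, that LogSeverityName() returns "UNKNOWN" for them, and that NormalizeLogSeverity() maps values above kFatal to kError. A short usage sketch; it assumes the Abseil headers are available, and the outputs noted in the comments follow those documented rules:

#include <iostream>

#include "absl/base/log_severity.h"

int main() {
  // A value outside the four standard levels, as in the header's example.
  auto odd = static_cast<absl::LogSeverity>(100);
  std::cout << absl::LogSeverityName(odd) << "\n";  // "UNKNOWN"
  // Values above kFatal normalize to kError, not kFatal.
  std::cout << absl::LogSeverityName(absl::NormalizeLogSeverity(odd))
            << "\n";  // "ERROR"
}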
+std::ostream& operator<<(std::ostream& os, absl::LogSeverity s); + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ diff --git a/third_party/abseil_cpp/absl/base/log_severity_test.cc b/third_party/abseil_cpp/absl/base/log_severity_test.cc new file mode 100644 index 000000000000..2c6872b00a64 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/log_severity_test.cc @@ -0,0 +1,204 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/log_severity.h" + +#include <cstdint> +#include <ios> +#include <limits> +#include <ostream> +#include <sstream> +#include <string> +#include <tuple> + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/flags/internal/flag.h" +#include "absl/flags/marshalling.h" +#include "absl/strings/str_cat.h" + +namespace { +using ::testing::Eq; +using ::testing::IsFalse; +using ::testing::IsTrue; +using ::testing::TestWithParam; +using ::testing::Values; + +std::string StreamHelper(absl::LogSeverity value) { + std::ostringstream stream; + stream << value; + return stream.str(); +} + +TEST(StreamTest, Works) { + EXPECT_THAT(StreamHelper(static_cast<absl::LogSeverity>(-100)), + Eq("absl::LogSeverity(-100)")); + EXPECT_THAT(StreamHelper(absl::LogSeverity::kInfo), Eq("INFO")); + EXPECT_THAT(StreamHelper(absl::LogSeverity::kWarning), Eq("WARNING")); + EXPECT_THAT(StreamHelper(absl::LogSeverity::kError), Eq("ERROR")); + EXPECT_THAT(StreamHelper(absl::LogSeverity::kFatal), Eq("FATAL")); + EXPECT_THAT(StreamHelper(static_cast<absl::LogSeverity>(4)), + Eq("absl::LogSeverity(4)")); +} + +static_assert( + absl::flags_internal::FlagUseOneWordStorage<absl::LogSeverity>::value, + "Flags of type absl::LogSeverity ought to be lock-free."); + +using ParseFlagFromOutOfRangeIntegerTest = TestWithParam<int64_t>; +INSTANTIATE_TEST_SUITE_P( + Instantiation, ParseFlagFromOutOfRangeIntegerTest, + Values(static_cast<int64_t>(std::numeric_limits<int>::min()) - 1, + static_cast<int64_t>(std::numeric_limits<int>::max()) + 1)); +TEST_P(ParseFlagFromOutOfRangeIntegerTest, ReturnsError) { + const std::string to_parse = absl::StrCat(GetParam()); + absl::LogSeverity value; + std::string error; + EXPECT_THAT(absl::ParseFlag(to_parse, &value, &error), IsFalse()) << value; +} + +using ParseFlagFromAlmostOutOfRangeIntegerTest = TestWithParam<int>; +INSTANTIATE_TEST_SUITE_P(Instantiation, + ParseFlagFromAlmostOutOfRangeIntegerTest, + Values(std::numeric_limits<int>::min(), + std::numeric_limits<int>::max())); +TEST_P(ParseFlagFromAlmostOutOfRangeIntegerTest, YieldsExpectedValue) { + const auto expected = static_cast<absl::LogSeverity>(GetParam()); + const std::string to_parse = absl::StrCat(GetParam()); + absl::LogSeverity value; + std::string error; + ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error; + EXPECT_THAT(value, Eq(expected)); +} + +using ParseFlagFromIntegerMatchingEnumeratorTest = + TestWithParam<std::tuple<absl::string_view, absl::LogSeverity>>; 
+INSTANTIATE_TEST_SUITE_P( + Instantiation, ParseFlagFromIntegerMatchingEnumeratorTest, + Values(std::make_tuple("0", absl::LogSeverity::kInfo), + std::make_tuple(" 0", absl::LogSeverity::kInfo), + std::make_tuple("-0", absl::LogSeverity::kInfo), + std::make_tuple("+0", absl::LogSeverity::kInfo), + std::make_tuple("00", absl::LogSeverity::kInfo), + std::make_tuple("0 ", absl::LogSeverity::kInfo), + std::make_tuple("0x0", absl::LogSeverity::kInfo), + std::make_tuple("1", absl::LogSeverity::kWarning), + std::make_tuple("+1", absl::LogSeverity::kWarning), + std::make_tuple("2", absl::LogSeverity::kError), + std::make_tuple("3", absl::LogSeverity::kFatal))); +TEST_P(ParseFlagFromIntegerMatchingEnumeratorTest, YieldsExpectedValue) { + const absl::string_view to_parse = std::get<0>(GetParam()); + const absl::LogSeverity expected = std::get<1>(GetParam()); + absl::LogSeverity value; + std::string error; + ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error; + EXPECT_THAT(value, Eq(expected)); +} + +using ParseFlagFromOtherIntegerTest = + TestWithParam<std::tuple<absl::string_view, int>>; +INSTANTIATE_TEST_SUITE_P(Instantiation, ParseFlagFromOtherIntegerTest, + Values(std::make_tuple("-1", -1), + std::make_tuple("4", 4), + std::make_tuple("010", 10), + std::make_tuple("0x10", 16))); +TEST_P(ParseFlagFromOtherIntegerTest, YieldsExpectedValue) { + const absl::string_view to_parse = std::get<0>(GetParam()); + const auto expected = static_cast<absl::LogSeverity>(std::get<1>(GetParam())); + absl::LogSeverity value; + std::string error; + ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error; + EXPECT_THAT(value, Eq(expected)); +} + +using ParseFlagFromEnumeratorTest = + TestWithParam<std::tuple<absl::string_view, absl::LogSeverity>>; +INSTANTIATE_TEST_SUITE_P( + Instantiation, ParseFlagFromEnumeratorTest, + Values(std::make_tuple("INFO", absl::LogSeverity::kInfo), + std::make_tuple("info", absl::LogSeverity::kInfo), + std::make_tuple("kInfo", absl::LogSeverity::kInfo), + std::make_tuple("iNfO", absl::LogSeverity::kInfo), + std::make_tuple("kInFo", absl::LogSeverity::kInfo), + std::make_tuple("WARNING", absl::LogSeverity::kWarning), + std::make_tuple("warning", absl::LogSeverity::kWarning), + std::make_tuple("kWarning", absl::LogSeverity::kWarning), + std::make_tuple("WaRnInG", absl::LogSeverity::kWarning), + std::make_tuple("KwArNiNg", absl::LogSeverity::kWarning), + std::make_tuple("ERROR", absl::LogSeverity::kError), + std::make_tuple("error", absl::LogSeverity::kError), + std::make_tuple("kError", absl::LogSeverity::kError), + std::make_tuple("eRrOr", absl::LogSeverity::kError), + std::make_tuple("kErRoR", absl::LogSeverity::kError), + std::make_tuple("FATAL", absl::LogSeverity::kFatal), + std::make_tuple("fatal", absl::LogSeverity::kFatal), + std::make_tuple("kFatal", absl::LogSeverity::kFatal), + std::make_tuple("FaTaL", absl::LogSeverity::kFatal), + std::make_tuple("KfAtAl", absl::LogSeverity::kFatal))); +TEST_P(ParseFlagFromEnumeratorTest, YieldsExpectedValue) { + const absl::string_view to_parse = std::get<0>(GetParam()); + const absl::LogSeverity expected = std::get<1>(GetParam()); + absl::LogSeverity value; + std::string error; + ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error; + EXPECT_THAT(value, Eq(expected)); +} + +using ParseFlagFromGarbageTest = TestWithParam<absl::string_view>; +INSTANTIATE_TEST_SUITE_P(Instantiation, ParseFlagFromGarbageTest, + Values("", "\0", " ", "garbage", "kkinfo", "I")); 
+TEST_P(ParseFlagFromGarbageTest, ReturnsError) { + const absl::string_view to_parse = GetParam(); + absl::LogSeverity value; + std::string error; + EXPECT_THAT(absl::ParseFlag(to_parse, &value, &error), IsFalse()) << value; +} + +using UnparseFlagToEnumeratorTest = + TestWithParam<std::tuple<absl::LogSeverity, absl::string_view>>; +INSTANTIATE_TEST_SUITE_P( + Instantiation, UnparseFlagToEnumeratorTest, + Values(std::make_tuple(absl::LogSeverity::kInfo, "INFO"), + std::make_tuple(absl::LogSeverity::kWarning, "WARNING"), + std::make_tuple(absl::LogSeverity::kError, "ERROR"), + std::make_tuple(absl::LogSeverity::kFatal, "FATAL"))); +TEST_P(UnparseFlagToEnumeratorTest, ReturnsExpectedValueAndRoundTrips) { + const absl::LogSeverity to_unparse = std::get<0>(GetParam()); + const absl::string_view expected = std::get<1>(GetParam()); + const std::string stringified_value = absl::UnparseFlag(to_unparse); + EXPECT_THAT(stringified_value, Eq(expected)); + absl::LogSeverity reparsed_value; + std::string error; + EXPECT_THAT(absl::ParseFlag(stringified_value, &reparsed_value, &error), + IsTrue()); + EXPECT_THAT(reparsed_value, Eq(to_unparse)); +} + +using UnparseFlagToOtherIntegerTest = TestWithParam<int>; +INSTANTIATE_TEST_SUITE_P(Instantiation, UnparseFlagToOtherIntegerTest, + Values(std::numeric_limits<int>::min(), -1, 4, + std::numeric_limits<int>::max())); +TEST_P(UnparseFlagToOtherIntegerTest, ReturnsExpectedValueAndRoundTrips) { + const absl::LogSeverity to_unparse = + static_cast<absl::LogSeverity>(GetParam()); + const std::string expected = absl::StrCat(GetParam()); + const std::string stringified_value = absl::UnparseFlag(to_unparse); + EXPECT_THAT(stringified_value, Eq(expected)); + absl::LogSeverity reparsed_value; + std::string error; + EXPECT_THAT(absl::ParseFlag(stringified_value, &reparsed_value, &error), + IsTrue()); + EXPECT_THAT(reparsed_value, Eq(to_unparse)); +} +} // namespace diff --git a/third_party/abseil_cpp/absl/base/macros.h b/third_party/abseil_cpp/absl/base/macros.h new file mode 100644 index 000000000000..ea1da65ba272 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/macros.h @@ -0,0 +1,226 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: macros.h +// ----------------------------------------------------------------------------- +// +// This header file defines the set of language macros used within Abseil code. +// For the set of macros used to determine supported compilers and platforms, +// see absl/base/config.h instead. +// +// This code is compiled directly on many platforms, including client +// platforms like Windows, Mac, and embedded systems. Before making +// any changes here, make sure that you're not breaking any platforms. 
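
A minimal sketch of how ABSL_ARRAYSIZE (documented just below) behaves in practice; the kPrimes array and the surrounding declarations are illustrative assumptions rather than part of the header.

#include "absl/base/macros.h"

// Hypothetical array used only for illustration.
constexpr int kPrimes[] = {2, 3, 5, 7, 11};

// ABSL_ARRAYSIZE expands to a compile-time constant, so it can size new
// arrays and appear in a static_assert; applying it to a pointer instead of
// an array fails to compile.
static_assert(ABSL_ARRAYSIZE(kPrimes) == 5, "expected five primes");
int doubled_primes[ABSL_ARRAYSIZE(kPrimes)] = {};

Because the helper function is only declared, never defined, the macro stays a pure compile-time construct with no runtime footprint.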
+ +#ifndef ABSL_BASE_MACROS_H_ +#define ABSL_BASE_MACROS_H_ + +#include <cassert> +#include <cstddef> + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/optimization.h" +#include "absl/base/port.h" + +// ABSL_ARRAYSIZE() +// +// Returns the number of elements in an array as a compile-time constant, which +// can be used in defining new arrays. If you use this macro on a pointer by +// mistake, you will get a compile-time error. +#define ABSL_ARRAYSIZE(array) \ + (sizeof(::absl::macros_internal::ArraySizeHelper(array))) + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace macros_internal { +// Note: this internal template function declaration is used by ABSL_ARRAYSIZE. +// The function doesn't need a definition, as we only use its type. +template <typename T, size_t N> +auto ArraySizeHelper(const T (&array)[N]) -> char (&)[N]; +} // namespace macros_internal +ABSL_NAMESPACE_END +} // namespace absl + +// ABSL_FALLTHROUGH_INTENDED +// +// Annotates implicit fall-through between switch labels, allowing a case to +// indicate intentional fallthrough and turn off warnings about any lack of a +// `break` statement. The ABSL_FALLTHROUGH_INTENDED macro should be followed by +// a semicolon and can be used in most places where `break` can, provided that +// no statements exist between it and the next switch label. +// +// Example: +// +// switch (x) { +// case 40: +// case 41: +// if (truth_is_out_there) { +// ++x; +// ABSL_FALLTHROUGH_INTENDED; // Use instead of/along with annotations +// // in comments +// } else { +// return x; +// } +// case 42: +// ... +// +// Notes: when compiled with clang in C++11 mode, the ABSL_FALLTHROUGH_INTENDED +// macro is expanded to the [[clang::fallthrough]] attribute, which is analysed +// when performing switch labels fall-through diagnostic +// (`-Wimplicit-fallthrough`). See clang documentation on language extensions +// for details: +// https://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough +// +// When used with unsupported compilers, the ABSL_FALLTHROUGH_INTENDED macro +// has no effect on diagnostics. In any case this macro has no effect on runtime +// behavior and performance of code. +#ifdef ABSL_FALLTHROUGH_INTENDED +#error "ABSL_FALLTHROUGH_INTENDED should not be defined." +#endif + +// TODO(zhangxy): Use c++17 standard [[fallthrough]] macro, when supported. +#if defined(__clang__) && defined(__has_warning) +#if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough") +#define ABSL_FALLTHROUGH_INTENDED [[clang::fallthrough]] +#endif +#elif defined(__GNUC__) && __GNUC__ >= 7 +#define ABSL_FALLTHROUGH_INTENDED [[gnu::fallthrough]] +#endif + +#ifndef ABSL_FALLTHROUGH_INTENDED +#define ABSL_FALLTHROUGH_INTENDED \ + do { \ + } while (0) +#endif + +// ABSL_DEPRECATED() +// +// Marks a deprecated class, struct, enum, function, method and variable +// declarations. The macro argument is used as a custom diagnostic message (e.g. +// suggestion of a better alternative). +// +// Examples: +// +// class ABSL_DEPRECATED("Use Bar instead") Foo {...}; +// +// ABSL_DEPRECATED("Use Baz() instead") void Bar() {...} +// +// template <typename T> +// ABSL_DEPRECATED("Use DoThat() instead") +// void DoThis(); +// +// Every usage of a deprecated entity will trigger a warning when compiled with +// clang's `-Wdeprecated-declarations` option. This option is turned off by +// default, but the warnings will be reported by clang-tidy. 
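
The declaration-side examples above do not show a call site, so the following sketch, assuming hypothetical OldSum and NewSum functions, illustrates where the clang -Wdeprecated-declarations warning is actually reported.

#include "absl/base/macros.h"

// Hypothetical legacy function kept only for compatibility.
ABSL_DEPRECATED("Use NewSum() instead")
inline int OldSum(int a, int b) { return a + b; }

inline int NewSum(int a, int b) { return a + b; }

inline int CallSite() {
  // When building with clang and -Wdeprecated-declarations enabled, this call
  // is reported with the message given in the attribute; the code still
  // compiles and runs unchanged.
  return OldSum(1, 2);
}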
+#if defined(__clang__) && __cplusplus >= 201103L +#define ABSL_DEPRECATED(message) __attribute__((deprecated(message))) +#endif + +#ifndef ABSL_DEPRECATED +#define ABSL_DEPRECATED(message) +#endif + +// ABSL_BAD_CALL_IF() +// +// Used on a function overload to trap bad calls: any call that matches the +// overload will cause a compile-time error. This macro uses a clang-specific +// "enable_if" attribute, as described at +// https://clang.llvm.org/docs/AttributeReference.html#enable-if +// +// Overloads which use this macro should be bracketed by +// `#ifdef ABSL_BAD_CALL_IF`. +// +// Example: +// +// int isdigit(int c); +// #ifdef ABSL_BAD_CALL_IF +// int isdigit(int c) +// ABSL_BAD_CALL_IF(c <= -1 || c > 255, +// "'c' must have the value of an unsigned char or EOF"); +// #endif // ABSL_BAD_CALL_IF +#if ABSL_HAVE_ATTRIBUTE(enable_if) +#define ABSL_BAD_CALL_IF(expr, msg) \ + __attribute__((enable_if(expr, "Bad call trap"), unavailable(msg))) +#endif + +// ABSL_ASSERT() +// +// In C++11, `assert` can't be used portably within constexpr functions. +// ABSL_ASSERT functions as a runtime assert but works in C++11 constexpr +// functions. Example: +// +// constexpr double Divide(double a, double b) { +// return ABSL_ASSERT(b != 0), a / b; +// } +// +// This macro is inspired by +// https://akrzemi1.wordpress.com/2017/05/18/asserts-in-constexpr-functions/ +#if defined(NDEBUG) +#define ABSL_ASSERT(expr) \ + (false ? static_cast<void>(expr) : static_cast<void>(0)) +#else +#define ABSL_ASSERT(expr) \ + (ABSL_PREDICT_TRUE((expr)) ? static_cast<void>(0) \ + : [] { assert(false && #expr); }()) // NOLINT +#endif + +// `ABSL_INTERNAL_HARDENING_ABORT()` controls how `ABSL_HARDENING_ASSERT()` +// aborts the program in release mode (when NDEBUG is defined). The +// implementation should abort the program as quickly as possible and ideally it +// should not be possible to ignore the abort request. +#if (ABSL_HAVE_BUILTIN(__builtin_trap) && \ + ABSL_HAVE_BUILTIN(__builtin_unreachable)) || \ + (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_INTERNAL_HARDENING_ABORT() \ + do { \ + __builtin_trap(); \ + __builtin_unreachable(); \ + } while (false) +#else +#define ABSL_INTERNAL_HARDENING_ABORT() abort() +#endif + +// ABSL_HARDENING_ASSERT() +// +// `ABSL_HARDENING_ASSERT()` is like `ABSL_ASSERT()`, but used to implement +// runtime assertions that should be enabled in hardened builds even when +// `NDEBUG` is defined. +// +// When `NDEBUG` is not defined, `ABSL_HARDENING_ASSERT()` is identical to +// `ABSL_ASSERT()`. +// +// See `ABSL_OPTION_HARDENED` in `absl/base/options.h` for more information on +// hardened mode. +#if ABSL_OPTION_HARDENED == 1 && defined(NDEBUG) +#define ABSL_HARDENING_ASSERT(expr) \ + (ABSL_PREDICT_TRUE((expr)) ? static_cast<void>(0) \ + : [] { ABSL_INTERNAL_HARDENING_ABORT(); }()) +#else +#define ABSL_HARDENING_ASSERT(expr) ABSL_ASSERT(expr) +#endif + +#ifdef ABSL_HAVE_EXCEPTIONS +#define ABSL_INTERNAL_TRY try +#define ABSL_INTERNAL_CATCH_ANY catch (...) 
+#define ABSL_INTERNAL_RETHROW do { throw; } while (false) +#else // ABSL_HAVE_EXCEPTIONS +#define ABSL_INTERNAL_TRY if (true) +#define ABSL_INTERNAL_CATCH_ANY else if (false) +#define ABSL_INTERNAL_RETHROW do {} while (false) +#endif // ABSL_HAVE_EXCEPTIONS + +#endif // ABSL_BASE_MACROS_H_ diff --git a/third_party/abseil_cpp/absl/base/optimization.h b/third_party/abseil_cpp/absl/base/optimization.h new file mode 100644 index 000000000000..92bf9cd38840 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/optimization.h @@ -0,0 +1,241 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: optimization.h +// ----------------------------------------------------------------------------- +// +// This header file defines portable macros for performance optimization. + +#ifndef ABSL_BASE_OPTIMIZATION_H_ +#define ABSL_BASE_OPTIMIZATION_H_ + +#include "absl/base/config.h" + +// ABSL_BLOCK_TAIL_CALL_OPTIMIZATION +// +// Instructs the compiler to avoid optimizing tail-call recursion. Use of this +// macro is useful when you wish to preserve the existing function order within +// a stack trace for logging, debugging, or profiling purposes. +// +// Example: +// +// int f() { +// int result = g(); +// ABSL_BLOCK_TAIL_CALL_OPTIMIZATION(); +// return result; +// } +#if defined(__pnacl__) +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; } +#elif defined(__clang__) +// Clang will not tail call given inline volatile assembly. +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("") +#elif defined(__GNUC__) +// GCC will not tail call given inline volatile assembly. +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("") +#elif defined(_MSC_VER) +#include <intrin.h> +// The __nop() intrinsic blocks the optimisation. +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __nop() +#else +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; } +#endif + +// ABSL_CACHELINE_SIZE +// +// Explicitly defines the size of the L1 cache for purposes of alignment. +// Setting the cacheline size allows you to specify that certain objects be +// aligned on a cacheline boundary with `ABSL_CACHELINE_ALIGNED` declarations. +// (See below.) +// +// NOTE: this macro should be replaced with the following C++17 features, when +// those are generally available: +// +// * `std::hardware_constructive_interference_size` +// * `std::hardware_destructive_interference_size` +// +// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html +// for more information. +#if defined(__GNUC__) +// Cache line alignment +#if defined(__i386__) || defined(__x86_64__) +#define ABSL_CACHELINE_SIZE 64 +#elif defined(__powerpc64__) +#define ABSL_CACHELINE_SIZE 128 +#elif defined(__aarch64__) +// We would need to read special register ctr_el0 to find out L1 dcache size. 
+// This value is a good estimate based on a real aarch64 machine. +#define ABSL_CACHELINE_SIZE 64 +#elif defined(__arm__) +// Cache line sizes for ARM: These values are not strictly correct since +// cache line sizes depend on implementations, not architectures. There +// are even implementations with cache line sizes configurable at boot +// time. +#if defined(__ARM_ARCH_5T__) +#define ABSL_CACHELINE_SIZE 32 +#elif defined(__ARM_ARCH_7A__) +#define ABSL_CACHELINE_SIZE 64 +#endif +#endif + +#ifndef ABSL_CACHELINE_SIZE +// A reasonable default guess. Note that overestimates tend to waste more +// space, while underestimates tend to waste more time. +#define ABSL_CACHELINE_SIZE 64 +#endif + +// ABSL_CACHELINE_ALIGNED +// +// Indicates that the declared object be cache aligned using +// `ABSL_CACHELINE_SIZE` (see above). Cacheline aligning objects allows you to +// load a set of related objects in the L1 cache for performance improvements. +// Cacheline aligning objects properly allows constructive memory sharing and +// prevents destructive (or "false") memory sharing. +// +// NOTE: this macro should be replaced with usage of `alignas()` using +// `std::hardware_constructive_interference_size` and/or +// `std::hardware_destructive_interference_size` when available within C++17. +// +// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html +// for more information. +// +// On some compilers, `ABSL_CACHELINE_ALIGNED` expands to an `__attribute__` +// or `__declspec` attribute. For compilers where this is not known to work, +// the macro expands to nothing. +// +// No further guarantees are made here. The result of applying the macro +// to variables and types is always implementation-defined. +// +// WARNING: It is easy to use this attribute incorrectly, even to the point +// of causing bugs that are difficult to diagnose, crash, etc. It does not +// of itself guarantee that objects are aligned to a cache line. +// +// NOTE: Some compilers are picky about the locations of annotations such as +// this attribute, so prefer to put it at the beginning of your declaration. +// For example, +// +// ABSL_CACHELINE_ALIGNED static Foo* foo = ... +// +// class ABSL_CACHELINE_ALIGNED Bar { ... +// +// Recommendations: +// +// 1) Consult compiler documentation; this comment is not kept in sync as +// toolchains evolve. +// 2) Verify your use has the intended effect. This often requires inspecting +// the generated machine code. +// 3) Prefer applying this attribute to individual variables. Avoid +// applying it to types. This tends to localize the effect. +#define ABSL_CACHELINE_ALIGNED __attribute__((aligned(ABSL_CACHELINE_SIZE))) +#elif defined(_MSC_VER) +#define ABSL_CACHELINE_SIZE 64 +#define ABSL_CACHELINE_ALIGNED __declspec(align(ABSL_CACHELINE_SIZE)) +#else +#define ABSL_CACHELINE_SIZE 64 +#define ABSL_CACHELINE_ALIGNED +#endif + +// ABSL_PREDICT_TRUE, ABSL_PREDICT_FALSE +// +// Enables the compiler to prioritize compilation using static analysis for +// likely paths within a boolean branch. +// +// Example: +// +// if (ABSL_PREDICT_TRUE(expression)) { +// return result; // Faster if more likely +// } else { +// return 0; +// } +// +// Compilers can use the information that a certain branch is not likely to be +// taken (for instance, a CHECK failure) to optimize for the common case in +// the absence of better information (ie. compiling gcc with `-fprofile-arcs`). 
+// +// Recommendation: Modern CPUs dynamically predict branch execution paths, +// typically with accuracy greater than 97%. As a result, annotating every +// branch in a codebase is likely counterproductive; however, annotating +// specific branches that are both hot and consistently mispredicted is likely +// to yield performance improvements. +#if ABSL_HAVE_BUILTIN(__builtin_expect) || \ + (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_PREDICT_FALSE(x) (__builtin_expect(x, 0)) +#define ABSL_PREDICT_TRUE(x) (__builtin_expect(false || (x), true)) +#else +#define ABSL_PREDICT_FALSE(x) (x) +#define ABSL_PREDICT_TRUE(x) (x) +#endif + +// ABSL_INTERNAL_ASSUME(cond) +// Informs the compiler than a condition is always true and that it can assume +// it to be true for optimization purposes. The call has undefined behavior if +// the condition is false. +// In !NDEBUG mode, the condition is checked with an assert(). +// NOTE: The expression must not have side effects, as it will only be evaluated +// in some compilation modes and not others. +// +// Example: +// +// int x = ...; +// ABSL_INTERNAL_ASSUME(x >= 0); +// // The compiler can optimize the division to a simple right shift using the +// // assumption specified above. +// int y = x / 16; +// +#if !defined(NDEBUG) +#define ABSL_INTERNAL_ASSUME(cond) assert(cond) +#elif ABSL_HAVE_BUILTIN(__builtin_assume) +#define ABSL_INTERNAL_ASSUME(cond) __builtin_assume(cond) +#elif defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable) +#define ABSL_INTERNAL_ASSUME(cond) \ + do { \ + if (!(cond)) __builtin_unreachable(); \ + } while (0) +#elif defined(_MSC_VER) +#define ABSL_INTERNAL_ASSUME(cond) __assume(cond) +#else +#define ABSL_INTERNAL_ASSUME(cond) \ + do { \ + static_cast<void>(false && (cond)); \ + } while (0) +#endif + +// ABSL_INTERNAL_UNIQUE_SMALL_NAME(cond) +// This macro forces small unique name on a static file level symbols like +// static local variables or static functions. This is intended to be used in +// macro definitions to optimize the cost of generated code. Do NOT use it on +// symbols exported from translation unit since it may casue a link time +// conflict. +// +// Example: +// +// #define MY_MACRO(txt) +// namespace { +// char VeryVeryLongVarName[] ABSL_INTERNAL_UNIQUE_SMALL_NAME() = txt; +// const char* VeryVeryLongFuncName() ABSL_INTERNAL_UNIQUE_SMALL_NAME(); +// const char* VeryVeryLongFuncName() { return txt; } +// } +// + +#if defined(__GNUC__) +#define ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x) #x +#define ABSL_INTERNAL_UNIQUE_SMALL_NAME1(x) ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x) +#define ABSL_INTERNAL_UNIQUE_SMALL_NAME() \ + asm(ABSL_INTERNAL_UNIQUE_SMALL_NAME1(.absl.__COUNTER__)) +#else +#define ABSL_INTERNAL_UNIQUE_SMALL_NAME() +#endif + +#endif // ABSL_BASE_OPTIMIZATION_H_ diff --git a/third_party/abseil_cpp/absl/base/options.h b/third_party/abseil_cpp/absl/base/options.h new file mode 100644 index 000000000000..230bf1eecc4a --- /dev/null +++ b/third_party/abseil_cpp/absl/base/options.h @@ -0,0 +1,238 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: options.h +// ----------------------------------------------------------------------------- +// +// This file contains Abseil configuration options for setting specific +// implementations instead of letting Abseil determine which implementation to +// use at compile-time. Setting these options may be useful for package or build +// managers who wish to guarantee ABI stability within binary builds (which are +// otherwise difficult to enforce). +// +// *** IMPORTANT NOTICE FOR PACKAGE MANAGERS: It is important that +// maintainers of package managers who wish to package Abseil read and +// understand this file! *** +// +// Abseil contains a number of possible configuration endpoints, based on +// parameters such as the detected platform, language version, or command-line +// flags used to invoke the underlying binary. As is the case with all +// libraries, binaries which contain Abseil code must ensure that separate +// packages use the same compiled copy of Abseil to avoid a diamond dependency +// problem, which can occur if two packages built with different Abseil +// configuration settings are linked together. Diamond dependency problems in +// C++ may manifest as violations to the One Definition Rule (ODR) (resulting in +// linker errors), or undefined behavior (resulting in crashes). +// +// Diamond dependency problems can be avoided if all packages utilize the same +// exact version of Abseil. Building from source code with the same compilation +// parameters is the easiest way to avoid such dependency problems. However, for +// package managers who cannot control such compilation parameters, we are +// providing the file to allow you to inject ABI (Application Binary Interface) +// stability across builds. Settings options in this file will neither change +// API nor ABI, providing a stable copy of Abseil between packages. +// +// Care must be taken to keep options within these configurations isolated +// from any other dynamic settings, such as command-line flags which could alter +// these options. This file is provided specifically to help build and package +// managers provide a stable copy of Abseil within their libraries and binaries; +// other developers should not have need to alter the contents of this file. +// +// ----------------------------------------------------------------------------- +// Usage +// ----------------------------------------------------------------------------- +// +// For any particular package release, set the appropriate definitions within +// this file to whatever value makes the most sense for your package(s). Note +// that, by default, most of these options, at the moment, affect the +// implementation of types; future options may affect other implementation +// details. +// +// NOTE: the defaults within this file all assume that Abseil can select the +// proper Abseil implementation at compile-time, which will not be sufficient +// to guarantee ABI stability to package managers. + +#ifndef ABSL_BASE_OPTIONS_H_ +#define ABSL_BASE_OPTIONS_H_ + +// Include a standard library header to allow configuration based on the +// standard library in use. 
+#ifdef __cplusplus +#include <ciso646> +#endif + +// ----------------------------------------------------------------------------- +// Type Compatibility Options +// ----------------------------------------------------------------------------- +// +// ABSL_OPTION_USE_STD_ANY +// +// This option controls whether absl::any is implemented as an alias to +// std::any, or as an independent implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use an alias to std::any. This requires that all code +// using Abseil is built in C++17 mode or later. +// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if a working std::any is available. This option is +// useful when you are building your entire program, including all of its +// dependencies, from source. It should not be used otherwise -- for example, +// if you are distributing Abseil in a binary package manager -- since in +// mode 2, absl::any will name a different type, with a different mangled name +// and binary layout, depending on the compiler flags passed by the end user. +// For more info, see https://abseil.io/about/design/dropin-types. +// +// User code should not inspect this macro. To check in the preprocessor if +// absl::any is a typedef of std::any, use the feature macro ABSL_USES_STD_ANY. + +#define ABSL_OPTION_USE_STD_ANY 2 + + +// ABSL_OPTION_USE_STD_OPTIONAL +// +// This option controls whether absl::optional is implemented as an alias to +// std::optional, or as an independent implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use an alias to std::optional. This requires that all +// code using Abseil is built in C++17 mode or later. +// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if a working std::optional is available. This option +// is useful when you are building your program from source. It should not be +// used otherwise -- for example, if you are distributing Abseil in a binary +// package manager -- since in mode 2, absl::optional will name a different +// type, with a different mangled name and binary layout, depending on the +// compiler flags passed by the end user. For more info, see +// https://abseil.io/about/design/dropin-types. + +// User code should not inspect this macro. To check in the preprocessor if +// absl::optional is a typedef of std::optional, use the feature macro +// ABSL_USES_STD_OPTIONAL. + +#define ABSL_OPTION_USE_STD_OPTIONAL 2 + + +// ABSL_OPTION_USE_STD_STRING_VIEW +// +// This option controls whether absl::string_view is implemented as an alias to +// std::string_view, or as an independent implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use an alias to std::string_view. This requires that +// all code using Abseil is built in C++17 mode or later. +// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if a working std::string_view is available. This +// option is useful when you are building your program from source. 
It should +// not be used otherwise -- for example, if you are distributing Abseil in a +// binary package manager -- since in mode 2, absl::string_view will name a +// different type, with a different mangled name and binary layout, depending on +// the compiler flags passed by the end user. For more info, see +// https://abseil.io/about/design/dropin-types. +// +// User code should not inspect this macro. To check in the preprocessor if +// absl::string_view is a typedef of std::string_view, use the feature macro +// ABSL_USES_STD_STRING_VIEW. + +#define ABSL_OPTION_USE_STD_STRING_VIEW 2 + +// ABSL_OPTION_USE_STD_VARIANT +// +// This option controls whether absl::variant is implemented as an alias to +// std::variant, or as an independent implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use an alias to std::variant. This requires that all +// code using Abseil is built in C++17 mode or later. +// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if a working std::variant is available. This option +// is useful when you are building your program from source. It should not be +// used otherwise -- for example, if you are distributing Abseil in a binary +// package manager -- since in mode 2, absl::variant will name a different +// type, with a different mangled name and binary layout, depending on the +// compiler flags passed by the end user. For more info, see +// https://abseil.io/about/design/dropin-types. +// +// User code should not inspect this macro. To check in the preprocessor if +// absl::variant is a typedef of std::variant, use the feature macro +// ABSL_USES_STD_VARIANT. + +#define ABSL_OPTION_USE_STD_VARIANT 2 + + +// ABSL_OPTION_USE_INLINE_NAMESPACE +// ABSL_OPTION_INLINE_NAMESPACE_NAME +// +// These options controls whether all entities in the absl namespace are +// contained within an inner inline namespace. This does not affect the +// user-visible API of Abseil, but it changes the mangled names of all symbols. +// +// This can be useful as a version tag if you are distributing Abseil in +// precompiled form. This will prevent a binary library build of Abseil with +// one inline namespace being used with headers configured with a different +// inline namespace name. Binary packagers are reminded that Abseil does not +// guarantee any ABI stability in Abseil, so any update of Abseil or +// configuration change in such a binary package should be combined with a +// new, unique value for the inline namespace name. +// +// A value of 0 means not to use inline namespaces. +// +// A value of 1 means to use an inline namespace with the given name inside +// namespace absl. If this is set, ABSL_OPTION_INLINE_NAMESPACE_NAME must also +// be changed to a new, unique identifier name. In particular "head" is not +// allowed. + +#define ABSL_OPTION_USE_INLINE_NAMESPACE 0 +#define ABSL_OPTION_INLINE_NAMESPACE_NAME head + +// ABSL_OPTION_HARDENED +// +// This option enables a "hardened" build in release mode (in this context, +// release mode is defined as a build where the `NDEBUG` macro is defined). +// +// A value of 0 means that "hardened" mode is not enabled. +// +// A value of 1 means that "hardened" mode is enabled. +// +// Hardened builds have additional security checks enabled when `NDEBUG` is +// defined. 
Defining `NDEBUG` is normally used to turn `assert()` macro into a +// no-op, as well as disabling other bespoke program consistency checks. By +// defining ABSL_OPTION_HARDENED to 1, a select set of checks remain enabled in +// release mode. These checks guard against programming errors that may lead to +// security vulnerabilities. In release mode, when one of these programming +// errors is encountered, the program will immediately abort, possibly without +// any attempt at logging. +// +// The checks enabled by this option are not free; they do incur runtime cost. +// +// The checks enabled by this option are always active when `NDEBUG` is not +// defined, even in the case when ABSL_OPTION_HARDENED is defined to 0. The +// checks enabled by this option may abort the program in a different way and +// log additional information when `NDEBUG` is not defined. + +#define ABSL_OPTION_HARDENED 0 + +#endif // ABSL_BASE_OPTIONS_H_ diff --git a/third_party/abseil_cpp/absl/base/policy_checks.h b/third_party/abseil_cpp/absl/base/policy_checks.h new file mode 100644 index 000000000000..4dfa49e54ac2 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/policy_checks.h @@ -0,0 +1,111 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: policy_checks.h +// ----------------------------------------------------------------------------- +// +// This header enforces a minimum set of policies at build time, such as the +// supported compiler and library versions. Unsupported configurations are +// reported with `#error`. This enforcement is best effort, so successfully +// compiling this header does not guarantee a supported configuration. + +#ifndef ABSL_BASE_POLICY_CHECKS_H_ +#define ABSL_BASE_POLICY_CHECKS_H_ + +// Included for the __GLIBC_PREREQ macro used below. +#include <limits.h> + +// Included for the _STLPORT_VERSION macro used below. +#if defined(__cplusplus) +#include <cstddef> +#endif + +// ----------------------------------------------------------------------------- +// Operating System Check +// ----------------------------------------------------------------------------- + +#if defined(__CYGWIN__) +#error "Cygwin is not supported." +#endif + +// ----------------------------------------------------------------------------- +// Compiler Check +// ----------------------------------------------------------------------------- + +// We support MSVC++ 14.0 update 2 and later. +// This minimum will go up. +#if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023918 && !defined(__clang__) +#error "This package requires Visual Studio 2015 Update 2 or higher." +#endif + +// We support gcc 4.7 and later. +// This minimum will go up. +#if defined(__GNUC__) && !defined(__clang__) +#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 7) +#error "This package requires gcc 4.7 or higher." 
+#endif +#endif + +// We support Apple Xcode clang 4.2.1 (version 421.11.65) and later. +// This corresponds to Apple Xcode version 4.5. +// This minimum will go up. +#if defined(__apple_build_version__) && __apple_build_version__ < 4211165 +#error "This package requires __apple_build_version__ of 4211165 or higher." +#endif + +// ----------------------------------------------------------------------------- +// C++ Version Check +// ----------------------------------------------------------------------------- + +// Enforce C++11 as the minimum. Note that Visual Studio has not +// advanced __cplusplus despite being good enough for our purposes, so +// so we exempt it from the check. +#if defined(__cplusplus) && !defined(_MSC_VER) +#if __cplusplus < 201103L +#error "C++ versions less than C++11 are not supported." +#endif +#endif + +// ----------------------------------------------------------------------------- +// Standard Library Check +// ----------------------------------------------------------------------------- + +#if defined(_STLPORT_VERSION) +#error "STLPort is not supported." +#endif + +// ----------------------------------------------------------------------------- +// `char` Size Check +// ----------------------------------------------------------------------------- + +// Abseil currently assumes CHAR_BIT == 8. If you would like to use Abseil on a +// platform where this is not the case, please provide us with the details about +// your platform so we can consider relaxing this requirement. +#if CHAR_BIT != 8 +#error "Abseil assumes CHAR_BIT == 8." +#endif + +// ----------------------------------------------------------------------------- +// `int` Size Check +// ----------------------------------------------------------------------------- + +// Abseil currently assumes that an int is 4 bytes. If you would like to use +// Abseil on a platform where this is not the case, please provide us with the +// details about your platform so we can consider relaxing this requirement. +#if INT_MAX < 2147483647 +#error "Abseil assumes that int is at least 4 bytes. " +#endif + +#endif // ABSL_BASE_POLICY_CHECKS_H_ diff --git a/third_party/abseil_cpp/absl/base/port.h b/third_party/abseil_cpp/absl/base/port.h new file mode 100644 index 000000000000..6c28068d4f40 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/port.h @@ -0,0 +1,26 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This files is a forwarding header for other headers containing various +// portability macros and functions. +// This file is used for both C and C++! 
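
Since port.h merely forwards to attributes.h, config.h, and optimization.h, including it is enough to use the optimization macros defined earlier; the ShardCounter type and Increment function below are hypothetical, sketched only to show ABSL_CACHELINE_ALIGNED and ABSL_PREDICT_FALSE together.

#include "absl/base/port.h"

// Hypothetical per-thread shard; the alignment keeps distinct shards on
// separate cache lines, which reduces false sharing when several threads
// update their own shard concurrently.
struct ABSL_CACHELINE_ALIGNED ShardCounter {
  long value = 0;
};

inline long Increment(ShardCounter* shard) {
  // The null check is expected to be rare, so it is marked as the unlikely
  // branch.
  if (ABSL_PREDICT_FALSE(shard == nullptr)) return -1;
  return ++shard->value;
}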
+ +#ifndef ABSL_BASE_PORT_H_ +#define ABSL_BASE_PORT_H_ + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/optimization.h" + +#endif // ABSL_BASE_PORT_H_ diff --git a/third_party/abseil_cpp/absl/base/raw_logging_test.cc b/third_party/abseil_cpp/absl/base/raw_logging_test.cc new file mode 100644 index 000000000000..3d30bd386115 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/raw_logging_test.cc @@ -0,0 +1,79 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This test serves primarily as a compilation test for base/raw_logging.h. +// Raw logging testing is covered by logging_unittest.cc, which is not as +// portable as this test. + +#include "absl/base/internal/raw_logging.h" + +#include <tuple> + +#include "gtest/gtest.h" +#include "absl/strings/str_cat.h" + +namespace { + +TEST(RawLoggingCompilationTest, Log) { + ABSL_RAW_LOG(INFO, "RAW INFO: %d", 1); + ABSL_RAW_LOG(INFO, "RAW INFO: %d %d", 1, 2); + ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d", 1, 2, 3); + ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d %d", 1, 2, 3, 4); + ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d %d %d", 1, 2, 3, 4, 5); + ABSL_RAW_LOG(WARNING, "RAW WARNING: %d", 1); + ABSL_RAW_LOG(ERROR, "RAW ERROR: %d", 1); +} + +TEST(RawLoggingCompilationTest, PassingCheck) { + ABSL_RAW_CHECK(true, "RAW CHECK"); +} + +// Not all platforms support output from raw log, so we don't verify any +// particular output for RAW check failures (expecting the empty string +// accomplishes this). This test is primarily a compilation test, but we +// are verifying process death when EXPECT_DEATH works for a platform. +const char kExpectedDeathOutput[] = ""; + +TEST(RawLoggingDeathTest, FailingCheck) { + EXPECT_DEATH_IF_SUPPORTED(ABSL_RAW_CHECK(1 == 0, "explanation"), + kExpectedDeathOutput); +} + +TEST(RawLoggingDeathTest, LogFatal) { + EXPECT_DEATH_IF_SUPPORTED(ABSL_RAW_LOG(FATAL, "my dog has fleas"), + kExpectedDeathOutput); +} + +TEST(InternalLog, CompilationTest) { + ABSL_INTERNAL_LOG(INFO, "Internal Log"); + std::string log_msg = "Internal Log"; + ABSL_INTERNAL_LOG(INFO, log_msg); + + ABSL_INTERNAL_LOG(INFO, log_msg + " 2"); + + float d = 1.1f; + ABSL_INTERNAL_LOG(INFO, absl::StrCat("Internal log ", 3, " + ", d)); +} + +TEST(InternalLogDeathTest, FailingCheck) { + EXPECT_DEATH_IF_SUPPORTED(ABSL_INTERNAL_CHECK(1 == 0, "explanation"), + kExpectedDeathOutput); +} + +TEST(InternalLogDeathTest, LogFatal) { + EXPECT_DEATH_IF_SUPPORTED(ABSL_INTERNAL_LOG(FATAL, "my dog has fleas"), + kExpectedDeathOutput); +} + +} // namespace diff --git a/third_party/abseil_cpp/absl/base/spinlock_test_common.cc b/third_party/abseil_cpp/absl/base/spinlock_test_common.cc new file mode 100644 index 000000000000..b68c51a1dcde --- /dev/null +++ b/third_party/abseil_cpp/absl/base/spinlock_test_common.cc @@ -0,0 +1,265 @@ +// Copyright 2017 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A bunch of threads repeatedly hash an array of ints protected by a +// spinlock. If the spinlock is working properly, all elements of the +// array should be equal at the end of the test. + +#include <cstdint> +#include <limits> +#include <random> +#include <thread> // NOLINT(build/c++11) +#include <vector> + +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/internal/low_level_scheduling.h" +#include "absl/base/internal/scheduling_mode.h" +#include "absl/base/internal/spinlock.h" +#include "absl/base/internal/sysinfo.h" +#include "absl/base/macros.h" +#include "absl/synchronization/blocking_counter.h" +#include "absl/synchronization/notification.h" + +constexpr int32_t kNumThreads = 10; +constexpr int32_t kIters = 1000; + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// This is defined outside of anonymous namespace so that it can be +// a friend of SpinLock to access protected methods for testing. +struct SpinLockTest { + static uint32_t EncodeWaitCycles(int64_t wait_start_time, + int64_t wait_end_time) { + return SpinLock::EncodeWaitCycles(wait_start_time, wait_end_time); + } + static uint64_t DecodeWaitCycles(uint32_t lock_value) { + return SpinLock::DecodeWaitCycles(lock_value); + } +}; + +namespace { + +static constexpr int kArrayLength = 10; +static uint32_t values[kArrayLength]; + +ABSL_CONST_INIT static SpinLock static_cooperative_spinlock( + absl::kConstInit, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL); +ABSL_CONST_INIT static SpinLock static_noncooperative_spinlock( + absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY); + +// Simple integer hash function based on the public domain lookup2 hash. +// http://burtleburtle.net/bob/c/lookup2.c +static uint32_t Hash32(uint32_t a, uint32_t c) { + uint32_t b = 0x9e3779b9UL; // The golden ratio; an arbitrary value. 
+ a -= b; a -= c; a ^= (c >> 13); + b -= c; b -= a; b ^= (a << 8); + c -= a; c -= b; c ^= (b >> 13); + a -= b; a -= c; a ^= (c >> 12); + b -= c; b -= a; b ^= (a << 16); + c -= a; c -= b; c ^= (b >> 5); + a -= b; a -= c; a ^= (c >> 3); + b -= c; b -= a; b ^= (a << 10); + c -= a; c -= b; c ^= (b >> 15); + return c; +} + +static void TestFunction(int thread_salt, SpinLock* spinlock) { + for (int i = 0; i < kIters; i++) { + SpinLockHolder h(spinlock); + for (int j = 0; j < kArrayLength; j++) { + const int index = (j + thread_salt) % kArrayLength; + values[index] = Hash32(values[index], thread_salt); + std::this_thread::yield(); + } + } +} + +static void ThreadedTest(SpinLock* spinlock) { + std::vector<std::thread> threads; + for (int i = 0; i < kNumThreads; ++i) { + threads.push_back(std::thread(TestFunction, i, spinlock)); + } + for (auto& thread : threads) { + thread.join(); + } + + SpinLockHolder h(spinlock); + for (int i = 1; i < kArrayLength; i++) { + EXPECT_EQ(values[0], values[i]); + } +} + +TEST(SpinLock, StackNonCooperativeDisablesScheduling) { + SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY); + spinlock.Lock(); + EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed()); + spinlock.Unlock(); +} + +TEST(SpinLock, StaticNonCooperativeDisablesScheduling) { + static_noncooperative_spinlock.Lock(); + EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed()); + static_noncooperative_spinlock.Unlock(); +} + +TEST(SpinLock, WaitCyclesEncoding) { + // These are implementation details not exported by SpinLock. + const int kProfileTimestampShift = 7; + const int kLockwordReservedShift = 3; + const uint32_t kSpinLockSleeper = 8; + + // We should be able to encode up to (1^kMaxCycleBits - 1) without clamping + // but the lower kProfileTimestampShift will be dropped. + const int kMaxCyclesShift = + 32 - kLockwordReservedShift + kProfileTimestampShift; + const uint64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1; + + // These bits should be zero after encoding. + const uint32_t kLockwordReservedMask = (1 << kLockwordReservedShift) - 1; + + // These bits are dropped when wait cycles are encoded. + const uint64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1; + + // Test a bunch of random values + std::default_random_engine generator; + // Shift to avoid overflow below. + std::uniform_int_distribution<uint64_t> time_distribution( + 0, std::numeric_limits<uint64_t>::max() >> 4); + std::uniform_int_distribution<uint64_t> cycle_distribution(0, kMaxCycles); + + for (int i = 0; i < 100; i++) { + int64_t start_time = time_distribution(generator); + int64_t cycles = cycle_distribution(generator); + int64_t end_time = start_time + cycles; + uint32_t lock_value = SpinLockTest::EncodeWaitCycles(start_time, end_time); + EXPECT_EQ(0, lock_value & kLockwordReservedMask); + uint64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value); + EXPECT_EQ(0, decoded & kProfileTimestampMask); + EXPECT_EQ(cycles & ~kProfileTimestampMask, decoded); + } + + // Test corner cases + int64_t start_time = time_distribution(generator); + EXPECT_EQ(kSpinLockSleeper, + SpinLockTest::EncodeWaitCycles(start_time, start_time)); + EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(0)); + EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(kLockwordReservedMask)); + EXPECT_EQ(kMaxCycles & ~kProfileTimestampMask, + SpinLockTest::DecodeWaitCycles(~kLockwordReservedMask)); + + // Check that we cannot produce kSpinLockSleeper during encoding. 
+ int64_t sleeper_cycles = + kSpinLockSleeper << (kProfileTimestampShift - kLockwordReservedShift); + uint32_t sleeper_value = + SpinLockTest::EncodeWaitCycles(start_time, start_time + sleeper_cycles); + EXPECT_NE(sleeper_value, kSpinLockSleeper); + + // Test clamping + uint32_t max_value = + SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles); + uint64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value); + uint64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask; + EXPECT_EQ(expected_max_value_decoded, max_value_decoded); + + const int64_t step = (1 << kProfileTimestampShift); + uint32_t after_max_value = + SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles + step); + uint64_t after_max_value_decoded = + SpinLockTest::DecodeWaitCycles(after_max_value); + EXPECT_EQ(expected_max_value_decoded, after_max_value_decoded); + + uint32_t before_max_value = SpinLockTest::EncodeWaitCycles( + start_time, start_time + kMaxCycles - step); + uint64_t before_max_value_decoded = + SpinLockTest::DecodeWaitCycles(before_max_value); + EXPECT_GT(expected_max_value_decoded, before_max_value_decoded); +} + +TEST(SpinLockWithThreads, StackSpinLock) { + SpinLock spinlock; + ThreadedTest(&spinlock); +} + +TEST(SpinLockWithThreads, StackCooperativeSpinLock) { + SpinLock spinlock(base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL); + ThreadedTest(&spinlock); +} + +TEST(SpinLockWithThreads, StackNonCooperativeSpinLock) { + SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY); + ThreadedTest(&spinlock); +} + +TEST(SpinLockWithThreads, StaticCooperativeSpinLock) { + ThreadedTest(&static_cooperative_spinlock); +} + +TEST(SpinLockWithThreads, StaticNonCooperativeSpinLock) { + ThreadedTest(&static_noncooperative_spinlock); +} + +TEST(SpinLockWithThreads, DoesNotDeadlock) { + struct Helper { + static void NotifyThenLock(Notification* locked, SpinLock* spinlock, + BlockingCounter* b) { + locked->WaitForNotification(); // Wait for LockThenWait() to hold "s". 
+ b->DecrementCount(); + SpinLockHolder l(spinlock); + } + + static void LockThenWait(Notification* locked, SpinLock* spinlock, + BlockingCounter* b) { + SpinLockHolder l(spinlock); + locked->Notify(); + b->Wait(); + } + + static void DeadlockTest(SpinLock* spinlock, int num_spinners) { + Notification locked; + BlockingCounter counter(num_spinners); + std::vector<std::thread> threads; + + threads.push_back( + std::thread(Helper::LockThenWait, &locked, spinlock, &counter)); + for (int i = 0; i < num_spinners; ++i) { + threads.push_back( + std::thread(Helper::NotifyThenLock, &locked, spinlock, &counter)); + } + + for (auto& thread : threads) { + thread.join(); + } + } + }; + + SpinLock stack_cooperative_spinlock( + base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL); + SpinLock stack_noncooperative_spinlock(base_internal::SCHEDULE_KERNEL_ONLY); + Helper::DeadlockTest(&stack_cooperative_spinlock, + base_internal::NumCPUs() * 2); + Helper::DeadlockTest(&stack_noncooperative_spinlock, + base_internal::NumCPUs() * 2); + Helper::DeadlockTest(&static_cooperative_spinlock, + base_internal::NumCPUs() * 2); + Helper::DeadlockTest(&static_noncooperative_spinlock, + base_internal::NumCPUs() * 2); +} + +} // namespace +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/abseil_cpp/absl/base/thread_annotations.h b/third_party/abseil_cpp/absl/base/thread_annotations.h new file mode 100644 index 000000000000..5f51c0c2d2d5 --- /dev/null +++ b/third_party/abseil_cpp/absl/base/thread_annotations.h @@ -0,0 +1,280 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: thread_annotations.h +// ----------------------------------------------------------------------------- +// +// This header file contains macro definitions for thread safety annotations +// that allow developers to document the locking policies of multi-threaded +// code. The annotations can also help program analysis tools to identify +// potential thread safety issues. +// +// These annotations are implemented using compiler attributes. Using the macros +// defined here instead of raw attributes allow for portability and future +// compatibility. +// +// When referring to mutexes in the arguments of the attributes, you should +// use variable names or more complex expressions (e.g. my_object->mutex_) +// that evaluate to a concrete mutex object whenever possible. If the mutex +// you want to refer to is not in scope, you may use a member pointer +// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object. + +#ifndef ABSL_BASE_THREAD_ANNOTATIONS_H_ +#define ABSL_BASE_THREAD_ANNOTATIONS_H_ + +#include "absl/base/config.h" +// TODO(mbonadei): Remove after the backward compatibility period. 
+#include "absl/base/internal/thread_annotations.h" // IWYU pragma: export + +#if defined(__clang__) +#define ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(x) __attribute__((x)) +#else +#define ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(x) // no-op +#endif + +// ABSL_GUARDED_BY() +// +// Documents if a shared field or global variable needs to be protected by a +// mutex. ABSL_GUARDED_BY() allows the user to specify a particular mutex that +// should be held when accessing the annotated variable. +// +// Although this annotation (and ABSL_PT_GUARDED_BY, below) cannot be applied to +// local variables, a local variable and its associated mutex can often be +// combined into a small class or struct, thereby allowing the annotation. +// +// Example: +// +// class Foo { +// Mutex mu_; +// int p1_ ABSL_GUARDED_BY(mu_); +// ... +// }; +#define ABSL_GUARDED_BY(x) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(guarded_by(x)) + +// ABSL_PT_GUARDED_BY() +// +// Documents if the memory location pointed to by a pointer should be guarded +// by a mutex when dereferencing the pointer. +// +// Example: +// class Foo { +// Mutex mu_; +// int *p1_ ABSL_PT_GUARDED_BY(mu_); +// ... +// }; +// +// Note that a pointer variable to a shared memory location could itself be a +// shared variable. +// +// Example: +// +// // `q_`, guarded by `mu1_`, points to a shared memory location that is +// // guarded by `mu2_`: +// int *q_ ABSL_GUARDED_BY(mu1_) ABSL_PT_GUARDED_BY(mu2_); +#define ABSL_PT_GUARDED_BY(x) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(pt_guarded_by(x)) + +// ABSL_ACQUIRED_AFTER() / ABSL_ACQUIRED_BEFORE() +// +// Documents the acquisition order between locks that can be held +// simultaneously by a thread. For any two locks that need to be annotated +// to establish an acquisition order, only one of them needs the annotation. +// (i.e. You don't have to annotate both locks with both ABSL_ACQUIRED_AFTER +// and ABSL_ACQUIRED_BEFORE.) +// +// As with ABSL_GUARDED_BY, this is only applicable to mutexes that are shared +// fields or global variables. +// +// Example: +// +// Mutex m1_; +// Mutex m2_ ABSL_ACQUIRED_AFTER(m1_); +#define ABSL_ACQUIRED_AFTER(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(acquired_after(__VA_ARGS__)) + +#define ABSL_ACQUIRED_BEFORE(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(acquired_before(__VA_ARGS__)) + +// ABSL_EXCLUSIVE_LOCKS_REQUIRED() / ABSL_SHARED_LOCKS_REQUIRED() +// +// Documents a function that expects a mutex to be held prior to entry. +// The mutex is expected to be held both on entry to, and exit from, the +// function. +// +// An exclusive lock allows read-write access to the guarded data member(s), and +// only one thread can acquire a lock exclusively at any one time. A shared lock +// allows read-only access, and any number of threads can acquire a shared lock +// concurrently. +// +// Generally, non-const methods should be annotated with +// ABSL_EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with +// ABSL_SHARED_LOCKS_REQUIRED. +// +// Example: +// +// Mutex mu1, mu2; +// int a ABSL_GUARDED_BY(mu1); +// int b ABSL_GUARDED_BY(mu2); +// +// void foo() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... } +// void bar() const ABSL_SHARED_LOCKS_REQUIRED(mu1, mu2) { ... } +#define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \ + exclusive_locks_required(__VA_ARGS__)) + +#define ABSL_SHARED_LOCKS_REQUIRED(...) 
\ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(shared_locks_required(__VA_ARGS__)) + +// ABSL_LOCKS_EXCLUDED() +// +// Documents the locks acquired in the body of the function. These locks +// cannot be held when calling this function (as Abseil's `Mutex` locks are +// non-reentrant). +#define ABSL_LOCKS_EXCLUDED(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(locks_excluded(__VA_ARGS__)) + +// ABSL_LOCK_RETURNED() +// +// Documents a function that returns a mutex without acquiring it. For example, +// a public getter method that returns a pointer to a private mutex should +// be annotated with ABSL_LOCK_RETURNED. +#define ABSL_LOCK_RETURNED(x) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(lock_returned(x)) + +// ABSL_LOCKABLE +// +// Documents if a class/type is a lockable type (such as the `Mutex` class). +#define ABSL_LOCKABLE ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(lockable) + +// ABSL_SCOPED_LOCKABLE +// +// Documents if a class does RAII locking (such as the `MutexLock` class). +// The constructor should use `LOCK_FUNCTION()` to specify the mutex that is +// acquired, and the destructor should use `UNLOCK_FUNCTION()` with no +// arguments; the analysis will assume that the destructor unlocks whatever the +// constructor locked. +#define ABSL_SCOPED_LOCKABLE \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(scoped_lockable) + +// ABSL_EXCLUSIVE_LOCK_FUNCTION() +// +// Documents functions that acquire a lock in the body of a function, and do +// not release it. +#define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \ + exclusive_lock_function(__VA_ARGS__)) + +// ABSL_SHARED_LOCK_FUNCTION() +// +// Documents functions that acquire a shared (reader) lock in the body of a +// function, and do not release it. +#define ABSL_SHARED_LOCK_FUNCTION(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(shared_lock_function(__VA_ARGS__)) + +// ABSL_UNLOCK_FUNCTION() +// +// Documents functions that expect a lock to be held on entry to the function, +// and release it in the body of the function. +#define ABSL_UNLOCK_FUNCTION(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(unlock_function(__VA_ARGS__)) + +// ABSL_EXCLUSIVE_TRYLOCK_FUNCTION() / ABSL_SHARED_TRYLOCK_FUNCTION() +// +// Documents functions that try to acquire a lock, and return success or failure +// (or a non-boolean value that can be interpreted as a boolean). +// The first argument should be `true` for functions that return `true` on +// success, or `false` for functions that return `false` on success. The second +// argument specifies the mutex that is locked on success. If unspecified, this +// mutex is assumed to be `this`. +#define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \ + exclusive_trylock_function(__VA_ARGS__)) + +#define ABSL_SHARED_TRYLOCK_FUNCTION(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \ + shared_trylock_function(__VA_ARGS__)) + +// ABSL_ASSERT_EXCLUSIVE_LOCK() / ABSL_ASSERT_SHARED_LOCK() +// +// Documents functions that dynamically check to see if a lock is held, and fail +// if it is not held. +#define ABSL_ASSERT_EXCLUSIVE_LOCK(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(assert_exclusive_lock(__VA_ARGS__)) + +#define ABSL_ASSERT_SHARED_LOCK(...) \ + ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(assert_shared_lock(__VA_ARGS__)) + +// ABSL_NO_THREAD_SAFETY_ANALYSIS +// +// Turns off thread safety checking within the body of a particular function. 
+// This annotation is used to mark functions that are known to be correct, but
+// the locking behavior is more complicated than the analyzer can handle.
+#define ABSL_NO_THREAD_SAFETY_ANALYSIS \
+ ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(no_thread_safety_analysis)
+
+//------------------------------------------------------------------------------
+// Tool-Supplied Annotations
+//------------------------------------------------------------------------------
+
+// ABSL_TS_UNCHECKED should be placed around lock expressions that are not valid
+// C++ syntax, but which are present for documentation purposes. These
+// annotations will be ignored by the analysis.
+#define ABSL_TS_UNCHECKED(x) ""
+
+// ABSL_TS_FIXME is used to mark lock expressions that are not valid C++ syntax.
+// It is used by automated tools to mark and disable invalid expressions.
+// The annotation should either be fixed, or changed to ABSL_TS_UNCHECKED.
+#define ABSL_TS_FIXME(x) ""
+
+// Like ABSL_NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body
+// of a particular function. However, this attribute is used to mark functions
+// that are incorrect and need to be fixed. It is used by automated tools to
+// avoid breaking the build when the analysis is updated.
+// Code owners are expected to eventually fix the routine.
+#define ABSL_NO_THREAD_SAFETY_ANALYSIS_FIXME ABSL_NO_THREAD_SAFETY_ANALYSIS
+
+// Similar to ABSL_NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks an
+// ABSL_GUARDED_BY annotation that needs to be fixed because it is producing a
+// thread safety warning. It disables the ABSL_GUARDED_BY.
+#define ABSL_GUARDED_BY_FIXME(x)
+
+// Disables warnings for a single read operation. This can be used to avoid
+// warnings when it is known that the read is not actually involved in a race,
+// but the compiler cannot confirm that.
+#define ABSL_TS_UNCHECKED_READ(x) absl::base_internal::ts_unchecked_read(x)
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Takes a reference to a guarded data member, and returns an unguarded
+// reference.
+// Do not use this function directly; use ABSL_TS_UNCHECKED_READ instead.
+template <typename T>
+inline const T& ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS {
+ return v;
+}
+
+template <typename T>
+inline T& ts_unchecked_read(T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS {
+ return v;
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_THREAD_ANNOTATIONS_H_
diff --git a/third_party/abseil_cpp/absl/base/throw_delegate_test.cc b/third_party/abseil_cpp/absl/base/throw_delegate_test.cc
new file mode 100644
index 000000000000..5ba4ce55e6d5
--- /dev/null
+++ b/third_party/abseil_cpp/absl/base/throw_delegate_test.cc
@@ -0,0 +1,107 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/throw_delegate.h"
+
+#include <functional>
+#include <new>
+#include <stdexcept>
+
+#include "absl/base/config.h"
+#include "gtest/gtest.h"
+
+namespace {
+
+using absl::base_internal::ThrowStdLogicError;
+using absl::base_internal::ThrowStdInvalidArgument;
+using absl::base_internal::ThrowStdDomainError;
+using absl::base_internal::ThrowStdLengthError;
+using absl::base_internal::ThrowStdOutOfRange;
+using absl::base_internal::ThrowStdRuntimeError;
+using absl::base_internal::ThrowStdRangeError;
+using absl::base_internal::ThrowStdOverflowError;
+using absl::base_internal::ThrowStdUnderflowError;
+using absl::base_internal::ThrowStdBadFunctionCall;
+using absl::base_internal::ThrowStdBadAlloc;
+
+constexpr const char* what_arg = "The quick brown fox jumps over the lazy dog";
+
+template <typename E>
+void ExpectThrowChar(void (*f)(const char*)) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ try {
+ f(what_arg);
+ FAIL() << "Didn't throw";
+ } catch (const E& e) {
+ EXPECT_STREQ(e.what(), what_arg);
+ }
+#else
+ EXPECT_DEATH_IF_SUPPORTED(f(what_arg), what_arg);
+#endif
+}
+
+template <typename E>
+void ExpectThrowString(void (*f)(const std::string&)) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ try {
+ f(what_arg);
+ FAIL() << "Didn't throw";
+ } catch (const E& e) {
+ EXPECT_STREQ(e.what(), what_arg);
+ }
+#else
+ EXPECT_DEATH_IF_SUPPORTED(f(what_arg), what_arg);
+#endif
+}
+
+template <typename E>
+void ExpectThrowNoWhat(void (*f)()) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ try {
+ f();
+ FAIL() << "Didn't throw";
+ } catch (const E& e) {
+ }
+#else
+ EXPECT_DEATH_IF_SUPPORTED(f(), "");
+#endif
+}
+
+TEST(ThrowHelper, Test) {
+ // Not using EXPECT_THROW because we want to check the .what() message too.
+ ExpectThrowChar<std::logic_error>(ThrowStdLogicError);
+ ExpectThrowChar<std::invalid_argument>(ThrowStdInvalidArgument);
+ ExpectThrowChar<std::domain_error>(ThrowStdDomainError);
+ ExpectThrowChar<std::length_error>(ThrowStdLengthError);
+ ExpectThrowChar<std::out_of_range>(ThrowStdOutOfRange);
+ ExpectThrowChar<std::runtime_error>(ThrowStdRuntimeError);
+ ExpectThrowChar<std::range_error>(ThrowStdRangeError);
+ ExpectThrowChar<std::overflow_error>(ThrowStdOverflowError);
+ ExpectThrowChar<std::underflow_error>(ThrowStdUnderflowError);
+
+ ExpectThrowString<std::logic_error>(ThrowStdLogicError);
+ ExpectThrowString<std::invalid_argument>(ThrowStdInvalidArgument);
+ ExpectThrowString<std::domain_error>(ThrowStdDomainError);
+ ExpectThrowString<std::length_error>(ThrowStdLengthError);
+ ExpectThrowString<std::out_of_range>(ThrowStdOutOfRange);
+ ExpectThrowString<std::runtime_error>(ThrowStdRuntimeError);
+ ExpectThrowString<std::range_error>(ThrowStdRangeError);
+ ExpectThrowString<std::overflow_error>(ThrowStdOverflowError);
+ ExpectThrowString<std::underflow_error>(ThrowStdUnderflowError);
+
+ ExpectThrowNoWhat<std::bad_function_call>(ThrowStdBadFunctionCall);
+ ExpectThrowNoWhat<std::bad_alloc>(ThrowStdBadAlloc);
+}
+
+} // namespace
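Usage note (not part of the diff above): a minimal sketch of how the annotations declared in thread_annotations.h are typically combined on a class. It assumes absl::Mutex and absl::MutexLock from absl/synchronization/mutex.h are available elsewhere in this tree; the Counter class and its members are illustrative only, not code added by this commit.

  #include "absl/base/thread_annotations.h"
  #include "absl/synchronization/mutex.h"

  // Illustrative example, not part of the imported sources.
  class Counter {
   public:
    // Acquires mu_ internally, so callers must not already hold it
    // (absl::Mutex is non-reentrant, hence ABSL_LOCKS_EXCLUDED).
    void Increment() ABSL_LOCKS_EXCLUDED(mu_) {
      absl::MutexLock lock(&mu_);
      IncrementLocked();
    }

    int Get() const ABSL_LOCKS_EXCLUDED(mu_) {
      absl::MutexLock lock(&mu_);
      return value_;
    }

   private:
    // Documents that the caller must already hold mu_ exclusively.
    void IncrementLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) { ++value_; }

    mutable absl::Mutex mu_;
    int value_ ABSL_GUARDED_BY(mu_) = 0;
  };

Since the attributes expand only under Clang (see ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE above), building with -Wthread-safety should flag an unlocked read of value_ or a call to IncrementLocked() without mu_ held, while other compilers simply compile the annotations away.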