author | Abseil Team <absl-team@google.com> | 2020-03-31 19:32 -0700
committer | Andy Getz <durandal@google.com> | 2020-03-31 20:34 -0400
commit | fba8a316c30690097de5d6127ad307d84a1b74ca (patch)
tree | 952cde69de3f0bca33453bf166514711e3abd5e0
parent | 79e0dc11514df035a8d07a356f9ee1800fb2160c (diff)
Export of internal Abseil changes
--
2dd5008c7b4176859e320c7c337078adb173b662 by Tom Manshreck <shreck@google.com>:

Internal change
PiperOrigin-RevId: 304022549

--
6442abd78697b03cfe698b0d0dac7f1eb4b5cb38 by Andy Getzendanner <durandal@google.com>:

Internal change
PiperOrigin-RevId: 303890410

--
eb8b37b468b0f23da09d3de714272928ef61f942 by Gennadiy Rozental <rogeeff@google.com>:

Roll changes forward with ChunkIterator templatized. This should facilitate usage of a "small" chunk iterator for regular use and a proper "big" iterator internally in the Cord implementation. This way Cord users are not exposed to the stack size overhead if they have many chunk iterators or a recursive implementation that relies on chunk iterators.
PiperOrigin-RevId: 303877118

--
9623c569e7c55b45254e95f2d14c5badf9c901aa by Gennadiy Rozental <rogeeff@google.com>:

Switch the Flags implementation of fast type id to use absl/base/internal/fast_type_id.h
PiperOrigin-RevId: 303861019

--
e2931e8d53c86d0816da6bbc8ba58cf5a3a443bb by Matthew Brown <matthewbr@google.com>:

Internal Change
PiperOrigin-RevId: 303832407

--
b549ed6e441e920b8ad6f02a80b9fd543820ef86 by Tom Manshreck <shreck@google.com>:

Update Cord header file comments to Abseil standards
PiperOrigin-RevId: 303823232

--
fc633d4f31a2d058f2b6a7029fc7c9820cd71c92 by Evan Brown <ezb@google.com>:

Remove top-level const from K/V in map_slot_type::mutable_value and map_slot_type::key. This allows us to move between `map_slot_type::mutable_value`s internally even when the key_type and/or mapped_type specified by the user are const.
PiperOrigin-RevId: 303811694

--
909b3ce7cb3583ee9c374d36ff5f82bba02a1b64 by Derek Mauro <dmauro@google.com>:

Add hardening assertions to the preconditions of absl::Cord
PiperOrigin-RevId: 303419537

--
9d32f79eabd54e6cb17bcc28b53e9bcfeb3cf6f4 by Greg Falcon <gfalcon@google.com>:

Don't use MSVC-specific bit manipulations when using Clang on Windows. This fixes a compiler warning. Note that we do not have continuous testing for this configuration; this CL is best-effort support.
PiperOrigin-RevId: 303322582

--
f6e0a35a2b9081d2a9eef73789b7bc1b5e46e5ad by Gennadiy Rozental <rogeeff@google.com>:

Introduce a standalone FastTypeId utility to represent a compile-time unique type id.
PiperOrigin-RevId: 303180545

--
99120e9fbdb5b2d327139ab8f617533d7bc3345b by Abseil Team <absl-team@google.com>:

Changed absl's import of std::string_view to `using string_view = std::string_view`. This should help tools (e.g. include-what-you-use) discover where absl::string_view is defined.
PiperOrigin-RevId: 303169095

GitOrigin-RevId: 2dd5008c7b4176859e320c7c337078adb173b662
Change-Id: I1e18ae08e23686ac963e7ea5e5bd499e18d51048
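The FastTypeId change in this export replaces the function-pointer-based flag type id with a pointer to a per-type static tag variable, giving each type a unique, RTTI-free identifier. The following is a condensed, self-contained sketch of that pattern as added in absl/base/internal/fast_type_id.h (namespaces and config macros omitted; `main()` is only illustrative):

```cpp
// Each instantiation of FastTypeTag<T> owns a distinct static char, so the
// address of that char serves as a unique identifier for T without RTTI.
#include <cassert>

using FastTypeIdType = const void*;

template <typename Type>
struct FastTypeTag {
  constexpr static char dummy_var = 0;
};

// Out-of-line definition: required before C++17 because the member is
// odr-used when its address is taken.
template <typename Type>
constexpr char FastTypeTag<Type>::dummy_var;

template <typename Type>
constexpr FastTypeIdType FastTypeId() {
  return &FastTypeTag<Type>::dummy_var;
}

int main() {
  // Distinct types yield distinct ids; type aliases resolve to the same id.
  using int_alias = int;
  assert(FastTypeId<int>() == FastTypeId<int_alias>());
  assert(FastTypeId<int>() != FastTypeId<long>());
}
```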
-rw-r--r-- | CMake/AbseilDll.cmake | 2
-rw-r--r-- | absl/base/BUILD.bazel | 25
-rw-r--r-- | absl/base/CMakeLists.txt | 25
-rw-r--r-- | absl/base/internal/bits.h | 24
-rw-r--r-- | absl/base/internal/fast_type_id.h | 48
-rw-r--r-- | absl/base/internal/fast_type_id_test.cc | 123
-rw-r--r-- | absl/container/BUILD.bazel | 2
-rw-r--r-- | absl/container/CMakeLists.txt | 2
-rw-r--r-- | absl/container/internal/container_memory.h | 6
-rw-r--r-- | absl/container/internal/container_memory_test.cc | 30
-rw-r--r-- | absl/flags/BUILD.bazel | 1
-rw-r--r-- | absl/flags/CMakeLists.txt | 1
-rw-r--r-- | absl/flags/internal/commandlineflag.h | 22
-rw-r--r-- | absl/flags/internal/flag.cc | 34
-rw-r--r-- | absl/flags/internal/flag.h | 46
-rw-r--r-- | absl/flags/internal/registry.cc | 8
-rw-r--r-- | absl/flags/internal/registry.h | 4
-rw-r--r-- | absl/strings/BUILD.bazel | 1
-rw-r--r-- | absl/strings/CMakeLists.txt | 1
-rw-r--r-- | absl/strings/cord.cc | 291
-rw-r--r-- | absl/strings/cord.h | 638
-rw-r--r-- | absl/strings/cord_test.cc | 68
-rw-r--r-- | absl/strings/internal/str_format/extension.h | 18
-rw-r--r-- | absl/strings/string_view.h | 2
24 files changed, 1002 insertions, 420 deletions
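Among the Cord changes in the diff below, cord.cc replaces its InlinedVector-based work stacks with a fixed-capacity CordTreePath bounded by MaxCordDepth(), since a balanced-enough tree can never produce a path deeper than that bound. The following is a simplified, self-contained sketch of that fixed-capacity stack pattern; the names `FixedPathStack` and `kMaxDepth` are illustrative stand-ins, not Abseil identifiers:

```cpp
// A bounded stack backed by a plain array: no heap allocation is needed
// because the caller guarantees the number of elements never exceeds N
// (for Cord, N is the maximum permitted tree depth).
#include <cassert>
#include <cstddef>

template <typename T, size_t N>
class FixedPathStack {
 public:
  bool empty() const { return size_ == 0; }
  size_t size() const { return size_; }
  T back() { return data_[size_ - 1]; }
  void pop_back() {
    assert(size_ > 0);
    --size_;
  }
  void push_back(T elem) {
    assert(size_ < N);  // The depth bound guarantees this never fires.
    data_[size_++] = elem;
  }

 private:
  T data_[N];
  size_t size_ = 0;
};

int main() {
  constexpr size_t kMaxDepth = 93;  // Stand-in for cord_internal::MaxCordDepth() on 64-bit.
  FixedPathStack<int, kMaxDepth> stack;
  stack.push_back(1);
  stack.push_back(2);
  assert(stack.back() == 2);
  stack.pop_back();
  assert(stack.size() == 1);
}
```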
diff --git a/CMake/AbseilDll.cmake b/CMake/AbseilDll.cmake index ed01a48d3805..7646c154ef67 100644 --- a/CMake/AbseilDll.cmake +++ b/CMake/AbseilDll.cmake @@ -19,6 +19,7 @@ set(ABSL_INTERNAL_DLL_FILES "base/internal/errno_saver.h" "base/internal/exponential_biased.cc" "base/internal/exponential_biased.h" + "base/internal/fast_type_id.h" "base/internal/hide_ptr.h" "base/internal/identity.h" "base/internal/invoke.h" @@ -130,7 +131,6 @@ set(ABSL_INTERNAL_DLL_FILES "random/bit_gen_ref.h" "random/discrete_distribution.cc" "random/discrete_distribution.h" - "random/distribution_format_traits.h" "random/distributions.h" "random/exponential_distribution.h" "random/gaussian_distribution.cc" diff --git a/absl/base/BUILD.bazel b/absl/base/BUILD.bazel index 24dab79102f0..1af9e45e668a 100644 --- a/absl/base/BUILD.bazel +++ b/absl/base/BUILD.bazel @@ -750,3 +750,28 @@ cc_binary( "@com_github_google_benchmark//:benchmark_main", ], ) + +cc_library( + name = "fast_type_id", + hdrs = ["internal/fast_type_id.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + ], +) + +cc_test( + name = "fast_type_id_test", + size = "small", + srcs = ["internal/fast_type_id_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":fast_type_id", + "@com_google_googletest//:gtest_main", + ], +) diff --git a/absl/base/CMakeLists.txt b/absl/base/CMakeLists.txt index 4230d2e73ee1..545499200247 100644 --- a/absl/base/CMakeLists.txt +++ b/absl/base/CMakeLists.txt @@ -674,3 +674,28 @@ absl_cc_test( gmock gtest_main ) + +absl_cc_library( + NAME + fast_type_id + HDRS + "internal/fast_type_id.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config +) + +absl_cc_test( + NAME + fast_type_id_test + SRCS + "internal/fast_type_id_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::fast_type_id + gtest_main +) diff --git a/absl/base/internal/bits.h b/absl/base/internal/bits.h index 8b03453c184f..14c51d8b3013 100644 --- a/absl/base/internal/bits.h +++ b/absl/base/internal/bits.h @@ -24,7 +24,7 @@ // Clang on Windows has __builtin_clzll; otherwise we need to use the // windows intrinsic functions. -#if defined(_MSC_VER) +#if defined(_MSC_VER) && !defined(__clang__) #include <intrin.h> #if defined(_M_X64) #pragma intrinsic(_BitScanReverse64) @@ -36,7 +36,7 @@ #include "absl/base/attributes.h" -#if defined(_MSC_VER) +#if defined(_MSC_VER) && !defined(__clang__) // We can achieve something similar to attribute((always_inline)) with MSVC by // using the __forceinline keyword, however this is not perfect. MSVC is // much less aggressive about inlining, and even with the __forceinline keyword. @@ -73,14 +73,14 @@ ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64Slow(uint64_t n) { } ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64(uint64_t n) { -#if defined(_MSC_VER) && defined(_M_X64) +#if defined(_MSC_VER) && !defined(__clang__) && defined(_M_X64) // MSVC does not have __buitin_clzll. Use _BitScanReverse64. unsigned long result = 0; // NOLINT(runtime/int) if (_BitScanReverse64(&result, n)) { return 63 - result; } return 64; -#elif defined(_MSC_VER) +#elif defined(_MSC_VER) && !defined(__clang__) // MSVC does not have __buitin_clzll. 
Compose two calls to _BitScanReverse unsigned long result = 0; // NOLINT(runtime/int) if ((n >> 32) && _BitScanReverse(&result, n >> 32)) { @@ -90,7 +90,7 @@ ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64(uint64_t n) { return 63 - result; } return 64; -#elif defined(__GNUC__) +#elif defined(__GNUC__) || defined(__clang__) // Use __builtin_clzll, which uses the following instructions: // x86: bsr // ARM64: clz @@ -126,13 +126,13 @@ ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32Slow(uint64_t n) { } ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32(uint32_t n) { -#if defined(_MSC_VER) +#if defined(_MSC_VER) && !defined(__clang__) unsigned long result = 0; // NOLINT(runtime/int) if (_BitScanReverse(&result, n)) { return 31 - result; } return 32; -#elif defined(__GNUC__) +#elif defined(__GNUC__) || defined(__clang__) // Use __builtin_clz, which uses the following instructions: // x86: bsr // ARM64: clz @@ -163,11 +163,11 @@ ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64Slow(uint64_t n) { } ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64(uint64_t n) { -#if defined(_MSC_VER) && defined(_M_X64) +#if defined(_MSC_VER) && !defined(__clang__) && defined(_M_X64) unsigned long result = 0; // NOLINT(runtime/int) _BitScanForward64(&result, n); return result; -#elif defined(_MSC_VER) +#elif defined(_MSC_VER) && !defined(__clang__) unsigned long result = 0; // NOLINT(runtime/int) if (static_cast<uint32_t>(n) == 0) { _BitScanForward(&result, n >> 32); @@ -175,7 +175,7 @@ ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64(uint64_t n) { } _BitScanForward(&result, n); return result; -#elif defined(__GNUC__) +#elif defined(__GNUC__) || defined(__clang__) static_assert(sizeof(unsigned long long) == sizeof(n), // NOLINT(runtime/int) "__builtin_ctzll does not take 64-bit arg"); return __builtin_ctzll(n); @@ -196,11 +196,11 @@ ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32Slow(uint32_t n) { } ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32(uint32_t n) { -#if defined(_MSC_VER) +#if defined(_MSC_VER) && !defined(__clang__) unsigned long result = 0; // NOLINT(runtime/int) _BitScanForward(&result, n); return result; -#elif defined(__GNUC__) +#elif defined(__GNUC__) || defined(__clang__) static_assert(sizeof(int) == sizeof(n), "__builtin_ctz does not take 32-bit arg"); return __builtin_ctz(n); diff --git a/absl/base/internal/fast_type_id.h b/absl/base/internal/fast_type_id.h new file mode 100644 index 000000000000..3db59e83745b --- /dev/null +++ b/absl/base/internal/fast_type_id.h @@ -0,0 +1,48 @@ +// +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#ifndef ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_ +#define ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_ + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +template <typename Type> +struct FastTypeTag { + constexpr static char dummy_var = 0; +}; + +template <typename Type> +constexpr char FastTypeTag<Type>::dummy_var; + +// FastTypeId<Type>() evaluates at compile/link-time to a unique pointer for the +// passed-in type. These are meant to be good match for keys into maps or +// straight up comparisons. +using FastTypeIdType = const void*; + +template <typename Type> +constexpr inline FastTypeIdType FastTypeId() { + return &FastTypeTag<Type>::dummy_var; +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_ diff --git a/absl/base/internal/fast_type_id_test.cc b/absl/base/internal/fast_type_id_test.cc new file mode 100644 index 000000000000..16f3c1458bdf --- /dev/null +++ b/absl/base/internal/fast_type_id_test.cc @@ -0,0 +1,123 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/fast_type_id.h" + +#include <cstdint> +#include <map> +#include <vector> + +#include "gtest/gtest.h" + +namespace { +namespace bi = absl::base_internal; + +// NOLINTNEXTLINE +#define PRIM_TYPES(A) \ + A(bool) \ + A(short) \ + A(unsigned short) \ + A(int) \ + A(unsigned int) \ + A(long) \ + A(unsigned long) \ + A(long long) \ + A(unsigned long long) \ + A(float) \ + A(double) \ + A(long double) + +TEST(FastTypeIdTest, PrimitiveTypes) { + bi::FastTypeIdType type_ids[] = { +#define A(T) bi::FastTypeId<T>(), + PRIM_TYPES(A) +#undef A +#define A(T) bi::FastTypeId<const T>(), + PRIM_TYPES(A) +#undef A +#define A(T) bi::FastTypeId<volatile T>(), + PRIM_TYPES(A) +#undef A +#define A(T) bi::FastTypeId<const volatile T>(), + PRIM_TYPES(A) +#undef A + }; + size_t total_type_ids = sizeof(type_ids) / sizeof(bi::FastTypeIdType); + + for (int i = 0; i < total_type_ids; ++i) { + EXPECT_EQ(type_ids[i], type_ids[i]); + for (int j = 0; j < i; ++j) { + EXPECT_NE(type_ids[i], type_ids[j]); + } + } +} + +#define FIXED_WIDTH_TYPES(A) \ + A(int8_t) \ + A(uint8_t) \ + A(int16_t) \ + A(uint16_t) \ + A(int32_t) \ + A(uint32_t) \ + A(int64_t) \ + A(uint64_t) + +TEST(FastTypeIdTest, FixedWidthTypes) { + bi::FastTypeIdType type_ids[] = { +#define A(T) bi::FastTypeId<T>(), + FIXED_WIDTH_TYPES(A) +#undef A +#define A(T) bi::FastTypeId<const T>(), + FIXED_WIDTH_TYPES(A) +#undef A +#define A(T) bi::FastTypeId<volatile T>(), + FIXED_WIDTH_TYPES(A) +#undef A +#define A(T) bi::FastTypeId<const volatile T>(), + FIXED_WIDTH_TYPES(A) +#undef A + }; + size_t total_type_ids = sizeof(type_ids) / sizeof(bi::FastTypeIdType); + + for (int i = 0; i < total_type_ids; ++i) { + EXPECT_EQ(type_ids[i], type_ids[i]); + for (int j = 0; j < i; ++j) { + EXPECT_NE(type_ids[i], type_ids[j]); + } + } +} + +TEST(FastTypeIdTest, AliasTypes) { + using int_alias = int; + 
EXPECT_EQ(bi::FastTypeId<int_alias>(), bi::FastTypeId<int>()); +} + +TEST(FastTypeIdTest, TemplateSpecializations) { + EXPECT_NE(bi::FastTypeId<std::vector<int>>(), + bi::FastTypeId<std::vector<long>>()); + + EXPECT_NE((bi::FastTypeId<std::map<int, float>>()), + (bi::FastTypeId<std::map<int, double>>())); +} + +struct Base {}; +struct Derived : Base {}; +struct PDerived : private Base {}; + +TEST(FastTypeIdTest, Inheritance) { + EXPECT_NE(bi::FastTypeId<Base>(), bi::FastTypeId<Derived>()); + EXPECT_NE(bi::FastTypeId<Base>(), bi::FastTypeId<PDerived>()); +} + +} // namespace diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel index 4bed57351aa4..1b0710b8d649 100644 --- a/absl/container/BUILD.bazel +++ b/absl/container/BUILD.bazel @@ -366,6 +366,7 @@ cc_library( linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ "//absl/memory", + "//absl/meta:type_traits", "//absl/utility", ], ) @@ -378,6 +379,7 @@ cc_test( tags = NOTEST_TAGS_NONMOBILE, deps = [ ":container_memory", + ":test_instance_tracker", "//absl/strings", "@com_google_googletest//:gtest_main", ], diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt index a732fe8273ac..d79fa12e46d0 100644 --- a/absl/container/CMakeLists.txt +++ b/absl/container/CMakeLists.txt @@ -421,6 +421,7 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} DEPS absl::memory + absl::type_traits absl::utility PUBLIC ) @@ -435,6 +436,7 @@ absl_cc_test( DEPS absl::container_memory absl::strings + absl::test_instance_tracker gmock_main ) diff --git a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h index 55b59c7ff313..3487ac18998f 100644 --- a/absl/container/internal/container_memory.h +++ b/absl/container/internal/container_memory.h @@ -31,6 +31,7 @@ #include <utility> #include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" #include "absl/utility/utility.h" namespace absl { @@ -319,11 +320,12 @@ union map_slot_type { map_slot_type() {} ~map_slot_type() = delete; using value_type = std::pair<const K, V>; - using mutable_value_type = std::pair<K, V>; + using mutable_value_type = + std::pair<absl::remove_const_t<K>, absl::remove_const_t<V>>; value_type value; mutable_value_type mutable_value; - K key; + absl::remove_const_t<K> key; }; template <class K, class V> diff --git a/absl/container/internal/container_memory_test.cc b/absl/container/internal/container_memory_test.cc index e3262e3c884a..6a7fcd29ba90 100644 --- a/absl/container/internal/container_memory_test.cc +++ b/absl/container/internal/container_memory_test.cc @@ -22,6 +22,7 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" +#include "absl/container/internal/test_instance_tracker.h" #include "absl/strings/string_view.h" namespace absl { @@ -29,9 +30,11 @@ ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { -using ::testing::Gt; +using ::absl::test_internal::CopyableMovableInstance; +using ::absl::test_internal::InstanceTracker; using ::testing::_; using ::testing::ElementsAre; +using ::testing::Gt; using ::testing::Pair; TEST(Memory, AlignmentLargerThanBase) { @@ -222,6 +225,31 @@ TEST(DecomposePair, NotDecomposable) { std::make_tuple(0.5))); } +TEST(MapSlotPolicy, ConstKeyAndValue) { + using slot_policy = map_slot_policy<const CopyableMovableInstance, + const CopyableMovableInstance>; + using slot_type = typename slot_policy::slot_type; + + union Slots { + Slots() {} + ~Slots() {} + slot_type slots[100]; + } slots; + + std::allocator< + std::pair<const CopyableMovableInstance, const CopyableMovableInstance>> + alloc; + 
InstanceTracker tracker; + slot_policy::construct(&alloc, &slots.slots[0], CopyableMovableInstance(1), + CopyableMovableInstance(1)); + for (int i = 0; i < 99; ++i) { + slot_policy::transfer(&alloc, &slots.slots[i + 1], &slots.slots[i]); + } + slot_policy::destroy(&alloc, &slots.slots[99]); + + EXPECT_EQ(tracker.copies(), 0); +} + } // namespace } // namespace container_internal ABSL_NAMESPACE_END diff --git a/absl/flags/BUILD.bazel b/absl/flags/BUILD.bazel index 908e77612be6..4b51d9d45207 100644 --- a/absl/flags/BUILD.bazel +++ b/absl/flags/BUILD.bazel @@ -147,6 +147,7 @@ cc_library( ":marshalling", "//absl/base:config", "//absl/base:core_headers", + "//absl/base:fast_type_id", "//absl/strings", "//absl/types:optional", ], diff --git a/absl/flags/CMakeLists.txt b/absl/flags/CMakeLists.txt index 01cf09b1b52a..2204b0ff2dd8 100644 --- a/absl/flags/CMakeLists.txt +++ b/absl/flags/CMakeLists.txt @@ -128,6 +128,7 @@ absl_cc_library( ${ABSL_DEFAULT_LINKOPTS} DEPS absl::config + absl::fast_type_id absl::flags_config absl::flags_marshalling absl::core_headers diff --git a/absl/flags/internal/commandlineflag.h b/absl/flags/internal/commandlineflag.h index 338a1228dacb..ef992f7f478d 100644 --- a/absl/flags/internal/commandlineflag.h +++ b/absl/flags/internal/commandlineflag.h @@ -24,6 +24,7 @@ #include <typeinfo> #include "absl/base/config.h" +#include "absl/base/internal/fast_type_id.h" #include "absl/base/macros.h" #include "absl/flags/config.h" #include "absl/flags/marshalling.h" @@ -34,23 +35,12 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace flags_internal { -// An alias for flag static type id. Values of type identify the flag value type -// simialarly to typeid(T), but without relying on RTTI being available. In most +// An alias for flag fast type id. This value identifies the flag value type +// simialarly to typeid(T), without relying on RTTI being available. In most // cases this id is enough to uniquely identify the flag's value type. In a few // cases we'll have to resort to using actual RTTI implementation if it is // available. -using FlagStaticTypeId = void* (*)(); - -// Address of this function template is used in current implementation as a flag -// static type id. -template <typename T> -void* FlagStaticTypeIdGen() { -#if defined(ABSL_FLAGS_INTERNAL_HAS_RTTI) - return const_cast<std::type_info*>(&typeid(T)); -#else - return nullptr; -#endif -} +using FlagFastTypeId = base_internal::FastTypeIdType; // Options that control SetCommandLineOptionWithMode. enum FlagSettingMode { @@ -97,7 +87,7 @@ class CommandLineFlag { // Return true iff flag has type T. template <typename T> inline bool IsOfType() const { - return TypeId() == &flags_internal::FlagStaticTypeIdGen<T>; + return TypeId() == base_internal::FastTypeId<T>(); } // Attempts to retrieve the flag value. Returns value on success, @@ -150,7 +140,7 @@ class CommandLineFlag { // Returns true iff this is a handle to an Abseil Flag. virtual bool IsAbseilFlag() const; // Returns id of the flag's value type. 
- virtual FlagStaticTypeId TypeId() const = 0; + virtual FlagFastTypeId TypeId() const = 0; virtual bool IsModified() const = 0; virtual bool IsSpecifiedOnCommandLine() const = 0; virtual std::string DefaultValue() const = 0; diff --git a/absl/flags/internal/flag.cc b/absl/flags/internal/flag.cc index 5b4499abe47e..f3c424ad83d8 100644 --- a/absl/flags/internal/flag.cc +++ b/absl/flags/internal/flag.cc @@ -48,9 +48,9 @@ const char kStrippedFlagHelp[] = "\001\002\003\004 (unknown) \004\003\002\001"; namespace { // Currently we only validate flag values for user-defined flag types. -bool ShouldValidateFlagValue(FlagStaticTypeId flag_type_id) { +bool ShouldValidateFlagValue(FlagFastTypeId flag_type_id) { #define DONT_VALIDATE(T) \ - if (flag_type_id == &FlagStaticTypeIdGen<T>) return false; + if (flag_type_id == base_internal::FastTypeId<T>()) return false; ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(DONT_VALIDATE) #undef DONT_VALIDATE @@ -161,24 +161,24 @@ absl::Mutex* FlagImpl::DataGuard() const { return reinterpret_cast<absl::Mutex*>(&data_guard_); } -void FlagImpl::AssertValidType(FlagStaticTypeId type_id) const { - FlagStaticTypeId this_type_id = flags_internal::StaticTypeId(op_); +void FlagImpl::AssertValidType(FlagFastTypeId rhs_type_id, + const std::type_info* (*gen_rtti)()) const { + FlagFastTypeId lhs_type_id = flags_internal::FastTypeId(op_); - // `type_id` is the type id corresponding to the declaration visibile at the - // call site. `this_type_id` is the type id corresponding to the type stored - // during flag definition. They must match for this operation to be - // well-defined. - if (ABSL_PREDICT_TRUE(type_id == this_type_id)) return; + // `rhs_type_id` is the fast type id corresponding to the declaration + // visibile at the call site. `lhs_type_id` is the fast type id + // corresponding to the type specified in flag definition. They must match + // for this operation to be well-defined. 
+ if (ABSL_PREDICT_TRUE(lhs_type_id == rhs_type_id)) return; - void* lhs_runtime_type_id = type_id(); - void* rhs_runtime_type_id = this_type_id(); + const std::type_info* lhs_runtime_type_id = + flags_internal::RuntimeTypeId(op_); + const std::type_info* rhs_runtime_type_id = (*gen_rtti)(); if (lhs_runtime_type_id == rhs_runtime_type_id) return; #if defined(ABSL_FLAGS_INTERNAL_HAS_RTTI) - if (*reinterpret_cast<std::type_info*>(lhs_runtime_type_id) == - *reinterpret_cast<std::type_info*>(rhs_runtime_type_id)) - return; + if (*lhs_runtime_type_id == *rhs_runtime_type_id) return; #endif ABSL_INTERNAL_LOG( @@ -233,8 +233,8 @@ std::string FlagImpl::Help() const { : help_.gen_func(); } -FlagStaticTypeId FlagImpl::TypeId() const { - return flags_internal::StaticTypeId(op_); +FlagFastTypeId FlagImpl::TypeId() const { + return flags_internal::FastTypeId(op_); } bool FlagImpl::IsModified() const { @@ -429,7 +429,7 @@ void FlagImpl::Read(void* dst) const { void FlagImpl::Write(const void* src) { absl::MutexLock l(DataGuard()); - if (ShouldValidateFlagValue(flags_internal::StaticTypeId(op_))) { + if (ShouldValidateFlagValue(flags_internal::FastTypeId(op_))) { std::unique_ptr<void, DynValueDeleter> obj{flags_internal::Clone(op_, src), DynValueDeleter{op_}}; std::string ignored_error; diff --git a/absl/flags/internal/flag.h b/absl/flags/internal/flag.h index 19119bbb6193..c1bf8652815d 100644 --- a/absl/flags/internal/flag.h +++ b/absl/flags/internal/flag.h @@ -23,6 +23,7 @@ #include <memory> #include <string> #include <type_traits> +#include <typeinfo> #include "absl/base/call_once.h" #include "absl/base/config.h" @@ -50,7 +51,8 @@ enum class FlagOp { kCopy, kCopyConstruct, kSizeof, - kStaticTypeId, + kFastTypeId, + kRuntimeTypeId, kParse, kUnparse, kValueOffset, @@ -96,10 +98,15 @@ inline size_t Sizeof(FlagOpFn op) { return static_cast<size_t>(reinterpret_cast<intptr_t>( op(FlagOp::kSizeof, nullptr, nullptr, nullptr))); } -// Returns static type id coresponding to the value type. -inline FlagStaticTypeId StaticTypeId(FlagOpFn op) { - return reinterpret_cast<FlagStaticTypeId>( - op(FlagOp::kStaticTypeId, nullptr, nullptr, nullptr)); +// Returns fast type id coresponding to the value type. +inline FlagFastTypeId FastTypeId(FlagOpFn op) { + return reinterpret_cast<FlagFastTypeId>( + op(FlagOp::kFastTypeId, nullptr, nullptr, nullptr)); +} +// Returns fast type id coresponding to the value type. +inline const std::type_info* RuntimeTypeId(FlagOpFn op) { + return reinterpret_cast<const std::type_info*>( + op(FlagOp::kRuntimeTypeId, nullptr, nullptr, nullptr)); } // Returns offset of the field value_ from the field impl_ inside of // absl::Flag<T> data. Given FlagImpl pointer p you can get the @@ -112,6 +119,16 @@ inline ptrdiff_t ValueOffset(FlagOpFn op) { op(FlagOp::kValueOffset, nullptr, nullptr, nullptr))); } +// Returns an address of RTTI's typeid(T). +template <typename T> +inline const std::type_info* GenRuntimeTypeId() { +#if defined(ABSL_FLAGS_INTERNAL_HAS_RTTI) + return &typeid(T); +#else + return nullptr; +#endif +} + /////////////////////////////////////////////////////////////////////////////// // Flag help auxiliary structs. @@ -374,9 +391,10 @@ class FlagImpl final : public flags_internal::CommandLineFlag { // For example if flag is declared as absl::Flag<int> FLAGS_foo, a call to // absl::GetFlag(FLAGS_foo) validates that the type of FLAGS_foo is indeed // int. 
To do that we pass the "assumed" type id (which is deduced from type - // int) as an argument `op`, which is in turn is validated against the type id - // stored in flag object by flag definition statement. - void AssertValidType(FlagStaticTypeId type_id) const; + // int) as an argument `type_id`, which is in turn is validated against the + // type id stored in flag object by flag definition statement. + void AssertValidType(FlagFastTypeId type_id, + const std::type_info* (*gen_rtti)()) const; private: template <typename T> @@ -433,7 +451,7 @@ class FlagImpl final : public flags_internal::CommandLineFlag { std::string Filename() const override; absl::string_view Typename() const override; std::string Help() const override; - FlagStaticTypeId TypeId() const override; + FlagFastTypeId TypeId() const override; bool IsModified() const override ABSL_LOCKS_EXCLUDED(*DataGuard()); bool IsSpecifiedOnCommandLine() const override ABSL_LOCKS_EXCLUDED(*DataGuard()); @@ -539,14 +557,14 @@ class Flag { U u; #if !defined(NDEBUG) - impl_.AssertValidType(&flags_internal::FlagStaticTypeIdGen<T>); + impl_.AssertValidType(base_internal::FastTypeId<T>(), &GenRuntimeTypeId<T>); #endif if (!value_.Get(&u.value)) impl_.Read(&u.value); return std::move(u.value); } void Set(const T& v) { - impl_.AssertValidType(&flags_internal::FlagStaticTypeIdGen<T>); + impl_.AssertValidType(base_internal::FastTypeId<T>(), &GenRuntimeTypeId<T>); impl_.Write(&v); } void SetCallback(const FlagCallbackFunc mutation_callback) { @@ -595,8 +613,10 @@ void* FlagOps(FlagOp op, const void* v1, void* v2, void* v3) { return nullptr; case FlagOp::kSizeof: return reinterpret_cast<void*>(static_cast<uintptr_t>(sizeof(T))); - case FlagOp::kStaticTypeId: - return reinterpret_cast<void*>(&FlagStaticTypeIdGen<T>); + case FlagOp::kFastTypeId: + return const_cast<void*>(base_internal::FastTypeId<T>()); + case FlagOp::kRuntimeTypeId: + return const_cast<std::type_info*>(GenRuntimeTypeId<T>()); case FlagOp::kParse: { // Initialize the temporary instance of type T based on current value in // destination (which is going to be flag's default value). 
diff --git a/absl/flags/internal/registry.cc b/absl/flags/internal/registry.cc index 9ed912140ee7..eb619c7086ad 100644 --- a/absl/flags/internal/registry.cc +++ b/absl/flags/internal/registry.cc @@ -284,14 +284,14 @@ namespace { class RetiredFlagObj final : public flags_internal::CommandLineFlag { public: - constexpr RetiredFlagObj(const char* name, FlagStaticTypeId type_id) + constexpr RetiredFlagObj(const char* name, FlagFastTypeId type_id) : name_(name), type_id_(type_id) {} private: absl::string_view Name() const override { return name_; } std::string Filename() const override { return "RETIRED"; } absl::string_view Typename() const override { return ""; } - FlagStaticTypeId TypeId() const override { return type_id_; } + FlagFastTypeId TypeId() const override { return type_id_; } std::string Help() const override { return ""; } bool IsRetired() const override { return true; } bool IsModified() const override { return false; } @@ -317,7 +317,7 @@ class RetiredFlagObj final : public flags_internal::CommandLineFlag { // Data members const char* const name_; - const FlagStaticTypeId type_id_; + const FlagFastTypeId type_id_; }; void DestroyRetiredFlag(flags_internal::CommandLineFlag* flag) { @@ -327,7 +327,7 @@ void DestroyRetiredFlag(flags_internal::CommandLineFlag* flag) { } // namespace -bool Retire(const char* name, FlagStaticTypeId type_id) { +bool Retire(const char* name, FlagFastTypeId type_id) { auto* flag = new flags_internal::RetiredFlagObj(name, type_id); FlagRegistry::GlobalRegistry()->RegisterFlag(flag); return true; diff --git a/absl/flags/internal/registry.h b/absl/flags/internal/registry.h index 69ff889fb100..af8ed6b99b78 100644 --- a/absl/flags/internal/registry.h +++ b/absl/flags/internal/registry.h @@ -79,12 +79,12 @@ bool RegisterCommandLineFlag(CommandLineFlag*); // // Retire flag with name "name" and type indicated by ops. -bool Retire(const char* name, FlagStaticTypeId type_id); +bool Retire(const char* name, FlagFastTypeId type_id); // Registered a retired flag with name 'flag_name' and type 'T'. 
template <typename T> inline bool RetiredFlag(const char* flag_name) { - return flags_internal::Retire(flag_name, &FlagStaticTypeIdGen<T>); + return flags_internal::Retire(flag_name, base_internal::FastTypeId<T>()); } // If the flag is retired, returns true and indicates in |*type_is_bool| diff --git a/absl/strings/BUILD.bazel b/absl/strings/BUILD.bazel index 64f55fb4f93d..389011224891 100644 --- a/absl/strings/BUILD.bazel +++ b/absl/strings/BUILD.bazel @@ -313,6 +313,7 @@ cc_test( ":strings", "//absl/base", "//absl/base:config", + "//absl/base:core_headers", "//absl/base:endian", "//absl/base:raw_logging_internal", "//absl/container:fixed_array", diff --git a/absl/strings/CMakeLists.txt b/absl/strings/CMakeLists.txt index c7874ecf10da..d3a8bd7eb1e5 100644 --- a/absl/strings/CMakeLists.txt +++ b/absl/strings/CMakeLists.txt @@ -578,6 +578,7 @@ absl_cc_test( absl::strings absl::base absl::config + absl::core_headers absl::endian absl::raw_logging_internal absl::fixed_array diff --git a/absl/strings/cord.cc b/absl/strings/cord.cc index 4f64f79965c7..7de7766c5116 100644 --- a/absl/strings/cord.cc +++ b/absl/strings/cord.cc @@ -28,9 +28,9 @@ #include "absl/base/casts.h" #include "absl/base/internal/raw_logging.h" +#include "absl/base/macros.h" #include "absl/base/port.h" #include "absl/container/fixed_array.h" -#include "absl/container/inlined_vector.h" #include "absl/strings/escaping.h" #include "absl/strings/internal/cord_internal.h" #include "absl/strings/internal/resize_uninitialized.h" @@ -132,6 +132,14 @@ inline const CordRepExternal* CordRep::external() const { return static_cast<const CordRepExternal*>(this); } +using CordTreeConstPath = CordTreePath<const CordRep*, MaxCordDepth()>; + +// This type is used to store the list of pending nodes during re-balancing. +// Its maximum size is 2 * MaxCordDepth() because the tree has a maximum +// possible depth of MaxCordDepth() and every concat node along a tree path +// could theoretically be split during rebalancing. +using RebalancingStack = CordTreePath<CordRep*, 2 * MaxCordDepth()>; + } // namespace cord_internal static const size_t kFlatOverhead = offsetof(CordRep, data); @@ -180,98 +188,78 @@ static constexpr size_t TagToLength(uint8_t tag) { // Enforce that kMaxFlatSize maps to a well-known exact tag value. static_assert(TagToAllocatedSize(224) == kMaxFlatSize, "Bad tag logic"); -constexpr uint64_t Fibonacci(unsigned char n, uint64_t a = 0, uint64_t b = 1) { - return n == 0 ? a : Fibonacci(n - 1, b, a + b); +constexpr size_t Fibonacci(uint8_t n, const size_t a = 0, const size_t b = 1) { + return n == 0 + ? a + : n == 1 ? b + : Fibonacci(n - 1, b, + (a > (size_t(-1) - b)) ? size_t(-1) : a + b); } -static_assert(Fibonacci(63) == 6557470319842, - "Fibonacci values computed incorrectly"); - // Minimum length required for a given depth tree -- a tree is considered // balanced if -// length(t) >= min_length[depth(t)] -// The root node depth is allowed to become twice as large to reduce rebalancing -// for larger strings (see IsRootBalanced). 
-static constexpr uint64_t min_length[] = { - Fibonacci(2), - Fibonacci(3), - Fibonacci(4), - Fibonacci(5), - Fibonacci(6), - Fibonacci(7), - Fibonacci(8), - Fibonacci(9), - Fibonacci(10), - Fibonacci(11), - Fibonacci(12), - Fibonacci(13), - Fibonacci(14), - Fibonacci(15), - Fibonacci(16), - Fibonacci(17), - Fibonacci(18), - Fibonacci(19), - Fibonacci(20), - Fibonacci(21), - Fibonacci(22), - Fibonacci(23), - Fibonacci(24), - Fibonacci(25), - Fibonacci(26), - Fibonacci(27), - Fibonacci(28), - Fibonacci(29), - Fibonacci(30), - Fibonacci(31), - Fibonacci(32), - Fibonacci(33), - Fibonacci(34), - Fibonacci(35), - Fibonacci(36), - Fibonacci(37), - Fibonacci(38), - Fibonacci(39), - Fibonacci(40), - Fibonacci(41), - Fibonacci(42), - Fibonacci(43), - Fibonacci(44), - Fibonacci(45), - Fibonacci(46), - Fibonacci(47), - 0xffffffffffffffffull, // Avoid overflow -}; - -static const int kMinLengthSize = ABSL_ARRAYSIZE(min_length); - -// The inlined size to use with absl::InlinedVector. -// -// Note: The InlinedVectors in this file (and in cord.h) do not need to use -// the same value for their inlined size. The fact that they do is historical. -// It may be desirable for each to use a different inlined size optimized for -// that InlinedVector's usage. -// -// TODO(jgm): Benchmark to see if there's a more optimal value than 47 for -// the inlined vector size (47 exists for backward compatibility). -static const int kInlinedVectorSize = 47; - -static inline bool IsRootBalanced(CordRep* node) { - if (node->tag != CONCAT) { - return true; - } else if (node->concat()->depth() <= 15) { - return true; - } else if (node->concat()->depth() > kMinLengthSize) { - return false; - } else { - // Allow depth to become twice as large as implied by fibonacci rule to - // reduce rebalancing for larger strings. - return (node->length >= min_length[node->concat()->depth() / 2]); - } +// length(t) >= kMinLength[depth(t)] +// The node depth is allowed to become larger to reduce rebalancing +// for larger strings (see ShouldRebalance). 
+constexpr size_t kMinLength[] = { + Fibonacci(2), Fibonacci(3), Fibonacci(4), Fibonacci(5), Fibonacci(6), + Fibonacci(7), Fibonacci(8), Fibonacci(9), Fibonacci(10), Fibonacci(11), + Fibonacci(12), Fibonacci(13), Fibonacci(14), Fibonacci(15), Fibonacci(16), + Fibonacci(17), Fibonacci(18), Fibonacci(19), Fibonacci(20), Fibonacci(21), + Fibonacci(22), Fibonacci(23), Fibonacci(24), Fibonacci(25), Fibonacci(26), + Fibonacci(27), Fibonacci(28), Fibonacci(29), Fibonacci(30), Fibonacci(31), + Fibonacci(32), Fibonacci(33), Fibonacci(34), Fibonacci(35), Fibonacci(36), + Fibonacci(37), Fibonacci(38), Fibonacci(39), Fibonacci(40), Fibonacci(41), + Fibonacci(42), Fibonacci(43), Fibonacci(44), Fibonacci(45), Fibonacci(46), + Fibonacci(47), Fibonacci(48), Fibonacci(49), Fibonacci(50), Fibonacci(51), + Fibonacci(52), Fibonacci(53), Fibonacci(54), Fibonacci(55), Fibonacci(56), + Fibonacci(57), Fibonacci(58), Fibonacci(59), Fibonacci(60), Fibonacci(61), + Fibonacci(62), Fibonacci(63), Fibonacci(64), Fibonacci(65), Fibonacci(66), + Fibonacci(67), Fibonacci(68), Fibonacci(69), Fibonacci(70), Fibonacci(71), + Fibonacci(72), Fibonacci(73), Fibonacci(74), Fibonacci(75), Fibonacci(76), + Fibonacci(77), Fibonacci(78), Fibonacci(79), Fibonacci(80), Fibonacci(81), + Fibonacci(82), Fibonacci(83), Fibonacci(84), Fibonacci(85), Fibonacci(86), + Fibonacci(87), Fibonacci(88), Fibonacci(89), Fibonacci(90), Fibonacci(91), + Fibonacci(92), Fibonacci(93), Fibonacci(94), Fibonacci(95)}; + +static_assert(sizeof(kMinLength) / sizeof(size_t) >= + (cord_internal::MaxCordDepth() + 1), + "Not enough elements in kMinLength array to cover all the " + "supported Cord depth(s)"); + +inline bool ShouldRebalance(const CordRep* node) { + if (node->tag != CONCAT) return false; + + size_t node_depth = node->concat()->depth(); + + if (node_depth <= 15) return false; + + // Rebalancing Cords is expensive, so we reduce how often rebalancing occurs + // by allowing shallow Cords to have twice the depth that the Fibonacci rule + // would otherwise imply. Deep Cords need to follow the rule more closely, + // however to ensure algorithm correctness. We implement this with linear + // interpolation. Cords of depth 16 are treated as though they have a depth + // of 16 * 1/2, and Cords of depth MaxCordDepth() interpolate to + // MaxCordDepth() * 1. + return node->length < + kMinLength[(node_depth * (cord_internal::MaxCordDepth() - 16)) / + (2 * cord_internal::MaxCordDepth() - 16 - node_depth)]; +} + +// Unlike root balancing condition this one is part of the re-balancing +// algorithm and has to be always matching against right depth for +// algorithm to be correct. 
+inline bool IsNodeBalanced(const CordRep* node) { + if (node->tag != CONCAT) return true; + + size_t node_depth = node->concat()->depth(); + + return node->length >= kMinLength[node_depth]; } static CordRep* Rebalance(CordRep* node); -static void DumpNode(CordRep* rep, bool include_data, std::ostream* os); -static bool VerifyNode(CordRep* root, CordRep* start_node, +static void DumpNode(const CordRep* rep, bool include_data, std::ostream* os); +static bool VerifyNode(const CordRep* root, const CordRep* start_node, bool full_validation); static inline CordRep* VerifyTree(CordRep* node) { @@ -318,7 +306,8 @@ __attribute__((preserve_most)) static void UnrefInternal(CordRep* rep) { assert(rep != nullptr); - absl::InlinedVector<CordRep*, kInlinedVectorSize> pending; + cord_internal::RebalancingStack pending; + while (true) { if (rep->tag == CONCAT) { CordRepConcat* rep_concat = rep->concat(); @@ -400,6 +389,11 @@ static void SetConcatChildren(CordRepConcat* concat, CordRep* left, concat->length = left->length + right->length; concat->set_depth(1 + std::max(Depth(left), Depth(right))); + + ABSL_INTERNAL_CHECK(concat->depth() <= cord_internal::MaxCordDepth(), + "Cord depth exceeds max"); + ABSL_INTERNAL_CHECK(concat->length >= left->length, "Cord is too long"); + ABSL_INTERNAL_CHECK(concat->length >= right->length, "Cord is too long"); } // Create a concatenation of the specified nodes. @@ -425,7 +419,7 @@ static CordRep* RawConcat(CordRep* left, CordRep* right) { static CordRep* Concat(CordRep* left, CordRep* right) { CordRep* rep = RawConcat(left, right); - if (rep != nullptr && !IsRootBalanced(rep)) { + if (rep != nullptr && ShouldRebalance(rep)) { rep = Rebalance(rep); } return VerifyTree(rep); @@ -720,6 +714,14 @@ void Cord::InlineRep::ClearSlow() { memset(data_, 0, sizeof(data_)); } +inline Cord::InternalChunkIterator Cord::internal_chunk_begin() const { + return InternalChunkIterator(this); +} + +inline Cord::InternalChunkRange Cord::InternalChunks() const { + return InternalChunkRange(this); +} + // -------------------------------------------------------------------- // Constructors and destructors @@ -916,7 +918,7 @@ void Cord::Prepend(absl::string_view src) { static CordRep* RemovePrefixFrom(CordRep* node, size_t n) { if (n >= node->length) return nullptr; if (n == 0) return Ref(node); - absl::InlinedVector<CordRep*, kInlinedVectorSize> rhs_stack; + cord_internal::CordTreeMutablePath rhs_stack; while (node->tag == CONCAT) { assert(n <= node->length); @@ -957,7 +959,7 @@ static CordRep* RemovePrefixFrom(CordRep* node, size_t n) { static CordRep* RemoveSuffixFrom(CordRep* node, size_t n) { if (n >= node->length) return nullptr; if (n == 0) return Ref(node); - absl::InlinedVector<CordRep*, kInlinedVectorSize> lhs_stack; + absl::cord_internal::CordTreeMutablePath lhs_stack; bool inplace_ok = node->refcount.IsOne(); while (node->tag == CONCAT) { @@ -1028,6 +1030,7 @@ void Cord::RemoveSuffix(size_t n) { // Work item for NewSubRange(). struct SubRange { + SubRange() = default; SubRange(CordRep* a_node, size_t a_pos, size_t a_n) : node(a_node), pos(a_pos), n(a_n) {} CordRep* node; // nullptr means concat last 2 results. 
@@ -1036,8 +1039,11 @@ struct SubRange { }; static CordRep* NewSubRange(CordRep* node, size_t pos, size_t n) { - absl::InlinedVector<CordRep*, kInlinedVectorSize> results; - absl::InlinedVector<SubRange, kInlinedVectorSize> todo; + cord_internal::CordTreeMutablePath results; + // The algorithm below in worst case scenario adds up to 3 nodes to the `todo` + // list, but we also pop one out on every cycle. If original tree has depth d + // todo list can grew up to 2*d in size. + cord_internal::CordTreePath<SubRange, 2 * cord_internal::MaxCordDepth()> todo; todo.push_back(SubRange(node, pos, n)); do { const SubRange& sr = todo.back(); @@ -1074,7 +1080,7 @@ static CordRep* NewSubRange(CordRep* node, size_t pos, size_t n) { } } while (!todo.empty()); assert(results.size() == 1); - return results[0]; + return results.back(); } Cord Cord::Subcord(size_t pos, size_t new_size) const { @@ -1090,7 +1096,7 @@ Cord Cord::Subcord(size_t pos, size_t new_size) const { } else if (new_size == 0) { // We want to return empty subcord, so nothing to do. } else if (new_size <= InlineRep::kMaxInline) { - Cord::ChunkIterator it = chunk_begin(); + Cord::InternalChunkIterator it = internal_chunk_begin(); it.AdvanceBytes(pos); char* dest = sub_cord.contents_.data_; size_t remaining_size = new_size; @@ -1113,11 +1119,12 @@ Cord Cord::Subcord(size_t pos, size_t new_size) const { class CordForest { public: - explicit CordForest(size_t length) - : root_length_(length), trees_(kMinLengthSize, nullptr) {} + explicit CordForest(size_t length) : root_length_(length), trees_({}) {} void Build(CordRep* cord_root) { - std::vector<CordRep*> pending = {cord_root}; + // We are adding up to two nodes to the `pending` list, but we also popping + // one, so the size of `pending` will never exceed `MaxCordDepth()`. 
+ cord_internal::CordTreeMutablePath pending(cord_root); while (!pending.empty()) { CordRep* node = pending.back(); @@ -1129,21 +1136,20 @@ class CordForest { } CordRepConcat* concat_node = node->concat(); - if (concat_node->depth() >= kMinLengthSize || - concat_node->length < min_length[concat_node->depth()]) { - pending.push_back(concat_node->right); - pending.push_back(concat_node->left); - - if (concat_node->refcount.IsOne()) { - concat_node->left = concat_freelist_; - concat_freelist_ = concat_node; - } else { - Ref(concat_node->right); - Ref(concat_node->left); - Unref(concat_node); - } - } else { + if (IsNodeBalanced(concat_node)) { AddNode(node); + continue; + } + pending.push_back(concat_node->right); + pending.push_back(concat_node->left); + + if (concat_node->refcount.IsOne()) { + concat_node->left = concat_freelist_; + concat_freelist_ = concat_node; + } else { + Ref(concat_node->right); + Ref(concat_node->left); + Unref(concat_node); } } } @@ -1175,7 +1181,7 @@ class CordForest { // Collect together everything with which we will merge with node int i = 0; - for (; node->length > min_length[i + 1]; ++i) { + for (; node->length >= kMinLength[i + 1]; ++i) { auto& tree_at_i = trees_[i]; if (tree_at_i == nullptr) continue; @@ -1186,7 +1192,7 @@ class CordForest { sum = AppendNode(node, sum); // Insert sum into appropriate place in the forest - for (; sum->length >= min_length[i]; ++i) { + for (; sum->length >= kMinLength[i]; ++i) { auto& tree_at_i = trees_[i]; if (tree_at_i == nullptr) continue; @@ -1194,7 +1200,7 @@ class CordForest { tree_at_i = nullptr; } - // min_length[0] == 1, which means sum->length >= min_length[0] + // kMinLength[0] == 1, which means sum->length >= kMinLength[0] assert(i > 0); trees_[i - 1] = sum; } @@ -1227,9 +1233,7 @@ class CordForest { } size_t root_length_; - - // use an inlined vector instead of a flat array to get bounds checking - absl::InlinedVector<CordRep*, kInlinedVectorSize> trees_; + std::array<cord_internal::CordRep*, cord_internal::MaxCordDepth()> trees_; // List of concat nodes we can re-use for Cord balancing. CordRepConcat* concat_freelist_ = nullptr; @@ -1330,7 +1334,7 @@ inline absl::string_view Cord::InlineRep::FindFlatStartPiece() const { inline int Cord::CompareSlowPath(absl::string_view rhs, size_t compared_size, size_t size_to_compare) const { - auto advance = [](Cord::ChunkIterator* it, absl::string_view* chunk) { + auto advance = [](Cord::InternalChunkIterator* it, absl::string_view* chunk) { if (!chunk->empty()) return true; ++*it; if (it->bytes_remaining_ == 0) return false; @@ -1338,7 +1342,7 @@ inline int Cord::CompareSlowPath(absl::string_view rhs, size_t compared_size, return true; }; - Cord::ChunkIterator lhs_it = chunk_begin(); + Cord::InternalChunkIterator lhs_it = internal_chunk_begin(); // compared_size is inside first chunk. 
absl::string_view lhs_chunk = @@ -1360,7 +1364,7 @@ inline int Cord::CompareSlowPath(absl::string_view rhs, size_t compared_size, inline int Cord::CompareSlowPath(const Cord& rhs, size_t compared_size, size_t size_to_compare) const { - auto advance = [](Cord::ChunkIterator* it, absl::string_view* chunk) { + auto advance = [](Cord::InternalChunkIterator* it, absl::string_view* chunk) { if (!chunk->empty()) return true; ++*it; if (it->bytes_remaining_ == 0) return false; @@ -1368,8 +1372,8 @@ inline int Cord::CompareSlowPath(const Cord& rhs, size_t compared_size, return true; }; - Cord::ChunkIterator lhs_it = chunk_begin(); - Cord::ChunkIterator rhs_it = rhs.chunk_begin(); + Cord::InternalChunkIterator lhs_it = internal_chunk_begin(); + Cord::InternalChunkIterator rhs_it = rhs.internal_chunk_begin(); // compared_size is inside both first chunks. absl::string_view lhs_chunk = @@ -1503,8 +1507,11 @@ void Cord::CopyToArraySlowPath(char* dst) const { } } -Cord::ChunkIterator& Cord::ChunkIterator::operator++() { - assert(bytes_remaining_ > 0 && "Attempted to iterate past `end()`"); +template <typename StorageType> +Cord::GenericChunkIterator<StorageType>& +Cord::GenericChunkIterator<StorageType>::operator++() { + ABSL_HARDENING_ASSERT(bytes_remaining_ > 0 && + "Attempted to iterate past `end()`"); assert(bytes_remaining_ >= current_chunk_.size()); bytes_remaining_ -= current_chunk_.size(); @@ -1542,8 +1549,10 @@ Cord::ChunkIterator& Cord::ChunkIterator::operator++() { return *this; } -Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) { - assert(bytes_remaining_ >= n && "Attempted to iterate past `end()`"); +template <typename StorageType> +Cord Cord::GenericChunkIterator<StorageType>::AdvanceAndReadBytes(size_t n) { + ABSL_HARDENING_ASSERT(bytes_remaining_ >= n && + "Attempted to iterate past `end()`"); Cord subcord; if (n <= InlineRep::kMaxInline) { @@ -1655,7 +1664,8 @@ Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) { return subcord; } -void Cord::ChunkIterator::AdvanceBytesSlowPath(size_t n) { +template <typename StorageType> +void Cord::GenericChunkIterator<StorageType>::AdvanceBytesSlowPath(size_t n) { assert(bytes_remaining_ >= n && "Attempted to iterate past `end()`"); assert(n >= current_chunk_.size()); // This should only be called when // iterating to a new node. @@ -1714,7 +1724,7 @@ void Cord::ChunkIterator::AdvanceBytesSlowPath(size_t n) { } char Cord::operator[](size_t i) const { - assert(i < size()); + ABSL_HARDENING_ASSERT(i < size()); size_t offset = i; const CordRep* rep = contents_.tree(); if (rep == nullptr) { @@ -1841,18 +1851,18 @@ absl::string_view Cord::FlattenSlowPath() { } } -static void DumpNode(CordRep* rep, bool include_data, std::ostream* os) { +static void DumpNode(const CordRep* rep, bool include_data, std::ostream* os) { const int kIndentStep = 1; int indent = 0; - absl::InlinedVector<CordRep*, kInlinedVectorSize> stack; - absl::InlinedVector<int, kInlinedVectorSize> indents; + cord_internal::CordTreeConstPath stack; + cord_internal::CordTreePath<int, cord_internal::MaxCordDepth()> indents; for (;;) { *os << std::setw(3) << rep->refcount.Get(); *os << " " << std::setw(7) << rep->length; *os << " ["; - if (include_data) *os << static_cast<void*>(rep); + if (include_data) *os << static_cast<const void*>(rep); *os << "]"; - *os << " " << (IsRootBalanced(rep) ? 'b' : 'u'); + *os << " " << (IsNodeBalanced(rep) ? 
'b' : 'u'); *os << " " << std::setw(indent) << ""; if (rep->tag == CONCAT) { *os << "CONCAT depth=" << Depth(rep) << "\n"; @@ -1873,7 +1883,7 @@ static void DumpNode(CordRep* rep, bool include_data, std::ostream* os) { } else { *os << "FLAT cap=" << TagToLength(rep->tag) << " ["; if (include_data) - *os << absl::CEscape(std::string(rep->data, rep->length)); + *os << absl::CEscape(absl::string_view(rep->data, rep->length)); *os << "]\n"; } if (stack.empty()) break; @@ -1886,19 +1896,19 @@ static void DumpNode(CordRep* rep, bool include_data, std::ostream* os) { ABSL_INTERNAL_CHECK(indents.empty(), ""); } -static std::string ReportError(CordRep* root, CordRep* node) { +static std::string ReportError(const CordRep* root, const CordRep* node) { std::ostringstream buf; buf << "Error at node " << node << " in:"; DumpNode(root, true, &buf); return buf.str(); } -static bool VerifyNode(CordRep* root, CordRep* start_node, +static bool VerifyNode(const CordRep* root, const CordRep* start_node, bool full_validation) { - absl::InlinedVector<CordRep*, 2> worklist; + cord_internal::CordTreeConstPath worklist; worklist.push_back(start_node); do { - CordRep* node = worklist.back(); + const CordRep* node = worklist.back(); worklist.pop_back(); ABSL_INTERNAL_CHECK(node != nullptr, ReportError(root, node)); @@ -1948,7 +1958,7 @@ static bool VerifyNode(CordRep* root, CordRep* start_node, // Iterate over the tree. cur_node is never a leaf node and leaf nodes will // never be appended to tree_stack. This reduces overhead from manipulating // tree_stack. - absl::InlinedVector<const CordRep*, kInlinedVectorSize> tree_stack; + cord_internal::CordTreeConstPath tree_stack; const CordRep* cur_node = rep; while (true) { const CordRep* next_node = nullptr; @@ -1995,6 +2005,9 @@ std::ostream& operator<<(std::ostream& out, const Cord& cord) { return out; } +template class Cord::GenericChunkIterator<cord_internal::CordTreeMutablePath>; +template class Cord::GenericChunkIterator<cord_internal::CordTreeDynamicPath>; + namespace strings_internal { size_t CordTestAccess::FlatOverhead() { return kFlatOverhead; } size_t CordTestAccess::MaxFlatLength() { return kMaxFlatLength; } diff --git a/absl/strings/cord.h b/absl/strings/cord.h index 66645eef6e48..3ab3cb87f520 100644 --- a/absl/strings/cord.h +++ b/absl/strings/cord.h @@ -11,25 +11,52 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. - -// A Cord is a sequence of characters with some unusual access propreties. -// A Cord supports efficient insertions and deletions at the start and end of -// the byte sequence, but random access reads are slower, and random access -// modifications are not supported by the API. Cord also provides cheap copies -// (using a copy-on-write strategy) and cheap substring operations. // -// Thread safety -// ------------- +// ----------------------------------------------------------------------------- +// File: cord.h +// ----------------------------------------------------------------------------- +// +// This file defines the `absl::Cord` data structure and operations on that data +// structure. A Cord is a string-like sequence of characters optimized for +// specific use cases. Unlike a `std::string`, which stores an array of +// contiguous characters, Cord data is stored in a structure consisting of +// separate, reference-counted "chunks." 
(Currently, this implementation is a +// tree structure, though that implementation may change.) +// +// Because a Cord consists of these chunks, data can be added to or removed from +// a Cord during its lifetime. Chunks may also be shared between Cords. Unlike a +// `std::string`, a Cord can therefore accomodate data that changes over its +// lifetime, though it's not quite "mutable"; it can change only in the +// attachment, detachment, or rearrangement of chunks of its constituent data. +// +// A Cord provides some benefit over `std::string` under the following (albeit +// narrow) circumstances: +// +// * Cord data is designed to grow and shrink over a Cord's lifetime. Cord +// provides efficient insertions and deletions at the start and end of the +// character sequences, avoiding copies in those cases. Static data should +// generally be stored as strings. +// * External memory consisting of string-like data can be directly added to +// a Cord without requiring copies or allocations. +// * Cord data may be shared and copied cheaply. Cord provides a copy-on-write +// implementation and cheap sub-Cord operations. Copying a Cord is an O(1) +// operation. +// +// As a consequence to the above, Cord data is generally large. Small data +// should generally use strings, as construction of a Cord requires some +// overhead. Small Cords (<= 15 bytes) are represented inline, but most small +// Cords are expected to grow over their lifetimes. +// +// Note that because a Cord is made up of separate chunked data, random access +// to character data within a Cord is slower than within a `std::string`. +// +// Thread Safety +// // Cord has the same thread-safety properties as many other types like // std::string, std::vector<>, int, etc -- it is thread-compatible. In -// particular, if no thread may call a non-const method, then it is safe to -// concurrently call const methods. Copying a Cord produces a new instance that -// can be used concurrently with the original in arbitrary ways. -// -// Implementation is similar to the "Ropes" described in: -// Ropes: An alternative to strings -// Hans J. Boehm, Russ Atkinson, Michael Plass -// Software Practice and Experience, December 1995 +// particular, if threads do not call non-const methods, then it is safe to call +// const methods without synchronization. Copying a Cord produces a new instance +// that can be used concurrently with the original in arbitrary ways. #ifndef ABSL_STRINGS_CORD_H_ #define ABSL_STRINGS_CORD_H_ @@ -68,6 +95,90 @@ template <typename H> H HashFragmentedCord(H, const Cord&); } +// Cord +// +// A Cord is a sequence of characters, designed to be more efficient than a +// `std::string` in certain circumstances: namely, large string data that needs +// to change over its lifetime or shared, especially when such data is shared +// across API boundaries. +// +// A Cord stores its character data in a structure that allows efficient prepend +// and append operations. This makes a Cord useful for large string data sent +// over in a wire format that may need to be prepended or appended at some point +// during the data exchange (e.g. HTTP, protocol buffers). For example, a +// Cord is useful for storing an HTTP request, and prepending an HTTP header to +// such a request. +// +// Cords should not be used for storing general string data, however. They +// require overhead to construct and are slower than strings for random access. 
+// +// The Cord API provides the following common API operations: +// +// * Create or assign Cords out of existing string data, memory, or other Cords +// * Append and prepend data to an existing Cord +// * Create new Sub-Cords from existing Cord data +// * Swap Cord data and compare Cord equality +// * Write out Cord data by constructing a `std::string` +// +// Additionally, the API provides iterator utilities to iterate through Cord +// data via chunks or character bytes. +// + +namespace cord_internal { + +// It's expensive to keep a Cord's tree perfectly balanced, so instead we keep +// trees approximately balanced. A tree node N of depth D(N) that contains a +// string of L(N) characters is considered balanced if L >= Fibonacci(D + 2). +// The "+ 2" is used to ensure that every balanced leaf node contains at least +// one character. Here we presume that +// Fibonacci(0) = 0 +// Fibonacci(1) = 1 +// Fibonacci(2) = 1 +// Fibonacci(3) = 2 +// ... +// The algorithm is based on paper by Hans Boehm et al: +// https://www.cs.rit.edu/usr/local/pub/jeh/courses/QUARTERS/FP/Labs/CedarRope/rope-paper.pdf +// In this paper authors shows that rebalancing based on cord forest of already +// balanced subtrees can be proven to never produce tree of depth larger than +// largest Fibonacci number representable in the same integral type as cord size +// For 64 bit integers this is the 93rd Fibonacci number. For 32 bit integrals +// this is 47th Fibonacci number. +constexpr size_t MaxCordDepth() { return sizeof(size_t) == 8 ? 93 : 47; } + +// This class models fixed max size stack of CordRep pointers. +// The elements are being pushed back and popped from the back. +template <typename CordRepPtr, size_t N> +class CordTreePath { + public: + CordTreePath() {} + explicit CordTreePath(CordRepPtr root) { push_back(root); } + + bool empty() const { return size_ == 0; } + size_t size() const { return size_; } + void clear() { size_ = 0; } + + CordRepPtr back() { return data_[size_ - 1]; } + + void pop_back() { + --size_; + assert(size_ < N); + } + void push_back(CordRepPtr elem) { data_[size_++] = elem; } + + private: + CordRepPtr data_[N]; + size_t size_ = 0; +}; + +// Fixed length container for mutable "path" in cord tree, which can hold any +// possible valid path in cord tree. +using CordTreeMutablePath = CordTreePath<CordRep*, MaxCordDepth()>; +// Variable length container for mutable "path" in cord tree. It starts with +// capacity for 15 elements and grow if necessary. +using CordTreeDynamicPath = + absl::InlinedVector<absl::cord_internal::CordRep*, 15>; +} // namespace cord_internal + // A Cord is a sequence of characters. class Cord { private: @@ -75,53 +186,124 @@ class Cord { using EnableIfString = absl::enable_if_t<std::is_same<T, std::string>::value, int>; + //---------------------------------------------------------------------------- + // Cord::GenericChunkIterator + //---------------------------------------------------------------------------- + // + // A `Cord::GenericChunkIterator` provides an interface for the standard + // `Cord::ChunkIterator` as well as some private implementations. 
+ template <typename StorageType> + class GenericChunkIterator { + public: + using iterator_category = std::input_iterator_tag; + using value_type = absl::string_view; + using difference_type = ptrdiff_t; + using pointer = const value_type*; + using reference = value_type; + + GenericChunkIterator() = default; + + GenericChunkIterator& operator++(); + GenericChunkIterator operator++(int); + bool operator==(const GenericChunkIterator& other) const; + bool operator!=(const GenericChunkIterator& other) const; + reference operator*() const; + pointer operator->() const; + + friend class Cord; + friend class CharIterator; + + private: + // Constructs a `begin()` iterator from `cord`. + explicit GenericChunkIterator(const Cord* cord); + + // Removes `n` bytes from `current_chunk_`. Expects `n` to be smaller than + // `current_chunk_.size()`. + void RemoveChunkPrefix(size_t n); + Cord AdvanceAndReadBytes(size_t n); + void AdvanceBytes(size_t n); + // Iterates `n` bytes, where `n` is expected to be greater than or equal to + // `current_chunk_.size()`. + void AdvanceBytesSlowPath(size_t n); + + // A view into bytes of the current `CordRep`. It may only be a view to a + // suffix of bytes if this is being used by `CharIterator`. + absl::string_view current_chunk_; + // The current leaf, or `nullptr` if the iterator points to short data. + // If the current chunk is a substring node, current_leaf_ points to the + // underlying flat or external node. + cord_internal::CordRep* current_leaf_ = nullptr; + // The number of bytes left in the `Cord` over which we are iterating. + size_t bytes_remaining_ = 0; + StorageType stack_of_right_children_; + }; + template <typename IteratorType> + class GenericChunkRange { + public: + explicit GenericChunkRange(const Cord* cord) : cord_(cord) {} + + IteratorType begin() const { return IteratorType(cord_); } + IteratorType end() const { return IteratorType(); } + + private: + const Cord* cord_; + }; + public: - // -------------------------------------------------------------------- - // Constructors, destructors and helper factories + // Cord::Cord() Constructors - // Create an empty cord + // Creates an empty Cord constexpr Cord() noexcept; - // Cord is copyable and efficiently movable. - // The moved-from state is valid but unspecified. + // Creates a Cord from an existing Cord. Cord is copyable and efficiently + // movable. The moved-from state is valid but unspecified. Cord(const Cord& src); Cord(Cord&& src) noexcept; Cord& operator=(const Cord& x); Cord& operator=(Cord&& x) noexcept; - // Create a cord out of "src". This constructor is explicit on - // purpose so that people do not get automatic type conversions. + // Creates a Cord from a `src` string. This constructor is marked explicit to + // prevent implicit Cord constructions from arguments convertible to an + // `absl::string_view`. explicit Cord(absl::string_view src); Cord& operator=(absl::string_view src); - // These are templated to avoid ambiguities for types that are convertible to - // both `absl::string_view` and `std::string`, such as `const char*`. + // Creates a Cord from a `std::string&&` rvalue. These constructors are + // templated to avoid ambiguities for types that are convertible to both + // `absl::string_view` and `std::string`, such as `const char*`. // - // Note that these functions reserve the right to reuse the `string&&`'s + // Note that these functions reserve the right to use the `string&&`'s // memory and that they will do so in the future. 
template <typename T, EnableIfString<T> = 0> explicit Cord(T&& src) : Cord(absl::string_view(src)) {} template <typename T, EnableIfString<T> = 0> Cord& operator=(T&& src); - // Destroy the cord + // Cord::~Cord() + // + // Destructs the Cord ~Cord() { if (contents_.is_tree()) DestroyCordSlow(); } - // Creates a Cord that takes ownership of external memory. The contents of - // `data` are not copied. + // Cord::MakeCordFromExternal(data, callable) + // + // Creates a Cord that takes ownership of external string memory. The + // contents of `data` are not copied to the Cord; instead, the external + // memory is added to the Cord and reference-counted. This data may not be + // changed for the life of the Cord, though it may be prepended or appended + // to. + // + // `MakeCordFromExternal()` takes a callable "releaser" that is invoked when + // the reference count for `data` reaches zero. As noted above, this data must + // remain live until the releaser is invoked. The callable releaser also must: // - // This function takes a callable that is invoked when all Cords are - // finished with `data`. The data must remain live and unchanging until the - // releaser is called. The requirements for the releaser are that it: - // * is move constructible, - // * supports `void operator()(absl::string_view) const` or - // `void operator()() const`, - // * does not have alignment requirement greater than what is guaranteed by - // ::operator new. This is dictated by alignof(std::max_align_t) before - // C++17 and __STDCPP_DEFAULT_NEW_ALIGNMENT__ if compiling with C++17 or - // it is supported by the implementation. + // * be move constructible + // * support `void operator()(absl::string_view) const` or `void operator()` + // * not have alignment requirement greater than what is guaranteed by + // `::operator new`. This alignment is dictated by + // `alignof(std::max_align_t)` (pre-C++17 code) or + // `__STDCPP_DEFAULT_NEW_ALIGNMENT__` (C++17 code). // // Example: // @@ -135,8 +317,8 @@ class Cord { // }); // } // - // WARNING: It's likely a bug if your releaser doesn't do anything. - // For example, consider the following: + // WARNING: Because a Cord can be reference-counted, it's likely a bug if your + // releaser doesn't do anything. For example, consider the following: // // void Foo(const char* buffer, int len) { // auto c = absl::MakeCordFromExternal(absl::string_view(buffer, len), @@ -150,67 +332,100 @@ class Cord { template <typename Releaser> friend Cord MakeCordFromExternal(absl::string_view data, Releaser&& releaser); - // -------------------------------------------------------------------- - // Mutations - + // Cord::Clear() + // + // Releases the Cord data. Any nodes that share data with other Cords, if + // applicable, will have their reference counts reduced by 1. void Clear(); + // Cord::Append() + // + // Appends data to the Cord, which may come from another Cord or other string + // data. void Append(const Cord& src); void Append(Cord&& src); void Append(absl::string_view src); template <typename T, EnableIfString<T> = 0> void Append(T&& src); + // Cord::Prepend() + // + // Prepends data to the Cord, which may come from another Cord or other string + // data. void Prepend(const Cord& src); void Prepend(absl::string_view src); template <typename T, EnableIfString<T> = 0> void Prepend(T&& src); + // Cord::RemovePrefix() + // + // Removes the first `n` bytes of a Cord. 
void RemovePrefix(size_t n); void RemoveSuffix(size_t n); - // Returns a new cord representing the subrange [pos, pos + new_size) of + // Cord::Subcord() + // + // Returns a new Cord representing the subrange [pos, pos + new_size) of // *this. If pos >= size(), the result is empty(). If // (pos + new_size) >= size(), the result is the subrange [pos, size()). Cord Subcord(size_t pos, size_t new_size) const; + // swap() + // + // Swaps the data of Cord `x` with Cord `y`. friend void swap(Cord& x, Cord& y) noexcept; - // -------------------------------------------------------------------- - // Accessors - + // Cord::size() + // + // Returns the size of the Cord. size_t size() const; + + // Cord::empty() + // + // Determines whether the given Cord is empty, returning `true` if so. bool empty() const; - // Returns the approximate number of bytes pinned by this Cord. Note that - // Cords that share memory could each be "charged" independently for the same - // shared memory. + // Cord::EstimatedMemoryUsage() + // + // Returns the *approximate* number of bytes held in full or in part by this + // Cord (which may not remain the same between invocations). Note that Cords + // that share memory could each be "charged" independently for the same shared + // memory. size_t EstimatedMemoryUsage() const; - // -------------------------------------------------------------------- - // Comparators - - // Compares 'this' Cord with rhs. This function and its relatives - // treat Cords as sequences of unsigned bytes. The comparison is a - // straightforward lexicographic comparison. Return value: + // Cord::Compare() + // + // Compares 'this' Cord with rhs. This function and its relatives treat Cords + // as sequences of unsigned bytes. The comparison is a straightforward + // lexicographic comparison. `Cord::Compare()` returns values as follows: + // // -1 'this' Cord is smaller // 0 two Cords are equal // 1 'this' Cord is larger int Compare(absl::string_view rhs) const; int Compare(const Cord& rhs) const; - // Does 'this' cord start/end with rhs + // Cord::StartsWith() + // + // Determines whether the Cord starts with the passed string data `rhs`. bool StartsWith(const Cord& rhs) const; bool StartsWith(absl::string_view rhs) const; + + // Cord::EndsWith() + // + // Determines whether the Cord ends with the passed string data `rhs`. bool EndsWith(absl::string_view rhs) const; bool EndsWith(const Cord& rhs) const; - // -------------------------------------------------------------------- - // Conversion to other types - + // Cord::operator std::string() + // + // Converts a Cord into a `std::string()`. This operator is marked explicit to + // prevent unintended Cord usage in functions that take a string. explicit operator std::string() const; - // Copies the contents from `src` to `*dst`. + // CopyCordToString() + // + // Copies the contents of a `src` Cord into a `*dst` string. // // This function optimizes the case of reusing the destination string since it // can reuse previously allocated capacity. However, this function does not @@ -219,80 +434,46 @@ class Cord { // object, prefer to simply use the conversion operator to `std::string`. friend void CopyCordToString(const Cord& src, std::string* dst); - // -------------------------------------------------------------------- - // Iteration - class CharIterator; - // Type for iterating over the chunks of a `Cord`. See comments for - // `Cord::chunk_begin()`, `Cord::chunk_end()` and `Cord::Chunks()` below for - // preferred usage.
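The accessor and comparison surface documented above can be exercised with a short sketch (illustrative only; the literals and function name are arbitrary, not part of the header):

  #include <cassert>
  #include "absl/strings/cord.h"

  void SubcordAndCompareDemo() {
    absl::Cord greeting("hello world");

    // Subcord(pos, n) returns the byte range [pos, pos + n) as a new Cord.
    absl::Cord hello = greeting.Subcord(0, 5);

    assert(hello.Compare("hello") == 0);   // 0 means the byte sequences are equal
    assert(greeting.StartsWith(hello));    // prefix test against another Cord
    assert(greeting.EndsWith("world"));    // suffix test against a string_view
    assert(greeting.size() == 11 && !greeting.empty());
  }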
+ //---------------------------------------------------------------------------- + // Cord::ChunkIterator + //---------------------------------------------------------------------------- + // + // A `Cord::ChunkIterator` allows iteration over the constituent chunks of its + // Cord. Such iteration allows you to perform non-copying operations on the + // data of a Cord without modifying it. + // + // Generally, you do not instantiate a `Cord::ChunkIterator` directly; + // instead, you create one implicitly through use of the `Cord::Chunks()` + // member function. // - // Additional notes: + // The `Cord::ChunkIterator` has the following properties: + // + // * The iterator is invalidated after any non-const operation on the + // Cord object over which it iterates. // * The `string_view` returned by dereferencing a valid, non-`end()` // iterator is guaranteed to be non-empty. - // * A `ChunkIterator` object is invalidated after any non-const - // operation on the `Cord` object over which it iterates. - // * Two `ChunkIterator` objects can be equality compared if and only if - // they remain valid and iterate over the same `Cord`. - // * This is a proxy iterator. This means the `string_view` returned by the - // iterator does not live inside the Cord, and its lifetime is limited to - // the lifetime of the iterator itself. To help prevent issues, - // `ChunkIterator::reference` is not a true reference type and is - // equivalent to `value_type`. - // * The iterator keeps state that can grow for `Cord`s that contain many + // * Two `ChunkIterator` objects can be compared equal if and only if they + // remain valid and iterate over the same Cord. + // * The iterator in this case is a proxy iterator; the `string_view` + // returned by the iterator does not live inside the Cord, and its + // lifetime is limited to the lifetime of the iterator itself. To help + // prevent lifetime issues, `ChunkIterator::reference` is not a true + // reference type and is equivalent to `value_type`. + // * The iterator keeps state that can grow for Cords that contain many // nodes and are imbalanced due to sharing. Prefer to pass this type by // const reference instead of by value. - class ChunkIterator { - public: - using iterator_category = std::input_iterator_tag; - using value_type = absl::string_view; - using difference_type = ptrdiff_t; - using pointer = const value_type*; - using reference = value_type; - - ChunkIterator() = default; - - ChunkIterator& operator++(); - ChunkIterator operator++(int); - bool operator==(const ChunkIterator& other) const; - bool operator!=(const ChunkIterator& other) const; - reference operator*() const; - pointer operator->() const; - - friend class Cord; - friend class CharIterator; - - private: - // Constructs a `begin()` iterator from `cord`. - explicit ChunkIterator(const Cord* cord); - - // Removes `n` bytes from `current_chunk_`. Expects `n` to be smaller than - // `current_chunk_.size()`. - void RemoveChunkPrefix(size_t n); - Cord AdvanceAndReadBytes(size_t n); - void AdvanceBytes(size_t n); - // Iterates `n` bytes, where `n` is expected to be greater than or equal to - // `current_chunk_.size()`. - void AdvanceBytesSlowPath(size_t n); - - // A view into bytes of the current `CordRep`. It may only be a view to a - // suffix of bytes if this is being used by `CharIterator`. - absl::string_view current_chunk_; - // The current leaf, or `nullptr` if the iterator points to short data.
- // If the current chunk is a substring node, current_leaf_ points to the - // underlying flat or external node. - absl::cord_internal::CordRep* current_leaf_ = nullptr; - // The number of bytes left in the `Cord` over which we are iterating. - size_t bytes_remaining_ = 0; - absl::InlinedVector<absl::cord_internal::CordRep*, 4> - stack_of_right_children_; - }; + using ChunkIterator = + GenericChunkIterator<cord_internal::CordTreeDynamicPath>; + // Cord::ChunkIterator::chunk_begin() + // // Returns an iterator to the first chunk of the `Cord`. // - // This is useful for getting a `ChunkIterator` outside the context of a - // range-based for-loop (in which case see `Cord::Chunks()` below). + // Generally, prefer using `Cord::Chunks()` within a range-based for loop for + // iterating over the chunks of a Cord. This method may be useful for getting + // a `ChunkIterator` where range-based for-loops are not useful. // // Example: // @@ -301,26 +482,35 @@ class Cord { // return std::find(c.chunk_begin(), c.chunk_end(), s); // } ChunkIterator chunk_begin() const; + + // Cord::ChunkIterator::chunk_end() + // // Returns an iterator one increment past the last chunk of the `Cord`. + // + // Generally, prefer using `Cord::Chunks()` within a range-based for loop for + // iterating over the chunks of a Cord. This method may be useful for getting + // a `ChunkIterator` where range-based for-loops may not be available. ChunkIterator chunk_end() const; - // Convenience wrapper over `Cord::chunk_begin()` and `Cord::chunk_end()` to - // enable range-based for-loop iteration over `Cord` chunks. + //---------------------------------------------------------------------------- + // Cord::ChunkIterator::ChunkRange + //---------------------------------------------------------------------------- // - // Prefer to use `Cord::Chunks()` below instead of constructing this directly. - class ChunkRange { - public: - explicit ChunkRange(const Cord* cord) : cord_(cord) {} - - ChunkIterator begin() const; - ChunkIterator end() const; - - private: - const Cord* cord_; - }; + // `ChunkRange` is a helper class for iterating over the chunks of the `Cord`, + // producing an iterator which can be used within a range-based for loop. + // Construction of a `ChunkRange` will return an iterator pointing to the + // first chunk of the Cord. Generally, do not construct a `ChunkRange` + // directly; instead, prefer to use the `Cord::Chunks()` method. + // + // Implementation note: `ChunkRange` is simply a convenience wrapper over + // `Cord::chunk_begin()` and `Cord::chunk_end()`. using ChunkRange = GenericChunkRange<ChunkIterator>; - // Returns a range for iterating over the chunks of a `Cord` with a - // range-based for-loop. + // Cord::Chunks() + // + // Returns a `Cord::ChunkIterator::ChunkRange` for iterating over the chunks + // of a `Cord` with a range-based for-loop. For most iteration tasks on a + // Cord, use `Cord::Chunks()` to retrieve this iterator. // // Example: // @@ -337,22 +527,30 @@ class Cord { // } ChunkRange Chunks() const; - // Type for iterating over the characters of a `Cord`. See comments for - // `Cord::char_begin()`, `Cord::char_end()` and `Cord::Chars()` below for - // preferred usage. + //---------------------------------------------------------------------------- + // Cord::CharIterator + //---------------------------------------------------------------------------- + // + // A `Cord::CharIterator` allows iteration over the constituent characters of + // a `Cord`.
+ // + // Generally, you do not instantiate a `Cord::CharIterator` directly; instead, + // you create one implicitly through use of the `Cord::Chars()` member + // function. + // + // A `Cord::CharIterator` has the following properties: // - // Additional notes: - // * A `CharIterator` object is invalidated after any non-const - // operation on the `Cord` object over which it iterates. - // * Two `CharIterator` objects can be equality compared if and only if - // they remain valid and iterate over the same `Cord`. - // * The iterator keeps state that can grow for `Cord`s that contain many + // * The iterator is invalidated after any non-const operation on the + // Cord object over which it iterates. + // * Two `CharIterator` objects can be compared equal if and only if they + // remain valid and iterate over the same Cord. + // * The iterator keeps state that can grow for Cords that contain many // nodes and are imbalanced due to sharing. Prefer to pass this type by // const reference instead of by value. - // * This type cannot be a forward iterator because a `Cord` can reuse - // sections of memory. This violates the requirement that if dereferencing - // two iterators returns the same object, the iterators must compare - // equal. + // * This type cannot act as a forward iterator because a `Cord` can reuse + // sections of memory. This fact violates the requirement for forward + // iterators to compare equal if dereferencing them returns the same + // object. class CharIterator { public: using iterator_category = std::input_iterator_tag; @@ -378,34 +576,56 @@ class Cord { ChunkIterator chunk_iterator_; }; - // Advances `*it` by `n_bytes` and returns the bytes passed as a `Cord`. + // Cord::CharIterator::AdvanceAndRead() // - // `n_bytes` must be less than or equal to the number of bytes remaining for - // iteration. Otherwise the behavior is undefined. It is valid to pass - // `char_end()` and 0. + // Advances the `Cord::CharIterator` by `n_bytes` and returns the bytes + // advanced as a separate `Cord`. `n_bytes` must be less than or equal to the + // number of bytes within the Cord; otherwise, behavior is undefined. It is + // valid to pass `char_end()` and `0`. static Cord AdvanceAndRead(CharIterator* it, size_t n_bytes); - // Advances `*it` by `n_bytes`. + // Cord::CharIterator::Advance() // - // `n_bytes` must be less than or equal to the number of bytes remaining for - // iteration. Otherwise the behavior is undefined. It is valid to pass - // `char_end()` and 0. + // Advances the `Cord::CharIterator` by `n_bytes`. `n_bytes` must be less than + // or equal to the number of bytes remaining within the Cord; otherwise, + // behavior is undefined. It is valid to pass `char_end()` and `0`. static void Advance(CharIterator* it, size_t n_bytes); + // Cord::CharIterator::ChunkRemaining() + // // Returns the longest contiguous view starting at the iterator's position. // // `it` must be dereferenceable. static absl::string_view ChunkRemaining(const CharIterator& it); + // Cord::CharIterator::char_begin() + // // Returns an iterator to the first character of the `Cord`. + // + // Generally, prefer using `Cord::Chars()` within a range-based for loop for + // iterating over the chunks of a Cord. This method may be useful for getting + // a `CharIterator` where range-based for-loops may not be available. CharIterator char_begin() const; + + // Cord::CharIterator::char_end() + // // Returns an iterator to one past the last character of the `Cord`. 
+ // + // Generally, prefer using `Cord::Chars()` within a range-based for loop for + // iterating over the chunks of a Cord. This method may be useful for getting + // a `CharIterator` where range-based for-loops are not useful. CharIterator char_end() const; - // Convenience wrapper over `Cord::char_begin()` and `Cord::char_end()` to - // enable range-based for-loop iterator over the characters of a `Cord`. + // Cord::CharIterator::CharRange // - // Prefer to use `Cord::Chars()` below instead of constructing this directly. + // `CharRange` is a helper class for iterating over the characters of a `Cord`, + // producing an iterator which can be used within a range-based for loop. + // Construction of a `CharRange` will return an iterator pointing to the first + // character of the Cord. Generally, do not construct a `CharRange` directly; + // instead, prefer to use the `Cord::Chars()` method shown below. + // + // Implementation note: `CharRange` is simply a convenience wrapper over + // `Cord::char_begin()` and `Cord::char_end()`. class CharRange { public: explicit CharRange(const Cord* cord) : cord_(cord) {} @@ -417,8 +637,11 @@ class Cord { const Cord* cord_; }; - // Returns a range for iterating over the characters of a `Cord` with a - // range-based for-loop. + // Cord::CharIterator::Chars() + // + // Returns a `Cord::CharIterator::CharRange` for iterating over the characters + // of a `Cord` with a range-based for-loop. For most character-based iteration + // tasks on a Cord, use `Cord::Chars()` to retrieve this iterator. // // Example: // @@ -435,23 +658,26 @@ class Cord { // } CharRange Chars() const; - // -------------------------------------------------------------------- - // Miscellaneous - - // Get the "i"th character of 'this' and return it. - // NOTE: This routine is reasonably efficient. It is roughly - // logarithmic in the number of nodes that make up the cord. Still, - // if you need to iterate over the contents of a cord, you should - // use a CharIterator/CordIterator rather than call operator[] or Get() - // repeatedly in a loop. + // Cord::operator[] + // + // Gets the "i"th character of the Cord and returns it, provided that + // 0 <= i < Cord.size(). // - // REQUIRES: 0 <= i < size() + // NOTE: This routine is reasonably efficient. It is roughly + // logarithmic in the number of chunks that make up the cord. Still, + // if you need to iterate over the contents of a cord, you should + // use a CharIterator/ChunkIterator rather than call operator[] or Get() + // repeatedly in a loop. char operator[](size_t i) const; + // Cord::TryFlat() + // // If this cord's representation is a single flat array, return a // string_view referencing that array. Otherwise return nullopt. absl::optional<absl::string_view> TryFlat() const; + // Cord::Flatten() + // // Flattens the cord into a single array and returns a view of the data. // // If the cord was already flat, the contents are not modified.
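A small sketch of the flattening APIs just documented (a pattern sketch under the stated contracts, not code from this header; the function name is illustrative): TryFlat() is checked first so that the copy Flatten() may perform is avoided when possible.

  #include "absl/strings/cord.h"
  #include "absl/strings/string_view.h"
  #include "absl/types/optional.h"

  // Returns a contiguous view of `cord`, flattening only when necessary.
  absl::string_view GetFlatView(absl::Cord& cord) {
    // Succeeds without copying when the Cord is already a single flat array.
    if (absl::optional<absl::string_view> flat = cord.TryFlat()) {
      return *flat;
    }
    // Otherwise copy the chunks into one array; the returned view is valid
    // only while `cord` is alive and unmodified.
    return cord.Flatten();
  }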
@@ -574,6 +800,14 @@ class Cord { static bool GetFlatAux(absl::cord_internal::CordRep* rep, absl::string_view* fragment); + // Iterators for use inside Cord implementation + using InternalChunkIterator = + GenericChunkIterator<cord_internal::CordTreeMutablePath>; + using InternalChunkRange = GenericChunkRange<InternalChunkIterator>; + + InternalChunkIterator internal_chunk_begin() const; + InternalChunkRange InternalChunks() const; + // Helper for ForEachChunk() static void ForEachChunkAux( absl::cord_internal::CordRep* rep, @@ -608,6 +842,11 @@ class Cord { void AppendImpl(C&& src); }; +extern template class Cord::GenericChunkIterator< + cord_internal::CordTreeMutablePath>; +extern template class Cord::GenericChunkIterator< + cord_internal::CordTreeDynamicPath>; + ABSL_NAMESPACE_END } // namespace absl @@ -947,7 +1186,9 @@ inline bool Cord::StartsWith(absl::string_view rhs) const { return EqualsImpl(rhs, rhs_size); } -inline Cord::ChunkIterator::ChunkIterator(const Cord* cord) +template <typename StorageType> +inline Cord::GenericChunkIterator<StorageType>::GenericChunkIterator( + const Cord* cord) : bytes_remaining_(cord->size()) { if (cord->empty()) return; if (cord->contents_.is_tree()) { @@ -958,37 +1199,50 @@ inline Cord::ChunkIterator::ChunkIterator(const Cord* cord) } } -inline Cord::ChunkIterator Cord::ChunkIterator::operator++(int) { - ChunkIterator tmp(*this); +template <typename StorageType> +inline Cord::GenericChunkIterator<StorageType> +Cord::GenericChunkIterator<StorageType>::operator++(int) { + GenericChunkIterator tmp(*this); operator++(); return tmp; } -inline bool Cord::ChunkIterator::operator==(const ChunkIterator& other) const { +template <typename StorageType> +inline bool Cord::GenericChunkIterator<StorageType>::operator==( + const GenericChunkIterator<StorageType>& other) const { return bytes_remaining_ == other.bytes_remaining_; } -inline bool Cord::ChunkIterator::operator!=(const ChunkIterator& other) const { +template <typename StorageType> +inline bool Cord::GenericChunkIterator<StorageType>::operator!=( + const GenericChunkIterator<StorageType>& other) const { return !(*this == other); } -inline Cord::ChunkIterator::reference Cord::ChunkIterator::operator*() const { - assert(bytes_remaining_ != 0); +template <typename StorageType> +inline typename Cord::GenericChunkIterator<StorageType>::reference +Cord::GenericChunkIterator<StorageType>::operator*() const { + ABSL_HARDENING_ASSERT(bytes_remaining_ != 0); return current_chunk_; } -inline Cord::ChunkIterator::pointer Cord::ChunkIterator::operator->() const { - assert(bytes_remaining_ != 0); +template <typename StorageType> +inline typename Cord::GenericChunkIterator<StorageType>::pointer +Cord::GenericChunkIterator<StorageType>::operator->() const { + ABSL_HARDENING_ASSERT(bytes_remaining_ != 0); return &current_chunk_; } -inline void Cord::ChunkIterator::RemoveChunkPrefix(size_t n) { +template <typename StorageType> +inline void Cord::GenericChunkIterator<StorageType>::RemoveChunkPrefix( + size_t n) { assert(n < current_chunk_.size()); current_chunk_.remove_prefix(n); bytes_remaining_ -= n; } -inline void Cord::ChunkIterator::AdvanceBytes(size_t n) { +template <typename StorageType> +inline void Cord::GenericChunkIterator<StorageType>::AdvanceBytes(size_t n) { if (ABSL_PREDICT_TRUE(n < current_chunk_.size())) { RemoveChunkPrefix(n); } else if (n != 0) { @@ -1002,14 +1256,6 @@ inline Cord::ChunkIterator Cord::chunk_begin() const { inline Cord::ChunkIterator Cord::chunk_end() const { return ChunkIterator(); } -inline 
Cord::ChunkIterator Cord::ChunkRange::begin() const { - return cord_->chunk_begin(); -} - -inline Cord::ChunkIterator Cord::ChunkRange::end() const { - return cord_->chunk_end(); -} - inline Cord::ChunkRange Cord::Chunks() const { return ChunkRange(this); } inline Cord::CharIterator& Cord::CharIterator::operator++() { diff --git a/absl/strings/cord_test.cc b/absl/strings/cord_test.cc index 4afa4a26969d..0ec93b4c45b9 100644 --- a/absl/strings/cord_test.cc +++ b/absl/strings/cord_test.cc @@ -18,6 +18,7 @@ #include "absl/base/config.h" #include "absl/base/internal/endian.h" #include "absl/base/internal/raw_logging.h" +#include "absl/base/macros.h" #include "absl/container/fixed_array.h" #include "absl/strings/cord_test_helpers.h" #include "absl/strings/str_cat.h" @@ -1402,6 +1403,53 @@ TEST(CordChunkIterator, Operations) { VerifyChunkIterator(subcords, 128); } +TEST(CordChunkIterator, MaxLengthFullTree) { + // Start with a 1-byte cord, and then double its length in a loop. We should + // be able to do this until the point where we would overflow size_t. + + absl::Cord cord; + size_t size = 1; + AddExternalMemory("x", &cord); + EXPECT_EQ(cord.size(), size); + + const int kCordLengthDoublingLimit = std::numeric_limits<size_t>::digits - 1; + for (int i = 0; i < kCordLengthDoublingLimit; ++i) { + cord.Prepend(absl::Cord(cord)); + size <<= 1; + + EXPECT_EQ(cord.size(), size); + + auto chunk_it = cord.chunk_begin(); + EXPECT_EQ(*chunk_it, "x"); + } + + EXPECT_DEATH_IF_SUPPORTED( + (cord.Prepend(absl::Cord(cord)), *cord.chunk_begin()), + "Cord is too long"); +} + +TEST(CordChunkIterator, MaxDepth) { + // By reusing nodes, it's possible in pathological cases to build a Cord that + // exceeds both the maximum permissible length and depth. In this case, the + // violation of the maximum depth is reported. + absl::Cord left_child; + AddExternalMemory("x", &left_child); + absl::Cord root = left_child; + + for (int i = 0; i < absl::cord_internal::MaxCordDepth() - 2; ++i) { + size_t new_size = left_child.size() + root.size(); + root.Prepend(left_child); + EXPECT_EQ(root.size(), new_size); + + auto chunk_it = root.chunk_begin(); + EXPECT_EQ(*chunk_it, "x"); + + std::swap(left_child, root); + } + + EXPECT_DEATH_IF_SUPPORTED(root.Prepend(left_child), "Cord is too long"); +} + TEST(CordCharIterator, Traits) { static_assert(std::is_copy_constructible<absl::Cord::CharIterator>::value, ""); @@ -1580,3 +1628,23 @@ TEST(Cord, SmallBufferAssignFromOwnData) { } } } + +TEST(CordDeathTest, Hardening) { + absl::Cord cord("hello"); + // These statement should abort the program in all builds modes. + EXPECT_DEATH_IF_SUPPORTED(cord.RemovePrefix(6), ""); + EXPECT_DEATH_IF_SUPPORTED(cord.RemoveSuffix(6), ""); + + bool test_hardening = false; + ABSL_HARDENING_ASSERT([&]() { + // This only runs when ABSL_HARDENING_ASSERT is active. 
+ test_hardening = true; + return true; + }()); + if (!test_hardening) return; + + EXPECT_DEATH_IF_SUPPORTED(cord[5], ""); + EXPECT_DEATH_IF_SUPPORTED(*cord.chunk_end(), ""); + EXPECT_DEATH_IF_SUPPORTED(static_cast<void>(cord.chunk_end()->empty()), ""); + EXPECT_DEATH_IF_SUPPORTED(++cord.chunk_end(), ""); +} diff --git a/absl/strings/internal/str_format/extension.h b/absl/strings/internal/str_format/extension.h index 968850ebfc84..bae2c0784421 100644 --- a/absl/strings/internal/str_format/extension.h +++ b/absl/strings/internal/str_format/extension.h @@ -155,8 +155,7 @@ enum class FormatConversionChar : uint8_t { d, i, o, u, x, X, // int f, F, e, E, g, G, a, A, // float n, p, // misc - kNone, - none = kNone + kNone }; // clang-format on @@ -288,11 +287,6 @@ class FormatConversionSpec { // negative value. int precision() const { return precision_; } - // Deprecated (use has_x_flag() instead). - Flags flags() const { return flags_; } - // Deprecated - FormatConversionChar conv() const { return conversion_char(); } - private: friend struct str_format_internal::FormatConversionSpecImplFriend; FormatConversionChar conv_ = FormatConversionChar::kNone; @@ -344,15 +338,7 @@ enum class FormatConversionCharSet : uint64_t { kFloating = a | e | f | g | A | E | F | G, kNumeric = kIntegral | kFloating, kString = s, - kPointer = p, - - // The following are deprecated - star = kStar, - integral = kIntegral, - floating = kFloating, - numeric = kNumeric, - string = kString, - pointer = kPointer + kPointer = p }; // Type safe OR operator. diff --git a/absl/strings/string_view.h b/absl/strings/string_view.h index 8e348fcd69ce..8a9db8c3d796 100644 --- a/absl/strings/string_view.h +++ b/absl/strings/string_view.h @@ -48,7 +48,7 @@ namespace absl { ABSL_NAMESPACE_BEGIN -using std::string_view; +using string_view = std::string_view; ABSL_NAMESPACE_END } // namespace absl |
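Finally, regarding the string_view.h change at the end of this diff: with the alias-declaration form, absl::string_view still denotes exactly std::string_view in builds where Abseil defers to the standard library (the ABSL_USES_STD_STRING_VIEW configuration). The following sketch, which assumes such a C++17 build and is not part of the patch, compiles unchanged before and after the change.

  #include <string_view>
  #include <type_traits>
  #include "absl/strings/string_view.h"

  // The alias-declaration only changes how the name is declared, not the type it names.
  static_assert(std::is_same<absl::string_view, std::string_view>::value,
                "absl::string_view is std::string_view in this configuration");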