author    | Abseil Team <absl-team@google.com> | 2020-03-16 16:06 -0700
committer | vslashg <gfalcon@google.com>       | 2020-03-16 19:11 -0400
commit    | 7853a7586c492ce8905c9e49f8679dada6354f2c
tree      | 987d340989dde52a3ca9e7a5965e119e0241c4c2 /absl
parent    | c6954897f7ece5011f0126db9117361dc1a6ff36
Export of internal Abseil changes
-- 91ca367a7548270155721bdda74611aeea2a2153 by Abseil Team <absl-team@google.com>:

Replace the only usage of btree_node::swap with simpler logic using transfers, and delete btree_node::swap. Add a benchmark for constructing small containers.

PiperOrigin-RevId: 301169874

-- ff9d73a7125b7f8ab5733cda877204dfbfac138e by Derek Mauro <dmauro@google.com>:

Ensure ABSL_CXX_STANDARD is set. Fixes #640

PiperOrigin-RevId: 301160106

-- 14ca0beee8c109e532134e7e9da7b072da1bf911 by Abseil Team <absl-team@google.com>:

Roll back the change that made Cord iterators a fixed size. That change increased the iterator size, which can cause a deeply recursive call to hit the stack memory limit, in turn causing a signal 11 failure.

PiperOrigin-RevId: 301084915

-- 619e3cd9e56408bdb8b3b5a1e08dda1e95242264 by Matthew Brown <matthewbr@google.com>:

Internal Change

PiperOrigin-RevId: 300832828

-- 64f8d62ab4c4c78077dbe85a9595a8eeb6d16608 by Gennadiy Rozental <rogeeff@google.com>:

Fix for empty-braces support. We now call proper aggregate construction when {} is used as the default value; in other words, instead of "new T" we call "new T{}".

PiperOrigin-RevId: 300715686

-- db3f65594d6db8b104b01262f884dff465b696ef by Abseil Team <absl-team@google.com>:

Emscripten supports thread-local storage nowadays.

PiperOrigin-RevId: 300675185

GitOrigin-RevId: 91ca367a7548270155721bdda74611aeea2a2153
Change-Id: I3344f745f9c3fc78775532b1808442fabd98e34a
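The empty-braces fix relies on the difference between default-initialization (`new T`, which leaves the members of an aggregate indeterminate) and value-initialization (`new T{}`, which zero-initializes members that have no user-provided constructor). A minimal sketch of that distinction, using a hypothetical `Point` aggregate rather than anything from this change:

```cpp
#include <iostream>

// Hypothetical aggregate with no user-provided constructors.
struct Point {
  int x;
  int y;
};

int main() {
  // Default-initialization: x and y are left indeterminate, so reading them
  // before assignment would be undefined behavior.
  Point* a = new Point;
  a->x = 1;  // must assign before use

  // Value-initialization: x and y are zero-initialized. This is the behavior
  // the flags change depends on when `{}` is given as a default value.
  Point* b = new Point{};
  std::cout << b->x << " " << b->y << "\n";  // prints "0 0"

  delete a;
  delete b;
  return 0;
}
```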
Diffstat (limited to 'absl')
-rw-r--r-- | absl/base/config.h                           |   7
-rw-r--r-- | absl/container/btree_benchmark.cc            |  22
-rw-r--r-- | absl/container/internal/btree.h              |  76
-rw-r--r-- | absl/copts/AbseilConfigureCopts.cmake        |   2
-rw-r--r-- | absl/flags/flag_test.cc                      |  67
-rw-r--r-- | absl/flags/internal/flag.h                   |   2
-rw-r--r-- | absl/strings/cord.cc                         | 249
-rw-r--r-- | absl/strings/cord.h                          |  53
-rw-r--r-- | absl/strings/cord_test.cc                    |  47
-rw-r--r-- | absl/strings/internal/str_format/extension.h |  16
-rw-r--r-- | absl/strings/str_format.h                    |   2
11 files changed, 257 insertions, 286 deletions
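The btree change in this export replaces `btree_node::swap` with one-way transfers when the root leaf grows (see the `btree.h` hunks below): since the old node is about to be deleted anyway, each slot is moved into the new node and the source destroyed, instead of performing a full bidirectional swap. A simplified, self-contained sketch of that pattern — the `Node`, `Transfer`, and `GrowRoot` names are illustrative, not Abseil's internals:

```cpp
#include <cstddef>
#include <new>
#include <utility>

// Illustrative stand-in for a node's value storage: a block of raw,
// uninitialized slots (Abseil's real btree_node layout is more involved).
template <typename T>
struct Node {
  explicit Node(std::size_t cap)
      : slots(static_cast<T*>(::operator new(cap * sizeof(T)))) {}
  ~Node() { ::operator delete(slots); }  // destruction of live values omitted

  T* slot(std::size_t i) { return slots + i; }

  T* slots;
  std::size_t count = 0;
};

// Move one value: construct it in the destination slot and destroy the
// moved-from source slot, mirroring the new btree_node::transfer().
template <typename T>
void Transfer(Node<T>& dest, std::size_t dest_i, Node<T>& src,
              std::size_t src_i) {
  ::new (static_cast<void*>(dest.slot(dest_i))) T(std::move(*src.slot(src_i)));
  src.slot(src_i)->~T();
}

// Growing the root: rather than swapping the contents of the old and new
// nodes and then deleting the old one (the removed btree_node::swap path),
// transfer every value one way and discard the now-empty old node.
template <typename T>
void GrowRoot(Node<T>& old_root, Node<T>& new_root) {
  for (std::size_t i = 0; i < old_root.count; ++i) {
    Transfer(new_root, i, old_root, i);
  }
  new_root.count = old_root.count;
  old_root.count = 0;
}

int main() {
  Node<int> old_root(4), new_root(8);
  for (int i = 0; i < 4; ++i) {
    ::new (static_cast<void*>(old_root.slot(i))) int(i);
  }
  old_root.count = 4;
  GrowRoot(old_root, new_root);
  return new_root.count == 4 ? 0 : 1;
}
```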
diff --git a/absl/base/config.h b/absl/base/config.h index ee99f94629a2..f54466deebad 100644 --- a/absl/base/config.h +++ b/absl/base/config.h @@ -262,13 +262,6 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #endif #endif // defined(__ANDROID__) && defined(__clang__) -// Emscripten doesn't yet support `thread_local` or `__thread`. -// https://github.com/emscripten-core/emscripten/issues/3502 -#if defined(__EMSCRIPTEN__) -#undef ABSL_HAVE_TLS -#undef ABSL_HAVE_THREAD_LOCAL -#endif // defined(__EMSCRIPTEN__) - // ABSL_HAVE_INTRINSIC_INT128 // // Checks whether the __int128 compiler extension for a 128-bit integral type is diff --git a/absl/container/btree_benchmark.cc b/absl/container/btree_benchmark.cc index ca4d575c48b9..467986768aa1 100644 --- a/absl/container/btree_benchmark.cc +++ b/absl/container/btree_benchmark.cc @@ -134,6 +134,27 @@ void BM_InsertEnd(benchmark::State& state) { } } +// Benchmark inserting the first few elements in a container. In b-tree, this is +// when the root node grows. +template <typename T> +void BM_InsertSmall(benchmark::State& state) { + using V = typename remove_pair_const<typename T::value_type>::type; + + const int kSize = 8; + std::vector<V> values = GenerateValues<V>(kSize); + T container; + + while (state.KeepRunningBatch(kSize)) { + for (int i = 0; i < kSize; ++i) { + benchmark::DoNotOptimize(container.insert(values[i])); + } + state.PauseTiming(); + // Do not measure the time it takes to clear the container. + container.clear(); + state.ResumeTiming(); + } +} + template <typename T> void BM_LookupImpl(benchmark::State& state, bool sorted) { using V = typename remove_pair_const<typename T::value_type>::type; @@ -493,6 +514,7 @@ BTREE_TYPES(Time); MY_BENCHMARK4(type, Insert); \ MY_BENCHMARK4(type, InsertSorted); \ MY_BENCHMARK4(type, InsertEnd); \ + MY_BENCHMARK4(type, InsertSmall); \ MY_BENCHMARK4(type, Lookup); \ MY_BENCHMARK4(type, FullLookup); \ MY_BENCHMARK4(type, Delete); \ diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h index d986f81e85e5..adf49f81c571 100644 --- a/absl/container/internal/btree.h +++ b/absl/container/internal/btree.h @@ -776,9 +776,6 @@ class btree_node { // delimiting key in the parent node onto itself. void merge(btree_node *src, allocator_type *alloc); - // Swaps the contents of `this` and `other`. - void swap(btree_node *other, allocator_type *alloc); - // Node allocation/deletion routines. void init_leaf(btree_node *parent, int max_count) { set_parent(parent); @@ -820,6 +817,14 @@ class btree_node { absl::container_internal::SanitizerPoisonObject(slot(i)); } + // Transfers value from slot `src_i` in `src` to slot `dest_i` in `this`. + void transfer(const size_type dest_i, const size_type src_i, btree_node *src, + allocator_type *alloc) { + absl::container_internal::SanitizerUnpoisonObject(slot(dest_i)); + params_type::transfer(alloc, slot(dest_i), src->slot(src_i)); + absl::container_internal::SanitizerPoisonObject(src->slot(src_i)); + } + // Move n values starting at value i in this node into the values starting at // value j in dest_node. void uninitialized_move_n(const size_type n, const size_type i, @@ -1752,54 +1757,6 @@ void btree_node<P>::merge(btree_node *src, allocator_type *alloc) { parent()->remove_value(position(), alloc); } -template <typename P> -void btree_node<P>::swap(btree_node *other, allocator_type *alloc) { - using std::swap; - assert(leaf() == other->leaf()); - - // Determine which is the smaller/larger node. 
- btree_node *smaller = this, *larger = other; - if (smaller->count() > larger->count()) { - swap(smaller, larger); - } - - // Swap the values. - for (slot_type *a = smaller->start_slot(), *b = larger->start_slot(), - *end = smaller->finish_slot(); - a != end; ++a, ++b) { - params_type::swap(alloc, a, b); - } - - // Move values that can't be swapped. - const size_type to_move = larger->count() - smaller->count(); - larger->uninitialized_move_n(to_move, smaller->finish(), smaller->finish(), - smaller, alloc); - larger->value_destroy_n(smaller->finish(), to_move, alloc); - - if (!leaf()) { - // Swap the child pointers. - std::swap_ranges(&smaller->mutable_child(smaller->start()), - &smaller->mutable_child(smaller->finish() + 1), - &larger->mutable_child(larger->start())); - // Update swapped children's parent pointers. - int i = smaller->start(); - int j = larger->start(); - for (; i <= smaller->finish(); ++i, ++j) { - smaller->child(i)->set_parent(smaller); - larger->child(j)->set_parent(larger); - } - // Move the child pointers that couldn't be swapped. - for (; j <= larger->finish(); ++i, ++j) { - smaller->init_child(i, larger->child(j)); - larger->clear_child(j); - } - } - - // Swap the `finish`s. - // TODO(ezb): with floating storage, will also need to swap starts. - swap(mutable_finish(), other->mutable_finish()); -} - //// // btree_iterator methods template <typename N, typename R, typename P> @@ -2492,6 +2449,7 @@ inline auto btree<P>::internal_emplace(iterator iter, Args &&... args) ++iter.position; } const int max_count = iter.node->max_count(); + allocator_type *alloc = mutable_allocator(); if (iter.node->count() == max_count) { // Make room in the leaf for the new item. if (max_count < kNodeValues) { @@ -2500,15 +2458,21 @@ inline auto btree<P>::internal_emplace(iterator iter, Args &&... args) assert(iter.node == root()); iter.node = new_leaf_root_node((std::min<int>)(kNodeValues, 2 * max_count)); - iter.node->swap(root(), mutable_allocator()); - delete_leaf_node(root()); - mutable_root() = rightmost_ = iter.node; + // Transfer the values from the old root to the new root. 
+ node_type *old_root = root(); + node_type *new_root = iter.node; + for (int i = old_root->start(), f = old_root->finish(); i < f; ++i) { + new_root->transfer(i, i, old_root, alloc); + } + new_root->set_finish(old_root->finish()); + old_root->set_finish(old_root->start()); + delete_leaf_node(old_root); + mutable_root() = rightmost_ = new_root; } else { rebalance_or_split(&iter); } } - iter.node->emplace_value(iter.position, mutable_allocator(), - std::forward<Args>(args)...); + iter.node->emplace_value(iter.position, alloc, std::forward<Args>(args)...); ++size_; return iter; } diff --git a/absl/copts/AbseilConfigureCopts.cmake b/absl/copts/AbseilConfigureCopts.cmake index 390a07a02549..9557e36f68d4 100644 --- a/absl/copts/AbseilConfigureCopts.cmake +++ b/absl/copts/AbseilConfigureCopts.cmake @@ -63,3 +63,5 @@ else() set(ABSL_DEFAULT_COPTS "") set(ABSL_TEST_COPTS "") endif() + +set(ABSL_CXX_STANDARD "${CMAKE_CXX_STANDARD}") diff --git a/absl/flags/flag_test.cc b/absl/flags/flag_test.cc index 1e01b49cc4f2..3a0255769527 100644 --- a/absl/flags/flag_test.cc +++ b/absl/flags/flag_test.cc @@ -293,6 +293,18 @@ TEST_F(FlagTest, TestFlagDefinition) { // -------------------------------------------------------------------- TEST_F(FlagTest, TestDefault) { + EXPECT_EQ(FLAGS_test_flag_01.DefaultValue(), "true"); + EXPECT_EQ(FLAGS_test_flag_02.DefaultValue(), "1234"); + EXPECT_EQ(FLAGS_test_flag_03.DefaultValue(), "-34"); + EXPECT_EQ(FLAGS_test_flag_04.DefaultValue(), "189"); + EXPECT_EQ(FLAGS_test_flag_05.DefaultValue(), "10765"); + EXPECT_EQ(FLAGS_test_flag_06.DefaultValue(), "40000"); + EXPECT_EQ(FLAGS_test_flag_07.DefaultValue(), "-1234567"); + EXPECT_EQ(FLAGS_test_flag_08.DefaultValue(), "9876543"); + EXPECT_EQ(FLAGS_test_flag_09.DefaultValue(), "-9.876e-50"); + EXPECT_EQ(FLAGS_test_flag_10.DefaultValue(), "1.234e+12"); + EXPECT_EQ(FLAGS_test_flag_11.DefaultValue(), ""); + EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_01), true); EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_02), 1234); EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_03), -34); @@ -308,6 +320,61 @@ TEST_F(FlagTest, TestDefault) { // -------------------------------------------------------------------- +struct NonTriviallyCopyableAggregate { + NonTriviallyCopyableAggregate() = default; + NonTriviallyCopyableAggregate(const NonTriviallyCopyableAggregate& rhs) + : value(rhs.value) {} + NonTriviallyCopyableAggregate& operator=( + const NonTriviallyCopyableAggregate& rhs) { + value = rhs.value; + return *this; + } + + int value; +}; +bool AbslParseFlag(absl::string_view src, NonTriviallyCopyableAggregate* f, + std::string* e) { + return absl::ParseFlag(src, &f->value, e); +} +std::string AbslUnparseFlag(const NonTriviallyCopyableAggregate& ntc) { + return absl::StrCat(ntc.value); +} + +bool operator==(const NonTriviallyCopyableAggregate& ntc1, + const NonTriviallyCopyableAggregate& ntc2) { + return ntc1.value == ntc2.value; +} + +} // namespace + +ABSL_FLAG(bool, test_flag_eb_01, {}, ""); +ABSL_FLAG(int32_t, test_flag_eb_02, {}, ""); +ABSL_FLAG(int64_t, test_flag_eb_03, {}, ""); +ABSL_FLAG(double, test_flag_eb_04, {}, ""); +ABSL_FLAG(std::string, test_flag_eb_05, {}, ""); +ABSL_FLAG(NonTriviallyCopyableAggregate, test_flag_eb_06, {}, ""); + +namespace { + +TEST_F(FlagTest, TestEmptyBracesDefault) { + EXPECT_EQ(FLAGS_test_flag_eb_01.DefaultValue(), "false"); + EXPECT_EQ(FLAGS_test_flag_eb_02.DefaultValue(), "0"); + EXPECT_EQ(FLAGS_test_flag_eb_03.DefaultValue(), "0"); + EXPECT_EQ(FLAGS_test_flag_eb_04.DefaultValue(), "0"); + 
EXPECT_EQ(FLAGS_test_flag_eb_05.DefaultValue(), ""); + EXPECT_EQ(FLAGS_test_flag_eb_06.DefaultValue(), "0"); + + EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_eb_01), false); + EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_eb_02), 0); + EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_eb_03), 0); + EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_eb_04), 0.0); + EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_eb_05), ""); + EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_eb_06), + NonTriviallyCopyableAggregate{}); +} + +// -------------------------------------------------------------------- + TEST_F(FlagTest, TestGetSet) { absl::SetFlag(&FLAGS_test_flag_01, false); EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_01), false); diff --git a/absl/flags/internal/flag.h b/absl/flags/internal/flag.h index 344e31f65073..0ef0ee745641 100644 --- a/absl/flags/internal/flag.h +++ b/absl/flags/internal/flag.h @@ -647,7 +647,7 @@ T* MakeFromDefaultValue(T t) { template <typename T> T* MakeFromDefaultValue(EmptyBraces) { - return new T; + return new T{}; } } // namespace flags_internal diff --git a/absl/strings/cord.cc b/absl/strings/cord.cc index 415b239b0e9c..4f64f79965c7 100644 --- a/absl/strings/cord.cc +++ b/absl/strings/cord.cc @@ -30,6 +30,7 @@ #include "absl/base/internal/raw_logging.h" #include "absl/base/port.h" #include "absl/container/fixed_array.h" +#include "absl/container/inlined_vector.h" #include "absl/strings/escaping.h" #include "absl/strings/internal/cord_internal.h" #include "absl/strings/internal/resize_uninitialized.h" @@ -131,14 +132,6 @@ inline const CordRepExternal* CordRep::external() const { return static_cast<const CordRepExternal*>(this); } -using CordTreeConstPath = CordTreePath<const CordRep*, MaxCordDepth()>; - -// This type is used to store the list of pending nodes during re-balancing. -// Its maximum size is 2 * MaxCordDepth() because the tree has a maximum -// possible depth of MaxCordDepth() and every concat node along a tree path -// could theoretically be split during rebalancing. -using RebalancingStack = CordTreePath<CordRep*, 2 * MaxCordDepth()>; - } // namespace cord_internal static const size_t kFlatOverhead = offsetof(CordRep, data); @@ -187,78 +180,98 @@ static constexpr size_t TagToLength(uint8_t tag) { // Enforce that kMaxFlatSize maps to a well-known exact tag value. static_assert(TagToAllocatedSize(224) == kMaxFlatSize, "Bad tag logic"); -constexpr size_t Fibonacci(uint8_t n, const size_t a = 0, const size_t b = 1) { - return n == 0 - ? a - : n == 1 ? b - : Fibonacci(n - 1, b, - (a > (size_t(-1) - b)) ? size_t(-1) : a + b); +constexpr uint64_t Fibonacci(unsigned char n, uint64_t a = 0, uint64_t b = 1) { + return n == 0 ? a : Fibonacci(n - 1, b, a + b); } +static_assert(Fibonacci(63) == 6557470319842, + "Fibonacci values computed incorrectly"); + // Minimum length required for a given depth tree -- a tree is considered // balanced if -// length(t) >= kMinLength[depth(t)] -// The node depth is allowed to become larger to reduce rebalancing -// for larger strings (see ShouldRebalance). 
-constexpr size_t kMinLength[] = { - Fibonacci(2), Fibonacci(3), Fibonacci(4), Fibonacci(5), Fibonacci(6), - Fibonacci(7), Fibonacci(8), Fibonacci(9), Fibonacci(10), Fibonacci(11), - Fibonacci(12), Fibonacci(13), Fibonacci(14), Fibonacci(15), Fibonacci(16), - Fibonacci(17), Fibonacci(18), Fibonacci(19), Fibonacci(20), Fibonacci(21), - Fibonacci(22), Fibonacci(23), Fibonacci(24), Fibonacci(25), Fibonacci(26), - Fibonacci(27), Fibonacci(28), Fibonacci(29), Fibonacci(30), Fibonacci(31), - Fibonacci(32), Fibonacci(33), Fibonacci(34), Fibonacci(35), Fibonacci(36), - Fibonacci(37), Fibonacci(38), Fibonacci(39), Fibonacci(40), Fibonacci(41), - Fibonacci(42), Fibonacci(43), Fibonacci(44), Fibonacci(45), Fibonacci(46), - Fibonacci(47), Fibonacci(48), Fibonacci(49), Fibonacci(50), Fibonacci(51), - Fibonacci(52), Fibonacci(53), Fibonacci(54), Fibonacci(55), Fibonacci(56), - Fibonacci(57), Fibonacci(58), Fibonacci(59), Fibonacci(60), Fibonacci(61), - Fibonacci(62), Fibonacci(63), Fibonacci(64), Fibonacci(65), Fibonacci(66), - Fibonacci(67), Fibonacci(68), Fibonacci(69), Fibonacci(70), Fibonacci(71), - Fibonacci(72), Fibonacci(73), Fibonacci(74), Fibonacci(75), Fibonacci(76), - Fibonacci(77), Fibonacci(78), Fibonacci(79), Fibonacci(80), Fibonacci(81), - Fibonacci(82), Fibonacci(83), Fibonacci(84), Fibonacci(85), Fibonacci(86), - Fibonacci(87), Fibonacci(88), Fibonacci(89), Fibonacci(90), Fibonacci(91), - Fibonacci(92), Fibonacci(93), Fibonacci(94), Fibonacci(95)}; - -static_assert(sizeof(kMinLength) / sizeof(size_t) >= - (cord_internal::MaxCordDepth() + 1), - "Not enough elements in kMinLength array to cover all the " - "supported Cord depth(s)"); - -inline bool ShouldRebalance(const CordRep* node) { - if (node->tag != CONCAT) return false; - - size_t node_depth = node->concat()->depth(); - - if (node_depth <= 15) return false; - - // Rebalancing Cords is expensive, so we reduce how often rebalancing occurs - // by allowing shallow Cords to have twice the depth that the Fibonacci rule - // would otherwise imply. Deep Cords need to follow the rule more closely, - // however to ensure algorithm correctness. We implement this with linear - // interpolation. Cords of depth 16 are treated as though they have a depth - // of 16 * 1/2, and Cords of depth MaxCordDepth() interpolate to - // MaxCordDepth() * 1. - return node->length < - kMinLength[(node_depth * (cord_internal::MaxCordDepth() - 16)) / - (2 * cord_internal::MaxCordDepth() - 16 - node_depth)]; -} - -// Unlike root balancing condition this one is part of the re-balancing -// algorithm and has to be always matching against right depth for -// algorithm to be correct. -inline bool IsNodeBalanced(const CordRep* node) { - if (node->tag != CONCAT) return true; - - size_t node_depth = node->concat()->depth(); - - return node->length >= kMinLength[node_depth]; +// length(t) >= min_length[depth(t)] +// The root node depth is allowed to become twice as large to reduce rebalancing +// for larger strings (see IsRootBalanced). 
+static constexpr uint64_t min_length[] = { + Fibonacci(2), + Fibonacci(3), + Fibonacci(4), + Fibonacci(5), + Fibonacci(6), + Fibonacci(7), + Fibonacci(8), + Fibonacci(9), + Fibonacci(10), + Fibonacci(11), + Fibonacci(12), + Fibonacci(13), + Fibonacci(14), + Fibonacci(15), + Fibonacci(16), + Fibonacci(17), + Fibonacci(18), + Fibonacci(19), + Fibonacci(20), + Fibonacci(21), + Fibonacci(22), + Fibonacci(23), + Fibonacci(24), + Fibonacci(25), + Fibonacci(26), + Fibonacci(27), + Fibonacci(28), + Fibonacci(29), + Fibonacci(30), + Fibonacci(31), + Fibonacci(32), + Fibonacci(33), + Fibonacci(34), + Fibonacci(35), + Fibonacci(36), + Fibonacci(37), + Fibonacci(38), + Fibonacci(39), + Fibonacci(40), + Fibonacci(41), + Fibonacci(42), + Fibonacci(43), + Fibonacci(44), + Fibonacci(45), + Fibonacci(46), + Fibonacci(47), + 0xffffffffffffffffull, // Avoid overflow +}; + +static const int kMinLengthSize = ABSL_ARRAYSIZE(min_length); + +// The inlined size to use with absl::InlinedVector. +// +// Note: The InlinedVectors in this file (and in cord.h) do not need to use +// the same value for their inlined size. The fact that they do is historical. +// It may be desirable for each to use a different inlined size optimized for +// that InlinedVector's usage. +// +// TODO(jgm): Benchmark to see if there's a more optimal value than 47 for +// the inlined vector size (47 exists for backward compatibility). +static const int kInlinedVectorSize = 47; + +static inline bool IsRootBalanced(CordRep* node) { + if (node->tag != CONCAT) { + return true; + } else if (node->concat()->depth() <= 15) { + return true; + } else if (node->concat()->depth() > kMinLengthSize) { + return false; + } else { + // Allow depth to become twice as large as implied by fibonacci rule to + // reduce rebalancing for larger strings. + return (node->length >= min_length[node->concat()->depth() / 2]); + } } static CordRep* Rebalance(CordRep* node); -static void DumpNode(const CordRep* rep, bool include_data, std::ostream* os); -static bool VerifyNode(const CordRep* root, const CordRep* start_node, +static void DumpNode(CordRep* rep, bool include_data, std::ostream* os); +static bool VerifyNode(CordRep* root, CordRep* start_node, bool full_validation); static inline CordRep* VerifyTree(CordRep* node) { @@ -305,8 +318,7 @@ __attribute__((preserve_most)) static void UnrefInternal(CordRep* rep) { assert(rep != nullptr); - cord_internal::RebalancingStack pending; - + absl::InlinedVector<CordRep*, kInlinedVectorSize> pending; while (true) { if (rep->tag == CONCAT) { CordRepConcat* rep_concat = rep->concat(); @@ -388,11 +400,6 @@ static void SetConcatChildren(CordRepConcat* concat, CordRep* left, concat->length = left->length + right->length; concat->set_depth(1 + std::max(Depth(left), Depth(right))); - - ABSL_INTERNAL_CHECK(concat->depth() <= cord_internal::MaxCordDepth(), - "Cord depth exceeds max"); - ABSL_INTERNAL_CHECK(concat->length >= left->length, "Cord is too long"); - ABSL_INTERNAL_CHECK(concat->length >= right->length, "Cord is too long"); } // Create a concatenation of the specified nodes. 
@@ -418,7 +425,7 @@ static CordRep* RawConcat(CordRep* left, CordRep* right) { static CordRep* Concat(CordRep* left, CordRep* right) { CordRep* rep = RawConcat(left, right); - if (rep != nullptr && ShouldRebalance(rep)) { + if (rep != nullptr && !IsRootBalanced(rep)) { rep = Rebalance(rep); } return VerifyTree(rep); @@ -909,7 +916,7 @@ void Cord::Prepend(absl::string_view src) { static CordRep* RemovePrefixFrom(CordRep* node, size_t n) { if (n >= node->length) return nullptr; if (n == 0) return Ref(node); - cord_internal::CordTreeMutablePath rhs_stack; + absl::InlinedVector<CordRep*, kInlinedVectorSize> rhs_stack; while (node->tag == CONCAT) { assert(n <= node->length); @@ -950,7 +957,7 @@ static CordRep* RemovePrefixFrom(CordRep* node, size_t n) { static CordRep* RemoveSuffixFrom(CordRep* node, size_t n) { if (n >= node->length) return nullptr; if (n == 0) return Ref(node); - absl::cord_internal::CordTreeMutablePath lhs_stack; + absl::InlinedVector<CordRep*, kInlinedVectorSize> lhs_stack; bool inplace_ok = node->refcount.IsOne(); while (node->tag == CONCAT) { @@ -1021,7 +1028,6 @@ void Cord::RemoveSuffix(size_t n) { // Work item for NewSubRange(). struct SubRange { - SubRange() = default; SubRange(CordRep* a_node, size_t a_pos, size_t a_n) : node(a_node), pos(a_pos), n(a_n) {} CordRep* node; // nullptr means concat last 2 results. @@ -1030,11 +1036,8 @@ struct SubRange { }; static CordRep* NewSubRange(CordRep* node, size_t pos, size_t n) { - cord_internal::CordTreeMutablePath results; - // The algorithm below in worst case scenario adds up to 3 nodes to the `todo` - // list, but we also pop one out on every cycle. If original tree has depth d - // todo list can grew up to 2*d in size. - cord_internal::CordTreePath<SubRange, 2 * cord_internal::MaxCordDepth()> todo; + absl::InlinedVector<CordRep*, kInlinedVectorSize> results; + absl::InlinedVector<SubRange, kInlinedVectorSize> todo; todo.push_back(SubRange(node, pos, n)); do { const SubRange& sr = todo.back(); @@ -1071,7 +1074,7 @@ static CordRep* NewSubRange(CordRep* node, size_t pos, size_t n) { } } while (!todo.empty()); assert(results.size() == 1); - return results.back(); + return results[0]; } Cord Cord::Subcord(size_t pos, size_t new_size) const { @@ -1110,12 +1113,11 @@ Cord Cord::Subcord(size_t pos, size_t new_size) const { class CordForest { public: - explicit CordForest(size_t length) : root_length_(length), trees_({}) {} + explicit CordForest(size_t length) + : root_length_(length), trees_(kMinLengthSize, nullptr) {} void Build(CordRep* cord_root) { - // We are adding up to two nodes to the `pending` list, but we also popping - // one, so the size of `pending` will never exceed `MaxCordDepth()`. 
- cord_internal::CordTreeMutablePath pending(cord_root); + std::vector<CordRep*> pending = {cord_root}; while (!pending.empty()) { CordRep* node = pending.back(); @@ -1127,20 +1129,21 @@ class CordForest { } CordRepConcat* concat_node = node->concat(); - if (IsNodeBalanced(concat_node)) { - AddNode(node); - continue; - } - pending.push_back(concat_node->right); - pending.push_back(concat_node->left); - - if (concat_node->refcount.IsOne()) { - concat_node->left = concat_freelist_; - concat_freelist_ = concat_node; + if (concat_node->depth() >= kMinLengthSize || + concat_node->length < min_length[concat_node->depth()]) { + pending.push_back(concat_node->right); + pending.push_back(concat_node->left); + + if (concat_node->refcount.IsOne()) { + concat_node->left = concat_freelist_; + concat_freelist_ = concat_node; + } else { + Ref(concat_node->right); + Ref(concat_node->left); + Unref(concat_node); + } } else { - Ref(concat_node->right); - Ref(concat_node->left); - Unref(concat_node); + AddNode(node); } } } @@ -1172,7 +1175,7 @@ class CordForest { // Collect together everything with which we will merge with node int i = 0; - for (; node->length >= kMinLength[i + 1]; ++i) { + for (; node->length > min_length[i + 1]; ++i) { auto& tree_at_i = trees_[i]; if (tree_at_i == nullptr) continue; @@ -1183,7 +1186,7 @@ class CordForest { sum = AppendNode(node, sum); // Insert sum into appropriate place in the forest - for (; sum->length >= kMinLength[i]; ++i) { + for (; sum->length >= min_length[i]; ++i) { auto& tree_at_i = trees_[i]; if (tree_at_i == nullptr) continue; @@ -1191,7 +1194,7 @@ class CordForest { tree_at_i = nullptr; } - // kMinLength[0] == 1, which means sum->length >= kMinLength[0] + // min_length[0] == 1, which means sum->length >= min_length[0] assert(i > 0); trees_[i - 1] = sum; } @@ -1224,7 +1227,9 @@ class CordForest { } size_t root_length_; - std::array<cord_internal::CordRep*, cord_internal::MaxCordDepth()> trees_; + + // use an inlined vector instead of a flat array to get bounds checking + absl::InlinedVector<CordRep*, kInlinedVectorSize> trees_; // List of concat nodes we can re-use for Cord balancing. CordRepConcat* concat_freelist_ = nullptr; @@ -1836,18 +1841,18 @@ absl::string_view Cord::FlattenSlowPath() { } } -static void DumpNode(const CordRep* rep, bool include_data, std::ostream* os) { +static void DumpNode(CordRep* rep, bool include_data, std::ostream* os) { const int kIndentStep = 1; int indent = 0; - cord_internal::CordTreeConstPath stack; - cord_internal::CordTreePath<int, cord_internal::MaxCordDepth()> indents; + absl::InlinedVector<CordRep*, kInlinedVectorSize> stack; + absl::InlinedVector<int, kInlinedVectorSize> indents; for (;;) { *os << std::setw(3) << rep->refcount.Get(); *os << " " << std::setw(7) << rep->length; *os << " ["; - if (include_data) *os << static_cast<const void*>(rep); + if (include_data) *os << static_cast<void*>(rep); *os << "]"; - *os << " " << (IsNodeBalanced(rep) ? 'b' : 'u'); + *os << " " << (IsRootBalanced(rep) ? 
'b' : 'u'); *os << " " << std::setw(indent) << ""; if (rep->tag == CONCAT) { *os << "CONCAT depth=" << Depth(rep) << "\n"; @@ -1868,7 +1873,7 @@ static void DumpNode(const CordRep* rep, bool include_data, std::ostream* os) { } else { *os << "FLAT cap=" << TagToLength(rep->tag) << " ["; if (include_data) - *os << absl::CEscape(absl::string_view(rep->data, rep->length)); + *os << absl::CEscape(std::string(rep->data, rep->length)); *os << "]\n"; } if (stack.empty()) break; @@ -1881,19 +1886,19 @@ static void DumpNode(const CordRep* rep, bool include_data, std::ostream* os) { ABSL_INTERNAL_CHECK(indents.empty(), ""); } -static std::string ReportError(const CordRep* root, const CordRep* node) { +static std::string ReportError(CordRep* root, CordRep* node) { std::ostringstream buf; buf << "Error at node " << node << " in:"; DumpNode(root, true, &buf); return buf.str(); } -static bool VerifyNode(const CordRep* root, const CordRep* start_node, +static bool VerifyNode(CordRep* root, CordRep* start_node, bool full_validation) { - cord_internal::CordTreeConstPath worklist; + absl::InlinedVector<CordRep*, 2> worklist; worklist.push_back(start_node); do { - const CordRep* node = worklist.back(); + CordRep* node = worklist.back(); worklist.pop_back(); ABSL_INTERNAL_CHECK(node != nullptr, ReportError(root, node)); @@ -1943,7 +1948,7 @@ static bool VerifyNode(const CordRep* root, const CordRep* start_node, // Iterate over the tree. cur_node is never a leaf node and leaf nodes will // never be appended to tree_stack. This reduces overhead from manipulating // tree_stack. - cord_internal::CordTreeConstPath tree_stack; + absl::InlinedVector<const CordRep*, kInlinedVectorSize> tree_stack; const CordRep* cur_node = rep; while (true) { const CordRep* next_node = nullptr; diff --git a/absl/strings/cord.h b/absl/strings/cord.h index eb236e50e22e..66645eef6e48 100644 --- a/absl/strings/cord.h +++ b/absl/strings/cord.h @@ -48,6 +48,7 @@ #include "absl/base/internal/per_thread_tls.h" #include "absl/base/macros.h" #include "absl/base/port.h" +#include "absl/container/inlined_vector.h" #include "absl/functional/function_ref.h" #include "absl/meta/type_traits.h" #include "absl/strings/internal/cord_internal.h" @@ -67,55 +68,6 @@ template <typename H> H HashFragmentedCord(H, const Cord&); } -namespace cord_internal { - -// It's expensive to keep a tree perfectly balanced, so instead we keep trees -// approximately balanced. A tree node N of depth D(N) that contains a string -// of L(N) characters is considered balanced if L >= Fibonacci(D + 2). -// The "+ 2" is used to ensure that every balanced leaf node contains at least -// one character. Here we presume that -// Fibonacci(0) = 0 -// Fibonacci(1) = 1 -// Fibonacci(2) = 1 -// Fibonacci(3) = 2 -// ... -// The algorithm is based on paper by Hans Boehm et al: -// https://www.cs.rit.edu/usr/local/pub/jeh/courses/QUARTERS/FP/Labs/CedarRope/rope-paper.pdf -// In this paper authors shows that rebalancing based on cord forest of already -// balanced subtrees can be proven to never produce tree of depth larger than -// largest Fibonacci number representable in the same integral type as cord size -// For 64 bit integers this is the 93rd Fibonacci number. For 32 bit integrals -// this is 47th Fibonacci number. -constexpr size_t MaxCordDepth() { return sizeof(size_t) == 8 ? 93 : 47; } - -// This class models fixed max size stack of CordRep pointers. -// The elements are being pushed back and popped from the back. 
-template <typename CordRepPtr, size_t N> -class CordTreePath { - public: - CordTreePath() {} - explicit CordTreePath(CordRepPtr root) { push_back(root); } - - bool empty() const { return size_ == 0; } - size_t size() const { return size_; } - void clear() { size_ = 0; } - - CordRepPtr back() { return data_[size_ - 1]; } - - void pop_back() { - --size_; - assert(size_ < N); - } - void push_back(CordRepPtr elem) { data_[size_++] = elem; } - - private: - CordRepPtr data_[N]; - size_t size_ = 0; -}; - -using CordTreeMutablePath = CordTreePath<CordRep*, MaxCordDepth()>; -} // namespace cord_internal - // A Cord is a sequence of characters. class Cord { private: @@ -333,7 +285,8 @@ class Cord { absl::cord_internal::CordRep* current_leaf_ = nullptr; // The number of bytes left in the `Cord` over which we are iterating. size_t bytes_remaining_ = 0; - absl::cord_internal::CordTreeMutablePath stack_of_right_children_; + absl::InlinedVector<absl::cord_internal::CordRep*, 4> + stack_of_right_children_; }; // Returns an iterator to the first chunk of the `Cord`. diff --git a/absl/strings/cord_test.cc b/absl/strings/cord_test.cc index f2d81d4c40d0..4afa4a26969d 100644 --- a/absl/strings/cord_test.cc +++ b/absl/strings/cord_test.cc @@ -1402,53 +1402,6 @@ TEST(CordChunkIterator, Operations) { VerifyChunkIterator(subcords, 128); } -TEST(CordChunkIterator, MaxLengthFullTree) { - // Start with a 1-byte cord, and then double its length in a loop. We should - // be able to do this until the point where we would overflow size_t. - - absl::Cord cord; - size_t size = 1; - AddExternalMemory("x", &cord); - EXPECT_EQ(cord.size(), size); - - const int kCordLengthDoublingLimit = std::numeric_limits<size_t>::digits - 1; - for (int i = 0; i < kCordLengthDoublingLimit; ++i) { - cord.Prepend(absl::Cord(cord)); - size <<= 1; - - EXPECT_EQ(cord.size(), size); - - auto chunk_it = cord.chunk_begin(); - EXPECT_EQ(*chunk_it, "x"); - } - - EXPECT_DEATH_IF_SUPPORTED( - (cord.Prepend(absl::Cord(cord)), *cord.chunk_begin()), - "Cord is too long"); -} - -TEST(CordChunkIterator, MaxDepth) { - // By reusing nodes, it's possible in pathological cases to build a Cord that - // exceeds both the maximum permissible length and depth. In this case, the - // violation of the maximum depth is reported. - absl::Cord left_child; - AddExternalMemory("x", &left_child); - absl::Cord root = left_child; - - for (int i = 0; i < absl::cord_internal::MaxCordDepth() - 2; ++i) { - size_t new_size = left_child.size() + root.size(); - root.Prepend(left_child); - EXPECT_EQ(root.size(), new_size); - - auto chunk_it = root.chunk_begin(); - EXPECT_EQ(*chunk_it, "x"); - - std::swap(left_child, root); - } - - EXPECT_DEATH_IF_SUPPORTED(root.Prepend(left_child), "Cord is too long"); -} - TEST(CordCharIterator, Traits) { static_assert(std::is_copy_constructible<absl::Cord::CharIterator>::value, ""); diff --git a/absl/strings/internal/str_format/extension.h b/absl/strings/internal/str_format/extension.h index d1665753d182..968850ebfc84 100644 --- a/absl/strings/internal/str_format/extension.h +++ b/absl/strings/internal/str_format/extension.h @@ -24,6 +24,7 @@ #include "absl/base/config.h" #include "absl/base/port.h" +#include "absl/meta/type_traits.h" #include "absl/strings/internal/str_format/output.h" #include "absl/strings/string_view.h" @@ -365,11 +366,22 @@ constexpr FormatConversionCharSet operator|(FormatConversionCharSet a, static_cast<uint64_t>(b)); } +// Overloaded conversion functions to support absl::ParsedFormat. 
// Get a conversion with a single character in it. -constexpr FormatConversionCharSet ConversionCharToConv(char c) { - return FormatConversionCharSet(FormatConversionCharToConvValue(c)); +constexpr FormatConversionCharSet ToFormatConversionCharSet(char c) { + return static_cast<FormatConversionCharSet>( + FormatConversionCharToConvValue(c)); } +// Get a conversion with a single character in it. +constexpr FormatConversionCharSet ToFormatConversionCharSet( + FormatConversionCharSet c) { + return c; +} + +template <typename T> +void ToFormatConversionCharSet(T) = delete; + // Checks whether `c` exists in `set`. constexpr bool Contains(FormatConversionCharSet set, char c) { return (static_cast<uint64_t>(set) & FormatConversionCharToConvValue(c)) != 0; diff --git a/absl/strings/str_format.h b/absl/strings/str_format.h index 2f9b4b2786f8..d40fca114be3 100644 --- a/absl/strings/str_format.h +++ b/absl/strings/str_format.h @@ -285,7 +285,7 @@ using FormatSpec = // } template <char... Conv> using ParsedFormat = str_format_internal::ExtendedParsedFormat< - str_format_internal::ConversionCharToConv(Conv)...>; + absl::str_format_internal::ToFormatConversionCharSet(Conv)...>; // StrFormat() // |