author     Abseil Team <absl-team@google.com>       2019-06-28 00:24 -0700
committer  Shaindel Schwartz <shaindel@google.com>  2019-06-28 15:37 -0400
commit     c964fcffac27bd4a9ff67fe393410dd1146ef8b8 (patch)
tree       3ff13a27c0de86de82d898933e808d4b7f69f823 /absl
parent     72e09a54d993b192db32be14c65adf7e9bd08c31 (diff)
Export of internal Abseil changes.
--
c321829735accc2e6beb81e6a5a4421e5647b876 by CJ Johnson <johnsoncj@google.com>:

Updates the definition of InlinedVector::swap(InlinedVector&) to be exception-safe and adds exception safety tests.

PiperOrigin-RevId: 255511536
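
For reference, the new tests drive swap through the test-internal exception
safety tester (see inlined_vector_exception_safety_test.cc below); a
condensed sketch of the pattern, where VecT, from_size, to_size and
InlinedVectorInvariants come from the test fixture:

    auto tester = testing::MakeExceptionSafetyTester()
                      .WithInitialValue(VecT{from_size})
                      .WithContracts(InlinedVectorInvariants<VecT>);

    EXPECT_TRUE(tester.Test([](VecT* vec) {
      VecT other_vec{to_size};
      vec->swap(other_vec);  // Invariants must hold even if an element
                             // operation throws mid-swap.
    }));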

--
0d86445891748efb09430eb9ede267b54185a246 by CJ Johnson <johnsoncj@google.com>:

Updates the definition of InlinedVector::erase(...) to be exception-safe and adds an exception safety test for it.

PiperOrigin-RevId: 255492671
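
The reworked erase bottoms out in a new Storage::Erase (full definition in
internal/inlined_vector.h below), which move-assigns the trailing elements
over the erased range and then destroys the vacated tail; roughly:

    // Condensed from Storage<T, N, A>::Erase below: shift the suffix left
    // over [from, to), then destroy the moved-from slots at the end.
    IteratorValueAdapter<MoveIterator> move_values(
        MoveIterator(storage_view.data + erase_end_index));
    inlined_vector_internal::AssignElements(
        storage_view.data + erase_index, &move_values,
        storage_view.size - erase_end_index);
    inlined_vector_internal::DestroyElements(
        GetAllocPtr(), storage_view.data + (storage_view.size - erase_size),
        erase_size);
    SubtractSize(erase_size);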

--
f07e8fa62dfe9eb0d025b27fca8c6db43c5a328f by CJ Johnson <johnsoncj@google.com>:

Updates the implementation of InlinedVector::emplace_back(...) to be exception-safe and adds exception safety tests.

PiperOrigin-RevId: 255422837
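
The exception-safe growth path in the new Storage::EmplaceBack (full
definition in internal/inlined_vector.h below) constructs the new element
before disturbing the existing ones and only commits the new allocation once
every step has succeeded; condensed:

    // Construct into fresh memory when at capacity, otherwise in place.
    pointer construct_data =
        (storage_view.size == storage_view.capacity
             ? allocation_tx.Allocate(NextCapacityFrom(storage_view.capacity))
             : storage_view.data);
    pointer last_ptr = construct_data + storage_view.size;
    AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
                               std::forward<Args>(args)...);
    if (allocation_tx.DidAllocate()) {
      // Move the old elements across; if a move throws, destroy the new
      // element and rethrow, leaving the original storage untouched.
      // (Move loop elided; see the full definition below.)
      DeallocateIfAllocated();
      AcquireAllocation(&allocation_tx);
      SetIsAllocated();
    }
    AddSize(1);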

--
4c3be92bfe4c1636a03cef8fd5aa802fed0d2c61 by Abseil Team <absl-team@google.com>:

Internal Change

PiperOrigin-RevId: 255422693

--
6df38ea42f00678c357a539016163f8ac4c084e6 by Gennadiy Rozental <rogeeff@google.com>:

Introduce public interfaces for setting and getting program usage messages.

PiperOrigin-RevId: 255291467
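
The new public API lives in absl/flags/usage.h (added below). Typical usage,
following the example in the new header's comment:

    #include "absl/flags/usage.h"
    #include "absl/strings/str_cat.h"

    int main(int argc, char* argv[]) {
      // May be set at most once; a second call is fatal.
      absl::SetProgramUsageMessage(
          absl::StrCat("This program does nothing.  Sample usage:\n", argv[0],
                       " <uselessarg1> <uselessarg2>"));
      // ... parse flags and run; help reporting echoes the message via
      // absl::ProgramUsageMessage().
    }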

--
8f21d594aed3971d37db70226847c693eb548edb by Laramie Leavitt <lar@google.com>:

Move absl/random's copy of ABSL_ATTRIBUTE_FORCE_INLINE and
ABSL_ATTRIBUTE_NEVER_INLINE into .cc files and rename to
prevent conflicts.

https://github.com/abseil/abseil-cpp/issues/343

PiperOrigin-RevId: 255288599
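
Concretely, each affected .cc file now carries a file-local,
ABSL_RANDOM_INTERNAL_-prefixed copy of the attribute macros instead of the
shared platform.h definitions; the pattern, as it appears in the diffs below:

    // ABSL_HAVE_ATTRIBUTE (defined only if not already provided).
    #if !defined(ABSL_HAVE_ATTRIBUTE)
    #ifdef __has_attribute
    #define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x)
    #else
    #define ABSL_HAVE_ATTRIBUTE(x) 0
    #endif
    #endif

    // Prefixed name cannot collide with the absl/base attribute macros.
    #if ABSL_HAVE_ATTRIBUTE(noinline) || (defined(__GNUC__) && !defined(__clang__))
    #define ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE __attribute__((noinline))
    #elif defined(_MSC_VER)
    #define ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE __declspec(noinline)
    #else
    #define ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE
    #endif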

--
6b7430ad0c8bd860fb9394894f5eeedd1acc9f77 by CJ Johnson <johnsoncj@google.com>:

Updates the ScopedAllocatorWorks test for InlinedVector so that it no longer
relies on the exact byte counts allocated by the standard library.

In doing so, removes the LegacyNextCapacityFrom(...) implementation function
from InlinedVector.

Also applies clang-format to the test file.
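
Rather than comparing against exact standard library allocation sizes (which
vary, e.g. between MSVC debug and opt builds, as the old comment noted), the
rewritten test only asserts relative growth of the counting allocator's byte
total; condensed from the new test body below:

    int64_t total_allocated_byte_count = 0;
    AllocVec vec(ScopedAlloc(Alloc(&total_allocated_byte_count)));

    vec.emplace_back();  // Inlined element: absl itself allocates nothing.
    int64_t absl_responsible_for_count = total_allocated_byte_count;
    EXPECT_EQ(absl_responsible_for_count, 0);

    vec[0].emplace_back();  // The nested std::vector must allocate through
                            // the scoped allocator.
    EXPECT_GT(total_allocated_byte_count, absl_responsible_for_count);

    vec.clear();
    EXPECT_EQ(total_allocated_byte_count, 0);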

PiperOrigin-RevId: 255207606
GitOrigin-RevId: c321829735accc2e6beb81e6a5a4421e5647b876
Change-Id: I7438211c36c4549fca2e866658f8d579c65d7d52
Diffstat (limited to 'absl')
-rw-r--r--  absl/container/inlined_vector.h                          142
-rw-r--r--  absl/container/inlined_vector_exception_safety_test.cc    82
-rw-r--r--  absl/container/inlined_vector_test.cc                    135
-rw-r--r--  absl/container/internal/inlined_vector.h                 182
-rw-r--r--  absl/flags/BUILD.bazel                                     2
-rw-r--r--  absl/flags/CMakeLists.txt                                  2
-rw-r--r--  absl/flags/internal/usage.cc                              35
-rw-r--r--  absl/flags/internal/usage.h                               14
-rw-r--r--  absl/flags/internal/usage_test.cc                         12
-rw-r--r--  absl/flags/usage.cc                                       56
-rw-r--r--  absl/flags/usage.h                                        40
-rw-r--r--  absl/random/internal/nanobenchmark.cc                     22
-rw-r--r--  absl/random/internal/platform.h                           42
-rw-r--r--  absl/random/internal/randen_hwaes.cc                      94
-rw-r--r--  absl/random/internal/randen_slow.cc                       36
-rw-r--r--  absl/strings/match.h                                       3
16 files changed, 525 insertions, 374 deletions
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index e67885c2fc5d..7552723d6a37 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -640,28 +640,7 @@ class InlinedVector {
   // returning a `reference` to the emplaced element.
   template <typename... Args>
   reference emplace_back(Args&&... args) {
-    size_type s = size();
-    if (ABSL_PREDICT_FALSE(s == capacity())) {
-      size_type new_capacity = 2 * capacity();
-      pointer new_data =
-          AllocatorTraits::allocate(*storage_.GetAllocPtr(), new_capacity);
-      reference new_element =
-          Construct(new_data + s, std::forward<Args>(args)...);
-      UninitializedCopy(std::make_move_iterator(data()),
-                        std::make_move_iterator(data() + s), new_data);
-      ResetAllocation(new_data, new_capacity, s + 1);
-      return new_element;
-    } else {
-      pointer space;
-      if (storage_.GetIsAllocated()) {
-        storage_.SetAllocatedSize(s + 1);
-        space = storage_.GetAllocatedData();
-      } else {
-        storage_.SetInlinedSize(s + 1);
-        space = storage_.GetInlinedData();
-      }
-      return Construct(space + s, std::forward<Args>(args)...);
-    }
+    return storage_.EmplaceBack(std::forward<Args>(args)...);
   }
 
   // `InlinedVector::push_back()`
@@ -696,10 +675,7 @@ class InlinedVector {
     assert(pos >= begin());
     assert(pos < end());
 
-    iterator position = const_cast<iterator>(pos);
-    std::move(position + 1, end(), position);
-    pop_back();
-    return position;
+    return storage_.Erase(pos, pos + 1);
   }
 
   // Overload of `InlinedVector::erase()` for erasing all elements in the
@@ -707,28 +683,15 @@ class InlinedVector {
   // to the first element following the range erased or the end iterator if `to`
   // was the end iterator.
   iterator erase(const_iterator from, const_iterator to) {
-    assert(begin() <= from);
+    assert(from >= begin());
     assert(from <= to);
     assert(to <= end());
 
-    iterator range_start = const_cast<iterator>(from);
-    iterator range_end = const_cast<iterator>(to);
-
-    size_type s = size();
-    ptrdiff_t erase_gap = std::distance(range_start, range_end);
-    if (erase_gap > 0) {
-      pointer space;
-      if (storage_.GetIsAllocated()) {
-        space = storage_.GetAllocatedData();
-        storage_.SetAllocatedSize(s - erase_gap);
-      } else {
-        space = storage_.GetInlinedData();
-        storage_.SetInlinedSize(s - erase_gap);
-      }
-      std::move(range_end, space + s, range_start);
-      Destroy(space + s - erase_gap, space + s);
+    if (ABSL_PREDICT_TRUE(from != to)) {
+      return storage_.Erase(from, to);
+    } else {
+      return const_cast<iterator>(from);
     }
-    return range_start;
   }
 
   // `InlinedVector::clear()`
@@ -774,96 +737,9 @@ class InlinedVector {
   //
   // Swaps the contents of this inlined vector with the contents of `other`.
   void swap(InlinedVector& other) {
-    using std::swap;
-
-    if (ABSL_PREDICT_FALSE(this == std::addressof(other))) {
-      return;
-    }
-
-    bool is_allocated = storage_.GetIsAllocated();
-    bool other_is_allocated = other.storage_.GetIsAllocated();
-
-    if (is_allocated && other_is_allocated) {
-      // Both out of line, so just swap the tag, allocation, and allocator.
-      storage_.SwapSizeAndIsAllocated(std::addressof(other.storage_));
-      storage_.SwapAllocatedSizeAndCapacity(std::addressof(other.storage_));
-      swap(*storage_.GetAllocPtr(), *other.storage_.GetAllocPtr());
-
-      return;
-    }
-
-    if (!is_allocated && !other_is_allocated) {
-      // Both inlined: swap up to smaller size, then move remaining elements.
-      InlinedVector* a = this;
-      InlinedVector* b = std::addressof(other);
-      if (size() < other.size()) {
-        swap(a, b);
-      }
-
-      const size_type a_size = a->size();
-      const size_type b_size = b->size();
-      assert(a_size >= b_size);
-      // `a` is larger. Swap the elements up to the smaller array size.
-      std::swap_ranges(a->storage_.GetInlinedData(),
-                       a->storage_.GetInlinedData() + b_size,
-                       b->storage_.GetInlinedData());
-
-      // Move the remaining elements:
-      //   [`b_size`, `a_size`) from `a` -> [`b_size`, `a_size`) from `b`
-      b->UninitializedCopy(a->storage_.GetInlinedData() + b_size,
-                           a->storage_.GetInlinedData() + a_size,
-                           b->storage_.GetInlinedData() + b_size);
-      a->Destroy(a->storage_.GetInlinedData() + b_size,
-                 a->storage_.GetInlinedData() + a_size);
-
-      storage_.SwapSizeAndIsAllocated(std::addressof(other.storage_));
-      swap(*storage_.GetAllocPtr(), *other.storage_.GetAllocPtr());
-
-      assert(b->size() == a_size);
-      assert(a->size() == b_size);
-      return;
-    }
-
-    // One is out of line, one is inline.
-    // We first move the elements from the inlined vector into the
-    // inlined space in the other vector.  We then put the other vector's
-    // pointer/capacity into the originally inlined vector and swap
-    // the tags.
-    InlinedVector* a = this;
-    InlinedVector* b = std::addressof(other);
-    if (a->storage_.GetIsAllocated()) {
-      swap(a, b);
-    }
-
-    assert(!a->storage_.GetIsAllocated());
-    assert(b->storage_.GetIsAllocated());
-
-    const size_type a_size = a->size();
-    const size_type b_size = b->size();
-    // In an optimized build, `b_size` would be unused.
-    static_cast<void>(b_size);
-
-    // Made Local copies of `size()`, these can now be swapped
-    a->storage_.SwapSizeAndIsAllocated(std::addressof(b->storage_));
-
-    // Copy out before `b`'s union gets clobbered by `inline_space`
-    pointer b_data = b->storage_.GetAllocatedData();
-    size_type b_capacity = b->storage_.GetAllocatedCapacity();
-
-    b->UninitializedCopy(a->storage_.GetInlinedData(),
-                         a->storage_.GetInlinedData() + a_size,
-                         b->storage_.GetInlinedData());
-    a->Destroy(a->storage_.GetInlinedData(),
-               a->storage_.GetInlinedData() + a_size);
-
-    a->storage_.SetAllocatedData(b_data, b_capacity);
-
-    if (*a->storage_.GetAllocPtr() != *b->storage_.GetAllocPtr()) {
-      swap(*a->storage_.GetAllocPtr(), *b->storage_.GetAllocPtr());
+    if (ABSL_PREDICT_TRUE(this != std::addressof(other))) {
+      storage_.Swap(std::addressof(other.storage_));
     }
-
-    assert(b->size() == a_size);
-    assert(a->size() == b_size);
   }
 
  private:
diff --git a/absl/container/inlined_vector_exception_safety_test.cc b/absl/container/inlined_vector_exception_safety_test.cc
index 1775699e1b71..2d1ce485ae41 100644
--- a/absl/container/inlined_vector_exception_safety_test.cc
+++ b/absl/container/inlined_vector_exception_safety_test.cc
@@ -279,12 +279,34 @@ TYPED_TEST(TwoSizeTest, Resize) {
   }));
 }
 
+TYPED_TEST(OneSizeTest, EmplaceBack) {
+  using VecT = typename TypeParam::VecT;
+  constexpr static auto size = TypeParam::GetSizeAt(0);
+
+  VecT full_vec{size};
+  full_vec.resize(full_vec.capacity());
+
+  VecT nonfull_vec{size};
+  nonfull_vec.reserve(size + 1);
+
+  auto tester = testing::MakeExceptionSafetyTester().WithContracts(
+      InlinedVectorInvariants<VecT>);
+
+  EXPECT_TRUE(tester.WithInitialValue(nonfull_vec).Test([](VecT* vec) {
+    vec->emplace_back();  //
+  }));
+
+  EXPECT_TRUE(tester.WithInitialValue(full_vec).Test([](VecT* vec) {
+    vec->emplace_back();  //
+  }));
+}
+
 TYPED_TEST(OneSizeTest, PopBack) {
   using VecT = typename TypeParam::VecT;
   constexpr static auto size = TypeParam::GetSizeAt(0);
 
   auto tester = testing::MakeExceptionSafetyTester()
-                    .WithInitialValue(VecT(size))
+                    .WithInitialValue(VecT{size})
                     .WithContracts(NoThrowGuarantee<VecT>);
 
   EXPECT_TRUE(tester.Test([](VecT* vec) {
@@ -292,12 +314,47 @@ TYPED_TEST(OneSizeTest, PopBack) {
   }));
 }
 
+TYPED_TEST(OneSizeTest, Erase) {
+  using VecT = typename TypeParam::VecT;
+  constexpr static auto size = TypeParam::GetSizeAt(0);
+
+  auto tester = testing::MakeExceptionSafetyTester()
+                    .WithInitialValue(VecT{size})
+                    .WithContracts(InlinedVectorInvariants<VecT>);
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin();
+    vec->erase(it);
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin() + (vec->size() / 2);
+    vec->erase(it);
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin() + (vec->size() - 1);
+    vec->erase(it);
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin();
+    vec->erase(it, it + 1);
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin() + (vec->size() / 2);
+    vec->erase(it, it + 1);
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin() + (vec->size() - 1);
+    vec->erase(it, it + 1);
+  }));
+}
+
 TYPED_TEST(OneSizeTest, Clear) {
   using VecT = typename TypeParam::VecT;
   constexpr static auto size = TypeParam::GetSizeAt(0);
 
   auto tester = testing::MakeExceptionSafetyTester()
-                    .WithInitialValue(VecT(size))
+                    .WithInitialValue(VecT{size})
                     .WithContracts(NoThrowGuarantee<VecT>);
 
   EXPECT_TRUE(tester.Test([](VecT* vec) {
@@ -332,4 +389,25 @@ TYPED_TEST(OneSizeTest, ShrinkToFit) {
   }));
 }
 
+TYPED_TEST(TwoSizeTest, Swap) {
+  using VecT = typename TypeParam::VecT;
+  constexpr static auto from_size = TypeParam::GetSizeAt(0);
+  constexpr static auto to_size = TypeParam::GetSizeAt(1);
+
+  auto tester = testing::MakeExceptionSafetyTester()
+                    .WithInitialValue(VecT{from_size})
+                    .WithContracts(InlinedVectorInvariants<VecT>);
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    VecT other_vec{to_size};
+    vec->swap(other_vec);
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    using std::swap;
+    VecT other_vec{to_size};
+    swap(*vec, other_vec);
+  }));
+}
+
 }  // namespace
diff --git a/absl/container/inlined_vector_test.cc b/absl/container/inlined_vector_test.cc
index 60fe89b28aee..50315b83c147 100644
--- a/absl/container/inlined_vector_test.cc
+++ b/absl/container/inlined_vector_test.cc
@@ -76,12 +76,9 @@ TYPED_TEST_SUITE_P(InstanceTest);
 // destroyed in the erase(begin, end) test.
 class RefCounted {
  public:
-  RefCounted(int value, int* count) : value_(value), count_(count) {
-    Ref();
-  }
+  RefCounted(int value, int* count) : value_(value), count_(count) { Ref(); }
 
-  RefCounted(const RefCounted& v)
-      : value_(v.value_), count_(v.count_) {
+  RefCounted(const RefCounted& v) : value_(v.value_), count_(v.count_) {
     Ref();
   }
 
@@ -290,7 +287,7 @@ TEST(RefCountedVec, EraseBeginEnd) {
         }
 
         // Check that the elements at the end are preserved.
-        for (int i = erase_end; i< len; ++i) {
+        for (int i = erase_end; i < len; ++i) {
           EXPECT_EQ(1, counts[i]);
         }
       }
@@ -552,10 +549,10 @@ TEST(IntVec, Resize) {
     static const int kResizeElem = 1000000;
     for (int k = 0; k < 10; k++) {
       // Enlarging resize
-      v.resize(len+k, kResizeElem);
-      EXPECT_EQ(len+k, v.size());
-      EXPECT_LE(len+k, v.capacity());
-      for (int i = 0; i < len+k; i++) {
+      v.resize(len + k, kResizeElem);
+      EXPECT_EQ(len + k, v.size());
+      EXPECT_LE(len + k, v.capacity());
+      for (int i = 0; i < len + k; i++) {
         if (i < len) {
           EXPECT_EQ(i, v[i]);
         } else {
@@ -866,7 +863,7 @@ TYPED_TEST_P(InstanceTest, Swap) {
       auto min_len = std::min(l1, l2);
       auto max_len = std::max(l1, l2);
       for (int i = 0; i < l1; i++) a.push_back(Instance(i));
-      for (int i = 0; i < l2; i++) b.push_back(Instance(100+i));
+      for (int i = 0; i < l2; i++) b.push_back(Instance(100 + i));
       EXPECT_EQ(tracker.instances(), l1 + l2);
       tracker.ResetCopiesMovesSwaps();
       {
@@ -934,7 +931,7 @@ TEST(IntVec, EqualAndNotEqual) {
     EXPECT_FALSE(a == b);
     EXPECT_TRUE(a != b);
 
-    b[i] = b[i] - 1;    // Back to before
+    b[i] = b[i] - 1;  // Back to before
     EXPECT_TRUE(a == b);
     EXPECT_FALSE(a != b);
   }
@@ -1001,7 +998,7 @@ TYPED_TEST_P(InstanceTest, CountConstructorsDestructors) {
 
     // reserve() must not increase the number of initialized objects
     SCOPED_TRACE("reserve");
-    v.reserve(len+1000);
+    v.reserve(len + 1000);
     EXPECT_EQ(tracker.instances(), len);
     EXPECT_EQ(tracker.copies() + tracker.moves(), len);
 
@@ -1247,9 +1244,8 @@ void InstanceCountElemAssignWithAllocationTest() {
     absl::InlinedVector<Instance, 2> v(original_contents.begin(),
                                        original_contents.end());
     v.assign(3, Instance(123));
-    EXPECT_THAT(v,
-                AllOf(SizeIs(3),
-                      ElementsAre(ValueIs(123), ValueIs(123), ValueIs(123))));
+    EXPECT_THAT(v, AllOf(SizeIs(3), ElementsAre(ValueIs(123), ValueIs(123),
+                                                ValueIs(123))));
     EXPECT_LE(v.size(), v.capacity());
   }
 }
@@ -1528,8 +1524,8 @@ TYPED_TEST_P(InstanceTest, InitializerListAssign) {
     SCOPED_TRACE(original_size);
     absl::InlinedVector<Instance, 2> v(original_size, Instance(12345));
     v.assign({Instance(3), Instance(4), Instance(5)});
-    EXPECT_THAT(v, AllOf(SizeIs(3),
-                         ElementsAre(ValueIs(3), ValueIs(4), ValueIs(5))));
+    EXPECT_THAT(
+        v, AllOf(SizeIs(3), ElementsAre(ValueIs(3), ValueIs(4), ValueIs(5))));
     EXPECT_LE(3, v.capacity());
   }
 }
@@ -1554,7 +1550,7 @@ TEST(DynamicVec, DynamicVecCompiles) {
 TEST(AllocatorSupportTest, Constructors) {
   using MyAlloc = CountingAllocator<int>;
   using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
-  const int ia[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+  const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
   int64_t allocated = 0;
   MyAlloc alloc(&allocated);
   { AllocVec ABSL_ATTRIBUTE_UNUSED v; }
@@ -1570,7 +1566,7 @@ TEST(AllocatorSupportTest, Constructors) {
 TEST(AllocatorSupportTest, CountAllocations) {
   using MyAlloc = CountingAllocator<int>;
   using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
-  const int ia[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+  const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
   int64_t allocated = 0;
   MyAlloc alloc(&allocated);
   {
@@ -1634,8 +1630,8 @@ TEST(AllocatorSupportTest, SwapBothAllocated) {
   int64_t allocated1 = 0;
   int64_t allocated2 = 0;
   {
-    const int ia1[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
-    const int ia2[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
+    const int ia1[] = {0, 1, 2, 3, 4, 5, 6, 7};
+    const int ia2[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
     MyAlloc a1(&allocated1);
     MyAlloc a2(&allocated2);
     AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1);
@@ -1659,8 +1655,8 @@ TEST(AllocatorSupportTest, SwapOneAllocated) {
   int64_t allocated1 = 0;
   int64_t allocated2 = 0;
   {
-    const int ia1[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
-    const int ia2[] = { 0, 1, 2, 3 };
+    const int ia1[] = {0, 1, 2, 3, 4, 5, 6, 7};
+    const int ia2[] = {0, 1, 2, 3};
     MyAlloc a1(&allocated1);
     MyAlloc a2(&allocated2);
     AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1);
@@ -1681,65 +1677,42 @@ TEST(AllocatorSupportTest, SwapOneAllocated) {
 
 TEST(AllocatorSupportTest, ScopedAllocatorWorks) {
   using StdVector = std::vector<int, CountingAllocator<int>>;
-  using MyAlloc =
-      std::scoped_allocator_adaptor<CountingAllocator<StdVector>>;
-  using AllocVec = absl::InlinedVector<StdVector, 4, MyAlloc>;
-
-  // MSVC 2017's std::vector allocates different amounts of memory in debug
-  // versus opt mode.
-  int64_t test_allocated = 0;
-  StdVector v(CountingAllocator<int>{&test_allocated});
-  // The amount of memory allocated by a default constructed vector<int>
-  auto default_std_vec_allocated = test_allocated;
-  v.push_back(1);
-  // The amount of memory allocated by a copy-constructed vector<int> with one
-  // element.
-  int64_t one_element_std_vec_copy_allocated = test_allocated;
+  using Alloc = CountingAllocator<StdVector>;
+  using ScopedAlloc = std::scoped_allocator_adaptor<Alloc>;
+  using AllocVec = absl::InlinedVector<StdVector, 1, ScopedAlloc>;
 
-  int64_t allocated = 0;
-  AllocVec vec(MyAlloc{CountingAllocator<StdVector>{&allocated}});
-  EXPECT_EQ(allocated, 0);
+  {
+    int64_t total_allocated_byte_count = 0;
 
-  // This default constructs a vector<int>, but the allocator should pass itself
-  // into the vector<int>, so check allocation compared to that.
-  // The absl::InlinedVector does not allocate any memory.
-  // The vector<int> may allocate any memory.
-  auto expected = default_std_vec_allocated;
-  vec.resize(1);
-  EXPECT_EQ(allocated, expected);
-
-  // We make vector<int> allocate memory.
-  // It must go through the allocator even though we didn't construct the
-  // vector directly.  This assumes that vec[0] doesn't need to grow its
-  // allocation.
-  expected += sizeof(int);
-  vec[0].push_back(1);
-  EXPECT_EQ(allocated, expected);
-
-  // Another allocating vector.
-  expected += one_element_std_vec_copy_allocated;
-  vec.push_back(vec[0]);
-  EXPECT_EQ(allocated, expected);
-
-  // Overflow the inlined memory.
-  // The absl::InlinedVector will now allocate.
-  expected += sizeof(StdVector) * 8 + default_std_vec_allocated * 3;
-  vec.resize(5);
-  EXPECT_EQ(allocated, expected);
-
-  // Adding one more in external mode should also work.
-  expected += one_element_std_vec_copy_allocated;
-  vec.push_back(vec[0]);
-  EXPECT_EQ(allocated, expected);
-
-  // And extending these should still work.  This assumes that vec[0] does not
-  // need to grow its allocation.
-  expected += sizeof(int);
-  vec[0].push_back(1);
-  EXPECT_EQ(allocated, expected);
-
-  vec.clear();
-  EXPECT_EQ(allocated, 0);
+    AllocVec inlined_case(ScopedAlloc(Alloc(+&total_allocated_byte_count)));
+    inlined_case.emplace_back();
+
+    int64_t absl_responsible_for_count = total_allocated_byte_count;
+    EXPECT_EQ(absl_responsible_for_count, 0);
+
+    inlined_case[0].emplace_back();
+    EXPECT_GT(total_allocated_byte_count, absl_responsible_for_count);
+
+    inlined_case.clear();
+    EXPECT_EQ(total_allocated_byte_count, 0);
+  }
+
+  {
+    int64_t total_allocated_byte_count = 0;
+
+    AllocVec allocated_case(ScopedAlloc(Alloc(+&total_allocated_byte_count)));
+    allocated_case.emplace_back();
+    allocated_case.emplace_back();
+
+    int64_t absl_responsible_for_count = total_allocated_byte_count;
+    EXPECT_GT(absl_responsible_for_count, 0);
+
+    allocated_case[1].emplace_back();
+    EXPECT_GT(total_allocated_byte_count, absl_responsible_for_count);
+
+    allocated_case.clear();
+    EXPECT_EQ(total_allocated_byte_count, 0);
+  }
 }
 
 TEST(AllocatorSupportTest, SizeAllocConstructor) {
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
index c2802c823a8d..84b97791fa20 100644
--- a/absl/container/internal/inlined_vector.h
+++ b/absl/container/internal/inlined_vector.h
@@ -364,16 +364,6 @@ class Storage {
     allocation_tx_ptr->GetCapacity() = 0;
   }
 
-  void SwapSizeAndIsAllocated(Storage* other) {
-    using std::swap;
-    swap(GetSizeAndIsAllocated(), other->GetSizeAndIsAllocated());
-  }
-
-  void SwapAllocatedSizeAndCapacity(Storage* other) {
-    using std::swap;
-    swap(data_.allocated, other->data_.allocated);
-  }
-
   void MemcpyFrom(const Storage& other_storage) {
     assert(IsMemcpyOk::value || other_storage.GetIsAllocated());
 
@@ -390,10 +380,17 @@ class Storage {
   template <typename ValueAdapter>
   void Resize(ValueAdapter values, size_type new_size);
 
+  template <typename... Args>
+  reference EmplaceBack(Args&&... args);
+
+  iterator Erase(const_iterator from, const_iterator to);
+
   void Reserve(size_type requested_capacity);
 
   void ShrinkToFit();
 
+  void Swap(Storage* other_storage_ptr);
+
  private:
   size_type& GetSizeAndIsAllocated() { return metadata_.template get<1>(); }
 
@@ -401,14 +398,8 @@ class Storage {
     return metadata_.template get<1>();
   }
 
-  static size_type LegacyNextCapacityFrom(size_type current_capacity,
-                                          size_type requested_capacity) {
-    // TODO(johnsoncj): Get rid of this old behavior.
-    size_type new_capacity = current_capacity;
-    while (new_capacity < requested_capacity) {
-      new_capacity *= 2;
-    }
-    return new_capacity;
+  static size_type NextCapacityFrom(size_type current_capacity) {
+    return current_capacity * 2;
   }
 
   using Metadata =
@@ -521,8 +512,7 @@ auto Storage<T, N, A>::Resize(ValueAdapter values, size_type new_size) -> void {
   absl::Span<value_type> destroy_loop;
 
   if (new_size > storage_view.capacity) {
-    pointer new_data = allocation_tx.Allocate(
-        LegacyNextCapacityFrom(storage_view.capacity, new_size));
+    pointer new_data = allocation_tx.Allocate(new_size);
 
     // Construct new objects in `new_data`
     construct_loop = {new_data + storage_view.size,
@@ -563,6 +553,75 @@ auto Storage<T, N, A>::Resize(ValueAdapter values, size_type new_size) -> void {
 }
 
 template <typename T, size_t N, typename A>
+template <typename... Args>
+auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference {
+  StorageView storage_view = MakeStorageView();
+
+  AllocationTransaction allocation_tx(GetAllocPtr());
+
+  IteratorValueAdapter<MoveIterator> move_values(
+      MoveIterator(storage_view.data));
+
+  pointer construct_data =
+      (storage_view.size == storage_view.capacity
+           ? allocation_tx.Allocate(NextCapacityFrom(storage_view.capacity))
+           : storage_view.data);
+
+  pointer last_ptr = construct_data + storage_view.size;
+  AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
+                             std::forward<Args>(args)...);
+
+  if (allocation_tx.DidAllocate()) {
+    ABSL_INTERNAL_TRY {
+      inlined_vector_internal::ConstructElements(
+          GetAllocPtr(), allocation_tx.GetData(), &move_values,
+          storage_view.size);
+    }
+    ABSL_INTERNAL_CATCH_ANY {
+      AllocatorTraits::destroy(*GetAllocPtr(), last_ptr);
+      ABSL_INTERNAL_RETHROW;
+    }
+
+    inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+                                             storage_view.size);
+
+    DeallocateIfAllocated();
+    AcquireAllocation(&allocation_tx);
+    SetIsAllocated();
+  }
+
+  AddSize(1);
+  return *last_ptr;
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::Erase(const_iterator from, const_iterator to)
+    -> iterator {
+  assert(from != to);
+
+  StorageView storage_view = MakeStorageView();
+
+  size_type erase_size = std::distance(from, to);
+  size_type erase_index =
+      std::distance(const_iterator(storage_view.data), from);
+  size_type erase_end_index = erase_index + erase_size;
+
+  IteratorValueAdapter<MoveIterator> move_values(
+      MoveIterator(storage_view.data + erase_end_index));
+
+  inlined_vector_internal::AssignElements(storage_view.data + erase_index,
+                                          &move_values,
+                                          storage_view.size - erase_end_index);
+
+  inlined_vector_internal::DestroyElements(
+      GetAllocPtr(), storage_view.data + (storage_view.size - erase_size),
+      erase_size);
+
+  SubtractSize(erase_size);
+  return iterator(storage_view.data + erase_index);
+}
+
+template <typename T, size_t N, typename A>
 auto Storage<T, N, A>::Reserve(size_type requested_capacity) -> void {
   StorageView storage_view = MakeStorageView();
 
@@ -573,8 +632,7 @@ auto Storage<T, N, A>::Reserve(size_type requested_capacity) -> void {
   IteratorValueAdapter<MoveIterator> move_values(
       MoveIterator(storage_view.data));
 
-  pointer new_data = allocation_tx.Allocate(
-      LegacyNextCapacityFrom(storage_view.capacity, requested_capacity));
+  pointer new_data = allocation_tx.Allocate(requested_capacity);
 
   inlined_vector_internal::ConstructElements(GetAllocPtr(), new_data,
                                              &move_values, storage_view.size);
@@ -592,8 +650,8 @@ auto Storage<T, N, A>::ShrinkToFit() -> void {
   // May only be called on allocated instances!
   assert(GetIsAllocated());
 
-  StorageView storage_view = {GetAllocatedData(), GetSize(),
-                              GetAllocatedCapacity()};
+  StorageView storage_view{GetAllocatedData(), GetSize(),
+                           GetAllocatedCapacity()};
 
   AllocationTransaction allocation_tx(GetAllocPtr());
 
@@ -634,6 +692,82 @@ auto Storage<T, N, A>::ShrinkToFit() -> void {
   }
 }
 
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
+  using std::swap;
+  assert(this != other_storage_ptr);
+
+  if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) {
+    // Both are allocated, thus we can swap the allocations at the top level.
+
+    swap(data_.allocated, other_storage_ptr->data_.allocated);
+  } else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) {
+    // Both are inlined, thus element-wise swap up to smaller size, then move
+    // the remaining elements.
+
+    Storage* small_ptr = this;
+    Storage* large_ptr = other_storage_ptr;
+    if (small_ptr->GetSize() > large_ptr->GetSize()) swap(small_ptr, large_ptr);
+
+    for (size_type i = 0; i < small_ptr->GetSize(); ++i) {
+      swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]);
+    }
+
+    IteratorValueAdapter<MoveIterator> move_values(
+        MoveIterator(large_ptr->GetInlinedData() + small_ptr->GetSize()));
+
+    inlined_vector_internal::ConstructElements(
+        large_ptr->GetAllocPtr(),
+        small_ptr->GetInlinedData() + small_ptr->GetSize(), &move_values,
+        large_ptr->GetSize() - small_ptr->GetSize());
+
+    inlined_vector_internal::DestroyElements(
+        large_ptr->GetAllocPtr(),
+        large_ptr->GetInlinedData() + small_ptr->GetSize(),
+        large_ptr->GetSize() - small_ptr->GetSize());
+  } else {
+    // One is allocated and the other is inlined, thus we first move the
+    // elements from the inlined instance to the inlined space in the allocated
+    // instance and then we can finish by having the other vector take on the
+    // allocation.
+
+    Storage* allocated_ptr = this;
+    Storage* inlined_ptr = other_storage_ptr;
+    if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr);
+
+    StorageView allocated_storage_view{allocated_ptr->GetAllocatedData(),
+                                       allocated_ptr->GetSize(),
+                                       allocated_ptr->GetAllocatedCapacity()};
+
+    IteratorValueAdapter<MoveIterator> move_values(
+        MoveIterator(inlined_ptr->GetInlinedData()));
+
+    ABSL_INTERNAL_TRY {
+      inlined_vector_internal::ConstructElements(
+          inlined_ptr->GetAllocPtr(), allocated_ptr->GetInlinedData(),
+          &move_values, inlined_ptr->GetSize());
+    }
+    ABSL_INTERNAL_CATCH_ANY {
+      // Writing to inlined data will trample on the existing state, thus it
+      // needs to be restored when a construction fails.
+      allocated_ptr->SetAllocatedData(allocated_storage_view.data,
+                                      allocated_storage_view.capacity);
+      ABSL_INTERNAL_RETHROW;
+    }
+
+    inlined_vector_internal::DestroyElements(inlined_ptr->GetAllocPtr(),
+                                             inlined_ptr->GetInlinedData(),
+                                             inlined_ptr->GetSize());
+
+    inlined_ptr->SetAllocatedData(allocated_storage_view.data,
+                                  allocated_storage_view.capacity);
+  }
+
+  // All cases swap the size, `is_allocated` boolean and the allocator.
+  swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated());
+  swap(*GetAllocPtr(), *other_storage_ptr->GetAllocPtr());
+}
+
 }  // namespace inlined_vector_internal
 }  // namespace absl
 
diff --git a/absl/flags/BUILD.bazel b/absl/flags/BUILD.bazel
index bb1a7aacde64..82e6ffcfcef3 100644
--- a/absl/flags/BUILD.bazel
+++ b/absl/flags/BUILD.bazel
@@ -158,9 +158,11 @@ cc_library(
     name = "usage",
     srcs = [
         "internal/usage.cc",
+        "usage.cc",
     ],
     hdrs = [
         "internal/usage.h",
+        "usage.h",
     ],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
diff --git a/absl/flags/CMakeLists.txt b/absl/flags/CMakeLists.txt
index 9e0b441b350c..9c936eccc8e3 100644
--- a/absl/flags/CMakeLists.txt
+++ b/absl/flags/CMakeLists.txt
@@ -144,8 +144,10 @@ absl_cc_library(
     flags_usage
   SRCS
     "internal/usage.cc"
+    "usage.cc"
   HDRS
     "internal/usage.h"
+    "usage.h"
   COPTS
     ${ABSL_DEFAULT_COPTS}
   LINKOPTS
diff --git a/absl/flags/internal/usage.cc b/absl/flags/internal/usage.cc
index f17cc6c5787d..b1ff8b83c223 100644
--- a/absl/flags/internal/usage.cc
+++ b/absl/flags/internal/usage.cc
@@ -21,6 +21,7 @@
 #include "absl/flags/flag.h"
 #include "absl/flags/internal/path_util.h"
 #include "absl/flags/internal/program_name.h"
+#include "absl/flags/usage.h"
 #include "absl/flags/usage_config.h"
 #include "absl/strings/ascii.h"
 #include "absl/strings/str_cat.h"
@@ -204,7 +205,7 @@ void FlagsHelpImpl(std::ostream& out, flags_internal::FlagKindFilter filter_cb,
                    HelpFormat format = HelpFormat::kHumanReadable) {
   if (format == HelpFormat::kHumanReadable) {
     out << flags_internal::ShortProgramInvocationName() << ": "
-        << flags_internal::ProgramUsageMessage() << "\n\n";
+        << absl::ProgramUsageMessage() << "\n\n";
   } else {
     // XML schema is not a part of our public API for now.
     out << "<?xml version=\"1.0\"?>\n"
@@ -213,7 +214,7 @@ void FlagsHelpImpl(std::ostream& out, flags_internal::FlagKindFilter filter_cb,
         // The program name and usage.
         << XMLElement("program", flags_internal::ShortProgramInvocationName())
         << '\n'
-        << XMLElement("usage", flags_internal::ProgramUsageMessage()) << '\n';
+        << XMLElement("usage", absl::ProgramUsageMessage()) << '\n';
   }
 
   // Map of package name to
@@ -278,39 +279,9 @@ void FlagsHelpImpl(std::ostream& out, flags_internal::FlagKindFilter filter_cb,
   }
 }
 
-ABSL_CONST_INIT absl::Mutex usage_message_guard(absl::kConstInit);
-ABSL_CONST_INIT std::string* program_usage_message
-    GUARDED_BY(usage_message_guard) = nullptr;
-
 }  // namespace
 
 // --------------------------------------------------------------------
-// Sets the "usage" message to be used by help reporting routines.
-
-void SetProgramUsageMessage(absl::string_view new_usage_message) {
-  absl::MutexLock l(&usage_message_guard);
-
-  if (flags_internal::program_usage_message != nullptr) {
-    ABSL_INTERNAL_LOG(FATAL, "SetProgramUsageMessage() called twice.");
-    std::exit(1);
-  }
-
-  program_usage_message = new std::string(new_usage_message);
-}
-
-// --------------------------------------------------------------------
-// Returns the usage message set by SetProgramUsageMessage().
-// Note: We are able to return string_view here only because calling
-// SetProgramUsageMessage twice is prohibited.
-absl::string_view ProgramUsageMessage() {
-  absl::MutexLock l(&usage_message_guard);
-
-  return program_usage_message != nullptr
-             ? absl::string_view(*program_usage_message)
-             : "Warning: SetProgramUsageMessage() never called";
-}
-
-// --------------------------------------------------------------------
 // Produces the help message describing specific flag.
 void FlagHelp(std::ostream& out, const flags_internal::CommandLineFlag& flag,
               HelpFormat format) {
diff --git a/absl/flags/internal/usage.h b/absl/flags/internal/usage.h
index 2d0ea7a72a4d..33f3f969bfaf 100644
--- a/absl/flags/internal/usage.h
+++ b/absl/flags/internal/usage.h
@@ -29,20 +29,6 @@
 namespace absl {
 namespace flags_internal {
 
-// Sets the "usage" message to be used by help reporting routines.
-// For example:
-//  absl::SetProgramUsageMessage(
-//      absl::StrCat("This program does nothing.  Sample usage:\n", argv[0],
-//                   " <uselessarg1> <uselessarg2>"));
-// Do not include commandline flags in the usage: we do that for you!
-// Note: Calling SetProgramUsageMessage twice will trigger a call to std::exit.
-void SetProgramUsageMessage(absl::string_view new_usage_message);
-
-// Returns the usage message set by SetProgramUsageMessage().
-absl::string_view ProgramUsageMessage();
-
-// --------------------------------------------------------------------
-
 // The format to report the help messages in.
 enum class HelpFormat {
   kHumanReadable,
diff --git a/absl/flags/internal/usage_test.cc b/absl/flags/internal/usage_test.cc
index cb40aa42378d..fa121fc933fc 100644
--- a/absl/flags/internal/usage_test.cc
+++ b/absl/flags/internal/usage_test.cc
@@ -13,14 +13,16 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#include "absl/flags/internal/usage.h"
+
 #include <sstream>
 
 #include "gtest/gtest.h"
 #include "absl/flags/flag.h"
-#include "absl/flags/parse.h"
 #include "absl/flags/internal/path_util.h"
 #include "absl/flags/internal/program_name.h"
-#include "absl/flags/internal/usage.h"
+#include "absl/flags/parse.h"
+#include "absl/flags/usage.h"
 #include "absl/flags/usage_config.h"
 #include "absl/memory/memory.h"
 #include "absl/strings/match.h"
@@ -81,11 +83,11 @@ class UsageReportingTest : public testing::Test {
 using UsageReportingDeathTest = UsageReportingTest;
 
 TEST_F(UsageReportingDeathTest, TestSetProgramUsageMessage) {
-  EXPECT_EQ(flags::ProgramUsageMessage(), "Custom usage message");
+  EXPECT_EQ(absl::ProgramUsageMessage(), "Custom usage message");
 
 #ifndef _WIN32
   // TODO(rogeeff): figure out why this does not work on Windows.
-  EXPECT_DEATH(flags::SetProgramUsageMessage("custom usage message"),
+  EXPECT_DEATH(absl::SetProgramUsageMessage("custom usage message"),
                ".*SetProgramUsageMessage\\(\\) called twice.*");
 #endif
 }
@@ -360,7 +362,7 @@ TEST_F(UsageReportingTest, TestUsageFlag_helpon) {
 int main(int argc, char* argv[]) {
   absl::GetFlag(FLAGS_undefok);  // Force linking of parse.cc
   flags::SetProgramInvocationName("usage_test");
-  flags::SetProgramUsageMessage("Custom usage message");
+  absl::SetProgramUsageMessage("Custom usage message");
   ::testing::InitGoogleTest(&argc, argv);
 
   return RUN_ALL_TESTS();
diff --git a/absl/flags/usage.cc b/absl/flags/usage.cc
new file mode 100644
index 000000000000..4c01f5101291
--- /dev/null
+++ b/absl/flags/usage.cc
@@ -0,0 +1,56 @@
+//
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "absl/flags/usage.h"
+
+#include <string>
+
+#include "absl/flags/internal/usage.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+namespace flags_internal {
+namespace {
+ABSL_CONST_INIT absl::Mutex usage_message_guard(absl::kConstInit);
+ABSL_CONST_INIT std::string* program_usage_message
+    GUARDED_BY(usage_message_guard) = nullptr;
+}  // namespace
+}  // namespace flags_internal
+
+// --------------------------------------------------------------------
+// Sets the "usage" message to be used by help reporting routines.
+void SetProgramUsageMessage(absl::string_view new_usage_message) {
+  absl::MutexLock l(&flags_internal::usage_message_guard);
+
+  if (flags_internal::program_usage_message != nullptr) {
+    ABSL_INTERNAL_LOG(FATAL, "SetProgramUsageMessage() called twice.");
+    std::exit(1);
+  }
+
+  flags_internal::program_usage_message = new std::string(new_usage_message);
+}
+
+// --------------------------------------------------------------------
+// Returns the usage message set by SetProgramUsageMessage().
+// Note: We are able to return string_view here only because calling
+// SetProgramUsageMessage twice is prohibited.
+absl::string_view ProgramUsageMessage() {
+  absl::MutexLock l(&flags_internal::usage_message_guard);
+
+  return flags_internal::program_usage_message != nullptr
+             ? absl::string_view(*flags_internal::program_usage_message)
+             : "Warning: SetProgramUsageMessage() never called";
+}
+
+}  // namespace absl
diff --git a/absl/flags/usage.h b/absl/flags/usage.h
new file mode 100644
index 000000000000..3a12107137ad
--- /dev/null
+++ b/absl/flags/usage.h
@@ -0,0 +1,40 @@
+//
+//  Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_FLAGS_USAGE_H_
+#define ABSL_FLAGS_USAGE_H_
+
+#include "absl/strings/string_view.h"
+
+// --------------------------------------------------------------------
+// Usage reporting interfaces
+
+namespace absl {
+
+// Sets the "usage" message to be used by help reporting routines.
+// For example:
+//  absl::SetProgramUsageMessage(
+//      absl::StrCat("This program does nothing.  Sample usage:\n", argv[0],
+//                   " <uselessarg1> <uselessarg2>"));
+// Do not include commandline flags in the usage: we do that for you!
+// Note: Calling SetProgramUsageMessage twice will trigger a call to std::exit.
+void SetProgramUsageMessage(absl::string_view new_usage_message);
+
+// Returns the usage message set by SetProgramUsageMessage().
+absl::string_view ProgramUsageMessage();
+
+}  // namespace absl
+
+#endif  // ABSL_FLAGS_USAGE_H_
diff --git a/absl/random/internal/nanobenchmark.cc b/absl/random/internal/nanobenchmark.cc
index 5a8b1ed1dba1..7f37800c6830 100644
--- a/absl/random/internal/nanobenchmark.cc
+++ b/absl/random/internal/nanobenchmark.cc
@@ -59,6 +59,24 @@
 #include <time.h>  // NOLINT
 #endif
 
+// ABSL_HAVE_ATTRIBUTE
+#if !defined(ABSL_HAVE_ATTRIBUTE)
+#ifdef __has_attribute
+#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x)
+#else
+#define ABSL_HAVE_ATTRIBUTE(x) 0
+#endif
+#endif
+
+// ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE prevents inlining of the method.
+#if ABSL_HAVE_ATTRIBUTE(noinline) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE __attribute__((noinline))
+#elif defined(_MSC_VER)
+#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE __declspec(noinline)
+#else
+#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE
+#endif
+
 namespace absl {
 namespace random_internal_nanobenchmark {
 namespace {
@@ -658,8 +676,8 @@ Ticks TotalDuration(const Func func, const void* arg, const InputVec* inputs,
 }
 
 // (Nearly) empty Func for measuring timer overhead/resolution.
-ABSL_ATTRIBUTE_NEVER_INLINE FuncOutput EmptyFunc(const void* arg,
-                                                 const FuncInput input) {
+ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE FuncOutput
+EmptyFunc(const void* arg, const FuncInput input) {
   return input;
 }
 
diff --git a/absl/random/internal/platform.h b/absl/random/internal/platform.h
index 5edab3448bbf..d1ef5c249032 100644
--- a/absl/random/internal/platform.h
+++ b/absl/random/internal/platform.h
@@ -81,50 +81,8 @@
 // Attribute Checks
 // -----------------------------------------------------------------------------
 
-// ABSL_HAVE_ATTRIBUTE
-#undef ABSL_HAVE_ATTRIBUTE
-#ifdef __has_attribute
-#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x)
-#else
-#define ABSL_HAVE_ATTRIBUTE(x) 0
-#endif
-
-// ABSL_ATTRIBUTE_ALWAYS_INLINE forces inlining of the method.
-#undef ABSL_ATTRIBUTE_ALWAYS_INLINE
-#if ABSL_HAVE_ATTRIBUTE(always_inline) || \
-    (defined(__GNUC__) && !defined(__clang__))
-#define ABSL_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
-#elif defined(_MSC_VER)
-// We can achieve something similar to attribute((always_inline)) with MSVC by
-// using the __forceinline keyword, however this is not perfect. MSVC is
-// much less aggressive about inlining, and even with the __forceinline keyword.
-#define ABSL_ATTRIBUTE_ALWAYS_INLINE __forceinline
-#else
-#define ABSL_ATTRIBUTE_ALWAYS_INLINE
-#endif
-
-// ABSL_ATTRIBUTE_NEVER_INLINE prevents inlining of the method.
-#undef ABSL_ATTRIBUTE_NEVER_INLINE
-#if ABSL_HAVE_ATTRIBUTE(noinline) || (defined(__GNUC__) && !defined(__clang__))
-#define ABSL_ATTRIBUTE_NEVER_INLINE __attribute__((noinline))
-#elif defined(_MSC_VER)
-#define ABSL_ATTRIBUTE_NEVER_INLINE __declspec(noinline)
-#else
-#define ABSL_ATTRIBUTE_NEVER_INLINE
-#endif
-
-// ABSL_ATTRIBUTE_FLATTEN enables much more aggressive inlining within
-// the indicated function.
-#undef ABSL_ATTRIBUTE_FLATTEN
-#if ABSL_HAVE_ATTRIBUTE(flatten) || (defined(__GNUC__) && !defined(__clang__))
-#define ABSL_ATTRIBUTE_FLATTEN __attribute__((flatten))
-#else
-#define ABSL_ATTRIBUTE_FLATTEN
-#endif
-
 // ABSL_RANDOM_INTERNAL_RESTRICT annotates whether pointers may be considered
 // to be unaliased.
-#undef ABSL_RANDOM_INTERNAL_RESTRICT
 #if defined(__clang__) || defined(__GNUC__)
 #define ABSL_RANDOM_INTERNAL_RESTRICT __restrict__
 #elif defined(_MSC_VER)
diff --git a/absl/random/internal/randen_hwaes.cc b/absl/random/internal/randen_hwaes.cc
index 0fcd9a85a8b9..6b82d1d07ad8 100644
--- a/absl/random/internal/randen_hwaes.cc
+++ b/absl/random/internal/randen_hwaes.cc
@@ -24,6 +24,37 @@
 
 #include "absl/random/internal/platform.h"
 
+// ABSL_HAVE_ATTRIBUTE
+#if !defined(ABSL_HAVE_ATTRIBUTE)
+#ifdef __has_attribute
+#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x)
+#else
+#define ABSL_HAVE_ATTRIBUTE(x) 0
+#endif
+#endif
+
+#if ABSL_HAVE_ATTRIBUTE(always_inline) || \
+    (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE \
+  __attribute__((always_inline))
+#elif defined(_MSC_VER)
+// We can achieve something similar to attribute((always_inline)) with MSVC by
+// using the __forceinline keyword; however, this is not perfect: MSVC is
+// much less aggressive about inlining, even with the __forceinline keyword.
+#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE __forceinline
+#else
+#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE
+#endif
+
+// ABSL_ATTRIBUTE_FLATTEN enables much more aggressive inlining within
+// the indicated function.
+#undef ABSL_ATTRIBUTE_FLATTEN
+#if ABSL_HAVE_ATTRIBUTE(flatten) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_FLATTEN __attribute__((flatten))
+#else
+#define ABSL_ATTRIBUTE_FLATTEN
+#endif
+
 // ABSL_RANDEN_HWAES_IMPL indicates whether this file will contain
 // a hardware accelerated implementation of randen, or whether it
 // will contain stubs that exit the process.
@@ -160,7 +191,7 @@ using Vector128 = __vector unsigned long long;  // NOLINT(runtime/int)
 
 namespace {
 
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
 ReverseBytes(const Vector128& v) {
   // Reverses the bytes of the vector.
   const __vector unsigned char perm = {15, 14, 13, 12, 11, 10, 9, 8,
@@ -171,26 +202,26 @@ ReverseBytes(const Vector128& v) {
 // WARNING: these load/store in native byte order. It is OK to load and then
 // store an unchanged vector, but interpreting the bits as a number or input
 // to AES will have undefined results.
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
 Vector128Load(const void* ABSL_RANDOM_INTERNAL_RESTRICT from) {
   return vec_vsx_ld(0, reinterpret_cast<const Vector128*>(from));
 }
 
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE void Vector128Store(
-    const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
+Vector128Store(const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
   vec_vsx_st(v, 0, reinterpret_cast<Vector128*>(to));
 }
 
 // One round of AES. "round_key" is a public constant for breaking the
 // symmetry of AES (ensures previously equal columns differ afterwards).
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
 AesRound(const Vector128& state, const Vector128& round_key) {
   return Vector128(__builtin_crypto_vcipher(state, round_key));
 }
 
 // Enables native loads in the round loop by pre-swapping.
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE void SwapEndian(
-    uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
+SwapEndian(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
   using absl::random_internal::RandenTraits;
   constexpr size_t kLanes = 2;
   constexpr size_t kFeistelBlocks = RandenTraits::kFeistelBlocks;
@@ -242,19 +273,19 @@ using Vector128 = uint8x16_t;
 
 namespace {
 
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
 Vector128Load(const void* ABSL_RANDOM_INTERNAL_RESTRICT from) {
   return vld1q_u8(reinterpret_cast<const uint8_t*>(from));
 }
 
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE void Vector128Store(
-    const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
+Vector128Store(const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
   vst1q_u8(reinterpret_cast<uint8_t*>(to), v);
 }
 
 // One round of AES. "round_key" is a public constant for breaking the
 // symmetry of AES (ensures previously equal columns differ afterwards).
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
 AesRound(const Vector128& state, const Vector128& round_key) {
   // It is important to always use the full round function - omitting the
   // final MixColumns reduces security [https://eprint.iacr.org/2010/041.pdf]
@@ -266,8 +297,8 @@ AesRound(const Vector128& state, const Vector128& round_key) {
   return vaesmcq_u8(vaeseq_u8(state, uint8x16_t{})) ^ round_key;
 }
 
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE void SwapEndian(
-    uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT) {}
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
+SwapEndian(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT) {}
 
 }  // namespace
 
@@ -282,13 +313,15 @@ namespace {
 class Vector128 {
  public:
   // Convert from/to intrinsics.
-  inline ABSL_ATTRIBUTE_ALWAYS_INLINE explicit Vector128(
+  inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE explicit Vector128(
       const __m128i& Vector128)
       : data_(Vector128) {}
 
-  inline ABSL_ATTRIBUTE_ALWAYS_INLINE __m128i data() const { return data_; }
+  inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE __m128i data() const {
+    return data_;
+  }
 
-  inline ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128& operator^=(
+  inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128& operator^=(
       const Vector128& other) {
     data_ = _mm_xor_si128(data_, other.data());
     return *this;
@@ -298,20 +331,20 @@ class Vector128 {
   __m128i data_;
 };
 
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
 Vector128Load(const void* ABSL_RANDOM_INTERNAL_RESTRICT from) {
   return Vector128(_mm_load_si128(reinterpret_cast<const __m128i*>(from)));
 }
 
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE void Vector128Store(
-    const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
+Vector128Store(const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
   _mm_store_si128(reinterpret_cast<__m128i * ABSL_RANDOM_INTERNAL_RESTRICT>(to),
                   v.data());
 }
 
 // One round of AES. "round_key" is a public constant for breaking the
 // symmetry of AES (ensures previously equal columns differ afterwards).
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
 AesRound(const Vector128& state, const Vector128& round_key) {
   // It is important to always use the full round function - omitting the
   // final MixColumns reduces security [https://eprint.iacr.org/2010/041.pdf]
@@ -319,8 +352,8 @@ AesRound(const Vector128& state, const Vector128& round_key) {
   return Vector128(_mm_aesenc_si128(state.data(), round_key.data()));
 }
 
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE void SwapEndian(
-    uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT) {}
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
+SwapEndian(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT) {}
 
 }  // namespace
 
@@ -417,8 +450,8 @@ constexpr size_t kLanes = 2;
 
 // Block shuffles applies a shuffle to the entire state between AES rounds.
 // Improved odd-even shuffle from "New criterion for diffusion property".
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO void BlockShuffle(
-    uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
+inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO void
+BlockShuffle(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
   static_assert(kFeistelBlocks == 16, "Expecting 16 FeistelBlocks.");
 
   constexpr size_t shuffle[kFeistelBlocks] = {7,  2, 13, 4,  11, 8,  3, 6,
@@ -466,9 +499,10 @@ inline ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO void BlockShuffle(
 // per 16 bytes (vs. 10 for AES-CTR). Computing eight round functions in
 // parallel hides the 7-cycle AESNI latency on HSW. Note that the Feistel
 // XORs are 'free' (included in the second AES instruction).
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO const u64x2*
-FeistelRound(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state,
-             const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
+inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO const
+    u64x2*
+    FeistelRound(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state,
+                 const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
   static_assert(kFeistelBlocks == 16, "Expecting 16 FeistelBlocks.");
 
   // MSVC does a horrible job at unrolling loops.
@@ -527,9 +561,9 @@ FeistelRound(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state,
 // Indistinguishable from ideal by chosen-ciphertext adversaries using less than
 // 2^64 queries if the round function is a PRF. This is similar to the b=8 case
 // of Simpira v2, but more efficient than its generic construction for b=16.
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO void Permute(
-    const void* ABSL_RANDOM_INTERNAL_RESTRICT keys,
-    uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
+inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO void
+Permute(const void* ABSL_RANDOM_INTERNAL_RESTRICT keys,
+        uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
   const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys128 =
       static_cast<const u64x2*>(keys);
 
diff --git a/absl/random/internal/randen_slow.cc b/absl/random/internal/randen_slow.cc
index b2ecabff878c..7a2e2daa53f5 100644
--- a/absl/random/internal/randen_slow.cc
+++ b/absl/random/internal/randen_slow.cc
@@ -20,6 +20,28 @@
 
 #include "absl/random/internal/platform.h"
 
+// ABSL_HAVE_ATTRIBUTE
+#if !defined(ABSL_HAVE_ATTRIBUTE)
+#ifdef __has_attribute
+#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x)
+#else
+#define ABSL_HAVE_ATTRIBUTE(x) 0
+#endif
+#endif
+
+#if ABSL_HAVE_ATTRIBUTE(always_inline) || \
+    (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE \
+  __attribute__((always_inline))
+#elif defined(_MSC_VER)
+// We can achieve something similar to attribute((always_inline)) with MSVC by
+// using the __forceinline keyword; however, this is not perfect: MSVC is
+// much less aggressive about inlining, even with the __forceinline keyword.
+#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE __forceinline
+#else
+#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE
+#endif
+
 namespace {
 
 // AES portions based on rijndael-alg-fst.c,
@@ -222,7 +244,7 @@ struct alignas(16) u64x2 {
 // as an underlying vector register.
 //
 struct Vector128 {
-  inline ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128& operator^=(
+  inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128& operator^=(
       const Vector128& other) {
     s[0] ^= other.s[0];
     s[1] ^= other.s[1];
@@ -234,7 +256,7 @@ struct Vector128 {
   uint32_t s[4];
 };
 
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
+inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
 Vector128Load(const void* ABSL_RANDOM_INTERNAL_RESTRICT from) {
   Vector128 result;
   const uint8_t* ABSL_RANDOM_INTERNAL_RESTRICT src =
@@ -259,7 +281,7 @@ Vector128Load(const void* ABSL_RANDOM_INTERNAL_RESTRICT from) {
   return result;
 }
 
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE void Vector128Store(
+inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void Vector128Store(
     const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
   uint8_t* dst = reinterpret_cast<uint8_t*>(to);
   dst[0] = static_cast<uint8_t>(v.s[0] >> 24);
@@ -282,7 +304,7 @@ inline ABSL_ATTRIBUTE_ALWAYS_INLINE void Vector128Store(
 
 // One round of AES. "round_key" is a public constant for breaking the
 // symmetry of AES (ensures previously equal columns differ afterwards).
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
+inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
 AesRound(const Vector128& state, const Vector128& round_key) {
   // clang-format off
   Vector128 result;
@@ -348,7 +370,7 @@ static_assert(kKeys == kRoundKeys, "kKeys and kRoundKeys must be equal");
 static constexpr size_t kLanes = 2;
 
 // The improved Feistel block shuffle function for 16 blocks.
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE void BlockShuffle(
+inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void BlockShuffle(
     uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state_u64) {
   static_assert(kFeistelBlocks == 16,
                 "Feistel block shuffle only works for 16 blocks.");
@@ -409,7 +431,7 @@ inline ABSL_ATTRIBUTE_ALWAYS_INLINE void BlockShuffle(
 // per 16 bytes (vs. 10 for AES-CTR). Computing eight round functions in
 // parallel hides the 7-cycle AESNI latency on HSW. Note that the Feistel
 // XORs are 'free' (included in the second AES instruction).
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE const u64x2* FeistelRound(
+inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE const u64x2* FeistelRound(
     uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state,
     const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
   for (size_t branch = 0; branch < kFeistelBlocks; branch += 4) {
@@ -435,7 +457,7 @@ inline ABSL_ATTRIBUTE_ALWAYS_INLINE const u64x2* FeistelRound(
 // Indistinguishable from ideal by chosen-ciphertext adversaries using less than
 // 2^64 queries if the round function is a PRF. This is similar to the b=8 case
 // of Simpira v2, but more efficient than its generic construction for b=16.
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE void Permute(
+inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void Permute(
     const void* keys, uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
   const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys128 =
       static_cast<const u64x2*>(keys);
diff --git a/absl/strings/match.h b/absl/strings/match.h
index 5251b7ff293e..762f359f7967 100644
--- a/absl/strings/match.h
+++ b/absl/strings/match.h
@@ -62,8 +62,7 @@ inline bool EndsWith(absl::string_view text, absl::string_view suffix) {
   return suffix.empty() ||
          (text.size() >= suffix.size() &&
           memcmp(text.data() + (text.size() - suffix.size()), suffix.data(),
-                 suffix.size()) == 0
-         );
+                 suffix.size()) == 0);
 }
 
 // EqualsIgnoreCase()