author | Abseil Team <absl-team@google.com> | 2019-06-21T20:11-0700
committer | Gennadiy Rozental <rogeeff@google.com> | 2019-06-21T20:18-0400
commit | e9324d926a9189e222741fce6e676f0944661a72 (patch)
tree | a08568a709940c376454da34c9d8aac021378e5f /absl/random/internal/pool_urbg.cc
parent | 43ef2148c0936ebf7cb4be6b19927a9d9d145b8f (diff)
Export of internal Abseil changes.
-- 7a6ff16a85beb730c172d5d25cf1b5e1be885c56 by Laramie Leavitt <lar@google.com>: Internal change. PiperOrigin-RevId: 254454546
-- ff8f9bafaefc26d451f576ea4a06d150aed63f6f by Andy Soffer <asoffer@google.com>: Internal changes. PiperOrigin-RevId: 254451562
-- deefc5b651b479ce36f0b4ef203e119c0c8936f2 by CJ Johnson <johnsoncj@google.com>: Account for subtracting unsigned values from the size of InlinedVector. PiperOrigin-RevId: 254450625
-- 3c677316a27bcadc17e41957c809ca472d5fef14 by Andy Soffer <asoffer@google.com>: Add C++17's std::make_from_tuple to absl/utility/utility.h (see the first sketch after this change list). PiperOrigin-RevId: 254411573
-- 4ee3536a918830eeec402a28fc31a62c7c90b940 by CJ Johnson <johnsoncj@google.com>: Adds benchmark for the rest of the InlinedVector public API. PiperOrigin-RevId: 254408378
-- e5a21a00700ee83498ff1efbf649169756463ee4 by CJ Johnson <johnsoncj@google.com>: Updates the definition of InlinedVector::shrink_to_fit() to be exception-safe and adds exception safety tests for it. PiperOrigin-RevId: 254401387
-- 2ea82e72b86d82d78b4e4712a63a55981b53c64b by Laramie Leavitt <lar@google.com>: Use absl::InsecureBitGen in place of std::mt19937 in the absl/random/...distribution_test.cc tests (see the second sketch after this change list). PiperOrigin-RevId: 254289444
-- fa099e02c413a7ffda732415e8105cad26a90337 by Andy Soffer <asoffer@google.com>: Internal changes. PiperOrigin-RevId: 254286334
-- ce34b7f36933b30cfa35b9c9a5697a792b5666e4 by Andy Soffer <asoffer@google.com>: Internal changes. PiperOrigin-RevId: 254273059
-- 6f9c473da7c2090c2e85a37c5f00622e8a912a89 by Jorg Brown <jorg@google.com>: Change absl::container_internal::CompressedTuple to instantiate its internal Storage class with the name of the type it's holding, rather than the name of the Tuple. This is not an externally visible change, other than that less compiler memory is used and less debug information is generated. PiperOrigin-RevId: 254269285
-- 8bd3c186bf2fc0c55d8a2dd6f28a5327502c9fba by Andy Soffer <asoffer@google.com>: Adding short-hand IntervalClosed for IntervalClosedClosed and IntervalOpen for IntervalOpenOpen. PiperOrigin-RevId: 254252419
-- ea957f99b6a04fccd42aa05605605f3b44b1ecfd by Abseil Team <absl-team@google.com>: Do not directly use __SIZEOF_INT128__. In order to avoid linker errors when building with clang-cl (__fixunsdfti, __udivti3 and __fixunssfti are undefined), this CL uses ABSL_HAVE_INTRINSIC_INT128, which is not defined for clang-cl. PiperOrigin-RevId: 254250739
-- 89ab385cd26b34d64130bce856253aaba96d2345 by Andy Soffer <asoffer@google.com>: Internal changes. PiperOrigin-RevId: 254242321
-- cffc793d93eca6d6bdf7de733847b6ab4a255ae9 by CJ Johnson <johnsoncj@google.com>: Adds benchmark for InlinedVector::reserve(size_type). PiperOrigin-RevId: 254199226
-- c90c7a9fa3c8f0c9d5114036979548b055ea2f2a by Gennadiy Rozental <rogeeff@google.com>: Import of CCTZ from GitHub. PiperOrigin-RevId: 254072387
-- c4c388beae016c9570ab54ffa1d52660e4a85b7b by Laramie Leavitt <lar@google.com>: Internal cleanup. PiperOrigin-RevId: 254062381
-- d3c992e221cc74e5372d0c8fa410170b6a43c062 by Tom Manshreck <shreck@google.com>: Update distributions.h to Abseil standards. PiperOrigin-RevId: 254054946
-- d15ad0035c34ef11b14fadc5a4a2d3ec415f5518 by CJ Johnson <johnsoncj@google.com>: Removes functions with only one caller from the implementation details of InlinedVector by manually inlining the definitions. PiperOrigin-RevId: 254005427
-- 2f37e807efc3a8ef1f4b539bdd379917d4151520 by Andy Soffer <asoffer@google.com>: Initial release of Abseil Random. PiperOrigin-RevId: 253999861
-- 24ed1694b6430791d781ed533a8f8ccf6cac5856 by CJ Johnson <johnsoncj@google.com>: Updates the definitions of InlinedVector::assign(...) and InlinedVector::operator=(...) to new, exception-safe implementations, with exception safety tests to boot. PiperOrigin-RevId: 253993691
-- 5613d95f5a7e34a535cfaeadce801441e990843e by CJ Johnson <johnsoncj@google.com>: Adds benchmarks for InlinedVector::shrink_to_fit(). PiperOrigin-RevId: 253989647
-- 2a96ddfdac40bbb8cb6a7f1aeab90917067c6e63 by Abseil Team <absl-team@google.com>: Initial release of Abseil Random. PiperOrigin-RevId: 253927497
-- bf1aff8fc9ffa921ad74643e9525ecf25b0d8dc1 by Andy Soffer <asoffer@google.com>: Initial release of Abseil Random. PiperOrigin-RevId: 253920512
-- bfc03f4a3dcda3cf3a4b84bdb84cda24e3394f41 by Laramie Leavitt <lar@google.com>: Internal change. PiperOrigin-RevId: 253886486
-- 05036cfcc078ca7c5f581a00dfb0daed568cbb69 by Eric Fiselier <ericwf@google.com>: Don't include `winsock2.h` because it drags in `windows.h` and friends, and they define awful macros like OPAQUE, ERROR, and more. This has the potential to break Abseil users. Instead we only forward declare `timeval` and require Windows users to include `winsock2.h` themselves. This is both inconsistent and poor QoI, but so is including `windows.h`. PiperOrigin-RevId: 253852615

GitOrigin-RevId: 7a6ff16a85beb730c172d5d25cf1b5e1be885c56
Change-Id: Icd6aff87da26f29ec8915da856f051129987cef6
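First sketch: a minimal illustration of the make_from_tuple addition noted above, assuming absl::make_from_tuple in absl/utility/utility.h mirrors the C++17 std::make_from_tuple signature. Endpoint is a hypothetical type used only for this example.

#include <string>
#include <tuple>
#include <utility>

#include "absl/utility/utility.h"

// Hypothetical type for illustration only.
struct Endpoint {
  Endpoint(std::string h, int p) : host(std::move(h)), port(p) {}
  std::string host;
  int port;
};

int main() {
  // Unpack a tuple of constructor arguments directly into Endpoint's
  // constructor, mirroring C++17's std::make_from_tuple.
  auto args = std::make_tuple(std::string("localhost"), 8080);
  Endpoint ep = absl::make_from_tuple<Endpoint>(args);
  return ep.port == 8080 ? 0 : 1;
}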
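Second sketch: a minimal example of the absl/random usage mentioned above, covering the std::mt19937 -> absl::InsecureBitGen substitution in test code and the new IntervalClosed shorthand, assuming the absl::InsecureBitGen, absl::Uniform, and absl::IntervalClosed names from absl/random/random.h and absl/random/distributions.h.

#include <cstdio>

#include "absl/random/distributions.h"
#include "absl/random/random.h"

int main() {
  // Non-cryptographic URBG intended as a fast drop-in for std::mt19937
  // in test code; it still satisfies [rand.req.urng].
  absl::InsecureBitGen gen;

  // Default interval for floating-point Uniform is half-open: [0.0, 1.0).
  double x = absl::Uniform(gen, 0.0, 1.0);

  // IntervalClosed is the new shorthand for IntervalClosedClosed, so both
  // endpoints are included: a die roll in [1, 6].
  int roll = absl::Uniform(absl::IntervalClosed, gen, 1, 6);

  std::printf("x=%f roll=%d\n", x, roll);
  return 0;
}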
Diffstat (limited to 'absl/random/internal/pool_urbg.cc')
-rw-r--r-- | absl/random/internal/pool_urbg.cc | 252 |
1 file changed, 252 insertions, 0 deletions
diff --git a/absl/random/internal/pool_urbg.cc b/absl/random/internal/pool_urbg.cc
new file mode 100644
index 000000000000..b24eeeffeaa9
--- /dev/null
+++ b/absl/random/internal/pool_urbg.cc
@@ -0,0 +1,252 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/random/internal/pool_urbg.h"
+
+#include <algorithm>
+#include <atomic>
+#include <cstdint>
+#include <cstring>
+#include <iterator>
+
+#include "absl/base/attributes.h"
+#include "absl/base/call_once.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/endian.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/internal/sysinfo.h"
+#include "absl/base/internal/unaligned_access.h"
+#include "absl/base/optimization.h"
+#include "absl/random/internal/randen.h"
+#include "absl/random/internal/seed_material.h"
+#include "absl/random/seed_gen_exception.h"
+
+using absl::base_internal::SpinLock;
+using absl::base_internal::SpinLockHolder;
+
+namespace absl {
+namespace random_internal {
+namespace {
+
+// RandenPoolEntry is a thread-safe pseudorandom bit generator, implementing a
+// single generator within a RandenPool<T>. It is an internal implementation
+// detail, and does not aim to conform to [rand.req.urng].
+//
+// NOTE: There are alignment issues when used on ARM, for instance.
+// See the allocation code in PoolAlignedAlloc().
+class RandenPoolEntry {
+ public:
+  static constexpr size_t kState = RandenTraits::kStateBytes / sizeof(uint32_t);
+  static constexpr size_t kCapacity =
+      RandenTraits::kCapacityBytes / sizeof(uint32_t);
+
+  void Init(absl::Span<const uint32_t> data) {
+    SpinLockHolder l(&mu_);  // Always uncontested.
+    std::copy(data.begin(), data.end(), std::begin(state_));
+    next_ = kState;
+  }
+
+  // Copy bytes into out.
+  void Fill(uint8_t* out, size_t bytes) LOCKS_EXCLUDED(mu_);
+
+  // Returns random bits from the buffer in units of T.
+  template <typename T>
+  inline T Generate() LOCKS_EXCLUDED(mu_);
+
+  inline void MaybeRefill() EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+    if (next_ >= kState) {
+      next_ = kCapacity;
+      impl_.Generate(state_);
+    }
+  }
+
+ private:
+  // Randen URBG state.
+  uint32_t state_[kState] GUARDED_BY(mu_);  // First to satisfy alignment.
+  SpinLock mu_;
+  const Randen impl_;
+  size_t next_ GUARDED_BY(mu_);
+};
+
+template <>
+inline uint8_t RandenPoolEntry::Generate<uint8_t>() {
+  SpinLockHolder l(&mu_);
+  MaybeRefill();
+  return static_cast<uint8_t>(state_[next_++]);
+}
+
+template <>
+inline uint16_t RandenPoolEntry::Generate<uint16_t>() {
+  SpinLockHolder l(&mu_);
+  MaybeRefill();
+  return static_cast<uint16_t>(state_[next_++]);
+}
+
+template <>
+inline uint32_t RandenPoolEntry::Generate<uint32_t>() {
+  SpinLockHolder l(&mu_);
+  MaybeRefill();
+  return state_[next_++];
+}
+
+template <>
+inline uint64_t RandenPoolEntry::Generate<uint64_t>() {
+  SpinLockHolder l(&mu_);
+  if (next_ >= kState - 1) {
+    next_ = kCapacity;
+    impl_.Generate(state_);
+  }
+  auto p = state_ + next_;
+  next_ += 2;
+
+  uint64_t result;
+  std::memcpy(&result, p, sizeof(result));
+  return result;
+}
+
+void RandenPoolEntry::Fill(uint8_t* out, size_t bytes) {
+  SpinLockHolder l(&mu_);
+  while (bytes > 0) {
+    MaybeRefill();
+    size_t remaining = (kState - next_) * sizeof(state_[0]);
+    size_t to_copy = std::min(bytes, remaining);
+    std::memcpy(out, &state_[next_], to_copy);
+    out += to_copy;
+    bytes -= to_copy;
+    next_ += (to_copy + sizeof(state_[0]) - 1) / sizeof(state_[0]);
+  }
+}
+
+// Number of pooled urbg entries.
+static constexpr int kPoolSize = 8;
+
+// Shared pool entries.
+static absl::once_flag pool_once;
+ABSL_CACHELINE_ALIGNED static RandenPoolEntry* shared_pools[kPoolSize];
+
+// Returns an id in the range [0 ... kPoolSize), which indexes into the
+// pool of random engines.
+//
+// Each thread to access the pool is assigned a sequential ID (without reuse)
+// from the pool-id space; the id is cached in a thread_local variable.
+// This id is assigned based on the arrival-order of the thread to the
+// GetPoolID call; this has no binary, CL, or runtime stability because
+// on subsequent runs the order within the same program may be significantly
+// different. However, as other thread IDs are not assigned sequentially,
+// this is not expected to matter.
+int GetPoolID() {
+  static_assert(kPoolSize >= 1,
+                "At least one urbg instance is required for PoolURBG");
+
+  ABSL_CONST_INIT static std::atomic<int64_t> sequence{0};
+
+#ifdef ABSL_HAVE_THREAD_LOCAL
+  static thread_local int my_pool_id = -1;
+  if (ABSL_PREDICT_FALSE(my_pool_id < 0)) {
+    my_pool_id = (sequence++ % kPoolSize);
+  }
+  return my_pool_id;
+#else
+  static pthread_key_t tid_key = [] {
+    pthread_key_t tmp_key;
+    int err = pthread_key_create(&tmp_key, nullptr);
+    if (err) {
+      ABSL_RAW_LOG(FATAL, "pthread_key_create failed with %d", err);
+    }
+    return tmp_key;
+  }();
+
+  // Store the value in the pthread_{get/set}specific. However an uninitialized
+  // value is 0, so add +1 to distinguish from the null value.
+  intptr_t my_pool_id =
+      reinterpret_cast<intptr_t>(pthread_getspecific(tid_key));
+  if (ABSL_PREDICT_FALSE(my_pool_id == 0)) {
+    // No allocated ID, allocate the next value, cache it, and return.
+    my_pool_id = (sequence++ % kPoolSize) + 1;
+    int err = pthread_setspecific(tid_key, reinterpret_cast<void*>(my_pool_id));
+    if (err) {
+      ABSL_RAW_LOG(FATAL, "pthread_setspecific failed with %d", err);
+    }
+  }
+  return my_pool_id - 1;
+#endif
+}
+
+// Allocate a RandenPoolEntry with at least 32-byte alignment, which is required
+// by ARM platform code.
+RandenPoolEntry* PoolAlignedAlloc() {
+  constexpr size_t kAlignment =
+      ABSL_CACHELINE_SIZE > 32 ? ABSL_CACHELINE_SIZE : 32;
+
+  // Not all the platforms that we build for have std::aligned_alloc, however
+  // since we never free these objects, we can over allocate and munge the
+  // pointers to the correct alignment.
+  void* memory = std::malloc(sizeof(RandenPoolEntry) + kAlignment);
+  auto x = reinterpret_cast<intptr_t>(memory);
+  auto y = x % kAlignment;
+  void* aligned =
+      (y == 0) ? memory : reinterpret_cast<void*>(x + kAlignment - y);
+  return new (aligned) RandenPoolEntry();
+}
+
+// Allocate and initialize kPoolSize objects of type RandenPoolEntry.
+//
+// The initialization strategy is to initialize one object directly from
+// OS entropy, then to use that object to seed all of the individual
+// pool instances.
+void InitPoolURBG() {
+  static constexpr size_t kSeedSize =
+      RandenTraits::kStateBytes / sizeof(uint32_t);
+  // Read the seed data from OS entropy once.
+  uint32_t seed_material[kPoolSize * kSeedSize];
+  if (!random_internal::ReadSeedMaterialFromOSEntropy(
+          absl::MakeSpan(seed_material))) {
+    random_internal::ThrowSeedGenException();
+  }
+  for (int i = 0; i < kPoolSize; i++) {
+    shared_pools[i] = PoolAlignedAlloc();
+    shared_pools[i]->Init(
+        absl::MakeSpan(&seed_material[i * kSeedSize], kSeedSize));
+  }
+}
+
+// Returns the pool entry for the current thread.
+RandenPoolEntry* GetPoolForCurrentThread() {
+  absl::call_once(pool_once, InitPoolURBG);
+  return shared_pools[GetPoolID()];
+}
+
+}  // namespace
+
+template <typename T>
+typename RandenPool<T>::result_type RandenPool<T>::Generate() {
+  auto* pool = GetPoolForCurrentThread();
+  return pool->Generate<T>();
+}
+
+template <typename T>
+void RandenPool<T>::Fill(absl::Span<result_type> data) {
+  auto* pool = GetPoolForCurrentThread();
+  pool->Fill(reinterpret_cast<uint8_t*>(data.data()),
+             data.size() * sizeof(result_type));
+}
+
+template class RandenPool<uint8_t>;
+template class RandenPool<uint16_t>;
+template class RandenPool<uint32_t>;
+template class RandenPool<uint64_t>;
+
+}  // namespace random_internal
+}  // namespace absl
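For orientation, a hypothetical caller of the pool added by this diff might look like the sketch below. RandenPool lives in the internal random_internal namespace and is not a supported public API; the static Generate()/Fill() signatures are assumed from the definitions above and the declarations in pool_urbg.h.

#include <cstdint>
#include <vector>

#include "absl/random/internal/pool_urbg.h"
#include "absl/types/span.h"

int main() {
  using absl::random_internal::RandenPool;

  // Draw one value from the pool entry assigned to this thread.
  uint64_t word = RandenPool<uint64_t>::Generate();
  (void)word;

  // Bulk-fill a buffer; Fill() copies whole words of pooled state, so the
  // spinlock acquisition cost is amortized over many outputs.
  std::vector<uint32_t> buf(1024);
  RandenPool<uint32_t>::Fill(absl::MakeSpan(buf));

  return 0;
}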