about summary refs log tree commit diff
path: root/immer
diff options
context:
space:
mode:
authorVincent Ambo <mail@tazj.in>2020-07-15T07·20+0100
committerVincent Ambo <mail@tazj.in>2020-07-15T07·20+0100
commit7f19d641647ac4ef313ed88d6b5c140983ce5436 (patch)
tree31b66c81465293da5c093c5dde3e419758c0d6cc /immer
Squashed 'third_party/immer/' content from commit ad3e3556d
git-subtree-dir: third_party/immer
git-subtree-split: ad3e3556d38bb75966dd24c61a774970a7c7957e
Diffstat (limited to 'immer')
-rw-r--r--immer/algorithm.hpp212
-rw-r--r--immer/array.hpp364
-rw-r--r--immer/array_transient.hpp202
-rw-r--r--immer/atom.hpp254
-rw-r--r--immer/box.hpp194
-rw-r--r--immer/config.hpp93
-rw-r--r--immer/detail/arrays/no_capacity.hpp203
-rw-r--r--immer/detail/arrays/node.hpp127
-rw-r--r--immer/detail/arrays/with_capacity.hpp303
-rw-r--r--immer/detail/combine_standard_layout.hpp235
-rw-r--r--immer/detail/hamts/bits.hpp108
-rw-r--r--immer/detail/hamts/champ.hpp473
-rw-r--r--immer/detail/hamts/champ_iterator.hpp148
-rw-r--r--immer/detail/hamts/node.hpp717
-rw-r--r--immer/detail/iterator_facade.hpp212
-rw-r--r--immer/detail/rbts/bits.hpp33
-rw-r--r--immer/detail/rbts/node.hpp932
-rw-r--r--immer/detail/rbts/operations.hpp2461
-rw-r--r--immer/detail/rbts/position.hpp1977
-rw-r--r--immer/detail/rbts/rbtree.hpp509
-rw-r--r--immer/detail/rbts/rbtree_iterator.hpp99
-rw-r--r--immer/detail/rbts/rrbtree.hpp1396
-rw-r--r--immer/detail/rbts/rrbtree_iterator.hpp98
-rw-r--r--immer/detail/rbts/visitor.hpp56
-rw-r--r--immer/detail/ref_count_base.hpp36
-rw-r--r--immer/detail/type_traits.hpp223
-rw-r--r--immer/detail/util.hpp258
-rw-r--r--immer/experimental/detail/dvektor_impl.hpp498
-rw-r--r--immer/experimental/dvektor.hpp69
-rw-r--r--immer/flex_vector.hpp608
-rw-r--r--immer/flex_vector_transient.hpp251
-rw-r--r--immer/heap/cpp_heap.hpp41
-rw-r--r--immer/heap/debug_size_heap.hpp69
-rw-r--r--immer/heap/free_list_heap.hpp83
-rw-r--r--immer/heap/free_list_node.hpp24
-rw-r--r--immer/heap/gc_heap.hpp127
-rw-r--r--immer/heap/heap_policy.hpp141
-rw-r--r--immer/heap/identity_heap.hpp34
-rw-r--r--immer/heap/malloc_heap.hpp44
-rw-r--r--immer/heap/split_heap.hpp40
-rw-r--r--immer/heap/tags.hpp16
-rw-r--r--immer/heap/thread_local_free_list_heap.hpp55
-rw-r--r--immer/heap/unsafe_free_list_heap.hpp109
-rw-r--r--immer/heap/with_data.hpp43
-rw-r--r--immer/map.hpp342
-rw-r--r--immer/map_transient.hpp41
-rw-r--r--immer/memory_policy.hpp135
-rw-r--r--immer/refcount/enable_intrusive_ptr.hpp37
-rw-r--r--immer/refcount/no_refcount_policy.hpp45
-rw-r--r--immer/refcount/refcount_policy.hpp101
-rw-r--r--immer/refcount/unsafe_refcount_policy.hpp40
-rw-r--r--immer/set.hpp198
-rw-r--r--immer/set_transient.hpp40
-rw-r--r--immer/transience/gc_transience_policy.hpp110
-rw-r--r--immer/transience/no_transience_policy.hpp48
-rw-r--r--immer/vector.hpp412
-rw-r--r--immer/vector_transient.hpp203
57 files changed, 15927 insertions, 0 deletions
diff --git a/immer/algorithm.hpp b/immer/algorithm.hpp
new file mode 100644
index 000000000000..ecdc417e2b07
--- /dev/null
+++ b/immer/algorithm.hpp
@@ -0,0 +1,212 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <algorithm>
+#include <numeric>
+#include <type_traits>
+
+namespace immer {
+
+/**
+ * @defgroup algorithm
+ * @{
+ */
+
+/*@{*/
+// Right now these algorithms dispatch directly to the vector
+// implementations unconditionally.  This will be changed in the
+// future to support other kinds of containers.
+
+/*!
+ * Apply operation `fn` for every contiguous *chunk* of data in the
+ * range sequentially.  Each time, `Fn` is passed two `value_type`
+ * pointers describing a range over a part of the vector.  This allows
+ * iterating over the elements in the most efficient way.
+ *
+ * @rst
+ *
+ * .. tip:: This is a low level method. Most of the time, :doc:`other
+ *    wrapper algorithms <algorithms>` should be used instead.
+ *
+ * @endrst
+ */
+template <typename Range, typename Fn>
+void for_each_chunk(const Range& r, Fn&& fn)
+{
+    r.impl().for_each_chunk(std::forward<Fn>(fn));
+}
+
+template <typename Iterator, typename Fn>
+void for_each_chunk(const Iterator& first, const Iterator& last, Fn&& fn)
+{
+    assert(&first.impl() == &last.impl());
+    first.impl().for_each_chunk(
+        first.index(), last.index(), std::forward<Fn>(fn));
+}
+
+template <typename T, typename Fn>
+void for_each_chunk(const T* first, const T* last, Fn&& fn)
+{
+    std::forward<Fn>(fn)(first, last);
+}
+
+/*!
+ * Apply operation `fn` for every contiguous *chunk* of data in the
+ * range sequentially, until `fn` returns `false`.  Each time, `Fn` is
+ * passed two `value_type` pointers describing a range over a part of
+ * the vector.  This allows iterating over the elements in the most
+ * efficient way.
+ *
+ * @rst
+ *
+ * .. tip:: This is a low level method. Most of the time, :doc:`other
+ *    wrapper algorithms <algorithms>` should be used instead.
+ *
+ * @endrst
+ */
+template <typename Range, typename Fn>
+bool for_each_chunk_p(const Range& r, Fn&& fn)
+{
+    return r.impl().for_each_chunk_p(std::forward<Fn>(fn));
+}
+
+template <typename Iterator, typename Fn>
+bool for_each_chunk_p(const Iterator& first, const Iterator& last, Fn&& fn)
+{
+    assert(&first.impl() == &last.impl());
+    return first.impl().for_each_chunk_p(
+        first.index(), last.index(), std::forward<Fn>(fn));
+}
+
+template <typename T, typename Fn>
+bool for_each_chunk_p(const T* first, const T* last, Fn&& fn)
+{
+    return std::forward<Fn>(fn)(first, last);
+}
+
+/*!
+ * Equivalent of `std::accumulate` applied to the range `r`.
+ */
+template <typename Range, typename T>
+T accumulate(Range&& r, T init)
+{
+    for_each_chunk(r, [&](auto first, auto last) {
+        init = std::accumulate(first, last, init);
+    });
+    return init;
+}
+
+template <typename Range, typename T, typename Fn>
+T accumulate(Range&& r, T init, Fn fn)
+{
+    for_each_chunk(r, [&](auto first, auto last) {
+        init = std::accumulate(first, last, init, fn);
+    });
+    return init;
+}
+
+/*!
+ * Equivalent of `std::accumulate` applied to the range @f$ [first,
+ * last) @f$.
+ */
+template <typename Iterator, typename T>
+T accumulate(Iterator first, Iterator last, T init)
+{
+    for_each_chunk(first, last, [&](auto first, auto last) {
+        init = std::accumulate(first, last, init);
+    });
+    return init;
+}
+
+template <typename Iterator, typename T, typename Fn>
+T accumulate(Iterator first, Iterator last, T init, Fn fn)
+{
+    for_each_chunk(first, last, [&](auto first, auto last) {
+        init = std::accumulate(first, last, init, fn);
+    });
+    return init;
+}
+
+/*!
+ * Equivalent of `std::for_each` applied to the range `r`.
+ */
+template <typename Range, typename Fn>
+Fn&& for_each(Range&& r, Fn&& fn)
+{
+    for_each_chunk(r, [&](auto first, auto last) {
+        for (; first != last; ++first)
+            fn(*first);
+    });
+    return std::forward<Fn>(fn);
+}
+
+/*!
+ * Equivalent of `std::for_each` applied to the range @f$ [first,
+ * last) @f$.
+ */
+template <typename Iterator, typename Fn>
+Fn&& for_each(Iterator first, Iterator last, Fn&& fn)
+{
+    for_each_chunk(first, last, [&](auto first, auto last) {
+        for (; first != last; ++first)
+            fn(*first);
+    });
+    return std::forward<Fn>(fn);
+}
+
+/*!
+ * Equivalent of `std::copy` applied to the range `r`.
+ */
+template <typename Range, typename OutIter>
+OutIter copy(Range&& r, OutIter out)
+{
+    for_each_chunk(
+        r, [&](auto first, auto last) { out = std::copy(first, last, out); });
+    return out;
+}
+
+/*!
+ * Equivalent of `std::copy` applied to the range @f$ [first,
+ * last) @f$.
+ */
+template <typename InIter, typename OutIter>
+OutIter copy(InIter first, InIter last, OutIter out)
+{
+    for_each_chunk(first, last, [&](auto first, auto last) {
+        out = std::copy(first, last, out);
+    });
+    return out;
+}
+
+/*!
+ * Equivalent of `std::all_of` applied to the range `r`.
+ */
+template <typename Range, typename Pred>
+bool all_of(Range&& r, Pred p)
+{
+    return for_each_chunk_p(
+        r, [&](auto first, auto last) { return std::all_of(first, last, p); });
+}
+
+/*!
+ * Equivalent of `std::all_of` applied to the range @f$ [first, last)
+ * @f$.
+ */
+template <typename Iter, typename Pred>
+bool all_of(Iter first, Iter last, Pred p)
+{
+    return for_each_chunk_p(first, last, [&](auto first, auto last) {
+        return std::all_of(first, last, p);
+    });
+}
+
+/** @} */ // group: algorithm
+
+} // namespace immer
diff --git a/immer/array.hpp b/immer/array.hpp
new file mode 100644
index 000000000000..24c8f84c8bbf
--- /dev/null
+++ b/immer/array.hpp
@@ -0,0 +1,364 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/detail/arrays/with_capacity.hpp>
+#include <immer/memory_policy.hpp>
+
+namespace immer {
+
+template <typename T, typename MemoryPolicy>
+class array_transient;
+
+/*!
+ * Immutable container that stores a sequence of elements in
+ * contiguous memory.
+ *
+ * @tparam T The type of the values to be stored in the container.
+ *
+ * @rst
+ *
+ * It supports the most efficient iteration and random access,
+ * equivalent to a ``std::vector`` or ``std::array``, but all
+ * manipulations are :math:`O(size)`.
+ *
+ * .. tip:: Don't be fooled by the bad complexity of this data
+ *    structure.  It is a great choice for short sequence or when it
+ *    is seldom or never changed.  This depends on the ``sizeof(T)``
+ *    and the expensiveness of its ``T``'s copy constructor, in case
+ *    of doubt, measure.  For basic types, using an `array` when
+ *    :math:`n < 100` is a good heuristic.
+ *
+ * @endrst
+ */
+template <typename T, typename MemoryPolicy = default_memory_policy>
+class array
+{
+    using impl_t =
+        std::conditional_t<MemoryPolicy::use_transient_rvalues,
+                           detail::arrays::with_capacity<T, MemoryPolicy>,
+                           detail::arrays::no_capacity<T, MemoryPolicy>>;
+
+    using move_t =
+        std::integral_constant<bool, MemoryPolicy::use_transient_rvalues>;
+
+public:
+    using value_type      = T;
+    using reference       = const T&;
+    using size_type       = std::size_t;
+    using difference_type = std::ptrdiff_t;
+    using const_reference = const T&;
+
+    using iterator         = const T*;
+    using const_iterator   = iterator;
+    using reverse_iterator = std::reverse_iterator<iterator>;
+
+    using memory_policy  = MemoryPolicy;
+    using transient_type = array_transient<T, MemoryPolicy>;
+
+    /*!
+     * Default constructor.  It creates an array of `size() == 0`.  It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    array() = default;
+
+    /*!
+     * Constructs an array containing the elements in `values`.
+     */
+    array(std::initializer_list<T> values)
+        : impl_{impl_t::from_initializer_list(values)}
+    {}
+
+    /*!
+     * Constructs an array containing the elements in the range
+     * defined by the forward iterator `first` and range sentinel `last`.
+     */
+    template <typename Iter,
+              typename Sent,
+              std::enable_if_t<detail::compatible_sentinel_v<Iter, Sent> &&
+                                   detail::is_forward_iterator_v<Iter>,
+                               bool> = true>
+    array(Iter first, Sent last)
+        : impl_{impl_t::from_range(first, last)}
+    {}
+
+    /*!
+     * Constructs an array containing the element `val` repeated `n`
+     * times.
+     */
+    array(size_type n, T v = {})
+        : impl_{impl_t::from_fill(n, v)}
+    {}
+
+    /*!
+     * Returns an iterator pointing at the first element of the
+     * collection. It does not allocate memory and its complexity is
+     * @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD iterator begin() const { return impl_.data(); }
+
+    /*!
+     * Returns an iterator pointing just after the last element of the
+     * collection. It does not allocate and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD iterator end() const { return impl_.data() + impl_.size; }
+
+    /*!
+     * Returns an iterator that traverses the collection backwards,
+     * pointing at the first element of the reversed collection. It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD reverse_iterator rbegin() const
+    {
+        return reverse_iterator{end()};
+    }
+
+    /*!
+     * Returns an iterator that traverses the collection backwards,
+     * pointing after the last element of the reversed collection. It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD reverse_iterator rend() const
+    {
+        return reverse_iterator{begin()};
+    }
+
+    /*!
+     * Returns the number of elements in the container.  It does
+     * not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD std::size_t size() const { return impl_.size; }
+
+    /*!
+     * Returns `true` if there are no elements in the container.  It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD bool empty() const { return impl_.size == 0; }
+
+    /*!
+     * Access the raw data.
+     */
+    IMMER_NODISCARD const T* data() const { return impl_.data(); }
+
+    /*!
+     * Access the last element.
+     */
+    IMMER_NODISCARD const T& back() const { return data()[size() - 1]; }
+
+    /*!
+     * Access the first element.
+     */
+    IMMER_NODISCARD const T& front() const { return data()[0]; }
+
+    /*!
+     * Returns a `const` reference to the element at position `index`.
+     * It is undefined when @f$ index \geq size() @f$.  It does not
+     * allocate memory and its complexity is *effectively* @f$ O(1)
+     * @f$.
+     */
+    IMMER_NODISCARD reference operator[](size_type index) const
+    {
+        return impl_.get(index);
+    }
+
+    /*!
+     * Returns a `const` reference to the element at position
+     * `index`. It throws an `std::out_of_range` exception when @f$
+     * index \geq size() @f$.  It does not allocate memory and its
+     * complexity is *effectively* @f$ O(1) @f$.
+     */
+    reference at(size_type index) const { return impl_.get_check(index); }
+
+    /*!
+     * Returns whether the vectors are equal.
+     */
+    IMMER_NODISCARD bool operator==(const array& other) const
+    {
+        return impl_.equals(other.impl_);
+    }
+    IMMER_NODISCARD bool operator!=(const array& other) const
+    {
+        return !(*this == other);
+    }
+
+    /*!
+     * Returns an array with `value` inserted at the end.  It may
+     * allocate memory and its complexity is @f$ O(size) @f$.
+     *
+     * @rst
+     *
+     * **Example**
+     *   .. literalinclude:: ../example/array/array.cpp
+     *      :language: c++
+     *      :dedent: 8
+     *      :start-after: push-back/start
+     *      :end-before:  push-back/end
+     *
+     * @endrst
+     */
+    IMMER_NODISCARD array push_back(value_type value) const&
+    {
+        return impl_.push_back(std::move(value));
+    }
+
+    IMMER_NODISCARD decltype(auto) push_back(value_type value) &&
+    {
+        return push_back_move(move_t{}, std::move(value));
+    }
+
+    /*!
+     * Returns an array containing value `value` at position `idx`.
+     * Undefined for `index >= size()`.
+     * It may allocate memory and its complexity is @f$ O(size) @f$.
+     *
+     * @rst
+     *
+     * **Example**
+     *   .. literalinclude:: ../example/array/array.cpp
+     *      :language: c++
+     *      :dedent: 8
+     *      :start-after: set/start
+     *      :end-before:  set/end
+     *
+     * @endrst
+     */
+    IMMER_NODISCARD array set(std::size_t index, value_type value) const&
+    {
+        return impl_.assoc(index, std::move(value));
+    }
+
+    IMMER_NODISCARD decltype(auto) set(size_type index, value_type value) &&
+    {
+        return set_move(move_t{}, index, std::move(value));
+    }
+
+    /*!
+     * Returns an array containing the result of the expression
+     * `fn((*this)[idx])` at position `idx`.
+     * Undefined for `index >= size()`.
+     * It may allocate memory and its complexity is @f$ O(size) @f$.
+     *
+     * @rst
+     *
+     * **Example**
+     *   .. literalinclude:: ../example/array/array.cpp
+     *      :language: c++
+     *      :dedent: 8
+     *      :start-after: update/start
+     *      :end-before:  update/end
+     *
+     * @endrst
+     */
+    template <typename FnT>
+    IMMER_NODISCARD array update(std::size_t index, FnT&& fn) const&
+    {
+        return impl_.update(index, std::forward<FnT>(fn));
+    }
+
+    template <typename FnT>
+    IMMER_NODISCARD decltype(auto) update(size_type index, FnT&& fn) &&
+    {
+        return update_move(move_t{}, index, std::forward<FnT>(fn));
+    }
+
+    /*!
+     * Returns an array containing only the first `min(elems, size())`
+     * elements. It may allocate memory and its complexity is
+     * *effectively* @f$ O(1) @f$.
+     *
+     * @rst
+     *
+     * **Example**
+     *   .. literalinclude:: ../example/array/array.cpp
+     *      :language: c++
+     *      :dedent: 8
+     *      :start-after: take/start
+     *      :end-before:  take/end
+     *
+     * @endrst
+     */
+    IMMER_NODISCARD array take(size_type elems) const&
+    {
+        return impl_.take(elems);
+    }
+
+    IMMER_NODISCARD decltype(auto) take(size_type elems) &&
+    {
+        return take_move(move_t{}, elems);
+    }
+
+    /*!
+     * Returns an @a transient form of this container, an
+     * `immer::array_transient`.
+     */
+    IMMER_NODISCARD transient_type transient() const&
+    {
+        return transient_type{impl_};
+    }
+    IMMER_NODISCARD transient_type transient() &&
+    {
+        return transient_type{std::move(impl_)};
+    }
+
+    // Semi-private
+    const impl_t& impl() const { return impl_; }
+
+private:
+    friend transient_type;
+
+    array(impl_t impl)
+        : impl_(std::move(impl))
+    {}
+
+    array&& push_back_move(std::true_type, value_type value)
+    {
+        impl_.push_back_mut({}, std::move(value));
+        return std::move(*this);
+    }
+    array push_back_move(std::false_type, value_type value)
+    {
+        return impl_.push_back(std::move(value));
+    }
+
+    array&& set_move(std::true_type, size_type index, value_type value)
+    {
+        impl_.assoc_mut({}, index, std::move(value));
+        return std::move(*this);
+    }
+    array set_move(std::false_type, size_type index, value_type value)
+    {
+        return impl_.assoc(index, std::move(value));
+    }
+
+    template <typename Fn>
+    array&& update_move(std::true_type, size_type index, Fn&& fn)
+    {
+        impl_.update_mut({}, index, std::forward<Fn>(fn));
+        return std::move(*this);
+    }
+    template <typename Fn>
+    array update_move(std::false_type, size_type index, Fn&& fn)
+    {
+        return impl_.update(index, std::forward<Fn>(fn));
+    }
+
+    array&& take_move(std::true_type, size_type elems)
+    {
+        impl_.take_mut({}, elems);
+        return std::move(*this);
+    }
+    array take_move(std::false_type, size_type elems)
+    {
+        return impl_.take(elems);
+    }
+
+    impl_t impl_ = impl_t::empty();
+};
+
+} /* namespace immer */
diff --git a/immer/array_transient.hpp b/immer/array_transient.hpp
new file mode 100644
index 000000000000..bc2d1a5b7b2c
--- /dev/null
+++ b/immer/array_transient.hpp
@@ -0,0 +1,202 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/detail/arrays/with_capacity.hpp>
+#include <immer/memory_policy.hpp>
+
+namespace immer {
+
+template <typename T, typename MemoryPolicy>
+class array;
+
+/*!
+ * Mutable version of `immer::array`.
+ *
+ * @rst
+ *
+ * Refer to :doc:`transients` to learn more about when and how to use
+ * the mutable versions of immutable containers.
+ *
+ * @endrst
+ */
+template <typename T, typename MemoryPolicy = default_memory_policy>
+class array_transient : MemoryPolicy::transience_t::owner
+{
+    using impl_t             = detail::arrays::with_capacity<T, MemoryPolicy>;
+    using impl_no_capacity_t = detail::arrays::no_capacity<T, MemoryPolicy>;
+    using owner_t            = typename MemoryPolicy::transience_t::owner;
+
+public:
+    using value_type      = T;
+    using reference       = const T&;
+    using size_type       = std::size_t;
+    using difference_type = std::ptrdiff_t;
+    using const_reference = const T&;
+
+    using iterator         = const T*;
+    using const_iterator   = iterator;
+    using reverse_iterator = std::reverse_iterator<iterator>;
+
+    using memory_policy   = MemoryPolicy;
+    using persistent_type = array<T, MemoryPolicy>;
+
+    /*!
+     * Default constructor.  It creates a mutable array of `size() ==
+     * 0`.  It does not allocate memory and its complexity is
+     * @f$ O(1) @f$.
+     */
+    array_transient() = default;
+
+    /*!
+     * Returns an iterator pointing at the first element of the
+     * collection. It does not allocate memory and its complexity is
+     * @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD iterator begin() const { return impl_.data(); }
+
+    /*!
+     * Returns an iterator pointing just after the last element of the
+     * collection. It does not allocate and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD iterator end() const { return impl_.data() + impl_.size; }
+
+    /*!
+     * Returns an iterator that traverses the collection backwards,
+     * pointing at the first element of the reversed collection. It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD reverse_iterator rbegin() const
+    {
+        return reverse_iterator{end()};
+    }
+
+    /*!
+     * Returns an iterator that traverses the collection backwards,
+     * pointing after the last element of the reversed collection. It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD reverse_iterator rend() const
+    {
+        return reverse_iterator{begin()};
+    }
+
+    /*!
+     * Returns the number of elements in the container.  It does
+     * not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD std::size_t size() const { return impl_.size; }
+
+    /*!
+     * Returns `true` if there are no elements in the container.  It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD bool empty() const { return impl_.size == 0; }
+
+    /*!
+     * Access the raw data.
+     */
+    IMMER_NODISCARD const T* data() const { return impl_.data(); }
+
+    /*!
+     * Provide mutable access to the raw underlying data.
+     */
+    IMMER_NODISCARD T* data_mut() { return impl_.data_mut(*this); }
+
+    /*!
+     * Access the last element.
+     */
+    IMMER_NODISCARD const T& back() const { return data()[size() - 1]; }
+
+    /*!
+     * Access the first element.
+     */
+    IMMER_NODISCARD const T& front() const { return data()[0]; }
+
+    /*!
+     * Returns a `const` reference to the element at position `index`.
+     * It is undefined when @f$ index \geq size() @f$.  It does not
+     * allocate memory and its complexity is *effectively* @f$ O(1)
+     * @f$.
+     */
+    reference operator[](size_type index) const { return impl_.get(index); }
+
+    /*!
+     * Returns a `const` reference to the element at position
+     * `index`. It throws an `std::out_of_range` exception when @f$
+     * index \geq size() @f$.  It does not allocate memory and its
+     * complexity is *effectively* @f$ O(1) @f$.
+     */
+    reference at(size_type index) const { return impl_.get_check(index); }
+
+    /*!
+     * Inserts `value` at the end.  It may allocate memory and its
+     * complexity is *effectively* @f$ O(1) @f$.
+     */
+    void push_back(value_type value)
+    {
+        impl_.push_back_mut(*this, std::move(value));
+    }
+
+    /*!
+     * Sets to the value `value` at position `idx`.
+     * Undefined for `index >= size()`.
+     * It may allocate memory and its complexity is
+     * *effectively* @f$ O(1) @f$.
+     */
+    void set(size_type index, value_type value)
+    {
+        impl_.assoc_mut(*this, index, std::move(value));
+    }
+
+    /*!
+     * Updates the array to contain the result of the expression
+     * `fn((*this)[idx])` at position `idx`.
+     * Undefined for `index >= size()`.
+     * It may allocate memory and its complexity is
+     * *effectively* @f$ O(1) @f$.
+     */
+    template <typename FnT>
+    void update(size_type index, FnT&& fn)
+    {
+        impl_.update_mut(*this, index, std::forward<FnT>(fn));
+    }
+
+    /*!
+     * Resizes the array to only contain the first `min(elems, size())`
+     * elements. It may allocate memory and its complexity is
+     * *effectively* @f$ O(1) @f$.
+     */
+    void take(size_type elems) { impl_.take_mut(*this, elems); }
+
+    /*!
+     * Returns an @a immutable form of this container, an
+     * `immer::array`.
+     */
+    IMMER_NODISCARD persistent_type persistent() &
+    {
+        this->owner_t::operator=(owner_t{});
+        return persistent_type{impl_};
+    }
+    IMMER_NODISCARD persistent_type persistent() &&
+    {
+        return persistent_type{std::move(impl_)};
+    }
+
+private:
+    friend persistent_type;
+
+    array_transient(impl_t impl)
+        : impl_(std::move(impl))
+    {}
+
+    impl_t impl_ = impl_t::empty();
+};
+
+} // namespace immer
diff --git a/immer/atom.hpp b/immer/atom.hpp
new file mode 100644
index 000000000000..f3ebb5aa1c0e
--- /dev/null
+++ b/immer/atom.hpp
@@ -0,0 +1,254 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/box.hpp>
+#include <immer/refcount/no_refcount_policy.hpp>
+
+#include <atomic>
+#include <type_traits>
+
+namespace immer {
+
+namespace detail {
+
+template <typename T, typename MemoryPolicy>
+struct refcount_atom_impl
+{
+    using box_type      = box<T, MemoryPolicy>;
+    using value_type    = T;
+    using memory_policy = MemoryPolicy;
+    using spinlock_t    = typename MemoryPolicy::refcount::spinlock_type;
+    using scoped_lock_t = typename spinlock_t::scoped_lock;
+
+    refcount_atom_impl(const refcount_atom_impl&) = delete;
+    refcount_atom_impl(refcount_atom_impl&&)      = delete;
+    refcount_atom_impl& operator=(const refcount_atom_impl&) = delete;
+    refcount_atom_impl& operator=(refcount_atom_impl&&) = delete;
+
+    refcount_atom_impl(box_type b)
+        : impl_{std::move(b)}
+    {}
+
+    box_type load() const
+    {
+        scoped_lock_t lock{lock_};
+        return impl_;
+    }
+
+    void store(box_type b)
+    {
+        scoped_lock_t lock{lock_};
+        impl_ = std::move(b);
+    }
+
+    box_type exchange(box_type b)
+    {
+        {
+            scoped_lock_t lock{lock_};
+            swap(b, impl_);
+        }
+        return b;
+    }
+
+    template <typename Fn>
+    box_type update(Fn&& fn)
+    {
+        while (true) {
+            auto oldv = load();
+            auto newv = oldv.update(fn);
+            {
+                scoped_lock_t lock{lock_};
+                if (oldv.impl_ == impl_.impl_) {
+                    impl_ = newv;
+                    return {newv};
+                }
+            }
+        }
+    }
+
+private:
+    mutable spinlock_t lock_;
+    box_type impl_;
+};
+
+template <typename T, typename MemoryPolicy>
+struct gc_atom_impl
+{
+    using box_type      = box<T, MemoryPolicy>;
+    using value_type    = T;
+    using memory_policy = MemoryPolicy;
+
+    static_assert(std::is_same<typename MemoryPolicy::refcount,
+                               no_refcount_policy>::value,
+                  "gc_atom_impl can only be used when there is no refcount!");
+
+    gc_atom_impl(const gc_atom_impl&) = delete;
+    gc_atom_impl(gc_atom_impl&&)      = delete;
+    gc_atom_impl& operator=(const gc_atom_impl&) = delete;
+    gc_atom_impl& operator=(gc_atom_impl&&) = delete;
+
+    gc_atom_impl(box_type b)
+        : impl_{b.impl_}
+    {}
+
+    box_type load() const { return {impl_.load()}; }
+
+    void store(box_type b) { impl_.store(b.impl_); }
+
+    box_type exchange(box_type b) { return {impl_.exchange(b.impl_)}; }
+
+    template <typename Fn>
+    box_type update(Fn&& fn)
+    {
+        while (true) {
+            auto oldv = box_type{impl_.load()};
+            auto newv = oldv.update(fn);
+            if (impl_.compare_exchange_weak(oldv.impl_, newv.impl_))
+                return {newv};
+        }
+    }
+
+private:
+    std::atomic<typename box_type::holder*> impl_;
+};
+
+} // namespace detail
+
+/*!
+ * Stores for boxed values of type `T` in a thread-safe manner.
+ *
+ * @see box
+ *
+ * @rst
+ *
+ * .. warning:: If the memory policy used includes thread-unsafe reference
+ *    counting, no thread safety is assumed, and the atom becomes thread unsafe too!
+ *
+ * .. note:: ``box<T>`` provides a value-based box of type ``T``, that is, we
+ *    can think about it as a value-based version of ``std::shared_ptr``.  In a
+ *    similar fashion, ``atom<T>`` is in spirit the value-based equivalent of
+ *    C++20 ``std::atomic_shared_ptr``.  However, the API does not follow
+ *    ``std::atomic`` interface closely, since it attempts to be a higher level
+ *    construction, most similar to Clojure's ``(atom)``.  It is remarkable in
+ *    particular that, since ``box<T>`` underlying object is immutable, using
+ *    ``atom<T>`` is fully thread-safe in ways that ``std::atomic_shared_ptr`` is
+ *    not. This is so because dereferencing the underlying pointer in a
+ *    ``std::atomic_shared_ptr`` may require further synchronization, in
+ *    particular when invoking non-const methods.
+ *
+ * @endrst
+ */
+template <typename T, typename MemoryPolicy = default_memory_policy>
+class atom
+{
+public:
+    using box_type      = box<T, MemoryPolicy>;
+    using value_type    = T;
+    using memory_policy = MemoryPolicy;
+
+    atom(const atom&) = delete;
+    atom(atom&&)      = delete;
+    void operator=(const atom&) = delete;
+    void operator=(atom&&) = delete;
+
+    /*!
+     * Constructs an atom holding the value `v`.
+     */
+    atom(box_type v = {})
+        : impl_{std::move(v)}
+    {}
+
+    /*!
+     * Sets a new value in the atom.
+     */
+    atom& operator=(box_type b)
+    {
+        impl_.store(std::move(b));
+        return *this;
+    }
+
+    /*!
+     * Reads the currently stored value in a thread-safe manner.
+     */
+    operator box_type() const { return impl_.load(); }
+
+    /*!
+     * Reads the currently stored value in a thread-safe manner.
+     */
+    operator value_type() const { return *impl_.load(); }
+
+    /*!
+     * Reads the currently stored value in a thread-safe manner.
+     */
+    IMMER_NODISCARD box_type load() const { return impl_.load(); }
+
+    /*!
+     * Stores a new value in a thread-safe manner.
+     */
+    void store(box_type b) { impl_.store(std::move(b)); }
+
+    /*!
+     * Stores a new value and returns the old value, in a thread-safe manner.
+     */
+    IMMER_NODISCARD box_type exchange(box_type b)
+    {
+        return impl_.exchange(std::move(b));
+    }
+
+    /*!
+     * Stores the result of applying `fn` to the current value atomically and
+     * returns the new resulting value.
+     *
+     * @rst
+     *
+     * .. warning:: ``fn`` must be a pure function and have no side effects! The
+     *    function might be evaluated multiple times when multiple threads
+     *    contend to update the value.
+     *
+     * @endrst
+     */
+    template <typename Fn>
+    box_type update(Fn&& fn)
+    {
+        return impl_.update(std::forward<Fn>(fn));
+    }
+
+private:
+    struct get_refcount_atom_impl
+    {
+        template <typename U, typename MP>
+        struct apply
+        {
+            using type = detail::refcount_atom_impl<U, MP>;
+        };
+    };
+
+    struct get_gc_atom_impl
+    {
+        template <typename U, typename MP>
+        struct apply
+        {
+            using type = detail::gc_atom_impl<U, MP>;
+        };
+    };
+
+    // If we are using "real" garbage collection (we assume this when we use
+    // `no_refcount_policy`), we just store the pointer in an atomic.  If we use
+    // reference counting, we rely on the reference counting spinlock.
+    using impl_t = typename std::conditional_t<
+        std::is_same<typename MemoryPolicy::refcount,
+                     no_refcount_policy>::value,
+        get_gc_atom_impl,
+        get_refcount_atom_impl>::template apply<T, MemoryPolicy>::type;
+
+    impl_t impl_;
+};
+
+} // namespace immer
diff --git a/immer/box.hpp b/immer/box.hpp
new file mode 100644
index 000000000000..0fd2961272ca
--- /dev/null
+++ b/immer/box.hpp
@@ -0,0 +1,194 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/detail/util.hpp>
+#include <immer/memory_policy.hpp>
+
+namespace immer {
+
+namespace detail {
+
+template <typename U, typename MP>
+struct gc_atom_impl;
+
+template <typename U, typename MP>
+struct refcount_atom_impl;
+
+} // namespace detail
+
+/*!
+ * Immutable box for a single value of type `T`.
+ *
+ * The box is always copyable and movable. The `T` copy or move
+ * operations are never called.  Since a box is immutable, copying or
+ * moving just copies the underlying pointer.
+ */
+template <typename T, typename MemoryPolicy = default_memory_policy>
+class box
+{
+    friend struct detail::gc_atom_impl<T, MemoryPolicy>;
+    friend struct detail::refcount_atom_impl<T, MemoryPolicy>;
+
+    struct holder : MemoryPolicy::refcount
+    {
+        T value;
+
+        template <typename... Args>
+        holder(Args&&... args)
+            : value{std::forward<Args>(args)...}
+        {}
+    };
+
+    using heap = typename MemoryPolicy::heap::type;
+
+    holder* impl_ = nullptr;
+
+    box(holder* impl)
+        : impl_{impl}
+    {}
+
+public:
+    using value_type    = T;
+    using memory_policy = MemoryPolicy;
+
+    /*!
+     * Constructs a box holding `T{}`.
+     */
+    box()
+        : impl_{detail::make<heap, holder>()}
+    {}
+
+    /*!
+     * Constructs a box holding `T{arg}`
+     */
+    template <typename Arg,
+              typename Enable = std::enable_if_t<
+                  !std::is_same<box, std::decay_t<Arg>>::value &&
+                  std::is_constructible<T, Arg>::value>>
+    box(Arg&& arg)
+        : impl_{detail::make<heap, holder>(std::forward<Arg>(arg))}
+    {}
+
+    /*!
+     * Constructs a box holding `T{arg1, arg2, args...}`
+     */
+    template <typename Arg1, typename Arg2, typename... Args>
+    box(Arg1&& arg1, Arg2&& arg2, Args&&... args)
+        : impl_{detail::make<heap, holder>(std::forward<Arg1>(arg1),
+                                           std::forward<Arg2>(arg2),
+                                           std::forward<Args>(args)...)}
+    {}
+
+    friend void swap(box& a, box& b)
+    {
+        using std::swap;
+        swap(a.impl_, b.impl_);
+    }
+
+    box(box&& other) { swap(*this, other); }
+    box(const box& other)
+        : impl_(other.impl_)
+    {
+        impl_->inc();
+    }
+    box& operator=(box&& other)
+    {
+        swap(*this, other);
+        return *this;
+    }
+    box& operator=(const box& other)
+    {
+        auto aux = other;
+        swap(*this, aux);
+        return *this;
+    }
+    ~box()
+    {
+        if (impl_ && impl_->dec()) {
+            impl_->~holder();
+            heap::deallocate(sizeof(holder), impl_);
+        }
+    }
+
+    /*! Query the current value. */
+    IMMER_NODISCARD const T& get() const { return impl_->value; }
+
+    /*! Conversion to the boxed type. */
+    operator const T&() const { return get(); }
+
+    /*! Access via dereference */
+    const T& operator*() const { return get(); }
+
+    /*! Access via pointer member access */
+    const T* operator->() const { return &get(); }
+
+    /*! Comparison. */
+    IMMER_NODISCARD bool operator==(detail::exact_t<const box&> other) const
+    {
+        return impl_ == other.value.impl_ || get() == other.value.get();
+    }
+    // Note that the `exact_t` disambiguates comparisons against `T{}`
+    // directly.  In that case we want to use `operator const T&` and
+    // compare directly.  We definitely never want to convert a value
+    // to a box (which causes an allocation) just to compare it.
+    IMMER_NODISCARD bool operator!=(detail::exact_t<const box&> other) const
+    {
+        return !(*this == other.value);
+    }
+    IMMER_NODISCARD bool operator<(detail::exact_t<const box&> other) const
+    {
+        return get() < other.value.get();
+    }
+
+    /*!
+     * Returns a new box built by applying the `fn` to the underlying
+     * value.
+     *
+     * @rst
+     *
+     * **Example**
+     *   .. literalinclude:: ../example/box/box.cpp
+     *      :language: c++
+     *      :dedent: 8
+     *      :start-after: update/start
+     *      :end-before:  update/end
+     *
+     * @endrst
+     */
+    template <typename Fn>
+    IMMER_NODISCARD box update(Fn&& fn) const&
+    {
+        return std::forward<Fn>(fn)(get());
+    }
+    template <typename Fn>
+    IMMER_NODISCARD box&& update(Fn&& fn) &&
+    {
+        if (impl_->unique())
+            impl_->value = std::forward<Fn>(fn)(std::move(impl_->value));
+        else
+            *this = std::forward<Fn>(fn)(impl_->value);
+        return std::move(*this);
+    }
+};
+
+} // namespace immer
+
+namespace std {
+
+template <typename T, typename MP>
+struct hash<immer::box<T, MP>>
+{
+    std::size_t operator()(const immer::box<T, MP>& x) const
+    {
+        return std::hash<T>{}(*x);
+    }
+};
+
+} // namespace std
diff --git a/immer/config.hpp b/immer/config.hpp
new file mode 100644
index 000000000000..581e905a4de3
--- /dev/null
+++ b/immer/config.hpp
@@ -0,0 +1,93 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#if defined(__has_cpp_attribute)
+#if __has_cpp_attribute(nodiscard)
+#define IMMER_NODISCARD [[nodiscard]]
+#endif
+#else
+#if _MSVC_LANG >= 201703L
+#define IMMER_NODISCARD [[nodiscard]]
+#endif
+#endif
+
+#ifndef IMMER_NODISCARD
+#define IMMER_NODISCARD
+#endif
+
+#ifndef IMMER_TAGGED_NODE
+#ifdef NDEBUG
+#define IMMER_TAGGED_NODE 0
+#else
+#define IMMER_TAGGED_NODE 1
+#endif
+#endif
+
+#if IMMER_TAGGED_NODE
+#define IMMER_ASSERT_TAGGED(assertion) assert(assertion)
+#else
+#define IMMER_ASSERT_TAGGED(assertion)
+#endif
+
+#ifndef IMMER_DEBUG_TRACES
+#define IMMER_DEBUG_TRACES 0
+#endif
+
+#ifndef IMMER_DEBUG_PRINT
+#define IMMER_DEBUG_PRINT 0
+#endif
+
+#ifndef IMMER_DEBUG_DEEP_CHECK
+#define IMMER_DEBUG_DEEP_CHECK 0
+#endif
+
+#if IMMER_DEBUG_TRACES || IMMER_DEBUG_PRINT
+#include <iostream>
+#include <prettyprint.hpp>
+#endif
+
+#if IMMER_DEBUG_TRACES
+#define IMMER_TRACE(...) std::cout << __VA_ARGS__ << std::endl
+#else
+#define IMMER_TRACE(...)
+#endif
+#define IMMER_TRACE_F(...)                                                     \
+    IMMER_TRACE(__FILE__ << ":" << __LINE__ << ": " << __VA_ARGS__)
+#define IMMER_TRACE_E(expr) IMMER_TRACE("    " << #expr << " = " << (expr))
+
+#if defined(_MSC_VER)
+#define IMMER_UNREACHABLE __assume(false)
+#define IMMER_LIKELY(cond) cond
+#define IMMER_UNLIKELY(cond) cond
+#define IMMER_FORCEINLINE __forceinline
+#define IMMER_PREFETCH(p)
+#else
+#define IMMER_UNREACHABLE __builtin_unreachable()
+#define IMMER_LIKELY(cond) __builtin_expect(!!(cond), 1)
+#define IMMER_UNLIKELY(cond) __builtin_expect(!!(cond), 0)
+#define IMMER_FORCEINLINE inline __attribute__((always_inline))
+#define IMMER_PREFETCH(p)
+// #define IMMER_PREFETCH(p)    __builtin_prefetch(p)
+#endif
+
+#define IMMER_DESCENT_DEEP 0
+
+#ifdef NDEBUG
+#define IMMER_ENABLE_DEBUG_SIZE_HEAP 0
+#else
+#define IMMER_ENABLE_DEBUG_SIZE_HEAP 1
+#endif
+
+namespace immer {
+
+const auto default_bits           = 5;
+const auto default_free_list_size = 1 << 10;
+
+} // namespace immer
diff --git a/immer/detail/arrays/no_capacity.hpp b/immer/detail/arrays/no_capacity.hpp
new file mode 100644
index 000000000000..9cb561e14bc1
--- /dev/null
+++ b/immer/detail/arrays/no_capacity.hpp
@@ -0,0 +1,203 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/algorithm.hpp>
+#include <immer/detail/arrays/node.hpp>
+
+namespace immer {
+namespace detail {
+namespace arrays {
+
+template <typename T, typename MemoryPolicy>
+struct no_capacity
+{
+    using node_t = node<T, MemoryPolicy>;
+    using edit_t = typename MemoryPolicy::transience_t::edit;
+    using size_t = std::size_t;
+
+    node_t* ptr;
+    size_t size;
+
+    static const no_capacity& empty()
+    {
+        static const no_capacity empty_{
+            node_t::make_n(0),
+            0,
+        };
+        return empty_;
+    }
+
+    no_capacity(node_t* p, size_t s)
+        : ptr{p}
+        , size{s}
+    {}
+
+    no_capacity(const no_capacity& other)
+        : no_capacity{other.ptr, other.size}
+    {
+        inc();
+    }
+
+    no_capacity(no_capacity&& other)
+        : no_capacity{empty()}
+    {
+        swap(*this, other);
+    }
+
+    no_capacity& operator=(const no_capacity& other)
+    {
+        auto next = other;
+        swap(*this, next);
+        return *this;
+    }
+
+    no_capacity& operator=(no_capacity&& other)
+    {
+        swap(*this, other);
+        return *this;
+    }
+
+    friend void swap(no_capacity& x, no_capacity& y)
+    {
+        using std::swap;
+        swap(x.ptr, y.ptr);
+        swap(x.size, y.size);
+    }
+
+    ~no_capacity() { dec(); }
+
+    void inc()
+    {
+        using immer::detail::get;
+        ptr->refs().inc();
+    }
+
+    void dec()
+    {
+        using immer::detail::get;
+        if (ptr->refs().dec())
+            node_t::delete_n(ptr, size, size);
+    }
+
+    T* data() { return ptr->data(); }
+    const T* data() const { return ptr->data(); }
+
+    T* data_mut(edit_t e)
+    {
+        if (!ptr->can_mutate(e))
+            ptr = node_t::copy_e(e, size, ptr, size);
+        return data();
+    }
+
+    template <typename Iter,
+              typename Sent,
+              std::enable_if_t<is_forward_iterator_v<Iter> &&
+                                   compatible_sentinel_v<Iter, Sent>,
+                               bool> = true>
+    static no_capacity from_range(Iter first, Sent last)
+    {
+        auto count = static_cast<size_t>(distance(first, last));
+        if (count == 0)
+            return empty();
+        else
+            return {
+                node_t::copy_n(count, first, last),
+                count,
+            };
+    }
+
+    static no_capacity from_fill(size_t n, T v)
+    {
+        return {node_t::fill_n(n, v), n};
+    }
+
+    template <typename U>
+    static no_capacity from_initializer_list(std::initializer_list<U> values)
+    {
+        using namespace std;
+        return from_range(begin(values), end(values));
+    }
+
+    template <typename Fn>
+    void for_each_chunk(Fn&& fn) const
+    {
+        std::forward<Fn>(fn)(data(), data() + size);
+    }
+
+    template <typename Fn>
+    bool for_each_chunk_p(Fn&& fn) const
+    {
+        return std::forward<Fn>(fn)(data(), data() + size);
+    }
+
+    const T& get(std::size_t index) const { return data()[index]; }
+
+    const T& get_check(std::size_t index) const
+    {
+        if (index >= size)
+            throw std::out_of_range{"out of range"};
+        return data()[index];
+    }
+
+    bool equals(const no_capacity& other) const
+    {
+        return ptr == other.ptr ||
+               (size == other.size &&
+                std::equal(data(), data() + size, other.data()));
+    }
+
+    no_capacity push_back(T value) const
+    {
+        auto p = node_t::copy_n(size + 1, ptr, size);
+        try {
+            new (p->data() + size) T{std::move(value)};
+            return {p, size + 1};
+        } catch (...) {
+            node_t::delete_n(p, size, size + 1);
+            throw;
+        }
+    }
+
+    no_capacity assoc(std::size_t idx, T value) const
+    {
+        auto p = node_t::copy_n(size, ptr, size);
+        try {
+            p->data()[idx] = std::move(value);
+            return {p, size};
+        } catch (...) {
+            node_t::delete_n(p, size, size);
+            throw;
+        }
+    }
+
+    template <typename Fn>
+    no_capacity update(std::size_t idx, Fn&& op) const
+    {
+        auto p = node_t::copy_n(size, ptr, size);
+        try {
+            auto& elem = p->data()[idx];
+            elem       = std::forward<Fn>(op)(std::move(elem));
+            return {p, size};
+        } catch (...) {
+            node_t::delete_n(p, size, size);
+            throw;
+        }
+    }
+
+    no_capacity take(std::size_t sz) const
+    {
+        auto p = node_t::copy_n(sz, ptr, sz);
+        return {p, sz};
+    }
+};
+
+} // namespace arrays
+} // namespace detail
+} // namespace immer
diff --git a/immer/detail/arrays/node.hpp b/immer/detail/arrays/node.hpp
new file mode 100644
index 000000000000..f96a63a9f4af
--- /dev/null
+++ b/immer/detail/arrays/node.hpp
@@ -0,0 +1,127 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/detail/combine_standard_layout.hpp>
+#include <immer/detail/type_traits.hpp>
+#include <immer/detail/util.hpp>
+
+#include <limits>
+
+namespace immer {
+namespace detail {
+namespace arrays {
+
+template <typename T, typename MemoryPolicy>
+struct node
+{
+    using memory     = MemoryPolicy;
+    using heap       = typename MemoryPolicy::heap::type;
+    using transience = typename memory::transience_t;
+    using refs_t     = typename memory::refcount;
+    using ownee_t    = typename transience::ownee;
+    using node_t     = node;
+    using edit_t     = typename transience::edit;
+
+    struct data_t
+    {
+        aligned_storage_for<T> buffer;
+    };
+
+    using impl_t = combine_standard_layout_t<data_t, refs_t, ownee_t>;
+
+    impl_t impl;
+
+    constexpr static std::size_t sizeof_n(size_t count)
+    {
+        return immer_offsetof(impl_t, d.buffer) +
+               sizeof(T) * (count == 0 ? 1 : count);
+    }
+
+    refs_t& refs() const { return auto_const_cast(get<refs_t>(impl)); }
+
+    const ownee_t& ownee() const { return get<ownee_t>(impl); }
+    ownee_t& ownee() { return get<ownee_t>(impl); }
+
+    const T* data() const { return reinterpret_cast<const T*>(&impl.d.buffer); }
+    T* data() { return reinterpret_cast<T*>(&impl.d.buffer); }
+
+    bool can_mutate(edit_t e) const
+    {
+        return refs().unique() || ownee().can_mutate(e);
+    }
+
+    static void delete_n(node_t* p, size_t sz, size_t cap)
+    {
+        destroy_n(p->data(), sz);
+        heap::deallocate(sizeof_n(cap), p);
+    }
+
+    static node_t* make_n(size_t n)
+    {
+        return new (heap::allocate(sizeof_n(n))) node_t{};
+    }
+
+    static node_t* make_e(edit_t e, size_t n)
+    {
+        auto p     = make_n(n);
+        p->ownee() = e;
+        return p;
+    }
+
+    static node_t* fill_n(size_t n, T v)
+    {
+        auto p = make_n(n);
+        try {
+            std::uninitialized_fill_n(p->data(), n, v);
+            return p;
+        } catch (...) {
+            heap::deallocate(sizeof_n(n), p);
+            throw;
+        }
+    }
+
+    template <typename Iter,
+              typename Sent,
+              std::enable_if_t<detail::compatible_sentinel_v<Iter, Sent>,
+                               bool> = true>
+    static node_t* copy_n(size_t n, Iter first, Sent last)
+    {
+        auto p = make_n(n);
+        try {
+            uninitialized_copy(first, last, p->data());
+            return p;
+        } catch (...) {
+            heap::deallocate(sizeof_n(n), p);
+            throw;
+        }
+    }
+
+    static node_t* copy_n(size_t n, node_t* p, size_t count)
+    {
+        return copy_n(n, p->data(), p->data() + count);
+    }
+
+    template <typename Iter>
+    static node_t* copy_e(edit_t e, size_t n, Iter first, Iter last)
+    {
+        auto p     = copy_n(n, first, last);
+        p->ownee() = e;
+        return p;
+    }
+
+    static node_t* copy_e(edit_t e, size_t n, node_t* p, size_t count)
+    {
+        return copy_e(e, n, p->data(), p->data() + count);
+    }
+};
+
+} // namespace arrays
+} // namespace detail
+} // namespace immer
diff --git a/immer/detail/arrays/with_capacity.hpp b/immer/detail/arrays/with_capacity.hpp
new file mode 100644
index 000000000000..290809e4b6e5
--- /dev/null
+++ b/immer/detail/arrays/with_capacity.hpp
@@ -0,0 +1,303 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/detail/arrays/no_capacity.hpp>
+
+namespace immer {
+namespace detail {
+namespace arrays {
+
+template <typename T, typename MemoryPolicy>
+struct with_capacity
+{
+    using no_capacity_t = no_capacity<T, MemoryPolicy>;
+
+    using node_t = node<T, MemoryPolicy>;
+    using edit_t = typename MemoryPolicy::transience_t::edit;
+    using size_t = std::size_t;
+
+    node_t* ptr;
+    size_t size;
+    size_t capacity;
+
+    static const with_capacity& empty()
+    {
+        static const with_capacity empty_{node_t::make_n(1), 0, 1};
+        return empty_;
+    }
+
+    with_capacity(node_t* p, size_t s, size_t c)
+        : ptr{p}
+        , size{s}
+        , capacity{c}
+    {}
+
+    with_capacity(const with_capacity& other)
+        : with_capacity{other.ptr, other.size, other.capacity}
+    {
+        inc();
+    }
+
+    with_capacity(const no_capacity_t& other)
+        : with_capacity{other.ptr, other.size, other.size}
+    {
+        inc();
+    }
+
+    with_capacity(with_capacity&& other)
+        : with_capacity{empty()}
+    {
+        swap(*this, other);
+    }
+
+    with_capacity& operator=(const with_capacity& other)
+    {
+        auto next = other;
+        swap(*this, next);
+        return *this;
+    }
+
+    with_capacity& operator=(with_capacity&& other)
+    {
+        swap(*this, other);
+        return *this;
+    }
+
+    friend void swap(with_capacity& x, with_capacity& y)
+    {
+        using std::swap;
+        swap(x.ptr, y.ptr);
+        swap(x.size, y.size);
+        swap(x.capacity, y.capacity);
+    }
+
+    ~with_capacity() { dec(); }
+
+    void inc()
+    {
+        using immer::detail::get;
+        ptr->refs().inc();
+    }
+
+    void dec()
+    {
+        using immer::detail::get;
+        if (ptr->refs().dec())
+            node_t::delete_n(ptr, size, capacity);
+    }
+
+    const T* data() const { return ptr->data(); }
+    T* data() { return ptr->data(); }
+
+    T* data_mut(edit_t e)
+    {
+        if (!ptr->can_mutate(e)) {
+            auto p = node_t::copy_e(e, capacity, ptr, size);
+            dec();
+            ptr = p;
+        }
+        return data();
+    }
+
+    operator no_capacity_t() const
+    {
+        if (size == capacity) {
+            ptr->refs().inc();
+            return {ptr, size};
+        } else {
+            return {node_t::copy_n(size, ptr, size), size};
+        }
+    }
+
+    template <typename Iter,
+              typename Sent,
+              std::enable_if_t<is_forward_iterator_v<Iter> &&
+                                   compatible_sentinel_v<Iter, Sent>,
+                               bool> = true>
+    static with_capacity from_range(Iter first, Sent last)
+    {
+        auto count = static_cast<size_t>(distance(first, last));
+        if (count == 0)
+            return empty();
+        else
+            return {node_t::copy_n(count, first, last), count, count};
+    }
+
+    template <typename U>
+    static with_capacity from_initializer_list(std::initializer_list<U> values)
+    {
+        using namespace std;
+        return from_range(begin(values), end(values));
+    }
+
+    static with_capacity from_fill(size_t n, T v)
+    {
+        return {node_t::fill_n(n, v), n, n};
+    }
+
+    template <typename Fn>
+    void for_each_chunk(Fn&& fn) const
+    {
+        std::forward<Fn>(fn)(data(), data() + size);
+    }
+
+    template <typename Fn>
+    bool for_each_chunk_p(Fn&& fn) const
+    {
+        return std::forward<Fn>(fn)(data(), data() + size);
+    }
+
+    const T& get(std::size_t index) const { return data()[index]; }
+
+    const T& get_check(std::size_t index) const
+    {
+        if (index >= size)
+            throw std::out_of_range{"out of range"};
+        return data()[index];
+    }
+
+    bool equals(const with_capacity& other) const
+    {
+        return ptr == other.ptr ||
+               (size == other.size &&
+                std::equal(data(), data() + size, other.data()));
+    }
+
+    static size_t recommend_up(size_t sz, size_t cap)
+    {
+        auto max = std::numeric_limits<size_t>::max();
+        return sz <= cap ? cap
+                         : cap >= max / 2 ? max
+                                          /* otherwise */
+                                          : std::max(2 * cap, sz);
+    }
+
+    static size_t recommend_down(size_t sz, size_t cap)
+    {
+        return sz == 0 ? 1
+                       : sz < cap / 2 ? sz * 2 :
+                                      /* otherwise */ cap;
+    }
+
+    with_capacity push_back(T value) const
+    {
+        auto cap = recommend_up(size + 1, capacity);
+        auto p   = node_t::copy_n(cap, ptr, size);
+        try {
+            new (p->data() + size) T{std::move(value)};
+            return {p, size + 1, cap};
+        } catch (...) {
+            node_t::delete_n(p, size, cap);
+            throw;
+        }
+    }
+
+    void push_back_mut(edit_t e, T value)
+    {
+        if (ptr->can_mutate(e) && capacity > size) {
+            new (data() + size) T{std::move(value)};
+            ++size;
+        } else {
+            auto cap = recommend_up(size + 1, capacity);
+            auto p   = node_t::copy_e(e, cap, ptr, size);
+            try {
+                new (p->data() + size) T{std::move(value)};
+                *this = {p, size + 1, cap};
+            } catch (...) {
+                node_t::delete_n(p, size, cap);
+                throw;
+            }
+        }
+    }
+
+    with_capacity assoc(std::size_t idx, T value) const
+    {
+        auto p = node_t::copy_n(capacity, ptr, size);
+        try {
+            p->data()[idx] = std::move(value);
+            return {p, size, capacity};
+        } catch (...) {
+            node_t::delete_n(p, size, capacity);
+            throw;
+        }
+    }
+
+    void assoc_mut(edit_t e, std::size_t idx, T value)
+    {
+        if (ptr->can_mutate(e)) {
+            data()[idx] = std::move(value);
+        } else {
+            auto p = node_t::copy_n(capacity, ptr, size);
+            try {
+                p->data()[idx] = std::move(value);
+                *this          = {p, size, capacity};
+            } catch (...) {
+                node_t::delete_n(p, size, capacity);
+                throw;
+            }
+        }
+    }
+
+    template <typename Fn>
+    with_capacity update(std::size_t idx, Fn&& op) const
+    {
+        auto p = node_t::copy_n(capacity, ptr, size);
+        try {
+            auto& elem = p->data()[idx];
+            elem       = std::forward<Fn>(op)(std::move(elem));
+            return {p, size, capacity};
+        } catch (...) {
+            node_t::delete_n(p, size, capacity);
+            throw;
+        }
+    }
+
+    template <typename Fn>
+    void update_mut(edit_t e, std::size_t idx, Fn&& op)
+    {
+        if (ptr->can_mutate(e)) {
+            auto& elem = data()[idx];
+            elem       = std::forward<Fn>(op)(std::move(elem));
+        } else {
+            auto p = node_t::copy_e(e, capacity, ptr, size);
+            try {
+                auto& elem = p->data()[idx];
+                elem       = std::forward<Fn>(op)(std::move(elem));
+                *this      = {p, size, capacity};
+            } catch (...) {
+                node_t::delete_n(p, size, capacity);
+                throw;
+            }
+        }
+    }
+
+    with_capacity take(std::size_t sz) const
+    {
+        auto cap = recommend_down(sz, capacity);
+        auto p   = node_t::copy_n(cap, ptr, sz);
+        return {p, sz, cap};
+    }
+
+    void take_mut(edit_t e, std::size_t sz)
+    {
+        if (ptr->can_mutate(e)) {
+            destroy_n(data() + sz, size - sz); // destroy the dropped tail [sz, size)
+            size = sz;
+        } else {
+            auto cap = recommend_down(sz, capacity);
+            auto p   = node_t::copy_e(e, cap, ptr, sz);
+            *this    = {p, sz, cap};
+        }
+    }
+};
+
+} // namespace arrays
+} // namespace detail
+} // namespace immer
diff --git a/immer/detail/combine_standard_layout.hpp b/immer/detail/combine_standard_layout.hpp
new file mode 100644
index 000000000000..55c69bc91682
--- /dev/null
+++ b/immer/detail/combine_standard_layout.hpp
@@ -0,0 +1,235 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <type_traits>
+
+#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ == 1
+#define IMMER_BROKEN_STANDARD_LAYOUT_DETECTION 1
+#define immer_offsetof(st, m) ((std::size_t) & (((st*) 0)->m))
+#else
+#define IMMER_BROKEN_STANDARD_LAYOUT_DETECTION 0
+#define immer_offsetof offsetof
+#endif
+
+namespace immer {
+namespace detail {
+
+//
+// Metafunction that returns a standard layout struct that combines
+// all the standard layout types in `Ts...`, while making sure that
+// empty base optimizations are used.
+//
+// To query a part of the type do `get<my_part>(x)`;
+//
+// This is useful when putting together a type that merges various
+// types coming from different policies.  Some of them might be empty,
+// so we shall enable empty base optimizations.  But if we just
+// inherit from all of them, we would break the "standard layout"
+// rules, preventing us from using `offsetof(...)`.  So this metafunction
+// will generate the type by sometimes inheriting, sometimes adding as
+// member.
+//
+// Note that the types are added to the combined type from right to
+// left!
+//
+template <typename... Ts>
+struct combine_standard_layout;
+
+template <typename... Ts>
+using combine_standard_layout_t = typename combine_standard_layout<Ts...>::type;
+
+namespace csl {
+
+template <typename T>
+struct type_t
+{};
+
+template <typename U, typename T>
+U& get(T& x);
+
+template <typename U, typename T>
+const U& get(const T& x);
+
+template <typename T, typename Next = void>
+struct inherit
+{
+    struct type
+        : T
+        , Next
+    {
+        using Next::get_;
+
+        template <typename U>
+        friend decltype(auto) get(type& x)
+        {
+            return x.get_(type_t<U>{});
+        }
+        template <typename U>
+        friend decltype(auto) get(const type& x)
+        {
+            return x.get_(type_t<U>{});
+        }
+
+        T& get_(type_t<T>) { return *this; }
+        const T& get_(type_t<T>) const { return *this; }
+    };
+};
+
+template <typename T>
+struct inherit<T, void>
+{
+    struct type : T
+    {
+        template <typename U>
+        friend decltype(auto) get(type& x)
+        {
+            return x.get_(type_t<U>{});
+        }
+        template <typename U>
+        friend decltype(auto) get(const type& x)
+        {
+            return x.get_(type_t<U>{});
+        }
+
+        T& get_(type_t<T>) { return *this; }
+        const T& get_(type_t<T>) const { return *this; }
+    };
+};
+
+template <typename T, typename Next = void>
+struct member
+{
+    struct type : Next
+    {
+        T d;
+
+        using Next::get_;
+
+        template <typename U>
+        friend decltype(auto) get(type& x)
+        {
+            return x.get_(type_t<U>{});
+        }
+        template <typename U>
+        friend decltype(auto) get(const type& x)
+        {
+            return x.get_(type_t<U>{});
+        }
+
+        T& get_(type_t<T>) { return d; }
+        const T& get_(type_t<T>) const { return d; }
+    };
+};
+
+template <typename T>
+struct member<T, void>
+{
+    struct type
+    {
+        T d;
+
+        template <typename U>
+        friend decltype(auto) get(type& x)
+        {
+            return x.get_(type_t<U>{});
+        }
+        template <typename U>
+        friend decltype(auto) get(const type& x)
+        {
+            return x.get_(type_t<U>{});
+        }
+
+        T& get_(type_t<T>) { return d; }
+        const T& get_(type_t<T>) const { return d; }
+    };
+};
+
+template <typename T, typename Next>
+struct member_two
+{
+    struct type
+    {
+        Next n;
+        T d;
+
+        template <typename U>
+        friend decltype(auto) get(type& x)
+        {
+            return x.get_(type_t<U>{});
+        }
+        template <typename U>
+        friend decltype(auto) get(const type& x)
+        {
+            return x.get_(type_t<U>{});
+        }
+
+        T& get_(type_t<T>) { return d; }
+        const T& get_(type_t<T>) const { return d; }
+
+        template <typename U>
+        auto get_(type_t<U> t) -> decltype(auto)
+        {
+            return n.get_(t);
+        }
+        template <typename U>
+        auto get_(type_t<U> t) const -> decltype(auto)
+        {
+            return n.get_(t);
+        }
+    };
+};
+
+template <typename... Ts>
+struct combine_standard_layout_aux;
+
+template <typename T>
+struct combine_standard_layout_aux<T>
+{
+    static_assert(std::is_standard_layout<T>::value, "");
+
+    using type = typename std::conditional_t<std::is_empty<T>::value,
+                                             csl::inherit<T>,
+                                             csl::member<T>>::type;
+};
+
+template <typename T, typename... Ts>
+struct combine_standard_layout_aux<T, Ts...>
+{
+    static_assert(std::is_standard_layout<T>::value, "");
+
+    using this_t = T;
+    using next_t = typename combine_standard_layout_aux<Ts...>::type;
+
+    static constexpr auto empty_this = std::is_empty<this_t>::value;
+    static constexpr auto empty_next = std::is_empty<next_t>::value;
+
+    using type = typename std::conditional_t<
+        empty_this,
+        inherit<this_t, next_t>,
+        std::conditional_t<empty_next,
+                           member<this_t, next_t>,
+                           member_two<this_t, next_t>>>::type;
+};
+
+} // namespace csl
+
+using csl::get;
+
+template <typename... Ts>
+struct combine_standard_layout
+{
+    using type = typename csl::combine_standard_layout_aux<Ts...>::type;
+#if !IMMER_BROKEN_STANDARD_LAYOUT_DETECTION
+    static_assert(std::is_standard_layout<type>::value, "");
+#endif
+};
+
+} // namespace detail
+} // namespace immer
diff --git a/immer/detail/hamts/bits.hpp b/immer/detail/hamts/bits.hpp
new file mode 100644
index 000000000000..b02caf770666
--- /dev/null
+++ b/immer/detail/hamts/bits.hpp
@@ -0,0 +1,108 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <cstdint>
+
+#if defined(_MSC_VER)
+#include <intrin.h> // __popcnt
+#endif
+
+namespace immer {
+namespace detail {
+namespace hamts {
+
// Fundamental integer aliases used throughout the HAMT/CHAMP code.
using size_t  = std::size_t;   // element counts of whole containers
using hash_t  = std::size_t;   // values produced by the hash function
using bits_t  = std::uint32_t; // bits consumed per tree level (the B parameter)
using count_t = std::uint32_t; // per-node child/value counts
using shift_t = std::uint32_t; // bit-shift offsets into a hash value
+
// Maps the branching exponent B to an unsigned type wide enough to hold
// one bit per possible child (2^B bits).  Up to B == 5 a 32-bit bitmap
// suffices; B == 6 gets the 64-bit specialization below; anything larger
// is rejected at compile time.
template <bits_t B>
struct get_bitmap_type
{
    static_assert(B < 6u, "B > 6 is not supported.");

    using type = std::uint32_t;
};

template <>
struct get_bitmap_type<6u>
{
    using type = std::uint64_t;
};
+
// branches<B>: number of children an inner node can have (2^B).
template <bits_t B, typename T = count_t>
constexpr T branches = T{1u} << B;

// mask<B>: low-bit mask extracting one B-bit digit from a hash.
template <bits_t B, typename T = size_t>
constexpr T mask = branches<B, T> - 1u;

// max_depth<B>: number of B-bit digits in a hash_t, rounded up; depth at
// which hashes are exhausted and collision nodes take over.
template <bits_t B, typename T = count_t>
constexpr T max_depth = (sizeof(hash_t) * 8u + B - 1u) / B;

// max_shift<B>: total shift corresponding to max_depth levels.
template <bits_t B, typename T = count_t>
constexpr T max_shift = max_depth<B, count_t>* B;
+
// When non-zero, popcount() below uses compiler intrinsics.  It is
// defined unconditionally to 1 here, so popcount_fallback() is currently
// dead code kept only as an easy toggle / reference implementation.
#define IMMER_HAS_BUILTIN_POPCOUNT 1
+
// Portable SWAR ("SIMD within a register") population count for 32-bit
// words.  More alternatives:
// https://en.wikipedia.org/wiki/Hamming_weight
// http://wm.ite.pl/articles/sse-popcount.html
// http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
inline auto popcount_fallback(std::uint32_t v)
{
    // sum adjacent bit pairs into 2-bit fields
    v -= (v >> 1) & 0x55555555u;
    // sum adjacent 2-bit fields into 4-bit fields
    v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);
    // fold into bytes, then add all bytes via the multiply trick
    return (((v + (v >> 4u)) & 0xF0F0F0Fu) * 0x1010101u) >> 24u;
}
+
// 64-bit variant of the SWAR population count above.
inline auto popcount_fallback(std::uint64_t v)
{
    // sum adjacent bit pairs into 2-bit fields
    v -= (v >> 1) & 0x5555555555555555u;
    // sum adjacent 2-bit fields into 4-bit fields
    v = (v & 0x3333333333333333u) + ((v >> 2u) & 0x3333333333333333u);
    // fold into bytes, then add all eight bytes via the multiply trick
    return (((v + (v >> 4)) & 0x0F0F0F0F0F0F0F0Fu) * 0x0101010101010101u) >>
           56u;
}
+
// Counts set bits in a 32-bit word, preferring the compiler intrinsic
// (MSVC __popcnt, GCC/Clang __builtin_popcount).  Since
// IMMER_HAS_BUILTIN_POPCOUNT is always 1 above, the fallback branch is
// currently compiled out.
inline count_t popcount(std::uint32_t x)
{
#if IMMER_HAS_BUILTIN_POPCOUNT
#if defined(_MSC_VER)
    // NOTE(review): __popcnt emits the POPCNT instruction, which is not
    // available on all x86 CPUs -- confirm the supported baseline.
    return __popcnt(x);
#else
    return __builtin_popcount(x);
#endif
#else
    return popcount_fallback(x);
#endif
}
+
// Counts set bits in a 64-bit word.  On 32-bit MSVC there is no 64-bit
// popcount intrinsic, so the value is split into two 32-bit halves.
inline count_t popcount(std::uint64_t x)
{
#if IMMER_HAS_BUILTIN_POPCOUNT
#if defined(_MSC_VER)
#if defined(_WIN64)
    return __popcnt64(x);
#else
    // TODO: benchmark against popcount_fallback(std::uint64_t x)
    return popcount(static_cast<std::uint32_t>(x >> 32)) +
           popcount(static_cast<std::uint32_t>(x));
#endif
#else
    return __builtin_popcountll(x);
#endif
#else
    return popcount_fallback(x);
#endif
}
+
+} // namespace hamts
+} // namespace detail
+} // namespace immer
diff --git a/immer/detail/hamts/champ.hpp b/immer/detail/hamts/champ.hpp
new file mode 100644
index 000000000000..e3b55d397c72
--- /dev/null
+++ b/immer/detail/hamts/champ.hpp
@@ -0,0 +1,473 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/config.hpp>
+#include <immer/detail/hamts/node.hpp>
+
+#include <algorithm>
+
+namespace immer {
+namespace detail {
+namespace hamts {
+
// Persistent CHAMP (Compressed Hash-Array Mapped Prefix-tree) holding a
// ref-counted root node plus the element count.  T is the stored value
// (for maps, a key/value pair projected via Project/Combine); Hash and
// Equal define the hashing scheme; MemoryPolicy supplies heap/refcount
// machinery; B is the bits consumed per level.  All mutating operations
// (add/update/sub) return a new champ, sharing structure with this one.
template <typename T,
          typename Hash,
          typename Equal,
          typename MemoryPolicy,
          bits_t B>
struct champ
{
    static constexpr auto bits = B;

    using node_t   = node<T, Hash, Equal, MemoryPolicy, B>;
    using bitmap_t = typename get_bitmap_type<B>::type;

    // the bitmap type must provide one bit per possible child
    static_assert(branches<B> <= sizeof(bitmap_t) * 8, "");

    node_t* root; // owning (ref-counted) pointer, never null
    size_t size;  // number of elements reachable from root

    // Shared singleton empty tree; copies of it bump the root refcount.
    static const champ& empty()
    {
        static const champ empty_{
            node_t::make_inner_n(0),
            0,
        };
        return empty_;
    }

    // Takes ownership of an already-referenced root.
    champ(node_t* r, size_t sz)
        : root{r}
        , size{sz}
    {}

    champ(const champ& other)
        : champ{other.root, other.size}
    {
        inc();
    }

    // Move: start as a copy of the empty tree, then steal other's state.
    champ(champ&& other)
        : champ{empty()}
    {
        swap(*this, other);
    }

    // Copy-and-swap keeps assignment exception safe.
    champ& operator=(const champ& other)
    {
        auto next = other;
        swap(*this, next);
        return *this;
    }

    champ& operator=(champ&& other)
    {
        swap(*this, other);
        return *this;
    }

    friend void swap(champ& x, champ& y)
    {
        using std::swap;
        swap(x.root, y.root);
        swap(x.size, y.size);
    }

    ~champ() { dec(); }

    void inc() const { root->inc(); }

    // Drops one reference; frees the whole tree when it was the last.
    void dec() const
    {
        if (root->dec())
            node_t::delete_deep(root, 0);
    }

    // Calls fn(first, last) over every contiguous run of values in the
    // tree, in no particular key order.
    template <typename Fn>
    void for_each_chunk(Fn&& fn) const
    {
        for_each_chunk_traversal(root, 0, fn);
    }

    // Depth-first traversal helper: inner nodes yield their inline
    // values then recurse into children; at max depth the node is a
    // collision node and yields its collision list.
    template <typename Fn>
    void for_each_chunk_traversal(node_t* node, count_t depth, Fn&& fn) const
    {
        if (depth < max_depth<B>) {
            auto datamap = node->datamap();
            if (datamap)
                fn(node->values(), node->values() + popcount(datamap));
            auto nodemap = node->nodemap();
            if (nodemap) {
                auto fst = node->children();
                auto lst = fst + popcount(nodemap);
                for (; fst != lst; ++fst)
                    for_each_chunk_traversal(*fst, depth + 1, fn);
            }
        } else {
            fn(node->collisions(),
               node->collisions() + node->collision_count());
        }
    }

    // Lookup: returns Project{}(value) when k is found, Default{}()
    // otherwise.  Walks one B-bit hash digit per level; if the hash is
    // exhausted the final node is a collision node scanned linearly.
    template <typename Project, typename Default, typename K>
    decltype(auto) get(const K& k) const
    {
        auto node = root;
        auto hash = Hash{}(k);
        for (auto i = count_t{}; i < max_depth<B>; ++i) {
            auto bit = bitmap_t{1u} << (hash & mask<B>);
            if (node->nodemap() & bit) {
                // bit marks a child subtree; popcount of the lower bits
                // gives its index in the compressed children array
                auto offset = popcount(node->nodemap() & (bit - 1));
                node        = node->children()[offset];
                hash        = hash >> B;
            } else if (node->datamap() & bit) {
                // bit marks an inline value
                auto offset = popcount(node->datamap() & (bit - 1));
                auto val    = node->values() + offset;
                if (Equal{}(*val, k))
                    return Project{}(*val);
                else
                    return Default{}();
            } else {
                return Default{}();
            }
        }
        auto fst = node->collisions();
        auto lst = fst + node->collision_count();
        for (; fst != lst; ++fst)
            if (Equal{}(*fst, k))
                return Project{}(*fst);
        return Default{}();
    }

    // Recursive insert.  Returns the (new) subtree root and whether the
    // size grew (false means an equal element was replaced).  The
    // catch blocks release freshly built subtrees if copying a parent
    // node throws, keeping the original tree intact.
    std::pair<node_t*, bool>
    do_add(node_t* node, T v, hash_t hash, shift_t shift) const
    {
        if (shift == max_shift<B>) {
            // hash exhausted: collision node, linear scan
            auto fst = node->collisions();
            auto lst = fst + node->collision_count();
            for (; fst != lst; ++fst)
                if (Equal{}(*fst, v))
                    return {
                        node_t::copy_collision_replace(node, fst, std::move(v)),
                        false};
            return {node_t::copy_collision_insert(node, std::move(v)), true};
        } else {
            auto idx = (hash & (mask<B> << shift)) >> shift;
            auto bit = bitmap_t{1u} << idx;
            if (node->nodemap() & bit) {
                // descend into the existing child, then re-link it into a
                // copy of this node
                auto offset = popcount(node->nodemap() & (bit - 1));
                auto result = do_add(
                    node->children()[offset], std::move(v), hash, shift + B);
                try {
                    result.first =
                        node_t::copy_inner_replace(node, offset, result.first);
                    return result;
                } catch (...) {
                    node_t::delete_deep_shift(result.first, shift + B);
                    throw;
                }
            } else if (node->datamap() & bit) {
                auto offset = popcount(node->datamap() & (bit - 1));
                auto val    = node->values() + offset;
                if (Equal{}(*val, v))
                    // equal element already inline: replace in place
                    return {node_t::copy_inner_replace_value(
                                node, offset, std::move(v)),
                            false};
                else {
                    // slot clash with a different element: push both one
                    // level down into a merged subtree
                    auto child = node_t::make_merged(
                        shift + B, std::move(v), hash, *val, Hash{}(*val));
                    try {
                        return {node_t::copy_inner_replace_merged(
                                    node, bit, offset, child),
                                true};
                    } catch (...) {
                        node_t::delete_deep_shift(child, shift + B);
                        throw;
                    }
                }
            } else {
                // free slot: insert inline
                return {
                    node_t::copy_inner_insert_value(node, bit, std::move(v)),
                    true};
            }
        }
    }

    // Returns a new tree containing v (replacing any equal element).
    champ add(T v) const
    {
        auto hash     = Hash{}(v);
        auto res      = do_add(root, std::move(v), hash, 0);
        auto new_size = size + (res.second ? 1 : 0);
        return {res.first, new_size};
    }

    // Recursive upsert: like do_add, but the stored value is computed as
    // Combine{}(k, fn(Project{}(existing))) when k is present and
    // Combine{}(k, fn(Default{}())) when it is not.
    template <typename Project,
              typename Default,
              typename Combine,
              typename K,
              typename Fn>
    std::pair<node_t*, bool>
    do_update(node_t* node, K&& k, Fn&& fn, hash_t hash, shift_t shift) const
    {
        if (shift == max_shift<B>) {
            auto fst = node->collisions();
            auto lst = fst + node->collision_count();
            for (; fst != lst; ++fst)
                if (Equal{}(*fst, k))
                    return {
                        node_t::copy_collision_replace(
                            node,
                            fst,
                            Combine{}(std::forward<K>(k),
                                      std::forward<Fn>(fn)(Project{}(*fst)))),
                        false};
            return {node_t::copy_collision_insert(
                        node,
                        Combine{}(std::forward<K>(k),
                                  std::forward<Fn>(fn)(Default{}()))),
                    true};
        } else {
            auto idx = (hash & (mask<B> << shift)) >> shift;
            auto bit = bitmap_t{1u} << idx;
            if (node->nodemap() & bit) {
                auto offset = popcount(node->nodemap() & (bit - 1));
                auto result = do_update<Project, Default, Combine>(
                    node->children()[offset],
                    k,
                    std::forward<Fn>(fn),
                    hash,
                    shift + B);
                try {
                    result.first =
                        node_t::copy_inner_replace(node, offset, result.first);
                    return result;
                } catch (...) {
                    node_t::delete_deep_shift(result.first, shift + B);
                    throw;
                }
            } else if (node->datamap() & bit) {
                auto offset = popcount(node->datamap() & (bit - 1));
                auto val    = node->values() + offset;
                if (Equal{}(*val, k))
                    return {
                        node_t::copy_inner_replace_value(
                            node,
                            offset,
                            Combine{}(std::forward<K>(k),
                                      std::forward<Fn>(fn)(Project{}(*val)))),
                        false};
                else {
                    auto child = node_t::make_merged(
                        shift + B,
                        Combine{}(std::forward<K>(k),
                                  std::forward<Fn>(fn)(Default{}())),
                        hash,
                        *val,
                        Hash{}(*val));
                    try {
                        return {node_t::copy_inner_replace_merged(
                                    node, bit, offset, child),
                                true};
                    } catch (...) {
                        node_t::delete_deep_shift(child, shift + B);
                        throw;
                    }
                }
            } else {
                return {node_t::copy_inner_insert_value(
                            node,
                            bit,
                            Combine{}(std::forward<K>(k),
                                      std::forward<Fn>(fn)(Default{}()))),
                        true};
            }
        }
    }

    // Returns a new tree where the entry for k has been updated with fn
    // (inserted if absent).
    template <typename Project,
              typename Default,
              typename Combine,
              typename K,
              typename Fn>
    champ update(const K& k, Fn&& fn) const
    {
        auto hash = Hash{}(k);
        auto res  = do_update<Project, Default, Combine>(
            root, k, std::forward<Fn>(fn), hash, 0);
        auto new_size = size + (res.second ? 1 : 0);
        return {res.first, new_size};
    }

    // Result of do_sub().  Conceptually a
    //      variant<monostate_t, T*, node_t*>
    // hand-rolled because the library does not require C++17.
    struct sub_result
    {
        enum kind_t
        {
            nothing,   // key not found, tree unchanged
            singleton, // subtree collapsed to a single value (to inline)
            tree       // new subtree root
        };

        union data_t
        {
            T* singleton;
            node_t* tree;
        };

        kind_t kind;
        data_t data;

        sub_result()
            : kind{nothing} {};
        sub_result(T* x)
            : kind{singleton}
        {
            data.singleton = x;
        };
        sub_result(node_t* x)
            : kind{tree}
        {
            data.tree = x;
        };
    };

    // Recursive erase.  May collapse nodes: a child reduced to a single
    // value is inlined into the parent, keeping the CHAMP canonical.
    template <typename K>
    sub_result
    do_sub(node_t* node, const K& k, hash_t hash, shift_t shift) const
    {
        if (shift == max_shift<B>) {
            auto fst = node->collisions();
            auto lst = fst + node->collision_count();
            for (auto cur = fst; cur != lst; ++cur)
                if (Equal{}(*cur, k))
                    // with only two collisions left, hand the survivor up
                    // as a singleton instead of keeping a collision node
                    return node->collision_count() > 2
                               ? node_t::copy_collision_remove(node, cur)
                               : sub_result{fst + (cur == fst)};
            return {};
        } else {
            auto idx = (hash & (mask<B> << shift)) >> shift;
            auto bit = bitmap_t{1u} << idx;
            if (node->nodemap() & bit) {
                auto offset = popcount(node->nodemap() & (bit - 1));
                auto result =
                    do_sub(node->children()[offset], k, hash, shift + B);
                switch (result.kind) {
                case sub_result::nothing:
                    return {};
                case sub_result::singleton:
                    // if this node holds nothing but that one child (and
                    // is not the root), propagate the singleton upward;
                    // otherwise inline the value here
                    return node->datamap() == 0 &&
                                   popcount(node->nodemap()) == 1 && shift > 0
                               ? result
                               : node_t::copy_inner_replace_inline(
                                     node, bit, offset, *result.data.singleton);
                case sub_result::tree:
                    try {
                        return node_t::copy_inner_replace(
                            node, offset, result.data.tree);
                    } catch (...) {
                        node_t::delete_deep_shift(result.data.tree, shift + B);
                        throw;
                    }
                }
            } else if (node->datamap() & bit) {
                auto offset = popcount(node->datamap() & (bit - 1));
                auto val    = node->values() + offset;
                if (Equal{}(*val, k)) {
                    auto nv = popcount(node->datamap());
                    if (node->nodemap() || nv > 2)
                        // node stays a proper inner node: just drop the value
                        return node_t::copy_inner_remove_value(
                            node, bit, offset);
                    else if (nv == 2) {
                        // only the sibling value remains: pass it up as a
                        // singleton, or keep a 1-value root node at shift 0
                        return shift > 0 ? sub_result{node->values() + !offset}
                                         : node_t::make_inner_n(
                                               0,
                                               node->datamap() & ~bit,
                                               node->values()[!offset]);
                    } else {
                        // removing the last element of the root
                        assert(shift == 0);
                        return empty().root->inc();
                    }
                }
            }
            return {};
        }
    }

    // Returns a new tree without k (or *this unchanged if k is absent).
    template <typename K>
    champ sub(const K& k) const
    {
        auto hash = Hash{}(k);
        auto res  = do_sub(root, k, hash, 0);
        switch (res.kind) {
        case sub_result::nothing:
            return *this;
        case sub_result::tree:
            return {res.data.tree, size - 1};
        default:
            // do_sub never yields a bare singleton at the root
            IMMER_UNREACHABLE;
        }
    }

    // Structural equality under Eq, short-circuiting on shared subtrees.
    template <typename Eq = Equal>
    bool equals(const champ& other) const
    {
        return size == other.size && equals_tree<Eq>(root, other.root, 0);
    }

    template <typename Eq>
    static bool equals_tree(const node_t* a, const node_t* b, count_t depth)
    {
        if (a == b)
            // identical (shared) nodes are trivially equal
            return true;
        else if (depth == max_depth<B>) {
            auto nv = a->collision_count();
            return nv == b->collision_count() &&
                   equals_collisions<Eq>(a->collisions(), b->collisions(), nv);
        } else {
            if (a->nodemap() != b->nodemap() || a->datamap() != b->datamap())
                return false;
            auto n = popcount(a->nodemap());
            for (auto i = count_t{}; i < n; ++i)
                if (!equals_tree<Eq>(
                        a->children()[i], b->children()[i], depth + 1))
                    return false;
            auto nv = popcount(a->datamap());
            return !nv || equals_values<Eq>(a->values(), b->values(), nv);
        }
    }

    // Inline values share bitmap positions, so they compare pairwise.
    template <typename Eq>
    static bool equals_values(const T* a, const T* b, count_t n)
    {
        return std::equal(a, a + n, b, Eq{});
    }

    // Collision lists are unordered: quadratic containment check.
    template <typename Eq>
    static bool equals_collisions(const T* a, const T* b, count_t n)
    {
        auto ae = a + n;
        auto be = b + n;
        for (; a != ae; ++a) {
            for (auto fst = b; fst != be; ++fst)
                if (Eq{}(*a, *fst))
                    goto good;
            return false;
        good:
            continue;
        }
        return true;
    }
};
+
+} // namespace hamts
+} // namespace detail
+} // namespace immer
diff --git a/immer/detail/hamts/champ_iterator.hpp b/immer/detail/hamts/champ_iterator.hpp
new file mode 100644
index 000000000000..72673b41be03
--- /dev/null
+++ b/immer/detail/hamts/champ_iterator.hpp
@@ -0,0 +1,148 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/detail/hamts/champ.hpp>
+#include <immer/detail/iterator_facade.hpp>
+
+namespace immer {
+namespace detail {
+namespace hamts {
+
// Forward iterator over all values of a champ tree.  It keeps a stack of
// positions (path_) into the compressed children arrays plus a [cur_,
// end_) window over the current node's value run; cur_ == end_ ==
// nullptr denotes the end iterator.  The iterator borrows the tree: it
// must not outlive the champ it was created from.
template <typename T, typename Hash, typename Eq, typename MP, bits_t B>
struct champ_iterator
    : iterator_facade<champ_iterator<T, Hash, Eq, MP, B>,
                      std::forward_iterator_tag,
                      T,
                      const T&,
                      std::ptrdiff_t,
                      const T*>
{
    using tree_t = champ<T, Hash, Eq, MP, B>;
    using node_t = typename tree_t::node_t;

    // tag type selecting the end-iterator constructor
    struct end_t
    {};

    champ_iterator() = default;

    // Begin iterator: start at the root's inline values (if any), then
    // advance to the first value anywhere in the tree.
    champ_iterator(const tree_t& v)
        : depth_{0}
    {
        if (v.root->datamap()) {
            cur_ = v.root->values();
            end_ = v.root->values() + popcount(v.root->datamap());
        } else {
            cur_ = end_ = nullptr;
        }
        path_[0] = &v.root;
        ensure_valid_();
    }

    // End iterator.
    champ_iterator(const tree_t& v, end_t)
        : cur_{nullptr}
        , end_{nullptr}
        , depth_{0}
    {
        path_[0] = &v.root;
    }

    champ_iterator(const champ_iterator& other)
        : cur_{other.cur_}
        , end_{other.end_}
        , depth_{other.depth_}
    {
        // only the active part of the traversal stack needs copying
        std::copy(other.path_, other.path_ + depth_ + 1, path_);
    }

private:
    friend iterator_core_access;

    T* cur_;                               // current value
    T* end_;                               // end of the current value run
    count_t depth_;                        // current level in the tree
    node_t* const* path_[max_depth<B> + 1]; // per-level position in the
                                            // parent's children array

    void increment()
    {
        ++cur_;
        ensure_valid_();
    }

    // Descends into the first child of the current node, if any.
    // Updates [cur_, end_) from the child's values (or collision list at
    // the bottom level).  Returns whether a descent happened.
    bool step_down()
    {
        if (depth_ < max_depth<B>) {
            auto parent = *path_[depth_];
            if (parent->nodemap()) {
                ++depth_;
                path_[depth_] = parent->children();
                auto child    = *path_[depth_];
                if (depth_ < max_depth<B>) {
                    if (child->datamap()) {
                        cur_ = child->values();
                        end_ = cur_ + popcount(child->datamap());
                    }
                } else {
                    cur_ = child->collisions();
                    end_ = cur_ + child->collision_count();
                }
                return true;
            }
        }
        return false;
    }

    // Moves to the next sibling at the deepest possible level, popping
    // exhausted levels.  Returns false when the whole tree is done.
    bool step_right()
    {
        while (depth_ > 0) {
            auto parent = *path_[depth_ - 1];
            auto last   = parent->children() + popcount(parent->nodemap());
            auto next   = path_[depth_] + 1;
            if (next < last) {
                path_[depth_] = next;
                auto child    = *path_[depth_];
                if (depth_ < max_depth<B>) {
                    if (child->datamap()) {
                        cur_ = child->values();
                        end_ = cur_ + popcount(child->datamap());
                    }
                } else {
                    cur_ = child->collisions();
                    end_ = cur_ + child->collision_count();
                }
                return true;
            }
            --depth_;
        }
        return false;
    }

    // Advances the traversal until cur_ points at a real value, or marks
    // the iterator as the end iterator when the tree is exhausted.
    void ensure_valid_()
    {
        while (cur_ == end_) {
            while (step_down())
                if (cur_ != end_)
                    return;
            if (!step_right()) {
                // end of sequence
                assert(depth_ == 0);
                cur_ = end_ = nullptr;
                return;
            }
        }
    }

    bool equal(const champ_iterator& other) const { return cur_ == other.cur_; }

    const T& dereference() const { return *cur_; }
};
+
+} // namespace hamts
+} // namespace detail
+} // namespace immer
diff --git a/immer/detail/hamts/node.hpp b/immer/detail/hamts/node.hpp
new file mode 100644
index 000000000000..216e82b7874f
--- /dev/null
+++ b/immer/detail/hamts/node.hpp
@@ -0,0 +1,717 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/detail/combine_standard_layout.hpp>
+#include <immer/detail/hamts/bits.hpp>
+#include <immer/detail/util.hpp>
+
+#include <cassert>
+
+namespace immer {
+namespace detail {
+namespace hamts {
+
+template <typename T,
+          typename Hash,
+          typename Equal,
+          typename MemoryPolicy,
+          bits_t B>
+struct node
+{
+    using node_t = node;
+
+    using memory      = MemoryPolicy;
+    using heap_policy = typename memory::heap;
+    using heap        = typename heap_policy::type;
+    using transience  = typename memory::transience_t;
+    using refs_t      = typename memory::refcount;
+    using ownee_t     = typename transience::ownee;
+    using edit_t      = typename transience::edit;
+    using value_t     = T;
+    using bitmap_t    = typename get_bitmap_type<B>::type;
+
+    enum class kind_t
+    {
+        collision,
+        inner
+    };
+
+    struct collision_t
+    {
+        count_t count;
+        aligned_storage_for<T> buffer;
+    };
+
+    struct values_data_t
+    {
+        aligned_storage_for<T> buffer;
+    };
+
+    using values_t = combine_standard_layout_t<values_data_t, refs_t>;
+
+    struct inner_t
+    {
+        bitmap_t nodemap;
+        bitmap_t datamap;
+        values_t* values;
+        aligned_storage_for<node_t*> buffer;
+    };
+
+    union data_t
+    {
+        inner_t inner;
+        collision_t collision;
+    };
+
+    struct impl_data_t
+    {
+#if IMMER_TAGGED_NODE
+        kind_t kind;
+#endif
+        data_t data;
+    };
+
+    using impl_t = combine_standard_layout_t<impl_data_t, refs_t>;
+
+    impl_t impl;
+
+    constexpr static std::size_t sizeof_values_n(count_t count)
+    {
+        return std::max(sizeof(values_t),
+                        immer_offsetof(values_t, d.buffer) +
+                            sizeof(values_data_t::buffer) * count);
+    }
+
+    constexpr static std::size_t sizeof_collision_n(count_t count)
+    {
+        return immer_offsetof(impl_t, d.data.collision.buffer) +
+               sizeof(collision_t::buffer) * count;
+    }
+
+    constexpr static std::size_t sizeof_inner_n(count_t count)
+    {
+        return immer_offsetof(impl_t, d.data.inner.buffer) +
+               sizeof(inner_t::buffer) * count;
+    }
+
+#if IMMER_TAGGED_NODE
+    kind_t kind() const { return impl.d.kind; }
+#endif
+
+    auto values()
+    {
+        IMMER_ASSERT_TAGGED(kind() == kind_t::inner);
+        assert(impl.d.data.inner.values);
+        return (T*) &impl.d.data.inner.values->d.buffer;
+    }
+
+    auto values() const
+    {
+        IMMER_ASSERT_TAGGED(kind() == kind_t::inner);
+        assert(impl.d.data.inner.values);
+        return (const T*) &impl.d.data.inner.values->d.buffer;
+    }
+
+    auto children()
+    {
+        IMMER_ASSERT_TAGGED(kind() == kind_t::inner);
+        return (node_t**) &impl.d.data.inner.buffer;
+    }
+
+    auto children() const
+    {
+        IMMER_ASSERT_TAGGED(kind() == kind_t::inner);
+        return (const node_t* const*) &impl.d.data.inner.buffer;
+    }
+
+    auto datamap() const
+    {
+        IMMER_ASSERT_TAGGED(kind() == kind_t::inner);
+        return impl.d.data.inner.datamap;
+    }
+
+    auto nodemap() const
+    {
+        IMMER_ASSERT_TAGGED(kind() == kind_t::inner);
+        return impl.d.data.inner.nodemap;
+    }
+
+    auto collision_count() const
+    {
+        IMMER_ASSERT_TAGGED(kind() == kind_t::collision);
+        return impl.d.data.collision.count;
+    }
+
+    T* collisions()
+    {
+        IMMER_ASSERT_TAGGED(kind() == kind_t::collision);
+        return (T*) &impl.d.data.collision.buffer;
+    }
+
+    const T* collisions() const
+    {
+        IMMER_ASSERT_TAGGED(kind() == kind_t::collision);
+        return (const T*) &impl.d.data.collision.buffer;
+    }
+
+    static refs_t& refs(const values_t* x)
+    {
+        return auto_const_cast(get<refs_t>(*x));
+    }
+    static const ownee_t& ownee(const values_t* x) { return get<ownee_t>(*x); }
+    static ownee_t& ownee(values_t* x) { return get<ownee_t>(*x); }
+
+    static refs_t& refs(const node_t* x)
+    {
+        return auto_const_cast(get<refs_t>(x->impl));
+    }
+    static const ownee_t& ownee(const node_t* x)
+    {
+        return get<ownee_t>(x->impl);
+    }
+    static ownee_t& ownee(node_t* x) { return get<ownee_t>(x->impl); }
+
+    static node_t* make_inner_n(count_t n)
+    {
+        assert(n <= branches<B>);
+        auto m = heap::allocate(sizeof_inner_n(n));
+        auto p = new (m) node_t;
+#if IMMER_TAGGED_NODE
+        p->impl.d.kind = node_t::kind_t::inner;
+#endif
+        p->impl.d.data.inner.nodemap = 0;
+        p->impl.d.data.inner.datamap = 0;
+        p->impl.d.data.inner.values  = nullptr;
+        return p;
+    }
+
+    static node_t* make_inner_n(count_t n, values_t* values)
+    {
+        auto p = make_inner_n(n);
+        if (values) {
+            p->impl.d.data.inner.values = values;
+            refs(values).inc();
+        }
+        return p;
+    }
+
+    static node_t* make_inner_n(count_t n, count_t nv)
+    {
+        assert(nv <= branches<B>);
+        auto p = make_inner_n(n);
+        if (nv) {
+            try {
+                p->impl.d.data.inner.values =
+                    new (heap::allocate(sizeof_values_n(nv))) values_t{};
+            } catch (...) {
+                deallocate_inner(p, n);
+                throw;
+            }
+        }
+        return p;
+    }
+
+    static node_t* make_inner_n(count_t n, count_t idx, node_t* child)
+    {
+        assert(n >= 1);
+        auto p                       = make_inner_n(n);
+        p->impl.d.data.inner.nodemap = bitmap_t{1u} << idx;
+        p->children()[0]             = child;
+        return p;
+    }
+
+    static node_t* make_inner_n(count_t n, bitmap_t bitmap, T x)
+    {
+        auto p                       = make_inner_n(n, 1);
+        p->impl.d.data.inner.datamap = bitmap;
+        try {
+            new (p->values()) T{std::move(x)};
+        } catch (...) {
+            deallocate_inner(p, n, 1);
+            throw;
+        }
+        return p;
+    }
+
+    static node_t*
+    make_inner_n(count_t n, count_t idx1, T x1, count_t idx2, T x2)
+    {
+        assert(idx1 != idx2);
+        auto p = make_inner_n(n, 2);
+        p->impl.d.data.inner.datamap =
+            (bitmap_t{1u} << idx1) | (bitmap_t{1u} << idx2);
+        auto assign = [&](auto&& x1, auto&& x2) {
+            auto vp = p->values();
+            try {
+                new (vp) T{std::move(x1)};
+                try {
+                    new (vp + 1) T{std::move(x2)};
+                } catch (...) {
+                    vp->~T();
+                    throw;
+                }
+            } catch (...) {
+                deallocate_inner(p, n, 2);
+                throw;
+            }
+        };
+        if (idx1 < idx2)
+            assign(x1, x2);
+        else
+            assign(x2, x1);
+        return p;
+    }
+
+    static node_t* make_collision_n(count_t n)
+    {
+        auto m = heap::allocate(sizeof_collision_n(n));
+        auto p = new (m) node_t;
+#if IMMER_TAGGED_NODE
+        p->impl.d.kind = node_t::kind_t::collision;
+#endif
+        p->impl.d.data.collision.count = n;
+        return p;
+    }
+
+    static node_t* make_collision(T v1, T v2)
+    {
+        auto m = heap::allocate(sizeof_collision_n(2));
+        auto p = new (m) node_t;
+#if IMMER_TAGGED_NODE
+        p->impl.d.kind = node_t::kind_t::collision;
+#endif
+        p->impl.d.data.collision.count = 2;
+        auto cols                      = p->collisions();
+        try {
+            new (cols) T{std::move(v1)};
+            try {
+                new (cols + 1) T{std::move(v2)};
+            } catch (...) {
+                cols->~T();
+                throw;
+            }
+        } catch (...) {
+            deallocate_collision(p, 2);
+            throw;
+        }
+        return p;
+    }
+
+    static node_t* copy_collision_insert(node_t* src, T v)
+    {
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::collision);
+        auto n    = src->collision_count();
+        auto dst  = make_collision_n(n + 1);
+        auto srcp = src->collisions();
+        auto dstp = dst->collisions();
+        try {
+            new (dstp) T{std::move(v)};
+            try {
+                std::uninitialized_copy(srcp, srcp + n, dstp + 1);
+            } catch (...) {
+                dstp->~T();
+                throw;
+            }
+        } catch (...) {
+            deallocate_collision(dst, n + 1);
+            throw;
+        }
+        return dst;
+    }
+
+    static node_t* copy_collision_remove(node_t* src, T* v)
+    {
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::collision);
+        assert(src->collision_count() > 1);
+        auto n    = src->collision_count();
+        auto dst  = make_collision_n(n - 1);
+        auto srcp = src->collisions();
+        auto dstp = dst->collisions();
+        try {
+            dstp = std::uninitialized_copy(srcp, v, dstp);
+            try {
+                std::uninitialized_copy(v + 1, srcp + n, dstp);
+            } catch (...) {
+                destroy(dst->collisions(), dstp);
+                throw;
+            }
+        } catch (...) {
+            deallocate_collision(dst, n - 1);
+            throw;
+        }
+        return dst;
+    }
+
+    static node_t* copy_collision_replace(node_t* src, T* pos, T v)
+    {
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::collision);
+        auto n    = src->collision_count();
+        auto dst  = make_collision_n(n);
+        auto srcp = src->collisions();
+        auto dstp = dst->collisions();
+        assert(pos >= srcp && pos < srcp + n);
+        try {
+            new (dstp) T{std::move(v)};
+            try {
+                dstp = std::uninitialized_copy(srcp, pos, dstp + 1);
+                try {
+                    std::uninitialized_copy(pos + 1, srcp + n, dstp);
+                } catch (...) {
+                    destroy(dst->collisions(), dstp);
+                    throw;
+                }
+            } catch (...) {
+                dst->collisions()->~T();
+                throw;
+            }
+        } catch (...) {
+            deallocate_collision(dst, n);
+            throw;
+        }
+        return dst;
+    }
+
+    static node_t*
+    copy_inner_replace(node_t* src, count_t offset, node_t* child)
+    {
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
+        auto n    = popcount(src->nodemap());
+        auto dst  = make_inner_n(n, src->impl.d.data.inner.values);
+        auto srcp = src->children();
+        auto dstp = dst->children();
+        dst->impl.d.data.inner.datamap = src->datamap();
+        dst->impl.d.data.inner.nodemap = src->nodemap();
+        std::uninitialized_copy(srcp, srcp + n, dstp);
+        inc_nodes(srcp, n);
+        srcp[offset]->dec_unsafe();
+        dstp[offset] = child;
+        return dst;
+    }
+
+    static node_t* copy_inner_replace_value(node_t* src, count_t offset, T v)
+    {
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
+        assert(offset < popcount(src->datamap()));
+        auto n                         = popcount(src->nodemap());
+        auto nv                        = popcount(src->datamap());
+        auto dst                       = make_inner_n(n, nv);
+        dst->impl.d.data.inner.datamap = src->datamap();
+        dst->impl.d.data.inner.nodemap = src->nodemap();
+        try {
+            std::uninitialized_copy(
+                src->values(), src->values() + nv, dst->values());
+            try {
+                dst->values()[offset] = std::move(v);
+            } catch (...) {
+                destroy_n(dst->values(), nv);
+                throw;
+            }
+        } catch (...) {
+            deallocate_inner(dst, n, nv);
+            throw;
+        }
+        inc_nodes(src->children(), n);
+        std::uninitialized_copy(
+            src->children(), src->children() + n, dst->children());
+        return dst;
+    }
+
+    static node_t* copy_inner_replace_merged(node_t* src,
+                                             bitmap_t bit,
+                                             count_t voffset,
+                                             node_t* node)
+    {
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
+        assert(!(src->nodemap() & bit));
+        assert(src->datamap() & bit);
+        assert(voffset == popcount(src->datamap() & (bit - 1)));
+        auto n                         = popcount(src->nodemap());
+        auto nv                        = popcount(src->datamap());
+        auto dst                       = make_inner_n(n + 1, nv - 1);
+        auto noffset                   = popcount(src->nodemap() & (bit - 1));
+        dst->impl.d.data.inner.datamap = src->datamap() & ~bit;
+        dst->impl.d.data.inner.nodemap = src->nodemap() | bit;
+        if (nv > 1) {
+            try {
+                std::uninitialized_copy(
+                    src->values(), src->values() + voffset, dst->values());
+                try {
+                    std::uninitialized_copy(src->values() + voffset + 1,
+                                            src->values() + nv,
+                                            dst->values() + voffset);
+                } catch (...) {
+                    destroy_n(dst->values(), voffset);
+                    throw;
+                }
+            } catch (...) {
+                deallocate_inner(dst, n + 1, nv - 1);
+                throw;
+            }
+        }
+        inc_nodes(src->children(), n);
+        std::uninitialized_copy(
+            src->children(), src->children() + noffset, dst->children());
+        std::uninitialized_copy(src->children() + noffset,
+                                src->children() + n,
+                                dst->children() + noffset + 1);
+        dst->children()[noffset] = node;
+        return dst;
+    }
+
+    static node_t* copy_inner_replace_inline(node_t* src,
+                                             bitmap_t bit,
+                                             count_t noffset,
+                                             T value)
+    {
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
+        assert(!(src->datamap() & bit));
+        assert(src->nodemap() & bit);
+        assert(noffset == popcount(src->nodemap() & (bit - 1)));
+        auto n                         = popcount(src->nodemap());
+        auto nv                        = popcount(src->datamap());
+        auto dst                       = make_inner_n(n - 1, nv + 1);
+        auto voffset                   = popcount(src->datamap() & (bit - 1));
+        dst->impl.d.data.inner.nodemap = src->nodemap() & ~bit;
+        dst->impl.d.data.inner.datamap = src->datamap() | bit;
+        try {
+            if (nv)
+                std::uninitialized_copy(
+                    src->values(), src->values() + voffset, dst->values());
+            try {
+                new (dst->values() + voffset) T{std::move(value)};
+                try {
+                    if (nv)
+                        std::uninitialized_copy(src->values() + voffset,
+                                                src->values() + nv,
+                                                dst->values() + voffset + 1);
+                } catch (...) {
+                    dst->values()[voffset].~T();
+                    throw;
+                }
+            } catch (...) {
+                destroy_n(dst->values(), voffset);
+                throw;
+            }
+        } catch (...) {
+            deallocate_inner(dst, n - 1, nv + 1);
+            throw;
+        }
+        inc_nodes(src->children(), n);
+        src->children()[noffset]->dec_unsafe();
+        std::uninitialized_copy(
+            src->children(), src->children() + noffset, dst->children());
+        std::uninitialized_copy(src->children() + noffset + 1,
+                                src->children() + n,
+                                dst->children() + noffset);
+        return dst;
+    }
+
+    static node_t*
+    copy_inner_remove_value(node_t* src, bitmap_t bit, count_t voffset)
+    {
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
+        assert(!(src->nodemap() & bit));
+        assert(src->datamap() & bit);
+        assert(voffset == popcount(src->datamap() & (bit - 1)));
+        auto n                         = popcount(src->nodemap());
+        auto nv                        = popcount(src->datamap());
+        auto dst                       = make_inner_n(n, nv - 1);
+        dst->impl.d.data.inner.datamap = src->datamap() & ~bit;
+        dst->impl.d.data.inner.nodemap = src->nodemap();
+        if (nv > 1) {
+            try {
+                std::uninitialized_copy(
+                    src->values(), src->values() + voffset, dst->values());
+                try {
+                    std::uninitialized_copy(src->values() + voffset + 1,
+                                            src->values() + nv,
+                                            dst->values() + voffset);
+                } catch (...) {
+                    destroy_n(dst->values(), voffset);
+                    throw;
+                }
+            } catch (...) {
+                deallocate_inner(dst, n, nv - 1);
+                throw;
+            }
+        }
+        inc_nodes(src->children(), n);
+        std::uninitialized_copy(
+            src->children(), src->children() + n, dst->children());
+        return dst;
+    }
+
+    static node_t* copy_inner_insert_value(node_t* src, bitmap_t bit, T v)
+    {
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
+        auto n                         = popcount(src->nodemap());
+        auto nv                        = popcount(src->datamap());
+        auto offset                    = popcount(src->datamap() & (bit - 1));
+        auto dst                       = make_inner_n(n, nv + 1);
+        dst->impl.d.data.inner.datamap = src->datamap() | bit;
+        dst->impl.d.data.inner.nodemap = src->nodemap();
+        try {
+            if (nv)
+                std::uninitialized_copy(
+                    src->values(), src->values() + offset, dst->values());
+            try {
+                new (dst->values() + offset) T{std::move(v)};
+                try {
+                    if (nv)
+                        std::uninitialized_copy(src->values() + offset,
+                                                src->values() + nv,
+                                                dst->values() + offset + 1);
+                } catch (...) {
+                    dst->values()[offset].~T();
+                    throw;
+                }
+            } catch (...) {
+                destroy_n(dst->values(), offset);
+                throw;
+            }
+        } catch (...) {
+            deallocate_inner(dst, n, nv + 1);
+            throw;
+        }
+        inc_nodes(src->children(), n);
+        std::uninitialized_copy(
+            src->children(), src->children() + n, dst->children());
+        return dst;
+    }
+
+    static node_t*
+    make_merged(shift_t shift, T v1, hash_t hash1, T v2, hash_t hash2)
+    {
+        if (shift < max_shift<B>) {
+            auto idx1 = hash1 & (mask<B> << shift);
+            auto idx2 = hash2 & (mask<B> << shift);
+            if (idx1 == idx2) {
+                auto merged = make_merged(
+                    shift + B, std::move(v1), hash1, std::move(v2), hash2);
+                try {
+                    return make_inner_n(1, idx1 >> shift, merged);
+                } catch (...) {
+                    delete_deep_shift(merged, shift + B);
+                    throw;
+                }
+            } else {
+                return make_inner_n(0,
+                                    idx1 >> shift,
+                                    std::move(v1),
+                                    idx2 >> shift,
+                                    std::move(v2));
+            }
+        } else {
+            return make_collision(std::move(v1), std::move(v2));
+        }
+    }
+
+    node_t* inc()
+    {
+        refs(this).inc();
+        return this;
+    }
+
+    const node_t* inc() const
+    {
+        refs(this).inc();
+        return this;
+    }
+
+    bool dec() const { return refs(this).dec(); }
+    void dec_unsafe() const { refs(this).dec_unsafe(); }
+
+    static void inc_nodes(node_t** p, count_t n)
+    {
+        for (auto i = p, e = i + n; i != e; ++i)
+            refs(*i).inc();
+    }
+
+    static void delete_values(values_t* p, count_t n)
+    {
+        assert(p);
+        deallocate_values(p, n);
+    }
+
+    static void delete_inner(node_t* p)
+    {
+        assert(p);
+        IMMER_ASSERT_TAGGED(p->kind() == kind_t::inner);
+        auto vp = p->impl.d.data.inner.values;
+        if (vp && refs(vp).dec())
+            delete_values(vp, popcount(p->datamap()));
+        deallocate_inner(p, popcount(p->nodemap()));
+    }
+
+    static void delete_collision(node_t* p)
+    {
+        assert(p);
+        IMMER_ASSERT_TAGGED(p->kind() == kind_t::collision);
+        auto n = p->collision_count();
+        deallocate_collision(p, n);
+    }
+
+    static void delete_deep(node_t* p, shift_t s)
+    {
+        if (s == max_depth<B>)
+            delete_collision(p);
+        else {
+            auto fst = p->children();
+            auto lst = fst + popcount(p->nodemap());
+            for (; fst != lst; ++fst)
+                if ((*fst)->dec())
+                    delete_deep(*fst, s + 1);
+            delete_inner(p);
+        }
+    }
+
+    static void delete_deep_shift(node_t* p, shift_t s)
+    {
+        if (s == max_shift<B>)
+            delete_collision(p);
+        else {
+            auto fst = p->children();
+            auto lst = fst + popcount(p->nodemap());
+            for (; fst != lst; ++fst)
+                if ((*fst)->dec())
+                    delete_deep_shift(*fst, s + B);
+            delete_inner(p);
+        }
+    }
+
+    static void deallocate_values(values_t* p, count_t n)
+    {
+        destroy_n((T*) &p->d.buffer, n);
+        heap::deallocate(node_t::sizeof_values_n(n), p);
+    }
+
+    static void deallocate_collision(node_t* p, count_t n)
+    {
+        destroy_n(p->collisions(), n);
+        heap::deallocate(node_t::sizeof_collision_n(n), p);
+    }
+
+    static void deallocate_inner(node_t* p, count_t n)
+    {
+        heap::deallocate(node_t::sizeof_inner_n(n), p);
+    }
+
+    static void deallocate_inner(node_t* p, count_t n, count_t nv)
+    {
+        assert(nv);
+        heap::deallocate(node_t::sizeof_values_n(nv),
+                         p->impl.d.data.inner.values);
+        heap::deallocate(node_t::sizeof_inner_n(n), p);
+    }
+};
+
+} // namespace hamts
+} // namespace detail
+} // namespace immer
diff --git a/immer/detail/iterator_facade.hpp b/immer/detail/iterator_facade.hpp
new file mode 100644
index 000000000000..ffc237943e09
--- /dev/null
+++ b/immer/detail/iterator_facade.hpp
@@ -0,0 +1,212 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+
+namespace immer {
+namespace detail {
+
+struct iterator_core_access
+{
+    template <typename T>
+    static decltype(auto) dereference(T&& x)
+    {
+        return x.dereference();
+    }
+
+    template <typename T>
+    static decltype(auto) increment(T&& x)
+    {
+        return x.increment();
+    }
+
+    template <typename T>
+    static decltype(auto) decrement(T&& x)
+    {
+        return x.decrement();
+    }
+
+    template <typename T1, typename T2>
+    static decltype(auto) equal(T1&& x1, T2&& x2)
+    {
+        return x1.equal(x2);
+    }
+
+    template <typename T, typename D>
+    static decltype(auto) advance(T&& x, D d)
+    {
+        return x.advance(d);
+    }
+
+    template <typename T1, typename T2>
+    static decltype(auto) distance_to(T1&& x1, T2&& x2)
+    {
+        return x1.distance_to(x2);
+    }
+};
+
+/*!
+ * Minimalistic reimplementation of boost::iterator_facade
+ */
+template <typename DerivedT,
+          typename IteratorCategoryT,
+          typename T,
+          typename ReferenceT      = T&,
+          typename DifferenceTypeT = std::ptrdiff_t,
+          typename PointerT        = T*>
+class iterator_facade
+{
+public:
+    using iterator_category = IteratorCategoryT;
+    using value_type        = T;
+    using difference_type   = DifferenceTypeT;
+    using pointer           = PointerT;
+    using reference         = ReferenceT;
+
+protected:
+    using access_t = iterator_core_access;
+
+    constexpr static auto is_random_access =
+        std::is_base_of<std::random_access_iterator_tag,
+                        IteratorCategoryT>::value;
+    constexpr static auto is_bidirectional =
+        std::is_base_of<std::bidirectional_iterator_tag,
+                        IteratorCategoryT>::value;
+
+    class reference_proxy
+    {
+        friend iterator_facade;
+        DerivedT iter_;
+
+        reference_proxy(DerivedT iter)
+            : iter_{std::move(iter)}
+        {}
+
+    public:
+        operator ReferenceT() const { return *iter_; }
+    };
+
+    const DerivedT& derived() const
+    {
+        static_assert(std::is_base_of<iterator_facade, DerivedT>::value,
+                      "must pass a derived thing");
+        return *static_cast<const DerivedT*>(this);
+    }
+    DerivedT& derived()
+    {
+        static_assert(std::is_base_of<iterator_facade, DerivedT>::value,
+                      "must pass a derived thing");
+        return *static_cast<DerivedT*>(this);
+    }
+
+public:
+    ReferenceT operator*() const { return access_t::dereference(derived()); }
+    PointerT operator->() const { return &access_t::dereference(derived()); }
+    reference_proxy operator[](DifferenceTypeT n) const
+    {
+        static_assert(is_random_access, "");
+        return derived() + n;
+    }
+
+    bool operator==(const DerivedT& rhs) const
+    {
+        return access_t::equal(derived(), rhs);
+    }
+    bool operator!=(const DerivedT& rhs) const
+    {
+        return !access_t::equal(derived(), rhs);
+    }
+
+    DerivedT& operator++()
+    {
+        access_t::increment(derived());
+        return derived();
+    }
+    DerivedT operator++(int)
+    {
+        auto tmp = derived();
+        access_t::increment(derived());
+        return tmp;
+    }
+
+    DerivedT& operator--()
+    {
+        static_assert(is_bidirectional || is_random_access, "");
+        access_t::decrement(derived());
+        return derived();
+    }
+    DerivedT operator--(int)
+    {
+        static_assert(is_bidirectional || is_random_access, "");
+        auto tmp = derived();
+        access_t::decrement(derived());
+        return tmp;
+    }
+
+    DerivedT& operator+=(DifferenceTypeT n)
+    {
+        access_t::advance(derived(), n);
+        return derived();
+    }
+    DerivedT& operator-=(DifferenceTypeT n)
+    {
+        access_t::advance(derived(), -n);
+        return derived();
+    }
+
+    DerivedT operator+(DifferenceTypeT n) const
+    {
+        static_assert(is_random_access, "");
+        auto tmp = derived();
+        return tmp += n;
+    }
+    friend DerivedT operator+(DifferenceTypeT n, const DerivedT& i)
+    {
+        static_assert(is_random_access, "");
+        return i + n;
+    }
+    DerivedT operator-(DifferenceTypeT n) const
+    {
+        static_assert(is_random_access, "");
+        auto tmp = derived();
+        return tmp -= n;
+    }
+    DifferenceTypeT operator-(const DerivedT& rhs) const
+    {
+        static_assert(is_random_access, "");
+        return access_t::distance_to(rhs, derived());
+    }
+
+    bool operator<(const DerivedT& rhs) const
+    {
+        static_assert(is_random_access, "");
+        return access_t::distance_to(derived(), rhs) > 0;
+    }
+    bool operator<=(const DerivedT& rhs) const
+    {
+        static_assert(is_random_access, "");
+        return access_t::distance_to(derived(), rhs) >= 0;
+    }
+    bool operator>(const DerivedT& rhs) const
+    {
+        static_assert(is_random_access, "");
+        return access_t::distance_to(derived(), rhs) < 0;
+    }
+    bool operator>=(const DerivedT& rhs) const
+    {
+        static_assert(is_random_access, "");
+        return access_t::distance_to(derived(), rhs) <= 0;
+    }
+};
+
+} // namespace detail
+} // namespace immer
diff --git a/immer/detail/rbts/bits.hpp b/immer/detail/rbts/bits.hpp
new file mode 100644
index 000000000000..58d4e3c9c93a
--- /dev/null
+++ b/immer/detail/rbts/bits.hpp
@@ -0,0 +1,33 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <cstdint>
+
+namespace immer {
+namespace detail {
+namespace rbts {
+
+using bits_t  = std::uint32_t;
+using shift_t = std::uint32_t;
+using count_t = std::uint32_t;
+using size_t  = std::size_t;
+
+template <bits_t B, typename T = count_t>
+constexpr T branches = T{1} << B;
+
+template <bits_t B, typename T = size_t>
+constexpr T mask = branches<B, T> - 1;
+
+template <bits_t B, bits_t BL>
+constexpr shift_t endshift = shift_t{BL} - shift_t{B};
+
+} // namespace rbts
+} // namespace detail
+} // namespace immer
diff --git a/immer/detail/rbts/node.hpp b/immer/detail/rbts/node.hpp
new file mode 100644
index 000000000000..e54e569636ac
--- /dev/null
+++ b/immer/detail/rbts/node.hpp
@@ -0,0 +1,932 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/detail/combine_standard_layout.hpp>
+#include <immer/detail/rbts/bits.hpp>
+#include <immer/detail/util.hpp>
+#include <immer/heap/tags.hpp>
+
+#include <cassert>
+#include <cstddef>
+#include <memory>
+#include <type_traits>
+
+namespace immer {
+namespace detail {
+namespace rbts {
+
+template <typename T, typename MemoryPolicy, bits_t B, bits_t BL>
+struct node
+{
+    static constexpr auto bits      = B;
+    static constexpr auto bits_leaf = BL;
+
+    using node_t      = node;
+    using memory      = MemoryPolicy;
+    using heap_policy = typename memory::heap;
+    using transience  = typename memory::transience_t;
+    using refs_t      = typename memory::refcount;
+    using ownee_t     = typename transience::ownee;
+    using edit_t      = typename transience::edit;
+    using value_t     = T;
+
+    static constexpr bool embed_relaxed = memory::prefer_fewer_bigger_objects;
+
+    enum class kind_t
+    {
+        leaf,
+        inner
+    };
+
+    struct relaxed_data_t
+    {
+        count_t count;
+        size_t sizes[branches<B>];
+    };
+
+    using relaxed_data_with_meta_t =
+        combine_standard_layout_t<relaxed_data_t, refs_t, ownee_t>;
+
+    using relaxed_data_no_meta_t = combine_standard_layout_t<relaxed_data_t>;
+
+    using relaxed_t = std::conditional_t<embed_relaxed,
+                                         relaxed_data_no_meta_t,
+                                         relaxed_data_with_meta_t>;
+
+    struct leaf_t
+    {
+        aligned_storage_for<T> buffer;
+    };
+
+    struct inner_t
+    {
+        relaxed_t* relaxed;
+        aligned_storage_for<node_t*> buffer;
+    };
+
+    union data_t
+    {
+        inner_t inner;
+        leaf_t leaf;
+    };
+
+    struct impl_data_t
+    {
+#if IMMER_TAGGED_NODE
+        kind_t kind;
+#endif
+        data_t data;
+    };
+
+    using impl_t = combine_standard_layout_t<impl_data_t, refs_t, ownee_t>;
+
+    impl_t impl;
+
+    // assume that we need to keep headroom space in the node when we
+    // are doing reference counting, since any node may become
+    // transient when it has only one reference
+    constexpr static bool keep_headroom = !std::is_empty<refs_t>{};
+
+    constexpr static std::size_t sizeof_packed_leaf_n(count_t count)
+    {
+        return immer_offsetof(impl_t, d.data.leaf.buffer) +
+               sizeof(leaf_t::buffer) * count;
+    }
+
+    constexpr static std::size_t sizeof_packed_inner_n(count_t count)
+    {
+        return immer_offsetof(impl_t, d.data.inner.buffer) +
+               sizeof(inner_t::buffer) * count;
+    }
+
+    constexpr static std::size_t sizeof_packed_relaxed_n(count_t count)
+    {
+        return immer_offsetof(relaxed_t, d.sizes) + sizeof(size_t) * count;
+    }
+
+    constexpr static std::size_t sizeof_packed_inner_r_n(count_t count)
+    {
+        return embed_relaxed ? sizeof_packed_inner_n(count) +
+                                   sizeof_packed_relaxed_n(count)
+                             : sizeof_packed_inner_n(count);
+    }
+
+    constexpr static std::size_t max_sizeof_leaf =
+        sizeof_packed_leaf_n(branches<BL>);
+
+    constexpr static std::size_t max_sizeof_inner =
+        sizeof_packed_inner_n(branches<B>);
+
+    constexpr static std::size_t max_sizeof_relaxed =
+        sizeof_packed_relaxed_n(branches<B>);
+
+    constexpr static std::size_t max_sizeof_inner_r =
+        sizeof_packed_inner_r_n(branches<B>);
+
+    constexpr static std::size_t sizeof_inner_n(count_t n)
+    {
+        return keep_headroom ? max_sizeof_inner : sizeof_packed_inner_n(n);
+    }
+
+    constexpr static std::size_t sizeof_inner_r_n(count_t n)
+    {
+        return keep_headroom ? max_sizeof_inner_r : sizeof_packed_inner_r_n(n);
+    }
+
+    constexpr static std::size_t sizeof_relaxed_n(count_t n)
+    {
+        return keep_headroom ? max_sizeof_relaxed : sizeof_packed_relaxed_n(n);
+    }
+
+    constexpr static std::size_t sizeof_leaf_n(count_t n)
+    {
+        return keep_headroom ? max_sizeof_leaf : sizeof_packed_leaf_n(n);
+    }
+
+    using heap =
+        typename heap_policy::template optimized<max_sizeof_inner>::type;
+
+#if IMMER_TAGGED_NODE
+    kind_t kind() const { return impl.d.kind; }
+#endif
+
+    relaxed_t* relaxed()
+    {
+        IMMER_ASSERT_TAGGED(kind() == kind_t::inner);
+        return impl.d.data.inner.relaxed;
+    }
+
+    const relaxed_t* relaxed() const
+    {
+        IMMER_ASSERT_TAGGED(kind() == kind_t::inner);
+        return impl.d.data.inner.relaxed;
+    }
+
+    node_t** inner()
+    {
+        IMMER_ASSERT_TAGGED(kind() == kind_t::inner);
+        return reinterpret_cast<node_t**>(&impl.d.data.inner.buffer);
+    }
+
+    T* leaf()
+    {
+        IMMER_ASSERT_TAGGED(kind() == kind_t::leaf);
+        return reinterpret_cast<T*>(&impl.d.data.leaf.buffer);
+    }
+
+    static refs_t& refs(const relaxed_t* x)
+    {
+        return auto_const_cast(get<refs_t>(*x));
+    }
+    static const ownee_t& ownee(const relaxed_t* x) { return get<ownee_t>(*x); }
+    static ownee_t& ownee(relaxed_t* x) { return get<ownee_t>(*x); }
+
+    static refs_t& refs(const node_t* x)
+    {
+        return auto_const_cast(get<refs_t>(x->impl));
+    }
+    static const ownee_t& ownee(const node_t* x)
+    {
+        return get<ownee_t>(x->impl);
+    }
+    static ownee_t& ownee(node_t* x) { return get<ownee_t>(x->impl); }
+
+    static node_t* make_inner_n(count_t n)
+    {
+        assert(n <= branches<B>);
+        auto m                       = heap::allocate(sizeof_inner_n(n));
+        auto p                       = new (m) node_t;
+        p->impl.d.data.inner.relaxed = nullptr;
+#if IMMER_TAGGED_NODE
+        p->impl.d.kind = node_t::kind_t::inner;
+#endif
+        return p;
+    }
+
+    static node_t* make_inner_e(edit_t e)
+    {
+        auto m                       = heap::allocate(max_sizeof_inner);
+        auto p                       = new (m) node_t;
+        ownee(p)                     = e;
+        p->impl.d.data.inner.relaxed = nullptr;
+#if IMMER_TAGGED_NODE
+        p->impl.d.kind = node_t::kind_t::inner;
+#endif
+        return p;
+    }
+
+    static node_t* make_inner_r_n(count_t n)
+    {
+        assert(n <= branches<B>);
+        auto mp = heap::allocate(sizeof_inner_r_n(n));
+        auto mr = static_cast<void*>(nullptr);
+        if (embed_relaxed) {
+            mr = reinterpret_cast<unsigned char*>(mp) + sizeof_inner_n(n);
+        } else {
+            try {
+                mr = heap::allocate(sizeof_relaxed_n(n), norefs_tag{});
+            } catch (...) {
+                heap::deallocate(sizeof_inner_r_n(n), mp);
+                throw;
+            }
+        }
+        auto p                       = new (mp) node_t;
+        auto r                       = new (mr) relaxed_t;
+        r->d.count                   = 0;
+        p->impl.d.data.inner.relaxed = r;
+#if IMMER_TAGGED_NODE
+        p->impl.d.kind = node_t::kind_t::inner;
+#endif
+        return p;
+    }
+
+    static node_t* make_inner_sr_n(count_t n, relaxed_t* r)
+    {
+        return static_if<embed_relaxed, node_t*>(
+            [&](auto) { return node_t::make_inner_r_n(n); },
+            [&](auto) {
+                auto p =
+                    new (heap::allocate(node_t::sizeof_inner_r_n(n))) node_t;
+                assert(r->d.count >= n);
+                node_t::refs(r).inc();
+                p->impl.d.data.inner.relaxed = r;
+#if IMMER_TAGGED_NODE
+                p->impl.d.kind = node_t::kind_t::inner;
+#endif
+                return p;
+            });
+    }
+
+    static node_t* make_inner_r_e(edit_t e)
+    {
+        auto mp = heap::allocate(max_sizeof_inner_r);
+        auto mr = static_cast<void*>(nullptr);
+        if (embed_relaxed) {
+            mr = reinterpret_cast<unsigned char*>(mp) + max_sizeof_inner;
+        } else {
+            try {
+                mr = heap::allocate(max_sizeof_relaxed, norefs_tag{});
+            } catch (...) {
+                heap::deallocate(max_sizeof_inner_r, mp);
+                throw;
+            }
+        }
+        auto p   = new (mp) node_t;
+        auto r   = new (mr) relaxed_t;
+        ownee(p) = e;
+        static_if<!embed_relaxed>([&](auto) { node_t::ownee(r) = e; });
+        r->d.count                   = 0;
+        p->impl.d.data.inner.relaxed = r;
+#if IMMER_TAGGED_NODE
+        p->impl.d.kind = node_t::kind_t::inner;
+#endif
+        return p;
+    }
+
+    static node_t* make_inner_sr_e(edit_t e, relaxed_t* r)
+    {
+        return static_if<embed_relaxed, node_t*>(
+            [&](auto) { return node_t::make_inner_r_e(e); },
+            [&](auto) {
+                auto p =
+                    new (heap::allocate(node_t::max_sizeof_inner_r)) node_t;
+                node_t::refs(r).inc();
+                p->impl.d.data.inner.relaxed = r;
+                node_t::ownee(p)             = e;
+#if IMMER_TAGGED_NODE
+                p->impl.d.kind = node_t::kind_t::inner;
+#endif
+                return p;
+            });
+    }
+
+    static node_t* make_leaf_n(count_t n)
+    {
+        assert(n <= branches<BL>);
+        auto p = new (heap::allocate(sizeof_leaf_n(n))) node_t;
+#if IMMER_TAGGED_NODE
+        p->impl.d.kind = node_t::kind_t::leaf;
+#endif
+        return p;
+    }
+
+    static node_t* make_leaf_e(edit_t e)
+    {
+        auto p   = new (heap::allocate(max_sizeof_leaf)) node_t;
+        ownee(p) = e;
+#if IMMER_TAGGED_NODE
+        p->impl.d.kind = node_t::kind_t::leaf;
+#endif
+        return p;
+    }
+
+    static node_t* make_inner_n(count_t n, node_t* x)
+    {
+        assert(n >= 1);
+        auto p        = make_inner_n(n);
+        p->inner()[0] = x;
+        return p;
+    }
+
+    static node_t* make_inner_n(edit_t n, node_t* x)
+    {
+        assert(n >= 1);
+        auto p        = make_inner_n(n);
+        p->inner()[0] = x;
+        return p;
+    }
+
+    static node_t* make_inner_n(count_t n, node_t* x, node_t* y)
+    {
+        assert(n >= 2);
+        auto p        = make_inner_n(n);
+        p->inner()[0] = x;
+        p->inner()[1] = y;
+        return p;
+    }
+
+    static node_t* make_inner_r_n(count_t n, node_t* x)
+    {
+        assert(n >= 1);
+        auto p        = make_inner_r_n(n);
+        auto r        = p->relaxed();
+        p->inner()[0] = x;
+        r->d.count    = 1;
+        return p;
+    }
+
+    static node_t* make_inner_r_n(count_t n, node_t* x, size_t xs)
+    {
+        assert(n >= 1);
+        auto p        = make_inner_r_n(n);
+        auto r        = p->relaxed();
+        p->inner()[0] = x;
+        r->d.sizes[0] = xs;
+        r->d.count    = 1;
+        return p;
+    }
+
+    static node_t* make_inner_r_n(count_t n, node_t* x, node_t* y)
+    {
+        assert(n >= 2);
+        auto p        = make_inner_r_n(n);
+        auto r        = p->relaxed();
+        p->inner()[0] = x;
+        p->inner()[1] = y;
+        r->d.count    = 2;
+        return p;
+    }
+
+    static node_t* make_inner_r_n(count_t n, node_t* x, size_t xs, node_t* y)
+    {
+        assert(n >= 2);
+        auto p        = make_inner_r_n(n);
+        auto r        = p->relaxed();
+        p->inner()[0] = x;
+        p->inner()[1] = y;
+        r->d.sizes[0] = xs;
+        r->d.count    = 2;
+        return p;
+    }
+
+    static node_t*
+    make_inner_r_n(count_t n, node_t* x, size_t xs, node_t* y, size_t ys)
+    {
+        assert(n >= 2);
+        auto p        = make_inner_r_n(n);
+        auto r        = p->relaxed();
+        p->inner()[0] = x;
+        p->inner()[1] = y;
+        r->d.sizes[0] = xs;
+        r->d.sizes[1] = xs + ys;
+        r->d.count    = 2;
+        return p;
+    }
+
+    static node_t* make_inner_r_n(count_t n,
+                                  node_t* x,
+                                  size_t xs,
+                                  node_t* y,
+                                  size_t ys,
+                                  node_t* z,
+                                  size_t zs)
+    {
+        assert(n >= 3);
+        auto p        = make_inner_r_n(n);
+        auto r        = p->relaxed();
+        p->inner()[0] = x;
+        p->inner()[1] = y;
+        p->inner()[2] = z;
+        r->d.sizes[0] = xs;
+        r->d.sizes[1] = xs + ys;
+        r->d.sizes[2] = xs + ys + zs;
+        r->d.count    = 3;
+        return p;
+    }
+
+    template <typename U>
+    static node_t* make_leaf_n(count_t n, U&& x)
+    {
+        assert(n >= 1);
+        auto p = make_leaf_n(n);
+        try {
+            new (p->leaf()) T{std::forward<U>(x)};
+        } catch (...) {
+            heap::deallocate(node_t::sizeof_leaf_n(n), p);
+            throw;
+        }
+        return p;
+    }
+
+    template <typename U>
+    static node_t* make_leaf_e(edit_t e, U&& x)
+    {
+        auto p = make_leaf_e(e);
+        try {
+            new (p->leaf()) T{std::forward<U>(x)};
+        } catch (...) {
+            heap::deallocate(node_t::max_sizeof_leaf, p);
+            throw;
+        }
+        return p;
+    }
+
+    static node_t* make_path(shift_t shift, node_t* node)
+    {
+        IMMER_ASSERT_TAGGED(node->kind() == kind_t::leaf);
+        if (shift == endshift<B, BL>)
+            return node;
+        else {
+            auto n = node_t::make_inner_n(1);
+            try {
+                n->inner()[0] = make_path(shift - B, node);
+            } catch (...) {
+                heap::deallocate(node_t::sizeof_inner_n(1), n);
+                throw;
+            }
+            return n;
+        }
+    }
+
+    static node_t* make_path_e(edit_t e, shift_t shift, node_t* node)
+    {
+        IMMER_ASSERT_TAGGED(node->kind() == kind_t::leaf);
+        if (shift == endshift<B, BL>)
+            return node;
+        else {
+            auto n = node_t::make_inner_e(e);
+            try {
+                n->inner()[0] = make_path_e(e, shift - B, node);
+            } catch (...) {
+                heap::deallocate(node_t::max_sizeof_inner, n);
+                throw;
+            }
+            return n;
+        }
+    }
+
+    static node_t* copy_inner(node_t* src, count_t n)
+    {
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
+        auto dst = make_inner_n(n);
+        inc_nodes(src->inner(), n);
+        std::uninitialized_copy(src->inner(), src->inner() + n, dst->inner());
+        return dst;
+    }
+
+    static node_t* copy_inner_n(count_t allocn, node_t* src, count_t n)
+    {
+        assert(allocn >= n);
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
+        auto dst = make_inner_n(allocn);
+        return do_copy_inner(dst, src, n);
+    }
+
+    static node_t* copy_inner_e(edit_t e, node_t* src, count_t n)
+    {
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
+        auto dst = make_inner_e(e);
+        return do_copy_inner(dst, src, n);
+    }
+
+    static node_t* do_copy_inner(node_t* dst, node_t* src, count_t n)
+    {
+        IMMER_ASSERT_TAGGED(dst->kind() == kind_t::inner);
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
+        auto p = src->inner();
+        inc_nodes(p, n);
+        std::uninitialized_copy(p, p + n, dst->inner());
+        return dst;
+    }
+
+    static node_t* copy_inner_r(node_t* src, count_t n)
+    {
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
+        auto dst = make_inner_r_n(n);
+        return do_copy_inner_r(dst, src, n);
+    }
+
+    static node_t* copy_inner_r_n(count_t allocn, node_t* src, count_t n)
+    {
+        assert(allocn >= n);
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
+        auto dst = make_inner_r_n(allocn);
+        return do_copy_inner_r(dst, src, n);
+    }
+
+    static node_t* copy_inner_r_e(edit_t e, node_t* src, count_t n)
+    {
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
+        auto dst = make_inner_r_e(e);
+        return do_copy_inner_r(dst, src, n);
+    }
+
+    static node_t* copy_inner_sr_e(edit_t e, node_t* src, count_t n)
+    {
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
+        auto dst = make_inner_sr_e(e, src->relaxed());
+        return do_copy_inner_sr(dst, src, n);
+    }
+
+    static node_t* do_copy_inner_r(node_t* dst, node_t* src, count_t n)
+    {
+        IMMER_ASSERT_TAGGED(dst->kind() == kind_t::inner);
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
+        auto src_r = src->relaxed();
+        auto dst_r = dst->relaxed();
+        inc_nodes(src->inner(), n);
+        std::copy(src->inner(), src->inner() + n, dst->inner());
+        std::copy(src_r->d.sizes, src_r->d.sizes + n, dst_r->d.sizes);
+        dst_r->d.count = n;
+        return dst;
+    }
+
+    static node_t* do_copy_inner_sr(node_t* dst, node_t* src, count_t n)
+    {
+        if (embed_relaxed)
+            return do_copy_inner_r(dst, src, n);
+        else {
+            inc_nodes(src->inner(), n);
+            std::copy(src->inner(), src->inner() + n, dst->inner());
+            return dst;
+        }
+    }
+
+    static node_t* copy_leaf(node_t* src, count_t n)
+    {
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::leaf);
+        auto dst = make_leaf_n(n);
+        try {
+            std::uninitialized_copy(src->leaf(), src->leaf() + n, dst->leaf());
+        } catch (...) {
+            heap::deallocate(node_t::sizeof_leaf_n(n), dst);
+            throw;
+        }
+        return dst;
+    }
+
+    static node_t* copy_leaf_e(edit_t e, node_t* src, count_t n)
+    {
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::leaf);
+        auto dst = make_leaf_e(e);
+        try {
+            std::uninitialized_copy(src->leaf(), src->leaf() + n, dst->leaf());
+        } catch (...) {
+            heap::deallocate(node_t::max_sizeof_leaf, dst);
+            throw;
+        }
+        return dst;
+    }
+
+    static node_t* copy_leaf_n(count_t allocn, node_t* src, count_t n)
+    {
+        assert(allocn >= n);
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::leaf);
+        auto dst = make_leaf_n(allocn);
+        try {
+            std::uninitialized_copy(src->leaf(), src->leaf() + n, dst->leaf());
+        } catch (...) {
+            heap::deallocate(node_t::sizeof_leaf_n(allocn), dst);
+            throw;
+        }
+        return dst;
+    }
+
+    static node_t* copy_leaf(node_t* src1, count_t n1, node_t* src2, count_t n2)
+    {
+        IMMER_ASSERT_TAGGED(src1->kind() == kind_t::leaf);
+        IMMER_ASSERT_TAGGED(src2->kind() == kind_t::leaf);
+        auto dst = make_leaf_n(n1 + n2);
+        try {
+            std::uninitialized_copy(
+                src1->leaf(), src1->leaf() + n1, dst->leaf());
+        } catch (...) {
+            heap::deallocate(node_t::sizeof_leaf_n(n1 + n2), dst);
+            throw;
+        }
+        try {
+            std::uninitialized_copy(
+                src2->leaf(), src2->leaf() + n2, dst->leaf() + n1);
+        } catch (...) {
+            destroy_n(dst->leaf(), n1);
+            heap::deallocate(node_t::sizeof_leaf_n(n1 + n2), dst);
+            throw;
+        }
+        return dst;
+    }
+
+    static node_t*
+    copy_leaf_e(edit_t e, node_t* src1, count_t n1, node_t* src2, count_t n2)
+    {
+        IMMER_ASSERT_TAGGED(src1->kind() == kind_t::leaf);
+        IMMER_ASSERT_TAGGED(src2->kind() == kind_t::leaf);
+        auto dst = make_leaf_e(e);
+        try {
+            std::uninitialized_copy(
+                src1->leaf(), src1->leaf() + n1, dst->leaf());
+        } catch (...) {
+            heap::deallocate(max_sizeof_leaf, dst);
+            throw;
+        }
+        try {
+            std::uninitialized_copy(
+                src2->leaf(), src2->leaf() + n2, dst->leaf() + n1);
+        } catch (...) {
+            destroy_n(dst->leaf(), n1);
+            heap::deallocate(max_sizeof_leaf, dst);
+            throw;
+        }
+        return dst;
+    }
+
+    static node_t* copy_leaf_e(edit_t e, node_t* src, count_t idx, count_t last)
+    {
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::leaf);
+        auto dst = make_leaf_e(e);
+        try {
+            std::uninitialized_copy(
+                src->leaf() + idx, src->leaf() + last, dst->leaf());
+        } catch (...) {
+            heap::deallocate(max_sizeof_leaf, dst);
+            throw;
+        }
+        return dst;
+    }
+
+    static node_t* copy_leaf(node_t* src, count_t idx, count_t last)
+    {
+        IMMER_ASSERT_TAGGED(src->kind() == kind_t::leaf);
+        auto dst = make_leaf_n(last - idx);
+        try {
+            std::uninitialized_copy(
+                src->leaf() + idx, src->leaf() + last, dst->leaf());
+        } catch (...) {
+            heap::deallocate(node_t::sizeof_leaf_n(last - idx), dst);
+            throw;
+        }
+        return dst;
+    }
+
+    template <typename U>
+    static node_t* copy_leaf_emplace(node_t* src, count_t n, U&& x)
+    {
+        auto dst = copy_leaf_n(n + 1, src, n);
+        try {
+            new (dst->leaf() + n) T{std::forward<U>(x)};
+        } catch (...) {
+            destroy_n(dst->leaf(), n);
+            heap::deallocate(node_t::sizeof_leaf_n(n + 1), dst);
+            throw;
+        }
+        return dst;
+    }
+
+    static void delete_inner(node_t* p, count_t n)
+    {
+        IMMER_ASSERT_TAGGED(p->kind() == kind_t::inner);
+        assert(!p->relaxed());
+        heap::deallocate(ownee(p).owned() ? node_t::max_sizeof_inner
+                                          : node_t::sizeof_inner_n(n),
+                         p);
+    }
+
+    static void delete_inner_e(node_t* p)
+    {
+        IMMER_ASSERT_TAGGED(p->kind() == kind_t::inner);
+        assert(!p->relaxed());
+        heap::deallocate(node_t::max_sizeof_inner, p);
+    }
+
+    static void delete_inner_any(node_t* p, count_t n)
+    {
+        if (p->relaxed())
+            delete_inner_r(p, n);
+        else
+            delete_inner(p, n);
+    }
+
+    static void delete_inner_r(node_t* p, count_t n)
+    {
+        IMMER_ASSERT_TAGGED(p->kind() == kind_t::inner);
+        auto r = p->relaxed();
+        assert(r);
+        static_if<!embed_relaxed>([&](auto) {
+            if (node_t::refs(r).dec())
+                heap::deallocate(node_t::ownee(r).owned()
+                                     ? node_t::max_sizeof_relaxed
+                                     : node_t::sizeof_relaxed_n(n),
+                                 r);
+        });
+        heap::deallocate(ownee(p).owned() ? node_t::max_sizeof_inner_r
+                                          : node_t::sizeof_inner_r_n(n),
+                         p);
+    }
+
+    static void delete_inner_r_e(node_t* p)
+    {
+        IMMER_ASSERT_TAGGED(p->kind() == kind_t::inner);
+        auto r = p->relaxed();
+        assert(r);
+        static_if<!embed_relaxed>([&](auto) {
+            if (node_t::refs(r).dec())
+                heap::deallocate(node_t::max_sizeof_relaxed, r);
+        });
+        heap::deallocate(node_t::max_sizeof_inner_r, p);
+    }
+
+    static void delete_leaf(node_t* p, count_t n)
+    {
+        IMMER_ASSERT_TAGGED(p->kind() == kind_t::leaf);
+        destroy_n(p->leaf(), n);
+        heap::deallocate(ownee(p).owned() ? node_t::max_sizeof_leaf
+                                          : node_t::sizeof_leaf_n(n),
+                         p);
+    }
+
+    bool can_mutate(edit_t e) const
+    {
+        return refs(this).unique() || ownee(this).can_mutate(e);
+    }
+
+    bool can_relax() const { return !embed_relaxed || relaxed(); }
+
+    relaxed_t* ensure_mutable_relaxed(edit_t e)
+    {
+        auto src_r = relaxed();
+        return static_if<embed_relaxed, relaxed_t*>(
+            [&](auto) { return src_r; },
+            [&](auto) {
+                if (node_t::refs(src_r).unique() ||
+                    node_t::ownee(src_r).can_mutate(e))
+                    return src_r;
+                else {
+                    if (src_r)
+                        node_t::refs(src_r).dec_unsafe();
+                    auto dst_r = impl.d.data.inner.relaxed =
+                        new (heap::allocate(max_sizeof_relaxed)) relaxed_t;
+                    node_t::ownee(dst_r) = e;
+                    return dst_r;
+                }
+            });
+    }
+
+    relaxed_t* ensure_mutable_relaxed_e(edit_t e, edit_t ec)
+    {
+        auto src_r = relaxed();
+        return static_if<embed_relaxed, relaxed_t*>(
+            [&](auto) { return src_r; },
+            [&](auto) {
+                if (src_r && (node_t::refs(src_r).unique() ||
+                              node_t::ownee(src_r).can_mutate(e))) {
+                    node_t::ownee(src_r) = ec;
+                    return src_r;
+                } else {
+                    if (src_r)
+                        node_t::refs(src_r).dec_unsafe();
+                    auto dst_r = impl.d.data.inner.relaxed =
+                        new (heap::allocate(max_sizeof_relaxed)) relaxed_t;
+                    node_t::ownee(dst_r) = ec;
+                    return dst_r;
+                }
+            });
+    }
+
+    relaxed_t* ensure_mutable_relaxed_n(edit_t e, count_t n)
+    {
+        auto src_r = relaxed();
+        return static_if<embed_relaxed, relaxed_t*>(
+            [&](auto) { return src_r; },
+            [&](auto) {
+                if (node_t::refs(src_r).unique() ||
+                    node_t::ownee(src_r).can_mutate(e))
+                    return src_r;
+                else {
+                    if (src_r)
+                        node_t::refs(src_r).dec_unsafe();
+                    auto dst_r =
+                        new (heap::allocate(max_sizeof_relaxed)) relaxed_t;
+                    std::copy(
+                        src_r->d.sizes, src_r->d.sizes + n, dst_r->d.sizes);
+                    node_t::ownee(dst_r)             = e;
+                    return impl.d.data.inner.relaxed = dst_r;
+                }
+            });
+    }
+
+    node_t* inc()
+    {
+        refs(this).inc();
+        return this;
+    }
+
+    const node_t* inc() const
+    {
+        refs(this).inc();
+        return this;
+    }
+
+    bool dec() const { return refs(this).dec(); }
+    void dec_unsafe() const { refs(this).dec_unsafe(); }
+
+    static void inc_nodes(node_t** p, count_t n)
+    {
+        for (auto i = p, e = i + n; i != e; ++i)
+            refs(*i).inc();
+    }
+
+#if IMMER_TAGGED_NODE
+    shift_t compute_shift()
+    {
+        if (kind() == kind_t::leaf)
+            return endshift<B, BL>;
+        else
+            return B + inner()[0]->compute_shift();
+    }
+#endif
+
+    bool check(shift_t shift, size_t size)
+    {
+#if IMMER_DEBUG_DEEP_CHECK
+        assert(size > 0);
+        if (shift == endshift<B, BL>) {
+            IMMER_ASSERT_TAGGED(kind() == kind_t::leaf);
+            assert(size <= branches<BL>);
+        } else if (auto r = relaxed()) {
+            auto count = r->d.count;
+            assert(count > 0);
+            assert(count <= branches<B>);
+            if (r->d.sizes[count - 1] != size) {
+                IMMER_TRACE_F("check");
+                IMMER_TRACE_E(r->d.sizes[count - 1]);
+                IMMER_TRACE_E(size);
+            }
+            assert(r->d.sizes[count - 1] == size);
+            for (auto i = 1; i < count; ++i)
+                assert(r->d.sizes[i - 1] < r->d.sizes[i]);
+            auto last_size = size_t{};
+            for (auto i = 0; i < count; ++i) {
+                assert(inner()[i]->check(shift - B, r->d.sizes[i] - last_size));
+                last_size = r->d.sizes[i];
+            }
+        } else {
+            assert(size <= branches<B> << shift);
+            auto count =
+                (size >> shift) + (size - ((size >> shift) << shift) > 0);
+            assert(count <= branches<B>);
+            if (count) {
+                for (auto i = 1; i < count - 1; ++i)
+                    assert(inner()[i]->check(shift - B, 1 << shift));
+                assert(inner()[count - 1]->check(
+                    shift - B, size - ((count - 1) << shift)));
+            }
+        }
+#endif // IMMER_DEBUG_DEEP_CHECK
+        return true;
+    }
+};
+
+template <typename T, typename MP, bits_t B>
+constexpr bits_t derive_bits_leaf_aux()
+{
+    using node_t               = node<T, MP, B, B>;
+    constexpr auto sizeof_elem = sizeof(T);
+    constexpr auto space =
+        node_t::max_sizeof_inner - node_t::sizeof_packed_leaf_n(0);
+    constexpr auto full_elems = space / sizeof_elem;
+    constexpr auto BL         = log2(full_elems);
+    return BL;
+}
+
+template <typename T, typename MP, bits_t B>
+constexpr bits_t derive_bits_leaf = derive_bits_leaf_aux<T, MP, B>();
+
+} // namespace rbts
+} // namespace detail
+} // namespace immer
diff --git a/immer/detail/rbts/operations.hpp b/immer/detail/rbts/operations.hpp
new file mode 100644
index 000000000000..ff703e892b42
--- /dev/null
+++ b/immer/detail/rbts/operations.hpp
@@ -0,0 +1,2461 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <algorithm>
+#include <memory>
+#include <numeric>
+#include <utility>
+
+#include <immer/config.hpp>
+#include <immer/detail/rbts/position.hpp>
+#include <immer/detail/rbts/visitor.hpp>
+#include <immer/detail/util.hpp>
+#include <immer/heap/tags.hpp>
+
+namespace immer {
+namespace detail {
+namespace rbts {
+
+template <typename T>
+struct array_for_visitor : visitor_base<array_for_visitor<T>>
+{
+    using this_t = array_for_visitor;
+
+    template <typename PosT>
+    static T* visit_inner(PosT&& pos, size_t idx)
+    {
+        return pos.descend(this_t{}, idx);
+    }
+
+    template <typename PosT>
+    static T* visit_leaf(PosT&& pos, size_t)
+    {
+        return pos.node()->leaf();
+    }
+};
+
+template <typename T>
+struct region_for_visitor : visitor_base<region_for_visitor<T>>
+{
+    using this_t   = region_for_visitor;
+    using result_t = std::tuple<T*, size_t, size_t>;
+
+    template <typename PosT>
+    static result_t visit_inner(PosT&& pos, size_t idx)
+    {
+        return pos.towards(this_t{}, idx);
+    }
+
+    template <typename PosT>
+    static result_t visit_leaf(PosT&& pos, size_t idx)
+    {
+        return std::make_tuple(pos.node()->leaf(), pos.index(idx), pos.count());
+    }
+};
+
+template <typename T>
+struct get_visitor : visitor_base<get_visitor<T>>
+{
+    using this_t = get_visitor;
+
+    template <typename PosT>
+    static const T& visit_inner(PosT&& pos, size_t idx)
+    {
+        return pos.descend(this_t{}, idx);
+    }
+
+    template <typename PosT>
+    static const T& visit_leaf(PosT&& pos, size_t idx)
+    {
+        return pos.node()->leaf()[pos.index(idx)];
+    }
+};
+
+struct for_each_chunk_visitor : visitor_base<for_each_chunk_visitor>
+{
+    using this_t = for_each_chunk_visitor;
+
+    template <typename Pos, typename Fn>
+    static void visit_inner(Pos&& pos, Fn&& fn)
+    {
+        pos.each(this_t{}, fn);
+    }
+
+    template <typename Pos, typename Fn>
+    static void visit_leaf(Pos&& pos, Fn&& fn)
+    {
+        auto data = pos.node()->leaf();
+        fn(data, data + pos.count());
+    }
+};
+
+struct for_each_chunk_p_visitor : visitor_base<for_each_chunk_p_visitor>
+{
+    using this_t = for_each_chunk_p_visitor;
+
+    template <typename Pos, typename Fn>
+    static bool visit_inner(Pos&& pos, Fn&& fn)
+    {
+        return pos.each_pred(this_t{}, fn);
+    }
+
+    template <typename Pos, typename Fn>
+    static bool visit_leaf(Pos&& pos, Fn&& fn)
+    {
+        auto data = pos.node()->leaf();
+        return fn(data, data + pos.count());
+    }
+};
+
+struct for_each_chunk_left_visitor : visitor_base<for_each_chunk_left_visitor>
+{
+    using this_t = for_each_chunk_left_visitor;
+
+    template <typename Pos, typename Fn>
+    static void visit_inner(Pos&& pos, size_t last, Fn&& fn)
+    {
+        auto l = pos.index(last);
+        pos.each_left(for_each_chunk_visitor{}, l, fn);
+        pos.towards_oh(this_t{}, last, l, fn);
+    }
+
+    template <typename Pos, typename Fn>
+    static void visit_leaf(Pos&& pos, size_t last, Fn&& fn)
+    {
+        auto data = pos.node()->leaf();
+        auto l    = pos.index(last);
+        fn(data, data + l + 1);
+    }
+};
+
+struct for_each_chunk_right_visitor : visitor_base<for_each_chunk_right_visitor>
+{
+    using this_t = for_each_chunk_right_visitor;
+
+    template <typename Pos, typename Fn>
+    static void visit_inner(Pos&& pos, size_t first, Fn&& fn)
+    {
+        auto f = pos.index(first);
+        pos.towards_oh(this_t{}, first, f, fn);
+        pos.each_right(for_each_chunk_visitor{}, f + 1, fn);
+    }
+
+    template <typename Pos, typename Fn>
+    static void visit_leaf(Pos&& pos, size_t first, Fn&& fn)
+    {
+        auto data = pos.node()->leaf();
+        auto f    = pos.index(first);
+        fn(data + f, data + pos.count());
+    }
+};
+
+struct for_each_chunk_i_visitor : visitor_base<for_each_chunk_i_visitor>
+{
+    using this_t = for_each_chunk_i_visitor;
+
+    template <typename Pos, typename Fn>
+    static void visit_relaxed(Pos&& pos, size_t first, size_t last, Fn&& fn)
+    {
+        // we are going towards *two* indices, so we need to do the
+        // relaxed as a special case to correct the second index
+        if (first < last) {
+            auto f = pos.index(first);
+            auto l = pos.index(last - 1);
+            if (f == l) {
+                auto sbh = pos.size_before(f);
+                pos.towards_oh_sbh(this_t{}, first, f, sbh, last - sbh, fn);
+            } else {
+                assert(f < l);
+                pos.towards_oh(for_each_chunk_right_visitor{}, first, f, fn);
+                pos.each_i(for_each_chunk_visitor{}, f + 1, l, fn);
+                pos.towards_oh(for_each_chunk_left_visitor{}, last - 1, l, fn);
+            }
+        }
+    }
+
+    template <typename Pos, typename Fn>
+    static void visit_regular(Pos&& pos, size_t first, size_t last, Fn&& fn)
+    {
+        if (first < last) {
+            auto f = pos.index(first);
+            auto l = pos.index(last - 1);
+            if (f == l)
+                pos.towards_oh(this_t{}, first, f, last, fn);
+            else {
+                assert(f < l);
+                pos.towards_oh(for_each_chunk_right_visitor{}, first, f, fn);
+                pos.each_i(for_each_chunk_visitor{}, f + 1, l, fn);
+                pos.towards_oh(for_each_chunk_left_visitor{}, last - 1, l, fn);
+            }
+        }
+    }
+
+    template <typename Pos, typename Fn>
+    static void visit_leaf(Pos&& pos, size_t first, size_t last, Fn&& fn)
+    {
+        auto data = pos.node()->leaf();
+        if (first < last) {
+            auto f = pos.index(first);
+            auto l = pos.index(last - 1);
+            fn(data + f, data + l + 1);
+        }
+    }
+};
+
+struct for_each_chunk_p_left_visitor
+    : visitor_base<for_each_chunk_p_left_visitor>
+{
+    using this_t = for_each_chunk_p_left_visitor;
+
+    template <typename Pos, typename Fn>
+    static bool visit_inner(Pos&& pos, size_t last, Fn&& fn)
+    {
+        auto l = pos.index(last);
+        return pos.each_pred_left(for_each_chunk_p_visitor{}, l, fn) &&
+               pos.towards_oh(this_t{}, last, l, fn);
+    }
+
+    template <typename Pos, typename Fn>
+    static bool visit_leaf(Pos&& pos, size_t last, Fn&& fn)
+    {
+        auto data = pos.node()->leaf();
+        auto l    = pos.index(last);
+        return fn(data, data + l + 1);
+    }
+};
+
+struct for_each_chunk_p_right_visitor
+    : visitor_base<for_each_chunk_p_right_visitor>
+{
+    using this_t = for_each_chunk_p_right_visitor;
+
+    template <typename Pos, typename Fn>
+    static bool visit_inner(Pos&& pos, size_t first, Fn&& fn)
+    {
+        auto f = pos.index(first);
+        return pos.towards_oh(this_t{}, first, f, fn) &&
+               pos.each_pred_right(for_each_chunk_p_visitor{}, f + 1, fn);
+    }
+
+    template <typename Pos, typename Fn>
+    static bool visit_leaf(Pos&& pos, size_t first, Fn&& fn)
+    {
+        auto data = pos.node()->leaf();
+        auto f    = pos.index(first);
+        return fn(data + f, data + pos.count());
+    }
+};
+
+struct for_each_chunk_p_i_visitor : visitor_base<for_each_chunk_p_i_visitor>
+{
+    using this_t = for_each_chunk_p_i_visitor;
+
+    template <typename Pos, typename Fn>
+    static bool visit_relaxed(Pos&& pos, size_t first, size_t last, Fn&& fn)
+    {
+        // we are going towards *two* indices, so we need to do the
+        // relaxed as a special case to correct the second index
+        if (first < last) {
+            auto f = pos.index(first);
+            auto l = pos.index(last - 1);
+            if (f == l) {
+                auto sbh = pos.size_before(f);
+                return pos.towards_oh_sbh(
+                    this_t{}, first, f, sbh, last - sbh, fn);
+            } else {
+                assert(f < l);
+                return pos.towards_oh(
+                           for_each_chunk_p_right_visitor{}, first, f, fn) &&
+                       pos.each_pred_i(
+                           for_each_chunk_p_visitor{}, f + 1, l, fn) &&
+                       pos.towards_oh(
+                           for_each_chunk_p_left_visitor{}, last - 1, l, fn);
+            }
+        }
+        return true;
+    }
+
+    template <typename Pos, typename Fn>
+    static bool visit_regular(Pos&& pos, size_t first, size_t last, Fn&& fn)
+    {
+        if (first < last) {
+            auto f = pos.index(first);
+            auto l = pos.index(last - 1);
+            if (f == l)
+                return pos.towards_oh(this_t{}, first, f, last, fn);
+            else {
+                assert(f < l);
+                return pos.towards_oh(
+                           for_each_chunk_p_right_visitor{}, first, f, fn) &&
+                       pos.each_pred_i(
+                           for_each_chunk_p_visitor{}, f + 1, l, fn) &&
+                       pos.towards_oh(
+                           for_each_chunk_p_left_visitor{}, last - 1, l, fn);
+            }
+        }
+        return true;
+    }
+
+    template <typename Pos, typename Fn>
+    static bool visit_leaf(Pos&& pos, size_t first, size_t last, Fn&& fn)
+    {
+        auto data = pos.node()->leaf();
+        if (first < last) {
+            auto f = pos.index(first);
+            auto l = pos.index(last - 1);
+            return fn(data + f, data + l + 1);
+        }
+        return true;
+    }
+};
+
+struct equals_visitor : visitor_base<equals_visitor>
+{
+    using this_t = equals_visitor;
+
+    struct this_aux_t : visitor_base<this_aux_t>
+    {
+        template <typename PosR, typename PosL, typename Iter>
+        static bool visit_inner(
+            PosR&& posr, count_t i, PosL&& posl, Iter&& first, size_t idx)
+        {
+            return posl.nth_sub(i, this_t{}, posr, first, idx);
+        }
+
+        template <typename PosR, typename PosL, typename Iter>
+        static bool visit_leaf(
+            PosR&& posr, count_t i, PosL&& posl, Iter&& first, size_t idx)
+        {
+            return posl.nth_sub_leaf(i, this_t{}, posr, first, idx);
+        }
+    };
+
+    struct rrb : visitor_base<rrb>
+    {
+        template <typename PosR, typename Iter, typename Node>
+        static bool visit_node(PosR&& posr,
+                               Iter&& first,
+                               Node* rootl,
+                               shift_t shiftl,
+                               size_t sizel)
+        {
+            assert(shiftl <= posr.shift());
+            return shiftl == posr.shift()
+                       ? visit_maybe_relaxed_sub(rootl,
+                                                 shiftl,
+                                                 sizel,
+                                                 this_t{},
+                                                 posr,
+                                                 first,
+                                                 size_t{})
+                       : posr.first_sub_inner(
+                             rrb{}, first, rootl, shiftl, sizel);
+        }
+    };
+
+    template <typename Iter>
+    static auto equal_chunk_p(Iter&& iter)
+    {
+        return [iter](auto f, auto e) mutable {
+            if (f == &*iter) {
+                iter += e - f;
+                return true;
+            }
+            for (; f != e; ++f, ++iter)
+                if (*f != *iter)
+                    return false;
+            return true;
+        };
+    }
+
+    template <typename PosL, typename PosR, typename Iter>
+    static bool
+    visit_relaxed(PosL&& posl, PosR&& posr, Iter&& first, size_t idx)
+    {
+        auto nl = posl.node();
+        auto nr = posr.node();
+        if (nl == nr)
+            return true;
+        auto cl = posl.count();
+        auto cr = posr.count();
+        assert(cr > 0);
+        auto sbr = size_t{};
+        auto i   = count_t{};
+        auto j   = count_t{};
+        for (; i < cl; ++i) {
+            auto sbl = posl.size_before(i);
+            for (; j + 1 < cr && (sbr = posr.size_before(j)) < sbl; ++j)
+                ;
+            auto res =
+                sbl == sbr
+                    ? posr.nth_sub(j, this_aux_t{}, i, posl, first, idx + sbl)
+                    : posl.nth_sub(i,
+                                   for_each_chunk_p_visitor{},
+                                   this_t::equal_chunk_p(first + (idx + sbl)));
+            if (!res)
+                return false;
+        }
+        return true;
+    }
+
+    // Mixed regular/relaxed pair: a relaxed right-hand side forces the
+    // general size-table-aligning comparison above.
+    template <typename PosL, typename PosR, typename Iter>
+    static std::enable_if_t<is_relaxed_v<PosR>, bool>
+    visit_regular(PosL&& posl, PosR&& posr, Iter&& first, size_t idx)
+    {
+        return this_t::visit_relaxed(posl, posr, first, idx);
+    }
+
+    // Both sides regular: children line up index-by-index, so compare
+    // structurally, recursing from the side that has more children so the
+    // shorter node is the plain-pointer argument.  `first`/`idx` are unused
+    // here -- NOTE(review): this presumably relies on regular subtrees at
+    // the same position spanning identical element ranges; confirm against
+    // the callers of this visitor.
+    template <typename PosL, typename PosR, typename Iter>
+    static std::enable_if_t<!is_relaxed_v<PosR>, bool>
+    visit_regular(PosL&& posl, PosR&& posr, Iter&& first, size_t idx)
+    {
+        return posl.count() >= posr.count()
+                   ? this_t::visit_regular(posl, posr.node())
+                   : this_t::visit_regular(posr, posl.node());
+    }
+
+    // Leaf vs. leaf: compare the overlapping prefix of the two leaves
+    // directly, then compare whatever the left leaf has beyond the right
+    // leaf's count against the flat iterator starting at idx + mp.
+    template <typename PosL, typename PosR, typename Iter>
+    static bool visit_leaf(PosL&& posl, PosR&& posr, Iter&& first, size_t idx)
+    {
+        // Shared leaf: equal by identity.
+        if (posl.node() == posr.node())
+            return true;
+        auto cl = posl.count();
+        auto cr = posr.count();
+        auto mp = std::min(cl, cr);
+        return std::equal(posl.node()->leaf(),
+                          posl.node()->leaf() + mp,
+                          posr.node()->leaf()) &&
+               std::equal(posl.node()->leaf() + mp,
+                          posl.node()->leaf() + posl.count(),
+                          first + (idx + mp));
+    }
+
+    // Structural comparison of an inner node against a raw node pointer:
+    // equal by identity, or by zipping this visitor over the paired
+    // children of both nodes.
+    template <typename Pos, typename NodeT>
+    static bool visit_regular(Pos&& pos, NodeT* other)
+    {
+        auto node = pos.node();
+        return node == other || pos.each_pred_zip(this_t{}, other);
+    }
+
+    // Structural comparison of a leaf against a raw leaf pointer: equal by
+    // identity or by element-wise comparison over this leaf's count.
+    template <typename Pos, typename NodeT>
+    static bool visit_leaf(Pos&& pos, NodeT* other)
+    {
+        auto node = pos.node();
+        return node == other || std::equal(node->leaf(),
+                                           node->leaf() + pos.count(),
+                                           other->leaf());
+    }
+};
+
+// Functional (persistent) update: applies `fn` to the element at `idx`,
+// path-copying every node from the visited root down to the affected leaf
+// and returning the new node at each level.  Freshly allocated copies are
+// deleted if an exception propagates, giving the strong guarantee.
+template <typename NodeT>
+struct update_visitor : visitor_base<update_visitor<NodeT>>
+{
+    using node_t = NodeT;
+    using this_t = update_visitor;
+
+    // Relaxed inner node: allocate a copy sharing the size table, recurse
+    // into the child holding `idx`, then splice the updated child in.
+    template <typename Pos, typename Fn>
+    static node_t* visit_relaxed(Pos&& pos, size_t idx, Fn&& fn)
+    {
+        auto offset = pos.index(idx);
+        auto count  = pos.count();
+        auto node   = node_t::make_inner_sr_n(count, pos.relaxed());
+        try {
+            auto child = pos.towards_oh(this_t{}, idx, offset, fn);
+            node_t::do_copy_inner_sr(node, pos.node(), count);
+            // The copy took a reference on every child; release the one
+            // being replaced before installing the updated child.
+            node->inner()[offset]->dec_unsafe();
+            node->inner()[offset] = child;
+            return node;
+        } catch (...) {
+            node_t::delete_inner_r(node, count);
+            throw;
+        }
+    }
+
+    // Regular inner node: same scheme as visit_relaxed, but the recursion
+    // also needs the child count to locate the boundary child.
+    template <typename Pos, typename Fn>
+    static node_t* visit_regular(Pos&& pos, size_t idx, Fn&& fn)
+    {
+        auto offset = pos.index(idx);
+        auto count  = pos.count();
+        auto node   = node_t::make_inner_n(count);
+        try {
+            auto child = pos.towards_oh_ch(this_t{}, idx, offset, count, fn);
+            node_t::do_copy_inner(node, pos.node(), count);
+            node->inner()[offset]->dec_unsafe();
+            node->inner()[offset] = child;
+            return node;
+        } catch (...) {
+            node_t::delete_inner(node, count);
+            throw;
+        }
+    }
+
+    // Leaf: copy the leaf, then move the old value through `fn` into the
+    // copy.  On throw the partially built leaf is deleted.
+    template <typename Pos, typename Fn>
+    static node_t* visit_leaf(Pos&& pos, size_t idx, Fn&& fn)
+    {
+        auto offset = pos.index(idx);
+        auto node   = node_t::copy_leaf(pos.node(), pos.count());
+        try {
+            node->leaf()[offset] =
+                std::forward<Fn>(fn)(std::move(node->leaf()[offset]));
+            return node;
+        } catch (...) {
+            node_t::delete_leaf(node, pos.count());
+            throw;
+        }
+    }
+};
+
+// Reference-count release visitor: drops one reference from each visited
+// node and, when `dec()` reports the last reference is gone, recursively
+// releases the children and deallocates the node itself.
+struct dec_visitor : visitor_base<dec_visitor>
+{
+    using this_t = dec_visitor;
+
+    // Relaxed inner node: release children, then free node + size table.
+    template <typename Pos>
+    static void visit_relaxed(Pos&& p)
+    {
+        using node_t = node_type<Pos>;
+        auto node    = p.node();
+        if (node->dec()) {
+            p.each(this_t{});
+            node_t::delete_inner_r(node, p.count());
+        }
+    }
+
+    // Regular inner node: release children, then free the node.
+    template <typename Pos>
+    static void visit_regular(Pos&& p)
+    {
+        using node_t = node_type<Pos>;
+        auto node    = p.node();
+        if (node->dec()) {
+            p.each(this_t{});
+            node_t::delete_inner(node, p.count());
+        }
+    }
+
+    // Leaf: no children to release; destroy the stored values and free.
+    template <typename Pos>
+    static void visit_leaf(Pos&& p)
+    {
+        using node_t = node_type<Pos>;
+        auto node    = p.node();
+        if (node->dec()) {
+            node_t::delete_leaf(node, p.count());
+        }
+    }
+};
+};
+
+// Releases one reference to a leaf node holding `n` elements.
+template <typename NodeT>
+void dec_leaf(NodeT* node, count_t n)
+{
+    make_leaf_sub_pos(node, n).visit(dec_visitor{});
+}
+
+// Releases one reference to an inner node that may be regular or relaxed;
+// visit_maybe_relaxed_sub dispatches on the node's actual kind.
+template <typename NodeT>
+void dec_inner(NodeT* node, shift_t shift, size_t size)
+{
+    visit_maybe_relaxed_sub(node, shift, size, dec_visitor());
+}
+
+// Releases one reference to a node known to be relaxed (has a size table).
+template <typename NodeT>
+void dec_relaxed(NodeT* node, shift_t shift)
+{
+    make_relaxed_pos(node, shift, node->relaxed()).visit(dec_visitor());
+}
+
+// Releases one reference to a regular node spanning `size` elements.
+template <typename NodeT>
+void dec_regular(NodeT* node, shift_t shift, size_t size)
+{
+    make_regular_pos(node, shift, size).visit(dec_visitor());
+}
+
+// Releases one reference to a regular node with no elements below it.
+template <typename NodeT>
+void dec_empty_regular(NodeT* node)
+{
+    make_empty_regular_pos(node).visit(dec_visitor());
+}
+
+// Transient mutable access: returns a mutable reference to the element at
+// `idx`.  Nodes uniquely owned by edit token `e` are descended into in
+// place; otherwise the node is copied, `*location` (the parent's slot for
+// this node) is repointed at the copy, and the old subtree reference is
+// released.  On exception the partially built copy is released instead.
+template <typename NodeT>
+struct get_mut_visitor : visitor_base<get_mut_visitor<NodeT>>
+{
+    using node_t  = NodeT;
+    using this_t  = get_mut_visitor;
+    using value_t = typename NodeT::value_t;
+    using edit_t  = typename NodeT::edit_t;
+
+    // Relaxed inner node.
+    template <typename Pos>
+    static value_t&
+    visit_relaxed(Pos&& pos, size_t idx, edit_t e, node_t** location)
+    {
+        auto offset = pos.index(idx);
+        auto count  = pos.count();
+        auto node   = pos.node();
+        if (node->can_mutate(e)) {
+            // Mutable in place: recurse, passing our own child slot down.
+            return pos.towards_oh(
+                this_t{}, idx, offset, e, &node->inner()[offset]);
+        } else {
+            auto new_node = node_t::copy_inner_sr_e(e, node, count);
+            try {
+                auto& res = pos.towards_oh(
+                    this_t{}, idx, offset, e, &new_node->inner()[offset]);
+                // Success: drop the old subtree, publish the copy.
+                pos.visit(dec_visitor{});
+                *location = new_node;
+                return res;
+            } catch (...) {
+                dec_relaxed(new_node, pos.shift());
+                throw;
+            }
+        }
+    }
+
+    // Regular inner node; same scheme, recursion also takes the count.
+    template <typename Pos>
+    static value_t&
+    visit_regular(Pos&& pos, size_t idx, edit_t e, node_t** location)
+    {
+        assert(pos.node() == *location);
+        auto offset = pos.index(idx);
+        auto count  = pos.count();
+        auto node   = pos.node();
+        if (node->can_mutate(e)) {
+            return pos.towards_oh_ch(
+                this_t{}, idx, offset, count, e, &node->inner()[offset]);
+        } else {
+            auto new_node = node_t::copy_inner_e(e, node, count);
+            try {
+                auto& res = pos.towards_oh_ch(this_t{},
+                                              idx,
+                                              offset,
+                                              count,
+                                              e,
+                                              &new_node->inner()[offset]);
+                pos.visit(dec_visitor{});
+                *location = new_node;
+                return res;
+            } catch (...) {
+                dec_regular(new_node, pos.shift(), pos.size());
+                throw;
+            }
+        }
+    }
+
+    // Leaf: either hand out a slot of the mutable leaf, or copy the leaf,
+    // publish it through *location, and hand out the copy's slot.
+    template <typename Pos>
+    static value_t&
+    visit_leaf(Pos&& pos, size_t idx, edit_t e, node_t** location)
+    {
+        assert(pos.node() == *location);
+        auto node = pos.node();
+        if (node->can_mutate(e)) {
+            return node->leaf()[pos.index(idx)];
+        } else {
+            auto new_node = node_t::copy_leaf_e(e, pos.node(), pos.count());
+            pos.visit(dec_visitor{});
+            *location = new_node;
+            return new_node->leaf()[pos.index(idx)];
+        }
+    }
+};
+
+// Transient push-tail: hangs the `tail` leaf (holding `ts` elements) off
+// the rightmost spine of the tree, mutating nodes in place when they are
+// uniquely owned by edit `e`.  Returns the (possibly new) node for each
+// level, or nullptr when this subtree is full and the caller must grow the
+// tree.  `Mutating == false` marks levels reached through a shared node,
+// below which nothing may be mutated or dec'd.
+template <typename NodeT, bool Mutating = true>
+struct push_tail_mut_visitor
+    : visitor_base<push_tail_mut_visitor<NodeT, Mutating>>
+{
+    static constexpr auto B  = NodeT::bits;
+    static constexpr auto BL = NodeT::bits_leaf;
+
+    using this_t        = push_tail_mut_visitor;
+    using this_no_mut_t = push_tail_mut_visitor<NodeT, false>;
+    using node_t        = NodeT;
+    using edit_t        = typename NodeT::edit_t;
+
+    template <typename Pos>
+    static node_t* visit_relaxed(Pos&& pos, edit_t e, node_t* tail, count_t ts)
+    {
+        auto node     = pos.node();
+        auto level    = pos.shift();
+        auto idx      = pos.count() - 1;
+        auto children = pos.size(idx);
+        // Descend into the last child unless it is already full (or we are
+        // just above the leaves), in which case start a new child slot.
+        auto new_idx =
+            children == size_t{1} << level || level == BL ? idx + 1 : idx;
+        auto new_child = static_cast<node_t*>(nullptr);
+        auto mutate    = Mutating && node->can_mutate(e);
+
+        if (new_idx >= branches<B>)
+            return nullptr;
+        else if (idx == new_idx) {
+            new_child =
+                mutate ? pos.last_oh_csh(this_t{}, idx, children, e, tail, ts)
+                       : pos.last_oh_csh(
+                             this_no_mut_t{}, idx, children, e, tail, ts);
+            // The child itself was full: fall back to a fresh path in the
+            // next slot, if there is one.
+            if (!new_child) {
+                if (++new_idx < branches<B>)
+                    new_child = node_t::make_path_e(e, level - B, tail);
+                else
+                    return nullptr;
+            }
+        } else
+            new_child = node_t::make_path_e(e, level - B, tail);
+
+        if (mutate) {
+            // In-place: extend this node's size table and child array.
+            auto count             = new_idx + 1;
+            auto relaxed           = node->ensure_mutable_relaxed_n(e, new_idx);
+            node->inner()[new_idx] = new_child;
+            relaxed->d.sizes[new_idx] = pos.size() + ts;
+            relaxed->d.count          = count;
+            return node;
+        } else {
+            try {
+                // Shared node: copy the first new_idx children, then append
+                // the new child, and release our reference to the original.
+                auto count    = new_idx + 1;
+                auto new_node = node_t::copy_inner_r_e(e, pos.node(), new_idx);
+                auto relaxed  = new_node->relaxed();
+                new_node->inner()[new_idx] = new_child;
+                relaxed->d.sizes[new_idx]  = pos.size() + ts;
+                relaxed->d.count           = count;
+                if (Mutating)
+                    pos.visit(dec_visitor{});
+                return new_node;
+            } catch (...) {
+                // Roll back the freshly built child path; re-inc the tail
+                // first since dec_inner will release it again.
+                auto shift = pos.shift();
+                auto size  = new_idx == idx ? children + ts : ts;
+                if (shift > BL) {
+                    tail->inc();
+                    dec_inner(new_child, shift - B, size);
+                }
+                throw;
+            }
+        }
+    }
+
+    template <typename Pos, typename... Args>
+    static node_t* visit_regular(Pos&& pos, edit_t e, node_t* tail, Args&&...)
+    {
+        assert((pos.size() & mask<BL>) == 0);
+        auto node    = pos.node();
+        auto idx     = pos.index(pos.size() - 1);
+        // Slot that will hold the subtree containing the appended leaf.
+        auto new_idx = pos.index(pos.size() + branches<BL> - 1);
+        auto mutate  = Mutating && node->can_mutate(e);
+        if (mutate) {
+            node->inner()[new_idx] =
+                idx == new_idx ? pos.last_oh(this_t{}, idx, e, tail)
+                               /* otherwise */
+                               : node_t::make_path_e(e, pos.shift() - B, tail);
+            return node;
+        } else {
+            auto new_parent = node_t::make_inner_e(e);
+            try {
+                new_parent->inner()[new_idx] =
+                    idx == new_idx
+                        ? pos.last_oh(this_no_mut_t{}, idx, e, tail)
+                        /* otherwise */
+                        : node_t::make_path_e(e, pos.shift() - B, tail);
+                node_t::do_copy_inner(new_parent, node, new_idx);
+                if (Mutating)
+                    pos.visit(dec_visitor{});
+                return new_parent;
+            } catch (...) {
+                node_t::delete_inner_e(new_parent);
+                throw;
+            }
+        }
+    }
+
+    // The recursion stops one level above the leaves; reaching a leaf here
+    // would be a logic error.
+    template <typename Pos, typename... Args>
+    static node_t* visit_leaf(Pos&& pos, edit_t e, node_t* tail, Args&&...)
+    {
+        IMMER_UNREACHABLE;
+    }
+};
+
+// Persistent push-tail: like push_tail_mut_visitor but never mutates --
+// every node on the rightmost spine is copied.  Returns the new node per
+// level, or nullptr when the subtree is full and the caller must grow.
+template <typename NodeT>
+struct push_tail_visitor : visitor_base<push_tail_visitor<NodeT>>
+{
+    static constexpr auto B  = NodeT::bits;
+    static constexpr auto BL = NodeT::bits_leaf;
+
+    using this_t = push_tail_visitor;
+    using node_t = NodeT;
+
+    template <typename Pos>
+    static node_t* visit_relaxed(Pos&& pos, node_t* tail, count_t ts)
+    {
+        auto level    = pos.shift();
+        auto idx      = pos.count() - 1;
+        auto children = pos.size(idx);
+        // Descend into the last child unless it is already full (or we sit
+        // just above the leaves), in which case open a new child slot.
+        auto new_idx =
+            children == size_t{1} << level || level == BL ? idx + 1 : idx;
+        auto new_child = static_cast<node_t*>(nullptr);
+        if (new_idx >= branches<B>)
+            return nullptr;
+        else if (idx == new_idx) {
+            new_child = pos.last_oh_csh(this_t{}, idx, children, tail, ts);
+            // Child was full: fall back to a fresh path in the next slot.
+            if (!new_child) {
+                if (++new_idx < branches<B>)
+                    new_child = node_t::make_path(level - B, tail);
+                else
+                    return nullptr;
+            }
+        } else
+            new_child = node_t::make_path(level - B, tail);
+        try {
+            auto count = new_idx + 1;
+            auto new_parent =
+                node_t::copy_inner_r_n(count, pos.node(), new_idx);
+            auto new_relaxed              = new_parent->relaxed();
+            new_parent->inner()[new_idx]  = new_child;
+            new_relaxed->d.sizes[new_idx] = pos.size() + ts;
+            new_relaxed->d.count          = count;
+            return new_parent;
+        } catch (...) {
+            // Roll back the freshly built child path; re-inc the tail first
+            // since dec_inner will release it again.
+            auto shift = pos.shift();
+            auto size  = new_idx == idx ? children + ts : ts;
+            if (shift > BL) {
+                tail->inc();
+                dec_inner(new_child, shift - B, size);
+            }
+            throw;
+        }
+    }
+
+    template <typename Pos, typename... Args>
+    static node_t* visit_regular(Pos&& pos, node_t* tail, Args&&...)
+    {
+        assert((pos.size() & mask<BL>) == 0);
+        auto idx        = pos.index(pos.size() - 1);
+        // Slot that will hold the subtree containing the appended leaf.
+        auto new_idx    = pos.index(pos.size() + branches<BL> - 1);
+        auto count      = new_idx + 1;
+        auto new_parent = node_t::make_inner_n(count);
+        try {
+            new_parent->inner()[new_idx] =
+                idx == new_idx ? pos.last_oh(this_t{}, idx, tail)
+                               /* otherwise */
+                               : node_t::make_path(pos.shift() - B, tail);
+        } catch (...) {
+            node_t::delete_inner(new_parent, count);
+            throw;
+        }
+        return node_t::do_copy_inner(new_parent, pos.node(), new_idx);
+    }
+
+    // The recursion stops one level above the leaves; reaching a leaf here
+    // would be a logic error.
+    template <typename Pos, typename... Args>
+    static node_t* visit_leaf(Pos&& pos, node_t* tail, Args&&...)
+    {
+        IMMER_UNREACHABLE;
+    }
+};
+
+// Like dec_visitor, but on deallocation only releases the children from
+// position `idx` to the right (via each_right); the children before `idx`
+// are assumed to have been adopted elsewhere by the caller.
+struct dec_right_visitor : visitor_base<dec_right_visitor>
+{
+    using this_t = dec_right_visitor;
+    using dec_t  = dec_visitor;
+
+    template <typename Pos>
+    static void visit_relaxed(Pos&& p, count_t idx)
+    {
+        using node_t = node_type<Pos>;
+        auto node    = p.node();
+        if (node->dec()) {
+            p.each_right(dec_t{}, idx);
+            node_t::delete_inner_r(node, p.count());
+        }
+    }
+
+    template <typename Pos>
+    static void visit_regular(Pos&& p, count_t idx)
+    {
+        using node_t = node_type<Pos>;
+        auto node    = p.node();
+        if (node->dec()) {
+            p.each_right(dec_t{}, idx);
+            node_t::delete_inner(node, p.count());
+        }
+    }
+
+    // Only applied to inner nodes; leaves have no partial-release notion.
+    template <typename Pos>
+    static void visit_leaf(Pos&& p, count_t idx)
+    {
+        IMMER_UNREACHABLE;
+    }
+};
+
+// Transient take/slice-right: truncates the tree so that `last` is the
+// final element, mutating nodes in place when uniquely owned by edit `e`.
+// `Collapse` lets the topmost call squash single-child roots to lower the
+// tree; `Mutating == false` marks levels reached through a shared node,
+// below which nothing may be mutated or dec'd.
+template <typename NodeT, bool Collapse = true, bool Mutating = true>
+struct slice_right_mut_visitor
+    : visitor_base<slice_right_mut_visitor<NodeT, Collapse, Mutating>>
+{
+    using node_t = NodeT;
+    using this_t = slice_right_mut_visitor;
+    using edit_t = typename NodeT::edit_t;
+
+    // returns a new shift, new root, the new tail size and the new tail
+    using result_t             = std::tuple<shift_t, NodeT*, count_t, NodeT*>;
+    using no_collapse_t        = slice_right_mut_visitor<NodeT, false, true>;
+    using no_collapse_no_mut_t = slice_right_mut_visitor<NodeT, false, false>;
+    using no_mut_t = slice_right_mut_visitor<NodeT, Collapse, false>;
+
+    static constexpr auto B  = NodeT::bits;
+    static constexpr auto BL = NodeT::bits_leaf;
+
+    template <typename PosT>
+    static result_t visit_relaxed(PosT&& pos, size_t last, edit_t e)
+    {
+        auto idx    = pos.index(last);
+        auto node   = pos.node();
+        auto mutate = Mutating && node->can_mutate(e);
+        if (Collapse && idx == 0) {
+            // Only the first child survives: collapse this level away,
+            // releasing this node but keeping child 0 alive.
+            auto res = mutate ? pos.towards_oh(this_t{}, last, idx, e)
+                              : pos.towards_oh(no_mut_t{}, last, idx, e);
+            if (Mutating)
+                pos.visit(dec_right_visitor{}, count_t{1});
+            return res;
+        } else {
+            using std::get;
+            auto subs =
+                mutate ? pos.towards_oh(no_collapse_t{}, last, idx, e)
+                       : pos.towards_oh(no_collapse_no_mut_t{}, last, idx, e);
+            auto next = get<1>(subs);
+            auto ts   = get<2>(subs);
+            auto tail = get<3>(subs);
+            try {
+                if (next) {
+                    // The boundary child still has content: keep children
+                    // [0, idx] with the truncated child at idx.
+                    if (mutate) {
+                        auto nodr = node->ensure_mutable_relaxed_n(e, idx);
+                        pos.each_right(dec_visitor{}, idx + 1);
+                        node->inner()[idx] = next;
+                        nodr->d.sizes[idx] = last + 1 - ts;
+                        nodr->d.count      = idx + 1;
+                        return std::make_tuple(pos.shift(), node, ts, tail);
+                    } else {
+                        auto newn = node_t::copy_inner_r_e(e, node, idx);
+                        auto newr = newn->relaxed();
+                        newn->inner()[idx] = next;
+                        newr->d.sizes[idx] = last + 1 - ts;
+                        newr->d.count      = idx + 1;
+                        if (Mutating)
+                            pos.visit(dec_visitor{});
+                        return std::make_tuple(pos.shift(), newn, ts, tail);
+                    }
+                } else if (idx == 0) {
+                    // Everything left fits in the new tail: drop this node.
+                    if (Mutating)
+                        pos.visit(dec_right_visitor{}, count_t{1});
+                    return std::make_tuple(pos.shift(), nullptr, ts, tail);
+                } else if (Collapse && idx == 1 && pos.shift() > BL) {
+                    // Exactly one inner child remains: hoist it one level.
+                    auto newn = pos.node()->inner()[0];
+                    if (!mutate)
+                        newn->inc();
+                    if (Mutating)
+                        pos.visit(dec_right_visitor{}, count_t{2});
+                    return std::make_tuple(pos.shift() - B, newn, ts, tail);
+                } else {
+                    // Boundary child consumed entirely by the tail: keep
+                    // children [0, idx).
+                    if (mutate) {
+                        pos.each_right(dec_visitor{}, idx + 1);
+                        node->ensure_mutable_relaxed_n(e, idx)->d.count = idx;
+                        return std::make_tuple(pos.shift(), node, ts, tail);
+                    } else {
+                        auto newn = node_t::copy_inner_r_e(e, node, idx);
+                        if (Mutating)
+                            pos.visit(dec_visitor{});
+                        return std::make_tuple(pos.shift(), newn, ts, tail);
+                    }
+                }
+            } catch (...) {
+                // Only the copying paths can throw; release the pieces the
+                // recursion handed back to us.
+                assert(!mutate);
+                assert(!next || pos.shift() > BL);
+                if (next)
+                    dec_inner(next,
+                              pos.shift() - B,
+                              last + 1 - ts - pos.size_before(idx));
+                dec_leaf(tail, ts);
+                throw;
+            }
+        }
+    }
+
+    // Regular counterpart of visit_relaxed; same branch structure, but no
+    // size table needs maintaining.
+    template <typename PosT>
+    static result_t visit_regular(PosT&& pos, size_t last, edit_t e)
+    {
+        auto idx    = pos.index(last);
+        auto node   = pos.node();
+        auto mutate = Mutating && node->can_mutate(e);
+        if (Collapse && idx == 0) {
+            auto res = mutate ? pos.towards_oh(this_t{}, last, idx, e)
+                              : pos.towards_oh(no_mut_t{}, last, idx, e);
+            if (Mutating)
+                pos.visit(dec_right_visitor{}, count_t{1});
+            return res;
+        } else {
+            using std::get;
+            auto subs =
+                mutate ? pos.towards_oh(no_collapse_t{}, last, idx, e)
+                       : pos.towards_oh(no_collapse_no_mut_t{}, last, idx, e);
+            auto next = get<1>(subs);
+            auto ts   = get<2>(subs);
+            auto tail = get<3>(subs);
+            try {
+                if (next) {
+                    if (mutate) {
+                        node->inner()[idx] = next;
+                        pos.each_right(dec_visitor{}, idx + 1);
+                        return std::make_tuple(pos.shift(), node, ts, tail);
+                    } else {
+                        auto newn          = node_t::copy_inner_e(e, node, idx);
+                        newn->inner()[idx] = next;
+                        if (Mutating)
+                            pos.visit(dec_visitor{});
+                        return std::make_tuple(pos.shift(), newn, ts, tail);
+                    }
+                } else if (idx == 0) {
+                    if (Mutating)
+                        pos.visit(dec_right_visitor{}, count_t{1});
+                    return std::make_tuple(pos.shift(), nullptr, ts, tail);
+                } else if (Collapse && idx == 1 && pos.shift() > BL) {
+                    auto newn = pos.node()->inner()[0];
+                    if (!mutate)
+                        newn->inc();
+                    if (Mutating)
+                        pos.visit(dec_right_visitor{}, count_t{2});
+                    return std::make_tuple(pos.shift() - B, newn, ts, tail);
+                } else {
+                    if (mutate) {
+                        pos.each_right(dec_visitor{}, idx + 1);
+                        return std::make_tuple(pos.shift(), node, ts, tail);
+                    } else {
+                        auto newn = node_t::copy_inner_e(e, node, idx);
+                        if (Mutating)
+                            pos.visit(dec_visitor{});
+                        return std::make_tuple(pos.shift(), newn, ts, tail);
+                    }
+                }
+            } catch (...) {
+                assert(!mutate);
+                assert(!next || pos.shift() > BL);
+                assert(tail);
+                if (next)
+                    dec_regular(next, pos.shift() - B, last + 1 - ts);
+                dec_leaf(tail, ts);
+                throw;
+            }
+        }
+    }
+
+    // Leaf level: the surviving prefix of this leaf becomes the new tail.
+    template <typename PosT>
+    static result_t visit_leaf(PosT&& pos, size_t last, edit_t e)
+    {
+        auto old_tail_size = pos.count();
+        auto new_tail_size = pos.index(last) + 1;
+        auto node          = pos.node();
+        auto mutate        = Mutating && node->can_mutate(e);
+        if (new_tail_size == old_tail_size) {
+            // Whole leaf survives; without Mutating we must add a reference
+            // since the caller will treat the result as owned.
+            if (!Mutating)
+                node->inc();
+            return std::make_tuple(0, nullptr, new_tail_size, node);
+        } else if (mutate) {
+            // In place: destroy the dropped suffix of values.
+            destroy_n(node->leaf() + new_tail_size,
+                      old_tail_size - new_tail_size);
+            return std::make_tuple(0, nullptr, new_tail_size, node);
+        } else {
+            auto new_tail = node_t::copy_leaf_e(e, node, new_tail_size);
+            if (Mutating)
+                pos.visit(dec_visitor{});
+            return std::make_tuple(0, nullptr, new_tail_size, new_tail);
+        }
+    }
+};
+
+// Persistent take/slice-right: truncates the tree so that `last` is the
+// final element, always path-copying (never mutating the input nodes).
+// `Collapse` lets the topmost call squash single-child roots.
+template <typename NodeT, bool Collapse = true>
+struct slice_right_visitor : visitor_base<slice_right_visitor<NodeT, Collapse>>
+{
+    using node_t = NodeT;
+    using this_t = slice_right_visitor;
+
+    // returns a new shift, new root, the new tail size and the new tail
+    using result_t      = std::tuple<shift_t, NodeT*, count_t, NodeT*>;
+    using no_collapse_t = slice_right_visitor<NodeT, false>;
+
+    static constexpr auto B  = NodeT::bits;
+    static constexpr auto BL = NodeT::bits_leaf;
+
+    template <typename PosT>
+    static result_t visit_relaxed(PosT&& pos, size_t last)
+    {
+        auto idx = pos.index(last);
+        if (Collapse && idx == 0) {
+            // Only the first child survives: collapse this level away.
+            return pos.towards_oh(this_t{}, last, idx);
+        } else {
+            using std::get;
+            auto subs = pos.towards_oh(no_collapse_t{}, last, idx);
+            auto next = get<1>(subs);
+            auto ts   = get<2>(subs);
+            auto tail = get<3>(subs);
+            try {
+                if (next) {
+                    // Boundary child still has content: copy children
+                    // [0, idx) and append the truncated child.
+                    auto count = idx + 1;
+                    auto newn  = node_t::copy_inner_r_n(count, pos.node(), idx);
+                    auto newr  = newn->relaxed();
+                    newn->inner()[idx] = next;
+                    newr->d.sizes[idx] = last + 1 - ts;
+                    newr->d.count      = count;
+                    return std::make_tuple(pos.shift(), newn, ts, tail);
+                } else if (idx == 0) {
+                    // Everything left fits in the new tail: drop this node.
+                    return std::make_tuple(pos.shift(), nullptr, ts, tail);
+                } else if (Collapse && idx == 1 && pos.shift() > BL) {
+                    // Exactly one inner child remains: hoist it one level.
+                    auto newn = pos.node()->inner()[0];
+                    return std::make_tuple(
+                        pos.shift() - B, newn->inc(), ts, tail);
+                } else {
+                    auto newn = node_t::copy_inner_r(pos.node(), idx);
+                    return std::make_tuple(pos.shift(), newn, ts, tail);
+                }
+            } catch (...) {
+                // Release whatever the recursion handed back to us.
+                assert(!next || pos.shift() > BL);
+                if (next)
+                    dec_inner(next,
+                              pos.shift() - B,
+                              last + 1 - ts - pos.size_before(idx));
+                if (tail)
+                    dec_leaf(tail, ts);
+                throw;
+            }
+        }
+    }
+
+    // Regular counterpart; same branch structure, no size table to build.
+    template <typename PosT>
+    static result_t visit_regular(PosT&& pos, size_t last)
+    {
+        auto idx = pos.index(last);
+        if (Collapse && idx == 0) {
+            return pos.towards_oh(this_t{}, last, idx);
+        } else {
+            using std::get;
+            auto subs = pos.towards_oh(no_collapse_t{}, last, idx);
+            auto next = get<1>(subs);
+            auto ts   = get<2>(subs);
+            auto tail = get<3>(subs);
+            try {
+                if (next) {
+                    auto newn = node_t::copy_inner_n(idx + 1, pos.node(), idx);
+                    newn->inner()[idx] = next;
+                    return std::make_tuple(pos.shift(), newn, ts, tail);
+                } else if (idx == 0) {
+                    return std::make_tuple(pos.shift(), nullptr, ts, tail);
+                } else if (Collapse && idx == 1 && pos.shift() > BL) {
+                    auto newn = pos.node()->inner()[0];
+                    return std::make_tuple(
+                        pos.shift() - B, newn->inc(), ts, tail);
+                } else {
+                    auto newn = node_t::copy_inner_n(idx, pos.node(), idx);
+                    return std::make_tuple(pos.shift(), newn, ts, tail);
+                }
+            } catch (...) {
+                assert(!next || pos.shift() > BL);
+                assert(tail);
+                if (next)
+                    dec_regular(next, pos.shift() - B, last + 1 - ts);
+                dec_leaf(tail, ts);
+                throw;
+            }
+        }
+    }
+
+    // Leaf level: the surviving prefix of this leaf becomes the new tail,
+    // shared when the whole leaf survives, copied otherwise.
+    template <typename PosT>
+    static result_t visit_leaf(PosT&& pos, size_t last)
+    {
+        auto old_tail_size = pos.count();
+        auto new_tail_size = pos.index(last) + 1;
+        auto new_tail      = new_tail_size == old_tail_size
+                            ? pos.node()->inc()
+                            : node_t::copy_leaf(pos.node(), new_tail_size);
+        return std::make_tuple(0, nullptr, new_tail_size, new_tail);
+    }
+};
+
+// Mirror of dec_right_visitor: on deallocation only releases the first
+// `idx` children (via each_left); the children from `idx` onwards are
+// assumed to have been adopted elsewhere by the caller.
+struct dec_left_visitor : visitor_base<dec_left_visitor>
+{
+    using this_t = dec_left_visitor;
+    using dec_t  = dec_visitor;
+
+    template <typename Pos>
+    static void visit_relaxed(Pos&& p, count_t idx)
+    {
+        using node_t = node_type<Pos>;
+        auto node    = p.node();
+        if (node->dec()) {
+            p.each_left(dec_t{}, idx);
+            node_t::delete_inner_r(node, p.count());
+        }
+    }
+
+    template <typename Pos>
+    static void visit_regular(Pos&& p, count_t idx)
+    {
+        using node_t = node_type<Pos>;
+        auto node    = p.node();
+        if (node->dec()) {
+            p.each_left(dec_t{}, idx);
+            node_t::delete_inner(node, p.count());
+        }
+    }
+
+    // Only applied to inner nodes; leaves have no partial-release notion.
+    template <typename Pos>
+    static void visit_leaf(Pos&& p, count_t idx)
+    {
+        IMMER_UNREACHABLE;
+    }
+};
+
+template <typename NodeT, bool Collapse = true, bool Mutating = true>
+struct slice_left_mut_visitor
+    : visitor_base<slice_left_mut_visitor<NodeT, Collapse, Mutating>>
+{
+    using node_t    = NodeT;
+    using this_t    = slice_left_mut_visitor;
+    using edit_t    = typename NodeT::edit_t;
+    using value_t   = typename NodeT::value_t;
+    using relaxed_t = typename NodeT::relaxed_t;
+    // returns a new shift and new root
+    using result_t = std::tuple<shift_t, NodeT*>;
+
+    using no_collapse_t        = slice_left_mut_visitor<NodeT, false, true>;
+    using no_collapse_no_mut_t = slice_left_mut_visitor<NodeT, false, false>;
+    using no_mut_t             = slice_left_mut_visitor<NodeT, Collapse, false>;
+
+    static constexpr auto B  = NodeT::bits;
+    static constexpr auto BL = NodeT::bits_leaf;
+
+    // Inner-node case for relaxed (size-table carrying) nodes while
+    // dropping the first `first` elements of the tree.  Returns the new
+    // (shift, root) for this subtree.  `e` is the transience edit token
+    // that decides whether a node may be mutated in place.
+    template <typename PosT>
+    static result_t visit_relaxed(PosT&& pos, size_t first, edit_t e)
+    {
+        auto idx                = pos.subindex(first);
+        auto count              = pos.count();
+        auto node               = pos.node();
+        // mutate only when this is the mutating visitor variant and the
+        // node is uniquely owned by edit `e`
+        auto mutate             = Mutating && node->can_mutate(e);
+        auto left_size          = pos.size_before(idx);
+        auto child_size         = pos.size_sbh(idx, left_size);
+        auto dropped_size       = first;
+        auto child_dropped_size = dropped_size - left_size;
+        // if only the last child survives, collapse this level away and
+        // return the child subtree directly
+        if (Collapse && pos.shift() > BL && idx == pos.count() - 1) {
+            auto r = mutate ? pos.towards_sub_oh(this_t{}, first, idx, e)
+                            : pos.towards_sub_oh(no_mut_t{}, first, idx, e);
+            if (Mutating)
+                pos.visit(dec_left_visitor{}, idx);
+            return r;
+        } else {
+            using std::get;
+            // reuse the node when mutating, otherwise allocate a fresh
+            // relaxed inner node owned by `e`
+            auto newn = mutate ? (node->ensure_mutable_relaxed(e), node)
+                               : node_t::make_inner_r_e(e);
+            auto newr           = newn->relaxed();
+            auto newcount       = count - idx;
+            auto new_child_size = child_size - child_dropped_size;
+            try {
+                // recurse into the first surviving child; collapsing is
+                // disabled below this level so shifts remain consistent
+                auto subs =
+                    mutate ? pos.towards_sub_oh(no_collapse_t{}, first, idx, e)
+                           : pos.towards_sub_oh(
+                                 no_collapse_no_mut_t{}, first, idx, e);
+                if (mutate)
+                    pos.each_left(dec_visitor{}, idx);
+                pos.copy_sizes(
+                    idx + 1, newcount - 1, new_child_size, newr->d.sizes + 1);
+                std::uninitialized_copy(node->inner() + idx + 1,
+                                        node->inner() + count,
+                                        newn->inner() + 1);
+                newn->inner()[0] = get<1>(subs);
+                newr->d.sizes[0] = new_child_size;
+                newr->d.count    = newcount;
+                if (!mutate) {
+                    // fresh node: take a reference on every copied child
+                    // and, if the tree is being mutated, release the
+                    // original node
+                    node_t::inc_nodes(newn->inner() + 1, newcount - 1);
+                    if (Mutating)
+                        pos.visit(dec_visitor{});
+                }
+                return std::make_tuple(pos.shift(), newn);
+            } catch (...) {
+                // roll back the allocation made above; children were not
+                // inc'd yet on this path
+                if (!mutate)
+                    node_t::delete_inner_r_e(newn);
+                throw;
+            }
+        }
+    }
+
+    // Inner-node case for regular (full, size-table free) nodes.  The
+    // surviving suffix is no longer left-dense, so the node must become
+    // relaxed; when mutating in place this is done by attaching a freshly
+    // allocated `relaxed_t` size table to the existing node.
+    template <typename PosT>
+    static result_t visit_regular(PosT&& pos, size_t first, edit_t e)
+    {
+        auto idx    = pos.subindex(first);
+        auto count  = pos.count();
+        auto node   = pos.node();
+        auto mutate = Mutating
+                      // this is more restrictive than actually needed because
+                      // it causes the algorithm to also avoid mutating the leaf
+                      // in place
+                      && !node_t::embed_relaxed && node->can_mutate(e);
+        auto left_size          = pos.size_before(idx);
+        auto child_size         = pos.size_sbh(idx, left_size);
+        auto dropped_size       = first;
+        auto child_dropped_size = dropped_size - left_size;
+        // single surviving child: collapse this level away
+        if (Collapse && pos.shift() > BL && idx == pos.count() - 1) {
+            auto r = mutate ? pos.towards_sub_oh(this_t{}, first, idx, e)
+                            : pos.towards_sub_oh(no_mut_t{}, first, idx, e);
+            if (Mutating)
+                pos.visit(dec_left_visitor{}, idx);
+            return r;
+        } else {
+            using std::get;
+            // if possible, we convert the node to a relaxed one
+            // simply by allocating a `relaxed_t` size table for
+            // it... maybe some of this magic should be moved as a
+            // `node<...>` static method...
+            auto newcount = count - idx;
+            auto newn =
+                mutate ? (node->impl.d.data.inner.relaxed = new (
+                              node_t::heap::allocate(node_t::max_sizeof_relaxed,
+                                                     norefs_tag{})) relaxed_t,
+                          node)
+                       : node_t::make_inner_r_e(e);
+            auto newr = newn->relaxed();
+            try {
+                auto subs =
+                    mutate ? pos.towards_sub_oh(no_collapse_t{}, first, idx, e)
+                           : pos.towards_sub_oh(
+                                 no_collapse_no_mut_t{}, first, idx, e);
+                if (mutate)
+                    pos.each_left(dec_visitor{}, idx);
+                newr->d.sizes[0] = child_size - child_dropped_size;
+                pos.copy_sizes(
+                    idx + 1, newcount - 1, newr->d.sizes[0], newr->d.sizes + 1);
+                newr->d.count    = newcount;
+                newn->inner()[0] = get<1>(subs);
+                std::uninitialized_copy(node->inner() + idx + 1,
+                                        node->inner() + count,
+                                        newn->inner() + 1);
+                if (!mutate) {
+                    node_t::inc_nodes(newn->inner() + 1, newcount - 1);
+                    if (Mutating)
+                        pos.visit(dec_visitor{});
+                }
+                return std::make_tuple(pos.shift(), newn);
+            } catch (...) {
+                if (!mutate)
+                    node_t::delete_inner_r_e(newn);
+                else {
+                    // restore the regular node that we were
+                    // attempting to relax...
+                    node_t::heap::deallocate(node_t::max_sizeof_relaxed,
+                                             node->impl.d.data.inner.relaxed);
+                    node->impl.d.data.inner.relaxed = nullptr;
+                }
+                throw;
+            }
+        }
+    }
+
+    // Leaf case: drop the first `idx` values of the leaf.  May move the
+    // surviving values to the front of the same buffer when the leaf is
+    // uniquely owned and the value type's move cannot throw (otherwise a
+    // partially-moved leaf could not be rolled back safely).
+    template <typename PosT>
+    static result_t visit_leaf(PosT&& pos, size_t first, edit_t e)
+    {
+        auto node   = pos.node();
+        auto idx    = pos.index(first);
+        auto count  = pos.count();
+        auto mutate = Mutating &&
+                      std::is_nothrow_move_constructible<value_t>::value &&
+                      node->can_mutate(e);
+        if (mutate) {
+            auto data     = node->leaf();
+            auto newcount = count - idx;
+            // shift survivors down, then destroy the now-unused tail slots
+            std::move(data + idx, data + count, data);
+            destroy_n(data + newcount, idx);
+            return std::make_tuple(0, node);
+        } else {
+            auto newn = node_t::copy_leaf_e(e, node, idx, count);
+            if (Mutating)
+                pos.visit(dec_visitor{});
+            return std::make_tuple(0, newn);
+        }
+    }
+};
+
+// Immutable left-slice: builds a new tree equivalent to the visited one
+// with its first `first` elements removed, never mutating existing
+// nodes.  `Collapse` controls whether levels with a single surviving
+// child are elided at the top of the result.
+template <typename NodeT, bool Collapse = true>
+struct slice_left_visitor : visitor_base<slice_left_visitor<NodeT, Collapse>>
+{
+    using node_t = NodeT;
+    using this_t = slice_left_visitor;
+
+    // returns a new shift and new root
+    using result_t      = std::tuple<shift_t, NodeT*>;
+    using no_collapse_t = slice_left_visitor<NodeT, false>;
+
+    static constexpr auto B  = NodeT::bits;
+    static constexpr auto BL = NodeT::bits_leaf;
+
+    template <typename PosT>
+    static result_t visit_inner(PosT&& pos, size_t first)
+    {
+        auto idx                = pos.subindex(first);
+        auto count              = pos.count();
+        auto left_size          = pos.size_before(idx);
+        auto child_size         = pos.size_sbh(idx, left_size);
+        auto dropped_size       = first;
+        auto child_dropped_size = dropped_size - left_size;
+        // only the last child survives: collapse this level away
+        if (Collapse && pos.shift() > BL && idx == pos.count() - 1) {
+            return pos.towards_sub_oh(this_t{}, first, idx);
+        } else {
+            using std::get;
+            auto n    = pos.node();
+            auto newc = count - idx;
+            // the result is always relaxed: its first child is partially
+            // dropped, so a size table is required
+            auto newn = node_t::make_inner_r_n(newc);
+            try {
+                auto subs     = pos.towards_sub_oh(no_collapse_t{}, first, idx);
+                auto newr     = newn->relaxed();
+                newr->d.count = count - idx;
+                newr->d.sizes[0] = child_size - child_dropped_size;
+                pos.copy_sizes(idx + 1,
+                               newr->d.count - 1,
+                               newr->d.sizes[0],
+                               newr->d.sizes + 1);
+                assert(newr->d.sizes[newr->d.count - 1] ==
+                       pos.size() - dropped_size);
+                newn->inner()[0] = get<1>(subs);
+                std::uninitialized_copy(n->inner() + idx + 1,
+                                        n->inner() + count,
+                                        newn->inner() + 1);
+                // the copied children gain one owner each
+                node_t::inc_nodes(newn->inner() + 1, newr->d.count - 1);
+                return std::make_tuple(pos.shift(), newn);
+            } catch (...) {
+                node_t::delete_inner_r(newn, newc);
+                throw;
+            }
+        }
+    }
+
+    template <typename PosT>
+    static result_t visit_leaf(PosT&& pos, size_t first)
+    {
+        // copy the surviving suffix of the leaf into a fresh leaf node
+        auto n = node_t::copy_leaf(pos.node(), pos.index(first), pos.count());
+        return std::make_tuple(0, n);
+    }
+};
+
+// Transient result of a concatenation step: up to `max_children` sibling
+// nodes of equal shift, each with an associated entry in `sizes_`.
+// NOTE(review): merger-built instances appear to store cumulative sizes
+// (see `concat_merger::add_child`), while `concat_leafs` stores per-node
+// counts; `realize()` is only reached through the merger path — confirm.
+template <typename Node>
+struct concat_center_pos
+{
+    static constexpr auto B  = Node::bits;
+    static constexpr auto BL = Node::bits_leaf;
+
+    static constexpr count_t max_children = 3;
+
+    using node_t = Node;
+    using edit_t = typename Node::edit_t;
+
+    shift_t shift_ = 0u;
+    count_t count_ = 0u;
+    node_t* nodes_[max_children];
+    size_t sizes_[max_children];
+
+    auto shift() const { return shift_; }
+
+    // one-, two- and three-child constructors; ownership of the passed
+    // nodes is taken as-is (callers inc() before handing them over)
+    concat_center_pos(shift_t s, Node* n0, size_t s0)
+        : shift_{s}
+        , count_{1}
+        , nodes_{n0}
+        , sizes_{s0}
+    {}
+
+    concat_center_pos(shift_t s, Node* n0, size_t s0, Node* n1, size_t s1)
+        : shift_{s}
+        , count_{2}
+        , nodes_{n0, n1}
+        , sizes_{s0, s1}
+    {}
+
+    concat_center_pos(shift_t s,
+                      Node* n0,
+                      size_t s0,
+                      Node* n1,
+                      size_t s1,
+                      Node* n2,
+                      size_t s2)
+        : shift_{s}
+        , count_{3}
+        , nodes_{n0, n1, n2}
+        , sizes_{s0, s1, s2}
+    {}
+
+    // Visit every child with `v`; children are leaves when `shift_` is
+    // at the leaf level, relaxed inner nodes otherwise.
+    template <typename Visitor, typename... Args>
+    void each_sub(Visitor v, Args&&... args)
+    {
+        if (shift_ == BL) {
+            for (auto i = count_t{0}; i < count_; ++i)
+                make_leaf_sub_pos(nodes_[i], sizes_[i]).visit(v, args...);
+        } else {
+            for (auto i = count_t{0}; i < count_; ++i)
+                make_relaxed_pos(nodes_[i], shift_ - B, nodes_[i]->relaxed())
+                    .visit(v, args...);
+        }
+    }
+
+    // Consume this value, producing a proper relaxed position: wraps
+    // multiple children under a fresh relaxed parent, or unwraps a lone
+    // child one level down.  On allocation failure the children are
+    // released before rethrowing.
+    relaxed_pos<Node> realize() &&
+    {
+        if (count_ > 1) {
+            try {
+                auto result = node_t::make_inner_r_n(count_);
+                auto r      = result->relaxed();
+                r->d.count  = count_;
+                std::copy(nodes_, nodes_ + count_, result->inner());
+                std::copy(sizes_, sizes_ + count_, r->d.sizes);
+                return {result, shift_, r};
+            } catch (...) {
+                each_sub(dec_visitor{});
+                throw;
+            }
+        } else {
+            assert(shift_ >= B + BL);
+            return {nodes_[0], shift_ - B, nodes_[0]->relaxed()};
+        }
+    }
+
+    // Transient (edit-owned) variant of `realize`; allocation is tied to
+    // edit `e`, so no rollback handling is performed here.
+    relaxed_pos<Node> realize_e(edit_t e)
+    {
+        if (count_ > 1) {
+            auto result = node_t::make_inner_r_e(e);
+            auto r      = result->relaxed();
+            r->d.count  = count_;
+            std::copy(nodes_, nodes_ + count_, result->inner());
+            std::copy(sizes_, sizes_ + count_, r->d.sizes);
+            return {result, shift_, r};
+        } else {
+            assert(shift_ >= B + BL);
+            return {nodes_[0], shift_ - B, nodes_[0]->relaxed()};
+        }
+    }
+};
+
+// Stateful helper that redistributes the children of the concatenated
+// subtrees according to a precomputed plan (`counts`).  It consumes
+// children via `merge_leaf`/`merge_inner` and packs them into up to
+// three fresh parents collected in `result_`.
+template <typename Node>
+struct concat_merger
+{
+    using node_t             = Node;
+    static constexpr auto B  = Node::bits;
+    static constexpr auto BL = Node::bits_leaf;
+
+    using result_t = concat_center_pos<Node>;
+
+    count_t* curr_; // next target child count from the plan
+    count_t n_;     // number of plan entries still to produce
+    result_t result_;
+
+    concat_merger(shift_t shift, count_t* counts, count_t n)
+        : curr_{counts}
+        , n_{n}
+        , result_{
+              shift + B, node_t::make_inner_r_n(std::min(n_, branches<B>)), 0}
+    {}
+
+    // node currently being filled, and how much of it is initialized
+    node_t* to_        = {};
+    count_t to_offset_ = {};
+    size_t to_size_    = {};
+
+    // Append a finished child `p` of size `size` to the current parent,
+    // opening a new parent in `result_` when the current one is full.
+    void add_child(node_t* p, size_t size)
+    {
+        ++curr_;
+        auto parent  = result_.nodes_[result_.count_ - 1];
+        auto relaxed = parent->relaxed();
+        if (relaxed->d.count == branches<B>) {
+            assert(result_.count_ < result_t::max_children);
+            n_ -= branches<B>;
+            parent  = node_t::make_inner_r_n(std::min(n_, branches<B>));
+            relaxed = parent->relaxed();
+            result_.nodes_[result_.count_] = parent;
+            result_.sizes_[result_.count_] = result_.sizes_[result_.count_ - 1];
+            ++result_.count_;
+        }
+        auto idx = relaxed->d.count++;
+        result_.sizes_[result_.count_ - 1] += size;
+        // per-node size tables are cumulative
+        relaxed->d.sizes[idx] = size + (idx ? relaxed->d.sizes[idx - 1] : 0);
+        parent->inner()[idx]  = p;
+    };
+
+    // Consume a source leaf.  If it already matches the planned count it
+    // is shared by reference; otherwise its values are copied into the
+    // leaf currently being assembled.
+    template <typename Pos>
+    void merge_leaf(Pos&& p)
+    {
+        auto from       = p.node();
+        auto from_size  = p.size();
+        auto from_count = p.count();
+        assert(from_size);
+        if (!to_ && *curr_ == from_count) {
+            add_child(from, from_size);
+            from->inc();
+        } else {
+            auto from_offset = count_t{};
+            auto from_data   = from->leaf();
+            do {
+                if (!to_) {
+                    to_        = node_t::make_leaf_n(*curr_);
+                    to_offset_ = 0;
+                }
+                auto data = to_->leaf();
+                auto to_copy =
+                    std::min(from_count - from_offset, *curr_ - to_offset_);
+                std::uninitialized_copy(from_data + from_offset,
+                                        from_data + from_offset + to_copy,
+                                        data + to_offset_);
+                to_offset_ += to_copy;
+                from_offset += to_copy;
+                if (*curr_ == to_offset_) {
+                    add_child(to_, to_offset_);
+                    to_ = nullptr;
+                }
+            } while (from_offset != from_count);
+        }
+    }
+
+    // Consume a source inner node; same sharing/refilling strategy as
+    // `merge_leaf`, additionally maintaining the relaxed size table.
+    template <typename Pos>
+    void merge_inner(Pos&& p)
+    {
+        auto from       = p.node();
+        auto from_size  = p.size();
+        auto from_count = p.count();
+        assert(from_size);
+        if (!to_ && *curr_ == from_count) {
+            add_child(from, from_size);
+            from->inc();
+        } else {
+            auto from_offset = count_t{};
+            auto from_data   = from->inner();
+            do {
+                if (!to_) {
+                    to_        = node_t::make_inner_r_n(*curr_);
+                    to_offset_ = 0;
+                    to_size_   = 0;
+                }
+                auto data = to_->inner();
+                auto to_copy =
+                    std::min(from_count - from_offset, *curr_ - to_offset_);
+                std::uninitialized_copy(from_data + from_offset,
+                                        from_data + from_offset + to_copy,
+                                        data + to_offset_);
+                // copied grandchildren gain one owner each
+                node_t::inc_nodes(from_data + from_offset, to_copy);
+                auto sizes = to_->relaxed()->d.sizes;
+                p.copy_sizes(
+                    from_offset, to_copy, to_size_, sizes + to_offset_);
+                to_offset_ += to_copy;
+                from_offset += to_copy;
+                to_size_ = sizes[to_offset_ - 1];
+                if (*curr_ == to_offset_) {
+                    to_->relaxed()->d.count = to_offset_;
+                    add_child(to_, to_size_);
+                    to_ = nullptr;
+                }
+            } while (from_offset != from_count);
+        }
+    }
+
+    concat_center_pos<Node> finish() const
+    {
+        assert(!to_);
+        return result_;
+    }
+
+    // Release everything built so far after an exception: the partially
+    // filled node (if any) and all completed parents.
+    void abort()
+    {
+        auto shift = result_.shift_ - B;
+        if (to_) {
+            if (shift == BL)
+                node_t::delete_leaf(to_, to_offset_);
+            else {
+                to_->relaxed()->d.count = to_offset_;
+                dec_relaxed(to_, shift - B);
+            }
+        }
+        result_.each_sub(dec_visitor());
+    }
+};
+
+// Visitor adaptor that feeds every visited node into a `concat_merger`,
+// dispatching leaves and inner nodes to the matching merge routine.
+struct concat_merger_visitor : visitor_base<concat_merger_visitor>
+{
+    using this_t = concat_merger_visitor;
+
+    template <typename PosT, typename MergerT>
+    static void visit_leaf(PosT&& pos, MergerT& m)
+    {
+        m.merge_leaf(pos);
+    }
+
+    template <typename PosT, typename MergerT>
+    static void visit_inner(PosT&& pos, MergerT& m)
+    {
+        m.merge_inner(pos);
+    }
+};
+
+// Visitor that records the child count of every visited node into a
+// `concat_rebalance_plan`, accumulating the grand total as it goes.
+struct concat_rebalance_plan_fill_visitor
+    : visitor_base<concat_rebalance_plan_fill_visitor>
+{
+    using this_t = concat_rebalance_plan_fill_visitor;
+
+    template <typename PosT, typename PlanT>
+    static void visit_node(PosT&& pos, PlanT& plan)
+    {
+        auto c = pos.count();
+        assert(plan.n < PlanT::max_children);
+        plan.counts[plan.n++] = c;
+        plan.total += c;
+    }
+};
+
+// Plan describing how many children each node of the rebalanced level
+// should hold after a concatenation, following the RRB-tree invariant.
+template <bits_t B, bits_t BL>
+struct concat_rebalance_plan
+{
+    static constexpr auto max_children = 2 * branches<B> + 1;
+
+    count_t counts[max_children]; // planned child count per new node
+    count_t n     = 0u;           // number of entries in `counts`
+    count_t total = 0u;           // total children across all entries
+
+    // Record the child counts of all nodes at the level being merged:
+    // left siblings, the center result, then right siblings.
+    template <typename LPos, typename CPos, typename RPos>
+    void fill(LPos&& lpos, CPos&& cpos, RPos&& rpos)
+    {
+        assert(n == 0u);
+        assert(total == 0u);
+        using visitor_t = concat_rebalance_plan_fill_visitor;
+        lpos.each_left_sub(visitor_t{}, *this);
+        cpos.each_sub(visitor_t{}, *this);
+        rpos.each_right_sub(visitor_t{}, *this);
+    }
+
+    // Redistribute counts until at most `optimal + rrb_extras` nodes
+    // remain, merging under-full nodes into their right neighbors.
+    void shuffle(shift_t shift)
+    {
+        // gcc seems to not really understand this code... :(
+#if !defined(_MSC_VER)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Warray-bounds"
+#endif
+        constexpr count_t rrb_extras    = 2;
+        constexpr count_t rrb_invariant = 1;
+        const auto bits                 = shift == BL ? BL : B;
+        const auto branches             = count_t{1} << bits;
+        const auto optimal              = ((total - 1) >> bits) + 1;
+        count_t i                       = 0;
+        while (n >= optimal + rrb_extras) {
+            // skip ok nodes
+            while (counts[i] > branches - rrb_invariant)
+                i++;
+            // short node, redistribute
+            auto remaining = counts[i];
+            do {
+                auto count = std::min(remaining + counts[i + 1], branches);
+                counts[i]  = count;
+                remaining += counts[i + 1] - count;
+                ++i;
+            } while (remaining > 0);
+            // remove node
+            std::move(counts + i + 1, counts + n, counts + i);
+            --n;
+            --i;
+        }
+#if !defined(_MSC_VER)
+#pragma GCC diagnostic pop
+#endif
+    }
+
+    // Execute the plan: stream all children through a `concat_merger`.
+    // The center nodes are released afterwards (the merger took copies
+    // or references as needed); on failure the merger cleans itself up.
+    template <typename LPos, typename CPos, typename RPos>
+    concat_center_pos<node_type<CPos>>
+    merge(LPos&& lpos, CPos&& cpos, RPos&& rpos)
+    {
+        using node_t    = node_type<CPos>;
+        using merger_t  = concat_merger<node_t>;
+        using visitor_t = concat_merger_visitor;
+        auto merger     = merger_t{cpos.shift(), counts, n};
+        try {
+            lpos.each_left_sub(visitor_t{}, merger);
+            cpos.each_sub(visitor_t{}, merger);
+            rpos.each_right_sub(visitor_t{}, merger);
+            cpos.each_sub(dec_visitor{});
+            return merger.finish();
+        } catch (...) {
+            merger.abort();
+            throw;
+        }
+    }
+};
+
+// Rebalances the nodes gathered from the left, center and right
+// positions into a fresh, invariant-respecting center position.  On
+// failure the center's children are released before rethrowing.
+template <typename Node, typename LPos, typename CPos, typename RPos>
+concat_center_pos<Node> concat_rebalance(LPos&& lpos, CPos&& cpos, RPos&& rpos)
+{
+    using plan_t = concat_rebalance_plan<Node::bits, Node::bits_leaf>;
+    auto plan    = plan_t{};
+    plan.fill(lpos, cpos, rpos);
+    plan.shuffle(cpos.shift());
+    try {
+        return plan.merge(lpos, cpos, rpos);
+    } catch (...) {
+        cpos.each_sub(dec_visitor{});
+        throw;
+    }
+}
+
+// Builds the leaf-level center position for a concatenation: the left
+// leaf, the (possibly empty) tail in the middle, and the right leaf.
+// Every node placed in the result gains one reference.
+template <typename Node, typename LPos, typename TPos, typename RPos>
+concat_center_pos<Node> concat_leafs(LPos&& lpos, TPos&& tpos, RPos&& rpos)
+{
+    static_assert(Node::bits >= 2, "");
+    assert(lpos.shift() == tpos.shift());
+    assert(lpos.shift() == rpos.shift());
+    assert(lpos.shift() == 0);
+    // skip the tail slot entirely when the tail is empty
+    if (tpos.count() == 0)
+        return {
+            Node::bits_leaf,
+            lpos.node()->inc(),
+            lpos.count(),
+            rpos.node()->inc(),
+            rpos.count(),
+        };
+    return {
+        Node::bits_leaf,
+        lpos.node()->inc(),
+        lpos.count(),
+        tpos.node()->inc(),
+        tpos.count(),
+        rpos.node()->inc(),
+        rpos.count(),
+    };
+}
+
+// Forward declarations: these visitors and `concat_inners` are mutually
+// recursive, so the visitors must be declared before it.
+template <typename Node>
+struct concat_left_visitor;
+template <typename Node>
+struct concat_right_visitor;
+template <typename Node>
+struct concat_both_visitor;
+
+// Concatenates two inner subtrees whose shifts may differ: descends the
+// taller side until both sides sit at the same level, then rebalances
+// the touching edge nodes.
+template <typename Node, typename LPos, typename TPos, typename RPos>
+concat_center_pos<Node> concat_inners(LPos&& lpos, TPos&& tpos, RPos&& rpos)
+{
+    auto ls = lpos.shift();
+    auto rs = rpos.shift();
+    if (ls > rs) {
+        // left is taller: recurse into its last child
+        auto cpos = lpos.last_sub(concat_left_visitor<Node>{}, tpos, rpos);
+        return concat_rebalance<Node>(lpos, cpos, null_sub_pos{});
+    }
+    if (ls < rs) {
+        // right is taller: recurse into its first child
+        auto cpos = rpos.first_sub(concat_right_visitor<Node>{}, lpos, tpos);
+        return concat_rebalance<Node>(null_sub_pos{}, cpos, rpos);
+    }
+    assert(ls == rs);
+    assert(Node::bits_leaf == 0u || ls > 0);
+    auto cpos = lpos.last_sub(concat_both_visitor<Node>{}, tpos, rpos);
+    return concat_rebalance<Node>(lpos, cpos, rpos);
+}
+
+// Visits the rightmost child of the taller left tree, recursing through
+// `concat_inners`.  A leaf can never be reached on this path, since the
+// descent stops as soon as the shifts are equal.
+template <typename Node>
+struct concat_left_visitor : visitor_base<concat_left_visitor<Node>>
+{
+    using this_t = concat_left_visitor;
+
+    template <typename LPos, typename TPos, typename RPos>
+    static concat_center_pos<Node>
+    visit_inner(LPos&& l, TPos&& t, RPos&& r)
+    {
+        return concat_inners<Node>(l, t, r);
+    }
+
+    template <typename LPos, typename TPos, typename RPos>
+    static concat_center_pos<Node>
+    visit_leaf(LPos&& l, TPos&& t, RPos&& r)
+    {
+        IMMER_UNREACHABLE;
+    }
+};
+
+// Visits the leftmost child of the taller right tree.  Inner nodes keep
+// recursing through `concat_inners`; leaves terminate the descent via
+// `concat_leafs`.
+template <typename Node>
+struct concat_right_visitor : visitor_base<concat_right_visitor<Node>>
+{
+    using this_t = concat_right_visitor;
+
+    template <typename RPos, typename LPos, typename TPos>
+    static concat_center_pos<Node>
+    visit_inner(RPos&& r, LPos&& l, TPos&& t)
+    {
+        return concat_inners<Node>(l, t, r);
+    }
+
+    template <typename RPos, typename LPos, typename TPos>
+    static concat_center_pos<Node>
+    visit_leaf(RPos&& r, LPos&& l, TPos&& t)
+    {
+        return concat_leafs<Node>(l, t, r);
+    }
+};
+
+// Used when both trees have equal shift: steps into the first child of
+// the right tree, at inner or leaf level accordingly.
+template <typename Node>
+struct concat_both_visitor : visitor_base<concat_both_visitor<Node>>
+{
+    using this_t = concat_both_visitor;
+
+    template <typename LPos, typename TPos, typename RPos>
+    static concat_center_pos<Node>
+    visit_inner(LPos&& l, TPos&& t, RPos&& r)
+    {
+        return r.first_sub(concat_right_visitor<Node>{}, l, t);
+    }
+
+    template <typename LPos, typename TPos, typename RPos>
+    static concat_center_pos<Node>
+    visit_leaf(LPos&& l, TPos&& t, RPos&& r)
+    {
+        return r.first_sub_leaf(concat_right_visitor<Node>{}, l, t);
+    }
+};
+
+// Entry visitor for the right tree's root when concatenating two whole
+// trees: hands both roots over to `concat_inners`.
+template <typename Node>
+struct concat_trees_right_visitor
+    : visitor_base<concat_trees_right_visitor<Node>>
+{
+    using this_t = concat_trees_right_visitor;
+
+    template <typename RPos, typename LPos, typename TPos>
+    static concat_center_pos<Node>
+    visit_node(RPos&& r, LPos&& l, TPos&& t)
+    {
+        return concat_inners<Node>(l, t, r);
+    }
+};
+
+// Entry visitor for the left tree's root: re-dispatches on the right
+// root (threaded through `args...`) via the right-hand entry visitor.
+template <typename Node>
+struct concat_trees_left_visitor : visitor_base<concat_trees_left_visitor<Node>>
+{
+    using this_t = concat_trees_left_visitor;
+
+    template <typename LPos, typename TPos, typename... Args>
+    static concat_center_pos<Node>
+    visit_node(LPos&& l, TPos&& t, Args&&... args)
+    {
+        return visit_maybe_relaxed_sub(
+            args..., concat_trees_right_visitor<Node>{}, l, t);
+    }
+};
+
+// Concatenates two whole trees where the left tree has both a root and
+// a tail; yields the relaxed position of the resulting root.
+template <typename Node>
+relaxed_pos<Node> concat_trees(Node* lroot,
+                               shift_t lshift,
+                               size_t lsize,
+                               Node* ltail,
+                               count_t ltcount,
+                               Node* rroot,
+                               shift_t rshift,
+                               size_t rsize)
+{
+    auto center = visit_maybe_relaxed_sub(lroot,
+                                          lshift,
+                                          lsize,
+                                          concat_trees_left_visitor<Node>{},
+                                          make_leaf_pos(ltail, ltcount),
+                                          rroot,
+                                          rshift,
+                                          rsize);
+    // realize() is rvalue-qualified: it consumes the center position
+    return std::move(center).realize();
+}
+
+// Concatenates a left tree consisting only of a tail with a full right
+// tree; yields the relaxed position of the resulting root.
+template <typename Node>
+relaxed_pos<Node> concat_trees(
+    Node* ltail, count_t ltcount, Node* rroot, shift_t rshift, size_t rsize)
+{
+    auto center = make_singleton_regular_sub_pos(ltail, ltcount)
+                      .visit(concat_trees_left_visitor<Node>{},
+                             empty_leaf_pos<Node>{},
+                             rroot,
+                             rshift,
+                             rsize);
+    // realize() is rvalue-qualified: it consumes the center position
+    return std::move(center).realize();
+}
+
+// The mutable (transient) concatenation reuses the same center-position
+// representation as the immutable one.
+template <typename Node>
+using concat_center_mut_pos = concat_center_pos<Node>;
+
+// Mutable (transient) counterpart of `concat_merger`: nodes uniquely
+// owned by the relevant edit token are mutated and reused in place
+// instead of copied.  `ec_` is the edit that will own all produced
+// nodes; `candidate_` is an existing root that may be recycled as the
+// next parent.
+template <typename Node>
+struct concat_merger_mut
+{
+    using node_t = Node;
+    using edit_t = typename Node::edit_t;
+
+    static constexpr auto B  = Node::bits;
+    static constexpr auto BL = Node::bits_leaf;
+
+    using result_t = concat_center_pos<Node>;
+
+    edit_t ec_ = {};
+
+    count_t* curr_; // next target child count from the plan
+    count_t n_;     // plan entries still to produce
+    result_t result_;
+    count_t count_      = 0; // children filled into the current parent
+    node_t* candidate_  = nullptr;
+    edit_t candidate_e_ = Node::memory::transience_t::noone;
+
+    concat_merger_mut(edit_t ec,
+                      shift_t shift,
+                      count_t* counts,
+                      count_t n,
+                      edit_t candidate_e,
+                      node_t* candidate)
+        : ec_{ec}
+        , curr_{counts}
+        , n_{n}
+        , result_{shift + B, nullptr, 0}
+    {
+        // reuse the offered candidate as the first parent if given
+        if (candidate) {
+            candidate->ensure_mutable_relaxed_e(candidate_e, ec);
+            result_.nodes_[0] = candidate;
+        } else {
+            result_.nodes_[0] = node_t::make_inner_r_e(ec);
+        }
+    }
+
+    // node currently being filled, and how much of it is initialized
+    node_t* to_        = {};
+    count_t to_offset_ = {};
+    size_t to_size_    = {};
+
+    // Offer another reusable node (e.g. the right root) for when the
+    // next parent must be opened.
+    void set_candidate(edit_t candidate_e, node_t* candidate)
+    {
+        candidate_   = candidate;
+        candidate_e_ = candidate_e;
+    }
+
+    // Append a finished child `p` of size `size` to the current parent,
+    // opening (or recycling) a new parent when the current one is full.
+    void add_child(node_t* p, size_t size)
+    {
+        ++curr_;
+        auto parent  = result_.nodes_[result_.count_ - 1];
+        auto relaxed = parent->relaxed();
+        if (count_ == branches<B>) {
+            parent->relaxed()->d.count = count_;
+            assert(result_.count_ < result_t::max_children);
+            n_ -= branches<B>;
+            if (candidate_) {
+                parent = candidate_;
+                parent->ensure_mutable_relaxed_e(candidate_e_, ec_);
+                candidate_ = nullptr;
+            } else
+                parent = node_t::make_inner_r_e(ec_);
+            count_                         = 0;
+            relaxed                        = parent->relaxed();
+            result_.nodes_[result_.count_] = parent;
+            result_.sizes_[result_.count_] = result_.sizes_[result_.count_ - 1];
+            ++result_.count_;
+        }
+        auto idx = count_++;
+        result_.sizes_[result_.count_ - 1] += size;
+        // per-node size tables are cumulative
+        relaxed->d.sizes[idx] = size + (idx ? relaxed->d.sizes[idx - 1] : 0);
+        parent->inner()[idx]  = p;
+    };
+
+    // Consume a source leaf.  A leaf matching the planned count is taken
+    // as-is; otherwise its values are moved (when `e` owns the leaf) or
+    // copied into the leaf currently being assembled, which may be the
+    // source leaf itself being compacted in place.
+    template <typename Pos>
+    void merge_leaf(Pos&& p, edit_t e)
+    {
+        auto from       = p.node();
+        auto from_size  = p.size();
+        auto from_count = p.count();
+        assert(from_size);
+        if (!to_ && *curr_ == from_count) {
+            add_child(from, from_size);
+        } else {
+            auto from_offset = count_t{};
+            auto from_data   = from->leaf();
+            auto from_mutate = from->can_mutate(e);
+            do {
+                if (!to_) {
+                    if (from_mutate) {
+                        // adopt the source leaf as the fill target
+                        node_t::ownee(from) = ec_;
+                        to_                 = from->inc();
+                        assert(from_count);
+                    } else {
+                        to_ = node_t::make_leaf_e(ec_);
+                    }
+                    to_offset_ = 0;
+                }
+                auto data = to_->leaf();
+                auto to_copy =
+                    std::min(from_count - from_offset, *curr_ - to_offset_);
+                if (from == to_) {
+                    // compacting within the same buffer
+                    if (from_offset != to_offset_)
+                        std::move(from_data + from_offset,
+                                  from_data + from_offset + to_copy,
+                                  data + to_offset_);
+                } else {
+                    if (!from_mutate)
+                        std::uninitialized_copy(from_data + from_offset,
+                                                from_data + from_offset +
+                                                    to_copy,
+                                                data + to_offset_);
+                    else
+                        detail::uninitialized_move(from_data + from_offset,
+                                                   from_data + from_offset +
+                                                       to_copy,
+                                                   data + to_offset_);
+                }
+                to_offset_ += to_copy;
+                from_offset += to_copy;
+                if (*curr_ == to_offset_) {
+                    add_child(to_, to_offset_);
+                    to_ = nullptr;
+                }
+            } while (from_offset != from_count);
+        }
+    }
+
+    // Consume a source inner node; same reuse/refill strategy as
+    // `merge_leaf`, additionally maintaining the relaxed size table.
+    template <typename Pos>
+    void merge_inner(Pos&& p, edit_t e)
+    {
+        auto from       = p.node();
+        auto from_size  = p.size();
+        auto from_count = p.count();
+        assert(from_size);
+        if (!to_ && *curr_ == from_count) {
+            add_child(from, from_size);
+        } else {
+            auto from_offset = count_t{};
+            auto from_data   = from->inner();
+            auto from_mutate = from->can_relax() && from->can_mutate(e);
+            do {
+                if (!to_) {
+                    if (from_mutate) {
+                        node_t::ownee(from) = ec_;
+                        from->ensure_mutable_relaxed_e(e, ec_);
+                        to_ = from;
+                    } else {
+                        to_ = node_t::make_inner_r_e(ec_);
+                    }
+                    to_offset_ = 0;
+                    to_size_   = 0;
+                }
+                auto data = to_->inner();
+                auto to_copy =
+                    std::min(from_count - from_offset, *curr_ - to_offset_);
+                auto sizes = to_->relaxed()->d.sizes;
+                // skip the copy when source and destination ranges are
+                // already the same slots of the same node
+                if (from != to_ || from_offset != to_offset_) {
+                    std::copy(from_data + from_offset,
+                              from_data + from_offset + to_copy,
+                              data + to_offset_);
+                    p.copy_sizes(
+                        from_offset, to_copy, to_size_, sizes + to_offset_);
+                }
+                to_offset_ += to_copy;
+                from_offset += to_copy;
+                to_size_ = sizes[to_offset_ - 1];
+                if (*curr_ == to_offset_) {
+                    to_->relaxed()->d.count = to_offset_;
+                    add_child(to_, to_size_);
+                    to_ = nullptr;
+                }
+            } while (from_offset != from_count);
+        }
+    }
+
+    // Finalize the last parent's child count and hand out the result.
+    concat_center_pos<Node> finish() const
+    {
+        assert(!to_);
+        result_.nodes_[result_.count_ - 1]->relaxed()->d.count = count_;
+        return result_;
+    }
+
+    void abort()
+    {
+        // We may have mutated stuff the tree in place, leaving
+        // everything in a corrupted state...  It should be possible
+        // to define cleanup properly, but that is a task for some
+        // other day... ;)
+        std::terminate();
+    }
+};
+
+// Visitor adaptor that feeds visited nodes into a `concat_merger_mut`,
+// threading through the edit token that governs in-place mutation.
+struct concat_merger_mut_visitor : visitor_base<concat_merger_mut_visitor>
+{
+    using this_t = concat_merger_mut_visitor;
+
+    template <typename PosT, typename MergerT>
+    static void visit_leaf(PosT&& pos, MergerT& m, edit_type<PosT> e)
+    {
+        m.merge_leaf(pos, e);
+    }
+
+    template <typename PosT, typename MergerT>
+    static void visit_inner(PosT&& pos, MergerT& m, edit_type<PosT> e)
+    {
+        m.merge_inner(pos, e);
+    }
+};
+
+// Mutable counterpart of concat_rebalance_plan.  merge() walks the
+// left, center and right positions and feeds their children into a
+// concat_merger_mut, which may adopt (mutate in place) nodes that are
+// uniquely owned under the corresponding edit token.
+template <bits_t B, bits_t BL>
+struct concat_rebalance_plan_mut : concat_rebalance_plan<B, BL>
+{
+    using this_t = concat_rebalance_plan_mut;
+
+    // ec: edit of the resulting center tree; el/er: edits of the left
+    // and right input trees, used to decide in-place mutability.
+    template <typename LPos, typename CPos, typename RPos>
+    concat_center_mut_pos<node_type<CPos>> merge(edit_type<CPos> ec,
+                                                 edit_type<CPos> el,
+                                                 LPos&& lpos,
+                                                 CPos&& cpos,
+                                                 edit_type<CPos> er,
+                                                 RPos&& rpos)
+    {
+        using node_t    = node_type<CPos>;
+        using merger_t  = concat_merger_mut<node_t>;
+        using visitor_t = concat_merger_mut_visitor;
+        auto lnode      = ((node_t*) lpos.node());
+        auto rnode      = ((node_t*) rpos.node());
+        // A side's node may be reused only if it can hold a relaxed
+        // header and is uniquely owned under that side's edit.
+        auto lmut2      = lnode && lnode->can_relax() && lnode->can_mutate(el);
+        auto rmut2      = rnode && rnode->can_relax() && rnode->can_mutate(er);
+        auto merger     = merger_t{ec,
+                               cpos.shift(),
+                               this->counts,
+                               this->n,
+                               el,
+                               lmut2 ? lnode : nullptr};
+        try {
+            lpos.each_left_sub(visitor_t{}, merger, el);
+            cpos.each_sub(visitor_t{}, merger, ec);
+            if (rmut2)
+                merger.set_candidate(er, rnode);
+            rpos.each_right_sub(visitor_t{}, merger, er);
+            return merger.finish();
+        } catch (...) {
+            // In-place mutations cannot be undone; abort() terminates.
+            merger.abort();
+            throw;
+        }
+    }
+};
+
+// Rebalancing step of the destructive (transient) concatenation:
+// compute a plan from the children of the three positions, shuffle it
+// into node-sized buckets for this shift, then merge -- possibly
+// mutating uniquely owned nodes of either input tree.
+template <typename Node, typename LPos, typename CPos, typename RPos>
+concat_center_pos<Node> concat_rebalance_mut(edit_type<Node> ec,
+                                             edit_type<Node> el,
+                                             LPos&& lpos,
+                                             CPos&& cpos,
+                                             edit_type<Node> er,
+                                             RPos&& rpos)
+{
+    auto plan = concat_rebalance_plan_mut<Node::bits, Node::bits_leaf>{};
+    plan.fill(lpos, cpos, rpos);
+    plan.shuffle(cpos.shift());
+    return plan.merge(ec, el, lpos, cpos, er, rpos);
+}
+
+// Concatenation at the leaf level: build a center position from the
+// left leaf, the (possibly empty) tail, and the right leaf.  All three
+// positions must already be at shift zero.  The edit tokens are part
+// of the signature (shared with concat_inners_mut via the visitors)
+// but are not needed here, since nothing is created or mutated.
+template <typename Node, typename LPos, typename TPos, typename RPos>
+concat_center_mut_pos<Node> concat_leafs_mut(edit_type<Node> ec,
+                                             edit_type<Node> el,
+                                             LPos&& lpos,
+                                             TPos&& tpos,
+                                             edit_type<Node> er,
+                                             RPos&& rpos)
+{
+    static_assert(Node::bits >= 2, "");
+    assert(lpos.shift() == tpos.shift());
+    assert(lpos.shift() == rpos.shift());
+    assert(lpos.shift() == 0);
+    if (tpos.count() > 0)
+        return {
+            Node::bits_leaf,
+            lpos.node(),
+            lpos.count(),
+            tpos.node(),
+            tpos.count(),
+            rpos.node(),
+            rpos.count(),
+        };
+    else
+        return {
+            Node::bits_leaf,
+            lpos.node(),
+            lpos.count(),
+            rpos.node(),
+            rpos.count(),
+        };
+}
+
+template <typename Node>
+struct concat_left_mut_visitor;
+template <typename Node>
+struct concat_right_mut_visitor;
+template <typename Node>
+struct concat_both_mut_visitor;
+
+// Concatenate two inner positions of possibly different heights:
+// descend the taller side until both sides are at the same shift,
+// producing a center position, then rebalance it together with the
+// remaining siblings of each side.
+template <typename Node, typename LPos, typename TPos, typename RPos>
+concat_center_mut_pos<Node> concat_inners_mut(edit_type<Node> ec,
+                                              edit_type<Node> el,
+                                              LPos&& lpos,
+                                              TPos&& tpos,
+                                              edit_type<Node> er,
+                                              RPos&& rpos)
+{
+    auto lshift = lpos.shift();
+    auto rshift = rpos.shift();
+    // lpos.node() can be null when it is a singleton_regular_sub_pos<...>,
+    // that is, when the tree is just a tail...
+    if (lshift > rshift) {
+        // Left side is taller: recurse into its last child.
+        auto cpos = lpos.last_sub(
+            concat_left_mut_visitor<Node>{}, ec, el, tpos, er, rpos);
+        return concat_rebalance_mut<Node>(
+            ec, el, lpos, cpos, er, null_sub_pos{});
+    } else if (lshift < rshift) {
+        // Right side is taller: recurse into its first child.
+        auto cpos = rpos.first_sub(
+            concat_right_mut_visitor<Node>{}, ec, el, lpos, tpos, er);
+        return concat_rebalance_mut<Node>(
+            ec, el, null_sub_pos{}, cpos, er, rpos);
+    } else {
+        assert(lshift == rshift);
+        assert(Node::bits_leaf == 0u || lshift > 0);
+        // Same height: join left's last child with right's first child.
+        auto cpos = lpos.last_sub(
+            concat_both_mut_visitor<Node>{}, ec, el, tpos, er, rpos);
+        return concat_rebalance_mut<Node>(ec, el, lpos, cpos, er, rpos);
+    }
+}
+
+// Visitor used while descending the (taller) left tree: keeps
+// recursing through inner nodes.  It is never invoked on a leaf --
+// descent stops earlier when the heights equalize.
+template <typename Node>
+struct concat_left_mut_visitor : visitor_base<concat_left_mut_visitor<Node>>
+{
+    using this_t = concat_left_mut_visitor;
+    using edit_t = typename Node::edit_t;
+
+    template <typename LPos, typename TPos, typename RPos>
+    static concat_center_mut_pos<Node> visit_inner(
+        LPos&& lpos, edit_t ec, edit_t el, TPos&& tpos, edit_t er, RPos&& rpos)
+    {
+        return concat_inners_mut<Node>(ec, el, lpos, tpos, er, rpos);
+    }
+
+    template <typename LPos, typename TPos, typename RPos>
+    static concat_center_mut_pos<Node> visit_leaf(
+        LPos&& lpos, edit_t ec, edit_t el, TPos&& tpos, edit_t er, RPos&& rpos)
+    {
+        // Leaf concatenation is always reached via the right visitor.
+        IMMER_UNREACHABLE;
+    }
+};
+
+// Visitor used while descending the (taller) right tree: recurses
+// through inner nodes and performs the actual leaf-level concatenation
+// once a leaf is reached.
+template <typename Node>
+struct concat_right_mut_visitor : visitor_base<concat_right_mut_visitor<Node>>
+{
+    using this_t = concat_right_mut_visitor;
+    using edit_t = typename Node::edit_t;
+
+    template <typename RPos, typename LPos, typename TPos>
+    static concat_center_mut_pos<Node> visit_inner(
+        RPos&& rpos, edit_t ec, edit_t el, LPos&& lpos, TPos&& tpos, edit_t er)
+    {
+        return concat_inners_mut<Node>(ec, el, lpos, tpos, er, rpos);
+    }
+
+    template <typename RPos, typename LPos, typename TPos>
+    static concat_center_mut_pos<Node> visit_leaf(
+        RPos&& rpos, edit_t ec, edit_t el, LPos&& lpos, TPos&& tpos, edit_t er)
+    {
+        return concat_leafs_mut<Node>(ec, el, lpos, tpos, er, rpos);
+    }
+};
+
+// Visitor applied to the left tree's last child when both trees have
+// equal height: immediately hops into the right tree's first child at
+// the same level via concat_right_mut_visitor.
+template <typename Node>
+struct concat_both_mut_visitor : visitor_base<concat_both_mut_visitor<Node>>
+{
+    using this_t = concat_both_mut_visitor;
+    using edit_t = typename Node::edit_t;
+
+    template <typename LPos, typename TPos, typename RPos>
+    static concat_center_mut_pos<Node> visit_inner(
+        LPos&& lpos, edit_t ec, edit_t el, TPos&& tpos, edit_t er, RPos&& rpos)
+    {
+        return rpos.first_sub(
+            concat_right_mut_visitor<Node>{}, ec, el, lpos, tpos, er);
+    }
+
+    template <typename LPos, typename TPos, typename RPos>
+    static concat_center_mut_pos<Node> visit_leaf(
+        LPos&& lpos, edit_t ec, edit_t el, TPos&& tpos, edit_t er, RPos&& rpos)
+    {
+        return rpos.first_sub_leaf(
+            concat_right_mut_visitor<Node>{}, ec, el, lpos, tpos, er);
+    }
+};
+
+// Entry visitor for the right tree's root (regular or relaxed alike,
+// hence a single visit_node): starts the inner concatenation.
+template <typename Node>
+struct concat_trees_right_mut_visitor
+    : visitor_base<concat_trees_right_mut_visitor<Node>>
+{
+    using this_t = concat_trees_right_mut_visitor;
+    using edit_t = typename Node::edit_t;
+
+    template <typename RPos, typename LPos, typename TPos>
+    static concat_center_mut_pos<Node> visit_node(
+        RPos&& rpos, edit_t ec, edit_t el, LPos&& lpos, TPos&& tpos, edit_t er)
+    {
+        return concat_inners_mut<Node>(ec, el, lpos, tpos, er, rpos);
+    }
+};
+
+// Entry visitor for the left tree's root: captures the left position,
+// then dispatches on the right tree's root (args... = right root,
+// shift and size) through concat_trees_right_mut_visitor.
+template <typename Node>
+struct concat_trees_left_mut_visitor
+    : visitor_base<concat_trees_left_mut_visitor<Node>>
+{
+    using this_t = concat_trees_left_mut_visitor;
+    using edit_t = typename Node::edit_t;
+
+    template <typename LPos, typename TPos, typename... Args>
+    static concat_center_mut_pos<Node> visit_node(LPos&& lpos,
+                                                  edit_t ec,
+                                                  edit_t el,
+                                                  TPos&& tpos,
+                                                  edit_t er,
+                                                  Args&&... args)
+    {
+        return visit_maybe_relaxed_sub(args...,
+                                       concat_trees_right_mut_visitor<Node>{},
+                                       ec,
+                                       el,
+                                       lpos,
+                                       tpos,
+                                       er);
+    }
+};
+
+// Destructively concatenate two trees: left tree given as root +
+// tail, right tree given as root only.  Visits both roots (handling
+// regular vs. relaxed), merges, and realizes the resulting center
+// into a relaxed root position under the center edit ec.
+template <typename Node>
+relaxed_pos<Node> concat_trees_mut(edit_type<Node> ec,
+                                   edit_type<Node> el,
+                                   Node* lroot,
+                                   shift_t lshift,
+                                   size_t lsize,
+                                   Node* ltail,
+                                   count_t ltcount,
+                                   edit_type<Node> er,
+                                   Node* rroot,
+                                   shift_t rshift,
+                                   size_t rsize)
+{
+    return visit_maybe_relaxed_sub(lroot,
+                                   lshift,
+                                   lsize,
+                                   concat_trees_left_mut_visitor<Node>{},
+                                   ec,
+                                   el,
+                                   make_leaf_pos(ltail, ltcount),
+                                   er,
+                                   rroot,
+                                   rshift,
+                                   rsize)
+        .realize_e(ec);
+}
+
+// Overload for a left tree that consists only of a tail (no root):
+// the tail is wrapped as a singleton regular position and the empty
+// leaf stands in for the missing left tail argument.
+template <typename Node>
+relaxed_pos<Node> concat_trees_mut(edit_type<Node> ec,
+                                   edit_type<Node> el,
+                                   Node* ltail,
+                                   count_t ltcount,
+                                   edit_type<Node> er,
+                                   Node* rroot,
+                                   shift_t rshift,
+                                   size_t rsize)
+{
+    return make_singleton_regular_sub_pos(ltail, ltcount)
+        .visit(concat_trees_left_mut_visitor<Node>{},
+               ec,
+               el,
+               empty_leaf_pos<Node>{},
+               er,
+               rroot,
+               rshift,
+               rsize)
+        .realize_e(ec);
+}
+
+} // namespace rbts
+} // namespace detail
+} // namespace immer
diff --git a/immer/detail/rbts/position.hpp b/immer/detail/rbts/position.hpp
new file mode 100644
index 000000000000..e9472b294088
--- /dev/null
+++ b/immer/detail/rbts/position.hpp
@@ -0,0 +1,1977 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/config.hpp>
+#include <immer/detail/rbts/bits.hpp>
+
+#include <cassert>
+#include <type_traits>
+#include <utility>
+
+namespace immer {
+namespace detail {
+namespace rbts {
+
+// Convenience aliases extracting node traits from a (possibly
+// reference-qualified) position type.
+template <typename Pos>
+constexpr auto bits = std::decay_t<Pos>::node_t::bits;
+
+// Bits per inner level vs. per leaf of the position's node type.
+template <typename Pos>
+constexpr auto bits_leaf = std::decay_t<Pos>::node_t::bits_leaf;
+
+// Underlying node type of a position type.
+template <typename Pos>
+using node_type = typename std::decay<Pos>::type::node_t;
+
+// Edit (transience token) type of the position's node type.
+template <typename Pos>
+using edit_type = typename std::decay<Pos>::type::node_t::edit_t;
+
+// Position over an empty regular (inner) node: all size/shift queries
+// return zero, traversal is a no-op, and each_pred trivially succeeds.
+template <typename NodeT>
+struct empty_regular_pos
+{
+    using node_t = NodeT;
+    node_t* node_;
+
+    count_t count() const { return 0; }
+    node_t* node() const { return node_; }
+    shift_t shift() const { return 0; }
+    size_t size() const { return 0; }
+
+    template <typename Visitor, typename... Args>
+    void each(Visitor, Args&&...)
+    {}
+    template <typename Visitor, typename... Args>
+    bool each_pred(Visitor, Args&&...)
+    {
+        return true;
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) visit(Visitor v, Args&&... args)
+    {
+        return Visitor::visit_regular(*this, std::forward<Args>(args)...);
+    }
+};
+
+// Factory for empty_regular_pos.
+template <typename NodeT>
+empty_regular_pos<NodeT> make_empty_regular_pos(NodeT* node)
+{
+    return {node};
+}
+
+// Position over an empty leaf node: zero-sized, dispatches to
+// Visitor::visit_leaf.
+template <typename NodeT>
+struct empty_leaf_pos
+{
+    using node_t = NodeT;
+    node_t* node_;
+
+    count_t count() const { return 0; }
+    node_t* node() const { return node_; }
+    shift_t shift() const { return 0; }
+    size_t size() const { return 0; }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) visit(Visitor v, Args&&... args)
+    {
+        return Visitor::visit_leaf(*this, std::forward<Args>(args)...);
+    }
+};
+
+// Factory for empty_leaf_pos; the node must exist even when empty.
+template <typename NodeT>
+empty_leaf_pos<NodeT> make_empty_leaf_pos(NodeT* node)
+{
+    assert(node);
+    return {node};
+}
+
+// Position over the rightmost (possibly partially filled) leaf of a
+// tree of total size size_; its element count is derived by masking
+// the last index with the leaf mask.
+template <typename NodeT>
+struct leaf_pos
+{
+    static constexpr auto B  = NodeT::bits;
+    static constexpr auto BL = NodeT::bits_leaf;
+
+    using node_t = NodeT;
+    node_t* node_;
+    size_t size_;
+
+    count_t count() const { return index(size_ - 1) + 1; }
+    node_t* node() const { return node_; }
+    size_t size() const { return size_; }
+    shift_t shift() const { return 0; }
+    count_t index(size_t idx) const { return idx & mask<BL>; }
+    count_t subindex(size_t idx) const { return idx; }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) visit(Visitor v, Args&&... args)
+    {
+        return Visitor::visit_leaf(*this, std::forward<Args>(args)...);
+    }
+};
+
+// Factory for leaf_pos; requires a non-empty tree.
+template <typename NodeT>
+leaf_pos<NodeT> make_leaf_pos(NodeT* node, size_t size)
+{
+    assert(node);
+    assert(size > 0);
+    return {node, size};
+}
+
+// Position over a leaf whose exact element count is known directly
+// (a "sub" position), rather than derived from a global tree size.
+template <typename NodeT>
+struct leaf_sub_pos
+{
+    static constexpr auto B  = NodeT::bits;
+    static constexpr auto BL = NodeT::bits_leaf;
+
+    using node_t = NodeT;
+    node_t* node_;
+    count_t count_;
+
+    count_t count() const { return count_; }
+    node_t* node() const { return node_; }
+    size_t size() const { return count_; }
+    shift_t shift() const { return 0; }
+    count_t index(size_t idx) const { return idx & mask<BL>; }
+    count_t subindex(size_t idx) const { return idx; }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) visit(Visitor v, Args&&... args)
+    {
+        return Visitor::visit_leaf(*this, std::forward<Args>(args)...);
+    }
+};
+
+// Factory for leaf_sub_pos; count may not exceed the leaf capacity.
+template <typename NodeT>
+leaf_sub_pos<NodeT> make_leaf_sub_pos(NodeT* node, count_t count)
+{
+    assert(node);
+    assert(count <= branches<NodeT::bits_leaf>);
+    return {node, count};
+}
+
+// Leaf position used during a pure descent (index lookup): carries no
+// size information, and descend() is the recursion terminator.
+template <typename NodeT>
+struct leaf_descent_pos
+{
+    static constexpr auto B  = NodeT::bits;
+    static constexpr auto BL = NodeT::bits_leaf;
+
+    using node_t = NodeT;
+    node_t* node_;
+
+    node_t* node() const { return node_; }
+    shift_t shift() const { return 0; }
+    count_t index(size_t idx) const { return idx & mask<BL>; }
+
+    // End of descent: nothing further to do at a leaf.
+    template <typename... Args>
+    decltype(auto) descend(Args&&...)
+    {}
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) visit(Visitor v, Args&&... args)
+    {
+        return Visitor::visit_leaf(*this, std::forward<Args>(args)...);
+    }
+};
+
+// Factory for leaf_descent_pos.
+template <typename NodeT>
+leaf_descent_pos<NodeT> make_leaf_descent_pos(NodeT* node)
+{
+    assert(node);
+    return {node};
+}
+
+// Position over a leaf known to be completely full (branches<BL>
+// elements), so count/size are compile-time constants.
+template <typename NodeT>
+struct full_leaf_pos
+{
+    static constexpr auto B  = NodeT::bits;
+    static constexpr auto BL = NodeT::bits_leaf;
+
+    using node_t = NodeT;
+    node_t* node_;
+
+    count_t count() const { return branches<BL>; }
+    node_t* node() const { return node_; }
+    size_t size() const { return branches<BL>; }
+    shift_t shift() const { return 0; }
+    count_t index(size_t idx) const { return idx & mask<BL>; }
+    count_t subindex(size_t idx) const { return idx; }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) visit(Visitor v, Args&&... args)
+    {
+        return Visitor::visit_leaf(*this, std::forward<Args>(args)...);
+    }
+};
+
+// Factory for full_leaf_pos.
+template <typename NodeT>
+full_leaf_pos<NodeT> make_full_leaf_pos(NodeT* node)
+{
+    assert(node);
+    return {node};
+}
+
+// Position over a regular (fully balanced, non-relaxed) inner node on
+// the rightmost spine.  size_ carries the overall tree size; child
+// counts and subtree sizes are derived purely from bit arithmetic on
+// it (no size table needed).  The each*/towards* members forward to
+// the free-function helpers defined below.
+template <typename NodeT>
+struct regular_pos
+{
+    static constexpr auto B  = NodeT::bits;
+    static constexpr auto BL = NodeT::bits_leaf;
+
+    using node_t = NodeT;
+    node_t* node_;
+    shift_t shift_;
+    size_t size_;
+
+    // Number of children actually present, derived from size_.
+    count_t count() const { return index(size_ - 1) + 1; }
+    node_t* node() const { return node_; }
+    size_t size() const { return size_; }
+    shift_t shift() const { return shift_; }
+    // Child slot holding global index idx at this level.
+    count_t index(size_t idx) const { return (idx >> shift_) & mask<B>; }
+    count_t subindex(size_t idx) const { return idx >> shift_; }
+    // Size of the subtree rooted at this node: size_ masked down to
+    // the bits relevant at shift_ + B.
+    size_t this_size() const
+    {
+        return ((size_ - 1) & ~(~size_t{} << (shift_ + B))) + 1;
+    }
+
+    template <typename Visitor, typename... Args>
+    void each(Visitor v, Args&&... args)
+    {
+        return each_regular(*this, v, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    bool each_pred(Visitor v, Args&&... args)
+    {
+        return each_pred_regular(*this, v, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    bool each_pred_zip(Visitor v, node_t* other, Args&&... args)
+    {
+        return each_pred_zip_regular(*this, v, other, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    bool each_pred_i(Visitor v, count_t i, count_t n, Args&&... args)
+    {
+        return each_pred_i_regular(*this, v, i, n, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    bool each_pred_right(Visitor v, count_t start, Args&&... args)
+    {
+        return each_pred_right_regular(*this, v, start, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    bool each_pred_left(Visitor v, count_t n, Args&&... args)
+    {
+        return each_pred_left_regular(*this, v, n, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    void each_i(Visitor v, count_t i, count_t n, Args&&... args)
+    {
+        return each_i_regular(*this, v, i, n, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    void each_right(Visitor v, count_t start, Args&&... args)
+    {
+        return each_right_regular(*this, v, start, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    void each_left(Visitor v, count_t n, Args&&... args)
+    {
+        return each_left_regular(*this, v, n, args...);
+    }
+
+    // Descend towards the child containing idx.
+    template <typename Visitor, typename... Args>
+    decltype(auto) towards(Visitor v, size_t idx, Args&&... args)
+    {
+        return towards_oh_ch_regular(
+            *this, v, idx, index(idx), count(), args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto)
+    towards_oh(Visitor v, size_t idx, count_t offset_hint, Args&&... args)
+    {
+        return towards_oh_ch_regular(
+            *this, v, idx, offset_hint, count(), args...);
+    }
+
+    // NOTE(review): count_hint is accepted but count() is recomputed
+    // and passed instead; harmless given towards_oh_ch_regular asserts
+    // count_hint == p.count(), but confirm against upstream immer.
+    template <typename Visitor, typename... Args>
+    decltype(auto) towards_oh_ch(Visitor v,
+                                 size_t idx,
+                                 count_t offset_hint,
+                                 count_t count_hint,
+                                 Args&&... args)
+    {
+        return towards_oh_ch_regular(
+            *this, v, idx, offset_hint, count(), args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto)
+    towards_sub_oh(Visitor v, size_t idx, count_t offset_hint, Args&&... args)
+    {
+        return towards_sub_oh_regular(*this, v, idx, offset_hint, args...);
+    }
+
+    // Visit the last (rightmost) child.
+    template <typename Visitor, typename... Args>
+    decltype(auto) last_oh(Visitor v, count_t offset_hint, Args&&... args)
+    {
+        return last_oh_regular(*this, v, offset_hint, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) visit(Visitor v, Args&&... args)
+    {
+        return Visitor::visit_regular(*this, std::forward<Args>(args)...);
+    }
+};
+
+// Visit every child of a regular position.  All children but the last
+// are necessarily full; the last one receives the position's size so
+// it can compute its own (possibly partial) count.
+template <typename Pos, typename Visitor, typename... Args>
+void each_regular(Pos&& p, Visitor v, Args&&... args)
+{
+    constexpr auto B  = bits<Pos>;
+    constexpr auto BL = bits_leaf<Pos>;
+    auto n            = p.node()->inner();
+    auto last         = p.count() - 1;
+    auto e            = n + last;
+    if (p.shift() == BL) {
+        // Children are leaves.
+        for (; n != e; ++n) {
+            IMMER_PREFETCH(n + 1);
+            make_full_leaf_pos(*n).visit(v, args...);
+        }
+        make_leaf_pos(*n, p.size()).visit(v, args...);
+    } else {
+        // Children are inner nodes one level down.
+        auto ss = p.shift() - B;
+        for (; n != e; ++n)
+            make_full_pos(*n, ss).visit(v, args...);
+        make_regular_pos(*n, ss, p.size()).visit(v, args...);
+    }
+}
+
+// Like each_regular, but stops and returns false as soon as the
+// visitor returns false for any child; returns true otherwise.
+template <typename Pos, typename Visitor, typename... Args>
+bool each_pred_regular(Pos&& p, Visitor v, Args&&... args)
+{
+    constexpr auto B  = bits<Pos>;
+    constexpr auto BL = bits_leaf<Pos>;
+    auto n            = p.node()->inner();
+    auto last         = p.count() - 1;
+    auto e            = n + last;
+    if (p.shift() == BL) {
+        for (; n != e; ++n) {
+            IMMER_PREFETCH(n + 1);
+            if (!make_full_leaf_pos(*n).visit(v, args...))
+                return false;
+        }
+        return make_leaf_pos(*n, p.size()).visit(v, args...);
+    } else {
+        auto ss = p.shift() - B;
+        for (; n != e; ++n)
+            if (!make_full_pos(*n, ss).visit(v, args...))
+                return false;
+        return make_regular_pos(*n, ss, p.size()).visit(v, args...);
+    }
+}
+
+// Predicate visitation over two parallel nodes: each child of p is
+// visited together with the corresponding child of `other`, which the
+// caller guarantees has matching structure.  Short-circuits on false.
+template <typename Pos, typename Visitor, typename... Args>
+bool each_pred_zip_regular(Pos&& p,
+                           Visitor v,
+                           node_type<Pos>* other,
+                           Args&&... args)
+{
+    constexpr auto B  = bits<Pos>;
+    constexpr auto BL = bits_leaf<Pos>;
+
+    auto n    = p.node()->inner();
+    auto n2   = other->inner();
+    auto last = p.count() - 1;
+    auto e    = n + last;
+    if (p.shift() == BL) {
+        for (; n != e; ++n, ++n2) {
+            IMMER_PREFETCH(n + 1);
+            IMMER_PREFETCH(n2 + 1);
+            if (!make_full_leaf_pos(*n).visit(v, *n2, args...))
+                return false;
+        }
+        return make_leaf_pos(*n, p.size()).visit(v, *n2, args...);
+    } else {
+        auto ss = p.shift() - B;
+        for (; n != e; ++n, ++n2)
+            if (!make_full_pos(*n, ss).visit(v, *n2, args...))
+                return false;
+        return make_regular_pos(*n, ss, p.size()).visit(v, *n2, args...);
+    }
+}
+
+// Predicate visitation over the child range [f, l).  When l reaches
+// the node's count, the final child is treated as partial (leaf_pos /
+// regular_pos); otherwise every visited child is full.  Returns false
+// on the first failing child, true if all pass (or the range is empty).
+template <typename Pos, typename Visitor, typename... Args>
+bool each_pred_i_regular(
+    Pos&& p, Visitor v, count_t f, count_t l, Args&&... args)
+{
+    constexpr auto B  = bits<Pos>;
+    constexpr auto BL = bits_leaf<Pos>;
+
+    if (p.shift() == BL) {
+        if (l > f) {
+            if (l < p.count()) {
+                // Range excludes the last child: all full leaves.
+                auto n = p.node()->inner() + f;
+                auto e = p.node()->inner() + l;
+                for (; n < e; ++n) {
+                    IMMER_PREFETCH(n + 1);
+                    if (!make_full_leaf_pos(*n).visit(v, args...))
+                        return false;
+                }
+            } else {
+                // Range includes the last, possibly partial, leaf.
+                auto n = p.node()->inner() + f;
+                auto e = p.node()->inner() + l - 1;
+                for (; n < e; ++n) {
+                    IMMER_PREFETCH(n + 1);
+                    if (!make_full_leaf_pos(*n).visit(v, args...))
+                        return false;
+                }
+                if (!make_leaf_pos(*n, p.size()).visit(v, args...))
+                    return false;
+            }
+        }
+    } else {
+        if (l > f) {
+            auto ss = p.shift() - B;
+            if (l < p.count()) {
+                auto n = p.node()->inner() + f;
+                auto e = p.node()->inner() + l;
+                for (; n < e; ++n)
+                    if (!make_full_pos(*n, ss).visit(v, args...))
+                        return false;
+            } else {
+                auto n = p.node()->inner() + f;
+                auto e = p.node()->inner() + l - 1;
+                for (; n < e; ++n)
+                    if (!make_full_pos(*n, ss).visit(v, args...))
+                        return false;
+                if (!make_regular_pos(*n, ss, p.size()).visit(v, args...))
+                    return false;
+            }
+        }
+    }
+    return true;
+}
+
+// Predicate visitation over the leftmost children [0, last).  Since
+// last < count(), every visited child is necessarily full.
+// Short-circuits on the first false.
+template <typename Pos, typename Visitor, typename... Args>
+bool each_pred_left_regular(Pos&& p, Visitor v, count_t last, Args&&... args)
+{
+    constexpr auto B  = bits<Pos>;
+    constexpr auto BL = bits_leaf<Pos>;
+    assert(last < p.count());
+    if (p.shift() == BL) {
+        auto n = p.node()->inner();
+        auto e = n + last;
+        for (; n != e; ++n) {
+            IMMER_PREFETCH(n + 1);
+            if (!make_full_leaf_pos(*n).visit(v, args...))
+                return false;
+        }
+    } else {
+        auto n  = p.node()->inner();
+        auto e  = n + last;
+        auto ss = p.shift() - B;
+        for (; n != e; ++n)
+            if (!make_full_pos(*n, ss).visit(v, args...))
+                return false;
+    }
+    return true;
+}
+
+// Predicate visitation over the rightmost children [start, count()).
+// All but the final child are full; the final one is given the
+// position's size.  An empty range (start past the last child)
+// trivially succeeds.  Short-circuits on the first false.
+template <typename Pos, typename Visitor, typename... Args>
+bool each_pred_right_regular(Pos&& p, Visitor v, count_t start, Args&&... args)
+{
+    constexpr auto B  = bits<Pos>;
+    constexpr auto BL = bits_leaf<Pos>;
+
+    if (p.shift() == BL) {
+        auto n    = p.node()->inner() + start;
+        auto last = p.count() - 1;
+        auto e    = p.node()->inner() + last;
+        if (n <= e) {
+            for (; n != e; ++n) {
+                IMMER_PREFETCH(n + 1);
+                if (!make_full_leaf_pos(*n).visit(v, args...))
+                    return false;
+            }
+            if (!make_leaf_pos(*n, p.size()).visit(v, args...))
+                return false;
+        }
+    } else {
+        auto n    = p.node()->inner() + start;
+        auto last = p.count() - 1;
+        auto e    = p.node()->inner() + last;
+        auto ss   = p.shift() - B;
+        if (n <= e) {
+            for (; n != e; ++n)
+                if (!make_full_pos(*n, ss).visit(v, args...))
+                    return false;
+            if (!make_regular_pos(*n, ss, p.size()).visit(v, args...))
+                return false;
+        }
+    }
+    return true;
+}
+
+// Non-predicate variant of each_pred_i_regular: visit children in
+// [f, l) unconditionally; the final child is partial only when l
+// reaches the node's count.
+template <typename Pos, typename Visitor, typename... Args>
+void each_i_regular(Pos&& p, Visitor v, count_t f, count_t l, Args&&... args)
+{
+    constexpr auto B  = bits<Pos>;
+    constexpr auto BL = bits_leaf<Pos>;
+
+    if (p.shift() == BL) {
+        if (l > f) {
+            if (l < p.count()) {
+                auto n = p.node()->inner() + f;
+                auto e = p.node()->inner() + l;
+                for (; n < e; ++n) {
+                    IMMER_PREFETCH(n + 1);
+                    make_full_leaf_pos(*n).visit(v, args...);
+                }
+            } else {
+                auto n = p.node()->inner() + f;
+                auto e = p.node()->inner() + l - 1;
+                for (; n < e; ++n) {
+                    IMMER_PREFETCH(n + 1);
+                    make_full_leaf_pos(*n).visit(v, args...);
+                }
+                make_leaf_pos(*n, p.size()).visit(v, args...);
+            }
+        }
+    } else {
+        if (l > f) {
+            auto ss = p.shift() - B;
+            if (l < p.count()) {
+                auto n = p.node()->inner() + f;
+                auto e = p.node()->inner() + l;
+                for (; n < e; ++n)
+                    make_full_pos(*n, ss).visit(v, args...);
+            } else {
+                auto n = p.node()->inner() + f;
+                auto e = p.node()->inner() + l - 1;
+                for (; n < e; ++n)
+                    make_full_pos(*n, ss).visit(v, args...);
+                make_regular_pos(*n, ss, p.size()).visit(v, args...);
+            }
+        }
+    }
+}
+
+// Visit the leftmost children [0, last) unconditionally; since
+// last < count(), every visited child is full.
+template <typename Pos, typename Visitor, typename... Args>
+void each_left_regular(Pos&& p, Visitor v, count_t last, Args&&... args)
+{
+    constexpr auto B  = bits<Pos>;
+    constexpr auto BL = bits_leaf<Pos>;
+    assert(last < p.count());
+    if (p.shift() == BL) {
+        auto n = p.node()->inner();
+        auto e = n + last;
+        for (; n != e; ++n) {
+            IMMER_PREFETCH(n + 1);
+            make_full_leaf_pos(*n).visit(v, args...);
+        }
+    } else {
+        auto n  = p.node()->inner();
+        auto e  = n + last;
+        auto ss = p.shift() - B;
+        for (; n != e; ++n)
+            make_full_pos(*n, ss).visit(v, args...);
+    }
+}
+
+// Visit the rightmost children [start, count()) unconditionally; the
+// final child is given the position's size so it may be partial.  An
+// empty range (start past the last child) is a no-op.
+template <typename Pos, typename Visitor, typename... Args>
+void each_right_regular(Pos&& p, Visitor v, count_t start, Args&&... args)
+{
+    constexpr auto B  = bits<Pos>;
+    constexpr auto BL = bits_leaf<Pos>;
+
+    if (p.shift() == BL) {
+        auto n    = p.node()->inner() + start;
+        auto last = p.count() - 1;
+        auto e    = p.node()->inner() + last;
+        if (n <= e) {
+            for (; n != e; ++n) {
+                IMMER_PREFETCH(n + 1);
+                make_full_leaf_pos(*n).visit(v, args...);
+            }
+            make_leaf_pos(*n, p.size()).visit(v, args...);
+        }
+    } else {
+        auto n    = p.node()->inner() + start;
+        auto last = p.count() - 1;
+        auto e    = p.node()->inner() + last;
+        auto ss   = p.shift() - B;
+        if (n <= e) {
+            for (; n != e; ++n)
+                make_full_pos(*n, ss).visit(v, args...);
+            make_regular_pos(*n, ss, p.size()).visit(v, args...);
+        }
+    }
+}
+
+// Descend one level towards global index idx.  offset_hint/count_hint
+// are caller-provided precomputations (asserted correct).  The target
+// child is full unless it is the last one; leaves and inner children
+// get the appropriate position type.
+template <typename Pos, typename Visitor, typename... Args>
+decltype(auto) towards_oh_ch_regular(Pos&& p,
+                                     Visitor v,
+                                     size_t idx,
+                                     count_t offset_hint,
+                                     count_t count_hint,
+                                     Args&&... args)
+{
+    constexpr auto B  = bits<Pos>;
+    constexpr auto BL = bits_leaf<Pos>;
+    assert(offset_hint == p.index(idx));
+    assert(count_hint == p.count());
+    auto is_leaf = p.shift() == BL;
+    auto child   = p.node()->inner()[offset_hint];
+    auto is_full = offset_hint + 1 != count_hint;
+    return is_full
+               ? (is_leaf ? make_full_leaf_pos(child).visit(v, idx, args...)
+                          : make_full_pos(child, p.shift() - B)
+                                .visit(v, idx, args...))
+               : (is_leaf
+                      ? make_leaf_pos(child, p.size()).visit(v, idx, args...)
+                      : make_regular_pos(child, p.shift() - B, p.size())
+                            .visit(v, idx, args...));
+}
+
+// Like towards_oh_ch_regular, but produces "sub" positions: the index
+// passed down is made relative to the child (idx - lsize), and the
+// child's own size is computed from this node's this_size().  The
+// child is full when at least a whole subtree's worth of elements
+// remains past its start.
+template <typename Pos, typename Visitor, typename... Args>
+decltype(auto) towards_sub_oh_regular(
+    Pos&& p, Visitor v, size_t idx, count_t offset_hint, Args&&... args)
+{
+    constexpr auto B  = bits<Pos>;
+    constexpr auto BL = bits_leaf<Pos>;
+    assert(offset_hint == p.index(idx));
+    auto is_leaf = p.shift() == BL;
+    auto child   = p.node()->inner()[offset_hint];
+    auto lsize   = offset_hint << p.shift();
+    auto size    = p.this_size();
+    auto is_full = (size - lsize) >= (size_t{1} << p.shift());
+    return is_full
+               ? (is_leaf
+                      ? make_full_leaf_pos(child).visit(v, idx - lsize, args...)
+                      : make_full_pos(child, p.shift() - B)
+                            .visit(v, idx - lsize, args...))
+               : (is_leaf
+                      ? make_leaf_sub_pos(child, size - lsize)
+                            .visit(v, idx - lsize, args...)
+                      : make_regular_sub_pos(child, p.shift() - B, size - lsize)
+                            .visit(v, idx - lsize, args...));
+}
+
+// Visits the last child of the regular position `p`.  Precondition:
+// `offset_hint == p.count() - 1`.  The child's size is taken from the
+// parent (`p.size()`), since only the last child of a regular node
+// may be partially filled.
+template <typename Pos, typename Visitor, typename... Args>
+decltype(auto)
+last_oh_regular(Pos&& p, Visitor v, count_t offset_hint, Args&&... args)
+{
+    constexpr auto B  = bits<Pos>;
+    constexpr auto BL = bits_leaf<Pos>;
+    assert(offset_hint == p.count() - 1);
+    auto last_child    = p.node()->inner()[offset_hint];
+    auto at_leaf_level = p.shift() == BL;
+    return at_leaf_level
+               ? make_leaf_pos(last_child, p.size()).visit(v, args...)
+               : make_regular_pos(last_child, p.shift() - B, p.size())
+                     .visit(v, args...);
+}
+
+/// Builds a `regular_pos` over `node`: a non-null inner node at
+/// height `shift` (at least the leaf shift) covering `size` > 0
+/// elements.
+template <typename NodeT>
+regular_pos<NodeT> make_regular_pos(NodeT* node, shift_t shift, size_t size)
+{
+    assert(size > 0);
+    assert(shift >= NodeT::bits_leaf);
+    assert(node);
+    return regular_pos<NodeT>{node, shift, size};
+}
+
+// The empty position: used where an algorithm requires some position
+// object but there is no node to traverse.  `node()` is always null
+// and every traversal entry point is a no-op.
+struct null_sub_pos
+{
+    auto node() const { return nullptr; }
+
+    template <typename Visitor, typename... Args>
+    void visit(Visitor, Args&&...)
+    {}
+    template <typename Visitor, typename... Args>
+    void each_sub(Visitor, Args&&...)
+    {}
+    template <typename Visitor, typename... Args>
+    void each_right_sub(Visitor, Args&&...)
+    {}
+    template <typename Visitor, typename... Args>
+    void each_left_sub(Visitor, Args&&...)
+    {}
+};
+
+// A fake regular position whose single child is the given leaf node,
+// letting algorithms treat a lone leaf as if it were a whole tree.
+// All accessors answer as an inner node with exactly one child would.
+template <typename NodeT>
+struct singleton_regular_sub_pos
+{
+    // this is a fake regular pos made out of a single child... useful
+    // to treat a single leaf node as a whole tree
+
+    static constexpr auto B  = NodeT::bits;
+    static constexpr auto BL = NodeT::bits_leaf;
+
+    using node_t = NodeT;
+    node_t* leaf_;  // the only child (a leaf node)
+    count_t count_; // number of elements stored in leaf_
+
+    // constant answers for the one-child facade: one child, at leaf
+    // height, holding all count_ elements; there is no real inner
+    // node, so node() is null
+    count_t count() const { return 1; }
+    node_t* node() const { return nullptr; }
+    size_t size() const { return count_; }
+    shift_t shift() const { return BL; }
+    count_t index(size_t idx) const { return 0; }
+    count_t subindex(size_t idx) const { return 0; }
+    size_t size_before(count_t offset) const { return 0; }
+    size_t this_size() const { return count_; }
+    size_t size(count_t offset) { return count_; }
+
+    // deliberately empty: visitors using this facade do not traverse
+    // the child through these entry points -- TODO(review): confirm
+    // against callers that rely on last_sub() only
+    template <typename Visitor, typename... Args>
+    void each_left_sub(Visitor v, Args&&... args)
+    {}
+    template <typename Visitor, typename... Args>
+    void each(Visitor v, Args&&... args)
+    {}
+
+    // visits the leaf as the last (and only) child
+    template <typename Visitor, typename... Args>
+    decltype(auto) last_sub(Visitor v, Args&&... args)
+    {
+        return make_leaf_sub_pos(leaf_, count_).visit(v, args...);
+    }
+
+    // dispatches to the visitor's regular-node handler
+    template <typename Visitor, typename... Args>
+    decltype(auto) visit(Visitor v, Args&&... args)
+    {
+        return Visitor::visit_regular(*this, std::forward<Args>(args)...);
+    }
+};
+
+/// Wraps a single leaf node as a `singleton_regular_sub_pos`, i.e. a
+/// one-child regular tree.  `leaf` must be non-null, tagged as a leaf
+/// node, and hold `count` > 0 elements.
+template <typename NodeT>
+auto make_singleton_regular_sub_pos(NodeT* leaf, count_t count)
+{
+    assert(count > 0);
+    assert(leaf);
+    IMMER_ASSERT_TAGGED(leaf->kind() == NodeT::kind_t::leaf);
+    return singleton_regular_sub_pos<NodeT>{leaf, count};
+}
+
+// Position over a regular (non-relaxed) inner node that is the
+// rightmost node at its level, so its subtree may be partially
+// filled: every child except the last is full, and the last child's
+// size is derived from `size_`.  `shift_` is the node's height
+// expressed as a bit shift (BL for the level just above the leaves).
+template <typename NodeT>
+struct regular_sub_pos
+{
+    static constexpr auto B  = NodeT::bits;
+    static constexpr auto BL = NodeT::bits_leaf;
+
+    using node_t = NodeT;
+    node_t* node_;  // the inner node this position points at
+    shift_t shift_; // height of node_, as a bit shift
+    size_t size_;   // number of elements in node_'s whole subtree
+
+    // number of children actually present
+    count_t count() const { return subindex(size_ - 1) + 1; }
+    node_t* node() const { return node_; }
+    size_t size() const { return size_; }
+    shift_t shift() const { return shift_; }
+    // offset of the child holding element idx, masked to this level
+    count_t index(size_t idx) const { return (idx >> shift_) & mask<B>; }
+    // like index(), but unmasked (valid because idx < size_)
+    count_t subindex(size_t idx) const { return idx >> shift_; }
+    // number of elements stored before child `offset`
+    size_t size_before(count_t offset) const { return offset << shift_; }
+    size_t this_size() const { return size_; }
+
+    // size of the child at `offset`: a full 1 << shift_ elements,
+    // except for the last child which gets the remainder
+    auto size(count_t offset)
+    {
+        return offset == subindex(size_ - 1) ? size_ - size_before(offset)
+                                             : size_t{1} << shift_;
+    }
+
+    // like size(), with the caller supplying an already-computed
+    // size_before(offset)
+    auto size_sbh(count_t offset, size_t size_before_hint)
+    {
+        assert(size_before_hint == size_before(offset));
+        return offset == subindex(size_ - 1) ? size_ - size_before_hint
+                                             : size_t{1} << shift_;
+    }
+
+    // fills sizes[0..n) with running cumulative totals starting at
+    // `init`: one full child's worth per entry, except the last entry
+    // which uses the true size of child `last`
+    void copy_sizes(count_t offset, count_t n, size_t init, size_t* sizes)
+    {
+        if (n) {
+            auto last = offset + n - 1;
+            auto e    = sizes + n - 1;
+            for (; sizes != e; ++sizes)
+                init = *sizes = init + (size_t{1} << shift_);
+            *sizes = init + size(last);
+        }
+    }
+
+    // the each* family delegates to the free each_*_regular helpers;
+    // the *_pred variants stop early (returning false) when the
+    // visitor returns false
+    template <typename Visitor, typename... Args>
+    void each(Visitor v, Args&&... args)
+    {
+        return each_regular(*this, v, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    bool each_pred(Visitor v, Args&&... args)
+    {
+        return each_pred_regular(*this, v, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    bool each_pred_zip(Visitor v, node_t* other, Args&&... args)
+    {
+        return each_pred_zip_regular(*this, v, other, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    bool each_pred_i(Visitor v, count_t i, count_t n, Args&&... args)
+    {
+        return each_pred_i_regular(*this, v, i, n, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    bool each_pred_right(Visitor v, count_t start, Args&&... args)
+    {
+        return each_pred_right_regular(*this, v, start, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    bool each_pred_left(Visitor v, count_t last, Args&&... args)
+    {
+        return each_pred_left_regular(*this, v, last, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    void each_i(Visitor v, count_t i, count_t n, Args&&... args)
+    {
+        return each_i_regular(*this, v, i, n, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    void each_right(Visitor v, count_t start, Args&&... args)
+    {
+        return each_right_regular(*this, v, start, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    void each_left(Visitor v, count_t last, Args&&... args)
+    {
+        return each_left_regular(*this, v, last, args...);
+    }
+
+    // visits children [i, count()): all but the last as full
+    // positions, the last as a sub position holding the remainder
+    template <typename Visitor, typename... Args>
+    void each_right_sub_(Visitor v, count_t i, Args&&... args)
+    {
+        auto last  = count() - 1;
+        auto lsize = size_ - (last << shift_);
+        auto n     = node()->inner() + i;
+        auto e     = node()->inner() + last;
+        if (shift() == BL) {
+            for (; n != e; ++n) {
+                IMMER_PREFETCH(n + 1);
+                make_full_leaf_pos(*n).visit(v, args...);
+            }
+            make_leaf_sub_pos(*n, lsize).visit(v, args...);
+        } else {
+            auto ss = shift_ - B;
+            for (; n != e; ++n)
+                make_full_pos(*n, ss).visit(v, args...);
+            make_regular_sub_pos(*n, ss, lsize).visit(v, args...);
+        }
+    }
+
+    // visits every child with proper (possibly partial) sizes
+    template <typename Visitor, typename... Args>
+    void each_sub(Visitor v, Args&&... args)
+    {
+        each_right_sub_(v, 0, args...);
+    }
+
+    // visits every child but the first
+    template <typename Visitor, typename... Args>
+    void each_right_sub(Visitor v, Args&&... args)
+    {
+        if (count() > 1)
+            each_right_sub_(v, 1, args...);
+    }
+
+    // visits every child but the last (all of which are full)
+    template <typename Visitor, typename... Args>
+    void each_left_sub(Visitor v, Args&&... args)
+    {
+        each_left(v, count() - 1, args...);
+    }
+
+    // descend into the child containing idx; the _oh/_ch suffixes
+    // take precomputed offset/count hints
+    template <typename Visitor, typename... Args>
+    decltype(auto) towards(Visitor v, size_t idx, Args&&... args)
+    {
+        return towards_oh_ch_regular(
+            *this, v, idx, index(idx), count(), args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto)
+    towards_oh(Visitor v, size_t idx, count_t offset_hint, Args&&... args)
+    {
+        return towards_oh_ch_regular(
+            *this, v, idx, offset_hint, count(), args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) towards_oh_ch(Visitor v,
+                                 size_t idx,
+                                 count_t offset_hint,
+                                 count_t count_hint,
+                                 Args&&... args)
+    {
+        // NOTE(review): count_hint is ignored and count() recomputed;
+        // they must be equal per towards_oh_ch_regular's
+        // precondition, so this only wastes the hint -- confirm
+        // before forwarding count_hint instead
+        return towards_oh_ch_regular(
+            *this, v, idx, offset_hint, count(), args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto)
+    towards_sub_oh(Visitor v, size_t idx, count_t offset_hint, Args&&... args)
+    {
+        return towards_sub_oh_regular(*this, v, idx, offset_hint, args...);
+    }
+
+    // visits the last child (offset_hint must be count() - 1)
+    template <typename Visitor, typename... Args>
+    decltype(auto) last_oh(Visitor v, count_t offset_hint, Args&&... args)
+    {
+        return last_oh_regular(*this, v, offset_hint, args...);
+    }
+
+    // visits the last child as a sub position with its exact size
+    template <typename Visitor, typename... Args>
+    decltype(auto) last_sub(Visitor v, Args&&... args)
+    {
+        auto offset  = count() - 1;
+        auto child   = node_->inner()[offset];
+        auto is_leaf = shift_ == BL;
+        auto lsize   = size_ - (offset << shift_);
+        return is_leaf ? make_leaf_sub_pos(child, lsize).visit(v, args...)
+                       : make_regular_sub_pos(child, shift_ - B, lsize)
+                             .visit(v, args...);
+    }
+
+    // visits the first child; full iff the subtree holds at least one
+    // whole child's worth of elements
+    template <typename Visitor, typename... Args>
+    decltype(auto) first_sub(Visitor v, Args&&... args)
+    {
+        auto is_leaf = shift_ == BL;
+        auto child   = node_->inner()[0];
+        auto is_full = size_ >= (size_t{1} << shift_);
+        return is_full
+                   ? (is_leaf
+                          ? make_full_leaf_pos(child).visit(v, args...)
+                          : make_full_pos(child, shift_ - B).visit(v, args...))
+                   : (is_leaf
+                          ? make_leaf_sub_pos(child, size_).visit(v, args...)
+                          : make_regular_sub_pos(child, shift_ - B, size_)
+                                .visit(v, args...));
+    }
+
+    // visits the first child knowing it is a leaf (shift_ == BL)
+    template <typename Visitor, typename... Args>
+    decltype(auto) first_sub_leaf(Visitor v, Args&&... args)
+    {
+        assert(shift_ == BL);
+        auto child   = node_->inner()[0];
+        auto is_full = size_ >= branches<BL>;
+        return is_full ? make_full_leaf_pos(child).visit(v, args...)
+                       : make_leaf_sub_pos(child, size_).visit(v, args...);
+    }
+
+    // visits the first child knowing it is an inner node
+    template <typename Visitor, typename... Args>
+    decltype(auto) first_sub_inner(Visitor v, Args&&... args)
+    {
+        assert(shift_ >= BL);
+        auto child   = node_->inner()[0];
+        // NOTE(review): fullness is checked against branches<BL> as
+        // in first_sub_leaf, whereas first_sub uses
+        // size_ >= (1 << shift_); looks inconsistent -- confirm the
+        // contexts in which callers invoke this
+        auto is_full = size_ >= branches<BL>;
+        return is_full ? make_full_pos(child, shift_ - B).visit(v, args...)
+                       : make_regular_sub_pos(child, shift_ - B, size_)
+                             .visit(v, args...);
+    }
+
+    // visits the idx-th child; every child before the last is full
+    template <typename Visitor, typename... Args>
+    decltype(auto) nth_sub(count_t idx, Visitor v, Args&&... args)
+    {
+        assert(idx < count());
+        auto is_leaf = shift_ == BL;
+        auto child   = node_->inner()[idx];
+        auto lsize   = size(idx);
+        auto is_full = idx + 1 < count();
+        return is_full
+                   ? (is_leaf
+                          ? make_full_leaf_pos(child).visit(v, args...)
+                          : make_full_pos(child, shift_ - B).visit(v, args...))
+                   : (is_leaf
+                          ? make_leaf_sub_pos(child, lsize).visit(v, args...)
+                          : make_regular_sub_pos(child, shift_ - B, lsize)
+                                .visit(v, args...));
+    }
+
+    // visits the idx-th child knowing it is a leaf (shift_ == BL)
+    template <typename Visitor, typename... Args>
+    decltype(auto) nth_sub_leaf(count_t idx, Visitor v, Args&&... args)
+    {
+        assert(shift_ == BL);
+        auto child   = node_->inner()[idx];
+        auto lsize   = size(idx);
+        auto is_full = idx + 1 < count();
+        return is_full ? make_full_leaf_pos(child).visit(v, args...)
+                       : make_leaf_sub_pos(child, lsize).visit(v, args...);
+    }
+
+    // dispatches to the visitor's regular-node handler
+    template <typename Visitor, typename... Args>
+    decltype(auto) visit(Visitor v, Args&&... args)
+    {
+        return Visitor::visit_regular(*this, std::forward<Args>(args)...);
+    }
+};
+
+/// Builds a `regular_sub_pos` over `node`: a non-null inner node at
+/// height `shift` whose subtree holds `size` elements, with
+/// 0 < size <= the maximum capacity at that height.
+template <typename NodeT>
+regular_sub_pos<NodeT>
+make_regular_sub_pos(NodeT* node, shift_t shift, size_t size)
+{
+    assert(size > 0);
+    assert(size <= (branches<NodeT::bits, size_t> << shift));
+    assert(shift >= NodeT::bits_leaf);
+    assert(node);
+    return regular_sub_pos<NodeT>{node, shift, size};
+}
+
+// Descent position with the node height fixed at compile time
+// (`Shift`): lets the compiler fully unroll the descent towards the
+// leaf holding an index.  Instantiated once per possible tree depth
+// by visit_regular_descent below.
+template <typename NodeT,
+          shift_t Shift,
+          bits_t B  = NodeT::bits,
+          bits_t BL = NodeT::bits_leaf>
+struct regular_descent_pos
+{
+    static_assert(Shift > 0, "not leaf...");
+
+    using node_t = NodeT;
+    node_t* node_;
+
+    node_t* node() const { return node_; }
+    shift_t shift() const { return Shift; }
+    count_t index(size_t idx) const
+    {
+// presumably: some instantiations of this template produce a Shift
+// as wide as (or wider than) size_t, triggering this warning on a
+// path that is never taken at runtime -- TODO confirm which
+// instantiations
+#if !defined(_MSC_VER)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshift-count-overflow"
+#endif
+        return (idx >> Shift) & mask<B>;
+#if !defined(_MSC_VER)
+#pragma GCC diagnostic pop
+#endif
+    }
+
+    // steps into the child containing idx at the next (compile-time)
+    // level down
+    template <typename Visitor>
+    decltype(auto) descend(Visitor v, size_t idx)
+    {
+        auto offset = index(idx);
+        auto child  = node_->inner()[offset];
+        return regular_descent_pos<NodeT, Shift - B>{child}.visit(v, idx);
+    }
+
+    // dispatches to the visitor's regular-node handler
+    template <typename Visitor, typename... Args>
+    decltype(auto) visit(Visitor v, Args&&... args)
+    {
+        return Visitor::visit_regular(*this, std::forward<Args>(args)...);
+    }
+};
+
+// Base case of the compile-time descent: at leaf height (Shift ==
+// BL) the next step down enters a leaf node rather than another
+// inner node.
+template <typename NodeT, bits_t B, bits_t BL>
+struct regular_descent_pos<NodeT, BL, B, BL>
+{
+    using node_t = NodeT;
+    node_t* node_;
+
+    node_t* node() const { return node_; }
+    shift_t shift() const { return BL; }
+    count_t index(size_t idx) const { return (idx >> BL) & mask<B>; }
+
+    // steps into the leaf containing idx, terminating the descent
+    template <typename Visitor>
+    decltype(auto) descend(Visitor v, size_t idx)
+    {
+        auto offset = index(idx);
+        auto child  = node_->inner()[offset];
+        return make_leaf_descent_pos(child).visit(v, idx);
+    }
+
+    // dispatches to the visitor's regular-node handler
+    template <typename Visitor, typename... Args>
+    decltype(auto) visit(Visitor v, Args&&... args)
+    {
+        return Visitor::visit_regular(*this, std::forward<Args>(args)...);
+    }
+};
+
+// Starts a descent towards index `idx` from `node` at runtime height
+// `shift`, dispatching to a compile-time-depth regular_descent_pos
+// for each of the first six possible depths.  With
+// IMMER_DESCENT_DEEP enabled, deeper trees fall back to a runtime
+// loop; otherwise a shift beyond BL + 5 * B falls through to
+// IMMER_UNREACHABLE.
+template <typename NodeT, typename Visitor>
+decltype(auto)
+visit_regular_descent(NodeT* node, shift_t shift, Visitor v, size_t idx)
+{
+    constexpr auto B  = NodeT::bits;
+    constexpr auto BL = NodeT::bits_leaf;
+    assert(node);
+    assert(shift >= BL);
+    switch (shift) {
+    case BL + B * 0:
+        return regular_descent_pos<NodeT, BL + B * 0>{node}.visit(v, idx);
+    case BL + B * 1:
+        return regular_descent_pos<NodeT, BL + B * 1>{node}.visit(v, idx);
+    case BL + B * 2:
+        return regular_descent_pos<NodeT, BL + B * 2>{node}.visit(v, idx);
+    case BL + B * 3:
+        return regular_descent_pos<NodeT, BL + B * 3>{node}.visit(v, idx);
+    case BL + B * 4:
+        return regular_descent_pos<NodeT, BL + B * 4>{node}.visit(v, idx);
+    case BL + B * 5:
+        return regular_descent_pos<NodeT, BL + B * 5>{node}.visit(v, idx);
+#if IMMER_DESCENT_DEEP
+    default:
+        // runtime descent for trees deeper than the unrolled cases
+        for (auto level = shift; level != endshift<B, BL>; level -= B)
+            node = node->inner()[(idx >> level) & mask<B>];
+        return make_leaf_descent_pos(node).visit(v, idx);
+#endif // IMMER_DESCENT_DEEP
+    }
+    IMMER_UNREACHABLE;
+}
+
+// Position over an inner node whose subtree is completely full:
+// every one of its branches<B> children exists and covers exactly
+// 1 << shift_ elements.  This invariant lets every size-related
+// query be answered without looking at the children.
+template <typename NodeT>
+struct full_pos
+{
+    static constexpr auto B  = NodeT::bits;
+    static constexpr auto BL = NodeT::bits_leaf;
+
+    using node_t = NodeT;
+    node_t* node_;  // the inner node this position points at
+    shift_t shift_; // height of node_, as a bit shift
+
+    // all answers follow from fullness: branches<B> children, each of
+    // size 1 << shift_
+    count_t count() const { return branches<B>; }
+    node_t* node() const { return node_; }
+    size_t size() const { return branches<B> << shift_; }
+    shift_t shift() const { return shift_; }
+    count_t index(size_t idx) const { return (idx >> shift_) & mask<B>; }
+    count_t subindex(size_t idx) const { return idx >> shift_; }
+    size_t size(count_t offset) const { return size_t{1} << shift_; }
+    size_t size_sbh(count_t offset, size_t) const
+    {
+        return size_t{1} << shift_;
+    }
+    size_t size_before(count_t offset) const { return offset << shift_; }
+
+    // fills sizes[0..n) with running cumulative totals starting at
+    // `init`, one full child's worth per entry
+    void copy_sizes(count_t offset, count_t n, size_t init, size_t* sizes)
+    {
+        auto e = sizes + n;
+        for (; sizes != e; ++sizes)
+            init = *sizes = init + (size_t{1} << shift_);
+    }
+
+    // visits every child; all children are full positions
+    template <typename Visitor, typename... Args>
+    void each(Visitor v, Args&&... args)
+    {
+        auto p = node_->inner();
+        auto e = p + branches<B>;
+        if (shift_ == BL) {
+            for (; p != e; ++p) {
+                IMMER_PREFETCH(p + 1);
+                make_full_leaf_pos(*p).visit(v, args...);
+            }
+        } else {
+            auto ss = shift_ - B;
+            for (; p != e; ++p)
+                make_full_pos(*p, ss).visit(v, args...);
+        }
+    }
+
+    // like each(), but stops and returns false as soon as the visitor
+    // returns false
+    template <typename Visitor, typename... Args>
+    bool each_pred(Visitor v, Args&&... args)
+    {
+        auto p = node_->inner();
+        auto e = p + branches<B>;
+        if (shift_ == BL) {
+            for (; p != e; ++p) {
+                IMMER_PREFETCH(p + 1);
+                if (!make_full_leaf_pos(*p).visit(v, args...))
+                    return false;
+            }
+        } else {
+            auto ss = shift_ - B;
+            for (; p != e; ++p)
+                if (!make_full_pos(*p, ss).visit(v, args...))
+                    return false;
+        }
+        return true;
+    }
+
+    // like each_pred(), additionally passing the corresponding child
+    // of `other` to the visitor at each step
+    template <typename Visitor, typename... Args>
+    bool each_pred_zip(Visitor v, node_t* other, Args&&... args)
+    {
+        auto p  = node_->inner();
+        auto p2 = other->inner();
+        auto e  = p + branches<B>;
+        if (shift_ == BL) {
+            for (; p != e; ++p, ++p2) {
+                IMMER_PREFETCH(p + 1);
+                if (!make_full_leaf_pos(*p).visit(v, *p2, args...))
+                    return false;
+            }
+        } else {
+            auto ss = shift_ - B;
+            for (; p != e; ++p, ++p2)
+                if (!make_full_pos(*p, ss).visit(v, *p2, args...))
+                    return false;
+        }
+        return true;
+    }
+
+    // like each_pred(), restricted to children [i, n)
+    template <typename Visitor, typename... Args>
+    bool each_pred_i(Visitor v, count_t i, count_t n, Args&&... args)
+    {
+        auto p = node_->inner() + i;
+        auto e = node_->inner() + n;
+        if (shift_ == BL) {
+            for (; p != e; ++p) {
+                IMMER_PREFETCH(p + 1);
+                if (!make_full_leaf_pos(*p).visit(v, args...))
+                    return false;
+            }
+        } else {
+            auto ss = shift_ - B;
+            for (; p != e; ++p)
+                if (!make_full_pos(*p, ss).visit(v, args...))
+                    return false;
+        }
+        return true;
+    }
+
+    // visits children [i, n)
+    template <typename Visitor, typename... Args>
+    void each_i(Visitor v, count_t i, count_t n, Args&&... args)
+    {
+        auto p = node_->inner() + i;
+        auto e = node_->inner() + n;
+        if (shift_ == BL) {
+            for (; p != e; ++p) {
+                IMMER_PREFETCH(p + 1);
+                make_full_leaf_pos(*p).visit(v, args...);
+            }
+        } else {
+            auto ss = shift_ - B;
+            for (; p != e; ++p)
+                make_full_pos(*p, ss).visit(v, args...);
+        }
+    }
+
+    template <typename Visitor, typename... Args>
+    bool each_pred_right(Visitor v, count_t start, Args&&... args)
+    {
+        return each_pred_i(v, start, branches<B>, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    bool each_pred_left(Visitor v, count_t last, Args&&... args)
+    {
+        return each_pred_i(v, 0, last, args...);
+    }
+
+    // *_sub variants coincide with the plain ones here: in a full
+    // node every child already has its exact (full) size
+    template <typename Visitor, typename... Args>
+    void each_sub(Visitor v, Args&&... args)
+    {
+        each(v, args...);
+    }
+
+    // every child but the last
+    template <typename Visitor, typename... Args>
+    void each_left_sub(Visitor v, Args&&... args)
+    {
+        each_i(v, 0, branches<B> - 1, args...);
+    }
+
+    // every child but the first
+    template <typename Visitor, typename... Args>
+    void each_right_sub(Visitor v, Args&&... args)
+    {
+        each_i(v, 1, branches<B>, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    void each_right(Visitor v, count_t start, Args&&... args)
+    {
+        each_i(v, start, branches<B>, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    void each_left(Visitor v, count_t last, Args&&... args)
+    {
+        each_i(v, 0, last, args...);
+    }
+
+    // descend into the child containing idx; children of a full node
+    // are always full, so the count hint is irrelevant
+    template <typename Visitor, typename... Args>
+    decltype(auto) towards(Visitor v, size_t idx, Args&&... args)
+    {
+        return towards_oh(v, idx, index(idx), args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) towards_oh_ch(
+        Visitor v, size_t idx, count_t offset_hint, count_t, Args&&... args)
+    {
+        return towards_oh(v, idx, offset_hint, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto)
+    towards_oh(Visitor v, size_t idx, count_t offset_hint, Args&&... args)
+    {
+        assert(offset_hint == index(idx));
+        auto is_leaf = shift_ == BL;
+        auto child   = node_->inner()[offset_hint];
+        return is_leaf
+                   ? make_full_leaf_pos(child).visit(v, idx, args...)
+                   : make_full_pos(child, shift_ - B).visit(v, idx, args...);
+    }
+
+    // like towards_oh, but passes the index relative to the child
+    template <typename Visitor, typename... Args>
+    decltype(auto)
+    towards_sub_oh(Visitor v, size_t idx, count_t offset_hint, Args&&... args)
+    {
+        assert(offset_hint == index(idx));
+        auto is_leaf = shift_ == BL;
+        auto child   = node_->inner()[offset_hint];
+        auto lsize   = offset_hint << shift_;
+        return is_leaf
+                   ? make_full_leaf_pos(child).visit(v, idx - lsize, args...)
+                   : make_full_pos(child, shift_ - B)
+                         .visit(v, idx - lsize, args...);
+    }
+
+    // visits the first child (necessarily full)
+    template <typename Visitor, typename... Args>
+    decltype(auto) first_sub(Visitor v, Args&&... args)
+    {
+        auto is_leaf = shift_ == BL;
+        auto child   = node_->inner()[0];
+        return is_leaf ? make_full_leaf_pos(child).visit(v, args...)
+                       : make_full_pos(child, shift_ - B).visit(v, args...);
+    }
+
+    // visits the first child knowing it is a leaf (shift_ == BL)
+    template <typename Visitor, typename... Args>
+    decltype(auto) first_sub_leaf(Visitor v, Args&&... args)
+    {
+        assert(shift_ == BL);
+        auto child = node_->inner()[0];
+        return make_full_leaf_pos(child).visit(v, args...);
+    }
+
+    // visits the first child knowing it is an inner node
+    template <typename Visitor, typename... Args>
+    decltype(auto) first_sub_inner(Visitor v, Args&&... args)
+    {
+        assert(shift_ >= BL);
+        auto child = node_->inner()[0];
+        return make_full_pos(child, shift_ - B).visit(v, args...);
+    }
+
+    // visits the idx-th child (necessarily full)
+    template <typename Visitor, typename... Args>
+    decltype(auto) nth_sub(count_t idx, Visitor v, Args&&... args)
+    {
+        assert(idx < count());
+        auto is_leaf = shift_ == BL;
+        auto child   = node_->inner()[idx];
+        return is_leaf ? make_full_leaf_pos(child).visit(v, args...)
+                       : make_full_pos(child, shift_ - B).visit(v, args...);
+    }
+
+    // visits the idx-th child knowing it is a leaf (shift_ == BL)
+    template <typename Visitor, typename... Args>
+    decltype(auto) nth_sub_leaf(count_t idx, Visitor v, Args&&... args)
+    {
+        assert(shift_ == BL);
+        assert(idx < count());
+        auto child = node_->inner()[idx];
+        return make_full_leaf_pos(child).visit(v, args...);
+    }
+
+    // dispatches to the visitor's regular-node handler
+    template <typename Visitor, typename... Args>
+    decltype(auto) visit(Visitor v, Args&&... args)
+    {
+        return Visitor::visit_regular(*this, std::forward<Args>(args)...);
+    }
+};
+
+/// Builds a `full_pos` over `node`: a non-null inner node at height
+/// `shift` (at least the leaf shift) whose subtree is completely
+/// full.
+template <typename NodeT>
+full_pos<NodeT> make_full_pos(NodeT* node, shift_t shift)
+{
+    assert(shift >= NodeT::bits_leaf);
+    assert(node);
+    return full_pos<NodeT>{node, shift};
+}
+
+template <typename NodeT>
+struct relaxed_pos
+{
+    static constexpr auto B  = NodeT::bits;
+    static constexpr auto BL = NodeT::bits_leaf;
+
+    using node_t    = NodeT;
+    using relaxed_t = typename NodeT::relaxed_t;
+    node_t* node_;
+    shift_t shift_;
+    relaxed_t* relaxed_;
+
+    count_t count() const { return relaxed_->d.count; }
+    node_t* node() const { return node_; }
+    size_t size() const { return relaxed_->d.sizes[relaxed_->d.count - 1]; }
+    shift_t shift() const { return shift_; }
+    count_t subindex(size_t idx) const { return index(idx); }
+    relaxed_t* relaxed() const { return relaxed_; }
+
+    size_t size_before(count_t offset) const
+    {
+        return offset ? relaxed_->d.sizes[offset - 1] : 0;
+    }
+
+    size_t size(count_t offset) const
+    {
+        return size_sbh(offset, size_before(offset));
+    }
+
+    size_t size_sbh(count_t offset, size_t size_before_hint) const
+    {
+        assert(size_before_hint == size_before(offset));
+        return relaxed_->d.sizes[offset] - size_before_hint;
+    }
+
+    count_t index(size_t idx) const
+    {
+        auto offset = idx >> shift_;
+        while (relaxed_->d.sizes[offset] <= idx)
+            ++offset;
+        return offset;
+    }
+
+    void copy_sizes(count_t offset, count_t n, size_t init, size_t* sizes)
+    {
+        auto e     = sizes + n;
+        auto prev  = size_before(offset);
+        auto these = relaxed_->d.sizes + offset;
+        for (; sizes != e; ++sizes, ++these) {
+            auto this_size = *these;
+            init = *sizes = init + (this_size - prev);
+            prev          = this_size;
+        }
+    }
+
+    template <typename Visitor, typename... Args>
+    void each(Visitor v, Args&&... args)
+    {
+        each_left(v, relaxed_->d.count, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    bool each_pred(Visitor v, Args&&... args)
+    {
+        auto p = node_->inner();
+        auto s = size_t{};
+        auto n = count();
+        if (shift_ == BL) {
+            for (auto i = count_t{0}; i < n; ++i) {
+                IMMER_PREFETCH(p + i + 1);
+                if (!make_leaf_sub_pos(p[i], relaxed_->d.sizes[i] - s)
+                         .visit(v, args...))
+                    return false;
+                s = relaxed_->d.sizes[i];
+            }
+        } else {
+            auto ss = shift_ - B;
+            for (auto i = count_t{0}; i < n; ++i) {
+                if (!visit_maybe_relaxed_sub(
+                        p[i], ss, relaxed_->d.sizes[i] - s, v, args...))
+                    return false;
+                s = relaxed_->d.sizes[i];
+            }
+        }
+        return true;
+    }
+
+    template <typename Visitor, typename... Args>
+    bool each_pred_i(Visitor v, count_t i, count_t n, Args&&... args)
+    {
+        if (shift_ == BL) {
+            auto p = node_->inner();
+            auto s = i > 0 ? relaxed_->d.sizes[i - 1] : 0;
+            for (; i < n; ++i) {
+                IMMER_PREFETCH(p + i + 1);
+                if (!make_leaf_sub_pos(p[i], relaxed_->d.sizes[i] - s)
+                         .visit(v, args...))
+                    return false;
+                s = relaxed_->d.sizes[i];
+            }
+        } else {
+            auto p  = node_->inner();
+            auto s  = i > 0 ? relaxed_->d.sizes[i - 1] : 0;
+            auto ss = shift_ - B;
+            for (; i < n; ++i) {
+                if (!visit_maybe_relaxed_sub(
+                        p[i], ss, relaxed_->d.sizes[i] - s, v, args...))
+                    return false;
+                s = relaxed_->d.sizes[i];
+            }
+        }
+        return true;
+    }
+
+    template <typename Visitor, typename... Args>
+    bool each_pred_left(Visitor v, count_t n, Args&&... args)
+    {
+        auto p = node_->inner();
+        auto s = size_t{};
+        if (shift_ == BL) {
+            for (auto i = count_t{0}; i < n; ++i) {
+                IMMER_PREFETCH(p + i + 1);
+                if (!make_leaf_sub_pos(p[i], relaxed_->d.sizes[i] - s)
+                         .visit(v, args...))
+                    return false;
+                s = relaxed_->d.sizes[i];
+            }
+        } else {
+            auto ss = shift_ - B;
+            for (auto i = count_t{0}; i < n; ++i) {
+                if (!visit_maybe_relaxed_sub(
+                        p[i], ss, relaxed_->d.sizes[i] - s, v, args...))
+                    return false;
+                s = relaxed_->d.sizes[i];
+            }
+        }
+        return true;
+    }
+
+    template <typename Visitor, typename... Args>
+    bool each_pred_right(Visitor v, count_t start, Args&&... args)
+    {
+        assert(start > 0);
+        assert(start <= relaxed_->d.count);
+        auto s = relaxed_->d.sizes[start - 1];
+        auto p = node_->inner();
+        if (shift_ == BL) {
+            for (auto i = start; i < relaxed_->d.count; ++i) {
+                IMMER_PREFETCH(p + i + 1);
+                if (!make_leaf_sub_pos(p[i], relaxed_->d.sizes[i] - s)
+                         .visit(v, args...))
+                    return false;
+                s = relaxed_->d.sizes[i];
+            }
+        } else {
+            auto ss = shift_ - B;
+            for (auto i = start; i < relaxed_->d.count; ++i) {
+                if (!visit_maybe_relaxed_sub(
+                        p[i], ss, relaxed_->d.sizes[i] - s, v, args...))
+                    return false;
+                s = relaxed_->d.sizes[i];
+            }
+        }
+        return true;
+    }
+
+    template <typename Visitor, typename... Args>
+    void each_i(Visitor v, count_t i, count_t n, Args&&... args)
+    {
+        if (shift_ == BL) {
+            auto p = node_->inner();
+            auto s = i > 0 ? relaxed_->d.sizes[i - 1] : 0;
+            for (; i < n; ++i) {
+                IMMER_PREFETCH(p + i + 1);
+                make_leaf_sub_pos(p[i], relaxed_->d.sizes[i] - s)
+                    .visit(v, args...);
+                s = relaxed_->d.sizes[i];
+            }
+        } else {
+            auto p  = node_->inner();
+            auto s  = i > 0 ? relaxed_->d.sizes[i - 1] : 0;
+            auto ss = shift_ - B;
+            for (; i < n; ++i) {
+                visit_maybe_relaxed_sub(
+                    p[i], ss, relaxed_->d.sizes[i] - s, v, args...);
+                s = relaxed_->d.sizes[i];
+            }
+        }
+    }
+
+    template <typename Visitor, typename... Args>
+    void each_sub(Visitor v, Args&&... args)
+    {
+        each_left(v, relaxed_->d.count, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    void each_left_sub(Visitor v, Args&&... args)
+    {
+        each_left(v, relaxed_->d.count - 1, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    void each_left(Visitor v, count_t n, Args&&... args)
+    {
+        auto p = node_->inner();
+        auto s = size_t{};
+        if (shift_ == BL) {
+            for (auto i = count_t{0}; i < n; ++i) {
+                IMMER_PREFETCH(p + i + 1);
+                make_leaf_sub_pos(p[i], relaxed_->d.sizes[i] - s)
+                    .visit(v, args...);
+                s = relaxed_->d.sizes[i];
+            }
+        } else {
+            auto ss = shift_ - B;
+            for (auto i = count_t{0}; i < n; ++i) {
+                visit_maybe_relaxed_sub(
+                    p[i], ss, relaxed_->d.sizes[i] - s, v, args...);
+                s = relaxed_->d.sizes[i];
+            }
+        }
+    }
+
+    template <typename Visitor, typename... Args>
+    void each_right_sub(Visitor v, Args&&... args)
+    {
+        each_right(v, 1, std::forward<Args>(args)...);
+    }
+
+    template <typename Visitor, typename... Args>
+    void each_right(Visitor v, count_t start, Args&&... args)
+    {
+        assert(start > 0);
+        assert(start <= relaxed_->d.count);
+        auto s = relaxed_->d.sizes[start - 1];
+        auto p = node_->inner();
+        if (shift_ == BL) {
+            for (auto i = start; i < relaxed_->d.count; ++i) {
+                IMMER_PREFETCH(p + i + 1);
+                make_leaf_sub_pos(p[i], relaxed_->d.sizes[i] - s)
+                    .visit(v, args...);
+                s = relaxed_->d.sizes[i];
+            }
+        } else {
+            auto ss = shift_ - B;
+            for (auto i = start; i < relaxed_->d.count; ++i) {
+                visit_maybe_relaxed_sub(
+                    p[i], ss, relaxed_->d.sizes[i] - s, v, args...);
+                s = relaxed_->d.sizes[i];
+            }
+        }
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) towards(Visitor v, size_t idx, Args&&... args)
+    {
+        return towards_oh(v, idx, subindex(idx), args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto)
+    towards_oh(Visitor v, size_t idx, count_t offset_hint, Args&&... args)
+    {
+        assert(offset_hint == index(idx));
+        auto left_size = offset_hint ? relaxed_->d.sizes[offset_hint - 1] : 0;
+        return towards_oh_sbh(v, idx, offset_hint, left_size, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) towards_oh_sbh(Visitor v,
+                                  size_t idx,
+                                  count_t offset_hint,
+                                  size_t left_size_hint,
+                                  Args&&... args)
+    {
+        return towards_sub_oh_sbh(v, idx, offset_hint, left_size_hint, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto)
+    towards_sub_oh(Visitor v, size_t idx, count_t offset_hint, Args&&... args)
+    {
+        assert(offset_hint == index(idx));
+        auto left_size = offset_hint ? relaxed_->d.sizes[offset_hint - 1] : 0;
+        return towards_sub_oh_sbh(v, idx, offset_hint, left_size, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) towards_sub_oh_sbh(Visitor v,
+                                      size_t idx,
+                                      count_t offset_hint,
+                                      size_t left_size_hint,
+                                      Args&&... args)
+    {
+        assert(offset_hint == index(idx));
+        assert(left_size_hint ==
+               (offset_hint ? relaxed_->d.sizes[offset_hint - 1] : 0));
+        auto child     = node_->inner()[offset_hint];
+        auto is_leaf   = shift_ == BL;
+        auto next_size = relaxed_->d.sizes[offset_hint] - left_size_hint;
+        auto next_idx  = idx - left_size_hint;
+        return is_leaf
+                   ? make_leaf_sub_pos(child, next_size)
+                         .visit(v, next_idx, args...)
+                   : visit_maybe_relaxed_sub(
+                         child, shift_ - B, next_size, v, next_idx, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) last_oh_csh(Visitor v,
+                               count_t offset_hint,
+                               size_t child_size_hint,
+                               Args&&... args)
+    {
+        assert(offset_hint == count() - 1);
+        assert(child_size_hint == size(offset_hint));
+        auto child   = node_->inner()[offset_hint];
+        auto is_leaf = shift_ == BL;
+        return is_leaf
+                   ? make_leaf_sub_pos(child, child_size_hint).visit(v, args...)
+                   : visit_maybe_relaxed_sub(
+                         child, shift_ - B, child_size_hint, v, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) last_sub(Visitor v, Args&&... args)
+    {
+        auto offset     = relaxed_->d.count - 1;
+        auto child      = node_->inner()[offset];
+        auto child_size = size(offset);
+        auto is_leaf    = shift_ == BL;
+        return is_leaf ? make_leaf_sub_pos(child, child_size).visit(v, args...)
+                       : visit_maybe_relaxed_sub(
+                             child, shift_ - B, child_size, v, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) first_sub(Visitor v, Args&&... args)
+    {
+        auto child      = node_->inner()[0];
+        auto child_size = relaxed_->d.sizes[0];
+        auto is_leaf    = shift_ == BL;
+        return is_leaf ? make_leaf_sub_pos(child, child_size).visit(v, args...)
+                       : visit_maybe_relaxed_sub(
+                             child, shift_ - B, child_size, v, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) first_sub_leaf(Visitor v, Args&&... args)
+    {
+        assert(shift_ == BL);
+        auto child      = node_->inner()[0];
+        auto child_size = relaxed_->d.sizes[0];
+        return make_leaf_sub_pos(child, child_size).visit(v, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) first_sub_inner(Visitor v, Args&&... args)
+    {
+        assert(shift_ > BL);
+        auto child      = node_->inner()[0];
+        auto child_size = relaxed_->d.sizes[0];
+        return visit_maybe_relaxed_sub(
+            child, shift_ - B, child_size, v, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) nth_sub(count_t offset, Visitor v, Args&&... args)
+    {
+        auto child      = node_->inner()[offset];
+        auto child_size = size(offset);
+        auto is_leaf    = shift_ == BL;
+        return is_leaf ? make_leaf_sub_pos(child, child_size).visit(v, args...)
+                       : visit_maybe_relaxed_sub(
+                             child, shift_ - B, child_size, v, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) nth_sub_leaf(count_t offset, Visitor v, Args&&... args)
+    {
+        assert(shift_ == BL);
+        auto child      = node_->inner()[offset];
+        auto child_size = size(offset);
+        return make_leaf_sub_pos(child, child_size).visit(v, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) visit(Visitor v, Args&&... args)
+    {
+        return Visitor::visit_relaxed(*this, std::forward<Args>(args)...);
+    }
+};
+
+template <typename Pos>
+using is_relaxed = std::is_same<relaxed_pos<typename std::decay_t<Pos>::node_t>,
+                                std::decay_t<Pos>>;
+
+template <typename Pos>
+constexpr auto is_relaxed_v = is_relaxed<Pos>::value;
+
+template <typename NodeT>
+relaxed_pos<NodeT>
+make_relaxed_pos(NodeT* node, shift_t shift, typename NodeT::relaxed_t* relaxed)
+{
+    assert(node);
+    assert(relaxed);
+    assert(shift >= NodeT::bits_leaf);
+    return {node, shift, relaxed};
+}
+
+template <typename NodeT, typename Visitor, typename... Args>
+decltype(auto) visit_maybe_relaxed_sub(
+    NodeT* node, shift_t shift, size_t size, Visitor v, Args&&... args)
+{
+    assert(node);
+    auto relaxed = node->relaxed();
+    if (relaxed) {
+        assert(size == relaxed->d.sizes[relaxed->d.count - 1]);
+        return make_relaxed_pos(node, shift, relaxed)
+            .visit(v, std::forward<Args>(args)...);
+    } else {
+        return make_regular_sub_pos(node, shift, size)
+            .visit(v, std::forward<Args>(args)...);
+    }
+}
+
+template <typename NodeT,
+          shift_t Shift,
+          bits_t B  = NodeT::bits,
+          bits_t BL = NodeT::bits_leaf>
+struct relaxed_descent_pos
+{
+    static_assert(Shift > 0, "not leaf...");
+
+    using node_t    = NodeT;
+    using relaxed_t = typename NodeT::relaxed_t;
+    node_t* node_;
+    relaxed_t* relaxed_;
+
+    count_t count() const { return relaxed_->d.count; }
+    node_t* node() const { return node_; }
+    shift_t shift() const { return Shift; }
+    size_t size() const { return relaxed_->d.sizes[relaxed_->d.count - 1]; }
+
+    count_t index(size_t idx) const
+    {
+        // make gcc happy
+#if !defined(_MSC_VER)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshift-count-overflow"
+#endif
+        auto offset = idx >> Shift;
+#if !defined(_MSC_VER)
+#pragma GCC diagnostic pop
+#endif
+        while (relaxed_->d.sizes[offset] <= idx)
+            ++offset;
+        return offset;
+    }
+
+    template <typename Visitor>
+    decltype(auto) descend(Visitor v, size_t idx)
+    {
+        auto offset    = index(idx);
+        auto child     = node_->inner()[offset];
+        auto left_size = offset ? relaxed_->d.sizes[offset - 1] : 0;
+        auto next_idx  = idx - left_size;
+        auto r         = child->relaxed();
+        return r ? relaxed_descent_pos<NodeT, Shift - B>{child, r}.visit(
+                       v, next_idx)
+                 : regular_descent_pos<NodeT, Shift - B>{child}.visit(v,
+                                                                      next_idx);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) visit(Visitor v, Args&&... args)
+    {
+        return Visitor::visit_relaxed(*this, std::forward<Args>(args)...);
+    }
+};
+
+template <typename NodeT, bits_t B, bits_t BL>
+struct relaxed_descent_pos<NodeT, BL, B, BL>
+{
+    using node_t    = NodeT;
+    using relaxed_t = typename NodeT::relaxed_t;
+    node_t* node_;
+    relaxed_t* relaxed_;
+
+    count_t count() const { return relaxed_->d.count; }
+    node_t* node() const { return node_; }
+    shift_t shift() const { return BL; }
+    size_t size() const { return relaxed_->d.sizes[relaxed_->d.count - 1]; }
+
+    count_t index(size_t idx) const
+    {
+        auto offset = (idx >> BL) & mask<B>;
+        while (relaxed_->d.sizes[offset] <= idx)
+            ++offset;
+        return offset;
+    }
+
+    template <typename Visitor>
+    decltype(auto) descend(Visitor v, size_t idx)
+    {
+        auto offset    = index(idx);
+        auto child     = node_->inner()[offset];
+        auto left_size = offset ? relaxed_->d.sizes[offset - 1] : 0;
+        auto next_idx  = idx - left_size;
+        return leaf_descent_pos<NodeT>{child}.visit(v, next_idx);
+    }
+
+    template <typename Visitor, typename... Args>
+    decltype(auto) visit(Visitor v, Args&&... args)
+    {
+        return Visitor::visit_relaxed(*this, std::forward<Args>(args)...);
+    }
+};
+
+template <typename NodeT, typename Visitor, typename... Args>
+decltype(auto)
+visit_maybe_relaxed_descent(NodeT* node, shift_t shift, Visitor v, size_t idx)
+{
+    constexpr auto B  = NodeT::bits;
+    constexpr auto BL = NodeT::bits_leaf;
+    assert(node);
+    assert(shift >= BL);
+    auto r = node->relaxed();
+    if (r) {
+        switch (shift) {
+        case BL + B * 0:
+            return relaxed_descent_pos<NodeT, BL + B * 0>{node, r}.visit(v,
+                                                                         idx);
+        case BL + B * 1:
+            return relaxed_descent_pos<NodeT, BL + B * 1>{node, r}.visit(v,
+                                                                         idx);
+        case BL + B * 2:
+            return relaxed_descent_pos<NodeT, BL + B * 2>{node, r}.visit(v,
+                                                                         idx);
+        case BL + B * 3:
+            return relaxed_descent_pos<NodeT, BL + B * 3>{node, r}.visit(v,
+                                                                         idx);
+        case BL + B * 4:
+            return relaxed_descent_pos<NodeT, BL + B * 4>{node, r}.visit(v,
+                                                                         idx);
+        case BL + B * 5:
+            return relaxed_descent_pos<NodeT, BL + B * 5>{node, r}.visit(v,
+                                                                         idx);
+#if IMMER_DESCENT_DEEP
+        default:
+            for (auto level = shift; level != endshift<B, BL>; level -= B) {
+                auto r = node->relaxed();
+                if (r) {
+                    auto node_idx = (idx >> level) & mask<B>;
+                    while (r->d.sizes[node_idx] <= idx)
+                        ++node_idx;
+                    if (node_idx)
+                        idx -= r->d.sizes[node_idx - 1];
+                    node = node->inner()[node_idx];
+                } else {
+                    do {
+                        node = node->inner()[(idx >> level) & mask<B>];
+                    } while ((level -= B) != endshift<B, BL>);
+                    return make_leaf_descent_pos(node).visit(v, idx);
+                }
+            }
+            return make_leaf_descent_pos(node).visit(v, idx);
+#endif // IMMER_DESCENT_DEEP
+        }
+        IMMER_UNREACHABLE;
+    } else {
+        return visit_regular_descent(node, shift, v, idx);
+    }
+}
+
+} // namespace rbts
+} // namespace detail
+} // namespace immer
diff --git a/immer/detail/rbts/rbtree.hpp b/immer/detail/rbts/rbtree.hpp
new file mode 100644
index 000000000000..b31f5e9b0788
--- /dev/null
+++ b/immer/detail/rbts/rbtree.hpp
@@ -0,0 +1,509 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/detail/rbts/node.hpp>
+#include <immer/detail/rbts/operations.hpp>
+#include <immer/detail/rbts/position.hpp>
+
+#include <immer/detail/type_traits.hpp>
+
+#include <cassert>
+#include <memory>
+#include <numeric>
+
+namespace immer {
+namespace detail {
+namespace rbts {
+
+template <typename T, typename MemoryPolicy, bits_t B, bits_t BL>
+struct rbtree
+{
+    using node_t  = node<T, MemoryPolicy, B, BL>;
+    using edit_t  = typename node_t::edit_t;
+    using owner_t = typename MemoryPolicy::transience_t::owner;
+
+    size_t size;
+    shift_t shift;
+    node_t* root;
+    node_t* tail;
+
+    static const rbtree& empty()
+    {
+        static const rbtree empty_{
+            0, BL, node_t::make_inner_n(0u), node_t::make_leaf_n(0u)};
+        return empty_;
+    }
+
+    template <typename U>
+    static auto from_initializer_list(std::initializer_list<U> values)
+    {
+        auto e      = owner_t{};
+        auto result = rbtree{empty()};
+        for (auto&& v : values)
+            result.push_back_mut(e, v);
+        return result;
+    }
+
+    template <typename Iter,
+              typename Sent,
+              std::enable_if_t<compatible_sentinel_v<Iter, Sent>, bool> = true>
+    static auto from_range(Iter first, Sent last)
+    {
+        auto e      = owner_t{};
+        auto result = rbtree{empty()};
+        for (; first != last; ++first)
+            result.push_back_mut(e, *first);
+        return result;
+    }
+
+    static auto from_fill(size_t n, T v)
+    {
+        auto e      = owner_t{};
+        auto result = rbtree{empty()};
+        while (n-- > 0)
+            result.push_back_mut(e, v);
+        return result;
+    }
+
+    rbtree(size_t sz, shift_t sh, node_t* r, node_t* t)
+        : size{sz}
+        , shift{sh}
+        , root{r}
+        , tail{t}
+    {
+        assert(check_tree());
+    }
+
+    rbtree(const rbtree& other)
+        : rbtree{other.size, other.shift, other.root, other.tail}
+    {
+        inc();
+    }
+
+    rbtree(rbtree&& other)
+        : rbtree{empty()}
+    {
+        swap(*this, other);
+    }
+
+    rbtree& operator=(const rbtree& other)
+    {
+        auto next = other;
+        swap(*this, next);
+        return *this;
+    }
+
+    rbtree& operator=(rbtree&& other)
+    {
+        swap(*this, other);
+        return *this;
+    }
+
+    friend void swap(rbtree& x, rbtree& y)
+    {
+        using std::swap;
+        swap(x.size, y.size);
+        swap(x.shift, y.shift);
+        swap(x.root, y.root);
+        swap(x.tail, y.tail);
+    }
+
+    ~rbtree() { dec(); }
+
+    void inc() const
+    {
+        root->inc();
+        tail->inc();
+    }
+
+    void dec() const { traverse(dec_visitor()); }
+
+    auto tail_size() const { return size ? ((size - 1) & mask<BL>) +1 : 0; }
+
+    auto tail_offset() const { return size ? (size - 1) & ~mask<BL> : 0; }
+
+    template <typename Visitor, typename... Args>
+    void traverse(Visitor v, Args&&... args) const
+    {
+        auto tail_off  = tail_offset();
+        auto tail_size = size - tail_off;
+
+        if (tail_off)
+            make_regular_sub_pos(root, shift, tail_off).visit(v, args...);
+        else
+            make_empty_regular_pos(root).visit(v, args...);
+
+        make_leaf_sub_pos(tail, tail_size).visit(v, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    void traverse(Visitor v, size_t first, size_t last, Args&&... args) const
+    {
+        auto tail_off  = tail_offset();
+        auto tail_size = size - tail_off;
+
+        if (first < tail_off)
+            make_regular_sub_pos(root, shift, tail_off)
+                .visit(v, first, last < tail_off ? last : tail_off, args...);
+        if (last > tail_off)
+            make_leaf_sub_pos(tail, tail_size)
+                .visit(v,
+                       first > tail_off ? first - tail_off : 0,
+                       last - tail_off,
+                       args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    bool traverse_p(Visitor v, Args&&... args) const
+    {
+        auto tail_off  = tail_offset();
+        auto tail_size = size - tail_off;
+        return (tail_off ? make_regular_sub_pos(root, shift, tail_off)
+                               .visit(v, args...)
+                         : make_empty_regular_pos(root).visit(v, args...)) &&
+               make_leaf_sub_pos(tail, tail_size).visit(v, args...);
+    }
+
+    template <typename Visitor, typename... Args>
+    bool traverse_p(Visitor v, size_t first, size_t last, Args&&... args) const
+    {
+        auto tail_off  = tail_offset();
+        auto tail_size = size - tail_off;
+
+        return (first < tail_off ? make_regular_sub_pos(root, shift, tail_off)
+                                       .visit(v,
+                                              first,
+                                              last < tail_off ? last : tail_off,
+                                              args...)
+                                 : true) &&
+               (last > tail_off
+                    ? make_leaf_sub_pos(tail, tail_size)
+                          .visit(v,
+                                 first > tail_off ? first - tail_off : 0,
+                                 last - tail_off,
+                                 args...)
+                    : true);
+    }
+
+    template <typename Visitor>
+    decltype(auto) descend(Visitor v, size_t idx) const
+    {
+        auto tail_off = tail_offset();
+        return idx >= tail_off ? make_leaf_descent_pos(tail).visit(v, idx)
+                               : visit_regular_descent(root, shift, v, idx);
+    }
+
+    template <typename Fn>
+    void for_each_chunk(Fn&& fn) const
+    {
+        traverse(for_each_chunk_visitor{}, std::forward<Fn>(fn));
+    }
+
+    template <typename Fn>
+    void for_each_chunk(size_t first, size_t last, Fn&& fn) const
+    {
+        traverse(for_each_chunk_i_visitor{}, first, last, std::forward<Fn>(fn));
+    }
+
+    template <typename Fn>
+    bool for_each_chunk_p(Fn&& fn) const
+    {
+        return traverse_p(for_each_chunk_p_visitor{}, std::forward<Fn>(fn));
+    }
+
+    template <typename Fn>
+    bool for_each_chunk_p(size_t first, size_t last, Fn&& fn) const
+    {
+        return traverse_p(
+            for_each_chunk_p_i_visitor{}, first, last, std::forward<Fn>(fn));
+    }
+
+    bool equals(const rbtree& other) const
+    {
+        if (size != other.size)
+            return false;
+        if (size == 0)
+            return true;
+        return (size <= branches<BL> ||
+                make_regular_sub_pos(root, shift, tail_offset())
+                    .visit(equals_visitor{}, other.root)) &&
+               make_leaf_sub_pos(tail, tail_size())
+                   .visit(equals_visitor{}, other.tail);
+    }
+
+    void ensure_mutable_tail(edit_t e, count_t n)
+    {
+        if (!tail->can_mutate(e)) {
+            auto new_tail = node_t::copy_leaf_e(e, tail, n);
+            dec_leaf(tail, n);
+            tail = new_tail;
+        }
+    }
+
+    void push_back_mut(edit_t e, T value)
+    {
+        auto tail_off = tail_offset();
+        auto ts       = size - tail_off;
+        if (ts < branches<BL>) {
+            ensure_mutable_tail(e, ts);
+            new (&tail->leaf()[ts]) T{std::move(value)};
+        } else {
+            auto new_tail = node_t::make_leaf_e(e, std::move(value));
+            try {
+                if (tail_off == size_t{branches<B>} << shift) {
+                    auto new_root = node_t::make_inner_e(e);
+                    try {
+                        auto path = node_t::make_path_e(e, shift, tail);
+                        new_root->inner()[0] = root;
+                        new_root->inner()[1] = path;
+                        root                 = new_root;
+                        tail                 = new_tail;
+                        shift += B;
+                    } catch (...) {
+                        node_t::delete_inner_e(new_root);
+                        throw;
+                    }
+                } else if (tail_off) {
+                    auto new_root =
+                        make_regular_sub_pos(root, shift, tail_off)
+                            .visit(push_tail_mut_visitor<node_t>{}, e, tail);
+                    root = new_root;
+                    tail = new_tail;
+                } else {
+                    auto new_root = node_t::make_path_e(e, shift, tail);
+                    assert(tail_off == 0);
+                    dec_empty_regular(root);
+                    root = new_root;
+                    tail = new_tail;
+                }
+            } catch (...) {
+                node_t::delete_leaf(new_tail, 1);
+                throw;
+            }
+        }
+        ++size;
+    }
+
+    rbtree push_back(T value) const
+    {
+        auto tail_off = tail_offset();
+        auto ts       = size - tail_off;
+        if (ts < branches<BL>) {
+            auto new_tail =
+                node_t::copy_leaf_emplace(tail, ts, std::move(value));
+            return {size + 1, shift, root->inc(), new_tail};
+        } else {
+            auto new_tail = node_t::make_leaf_n(1, std::move(value));
+            try {
+                if (tail_off == size_t{branches<B>} << shift) {
+                    auto new_root = node_t::make_inner_n(2);
+                    try {
+                        auto path            = node_t::make_path(shift, tail);
+                        new_root->inner()[0] = root;
+                        new_root->inner()[1] = path;
+                        root->inc();
+                        tail->inc();
+                        return {size + 1, shift + B, new_root, new_tail};
+                    } catch (...) {
+                        node_t::delete_inner(new_root, 2);
+                        throw;
+                    }
+                } else if (tail_off) {
+                    auto new_root =
+                        make_regular_sub_pos(root, shift, tail_off)
+                            .visit(push_tail_visitor<node_t>{}, tail);
+                    tail->inc();
+                    return {size + 1, shift, new_root, new_tail};
+                } else {
+                    auto new_root = node_t::make_path(shift, tail);
+                    tail->inc();
+                    return {size + 1, shift, new_root, new_tail};
+                }
+            } catch (...) {
+                node_t::delete_leaf(new_tail, 1);
+                throw;
+            }
+        }
+    }
+
+    const T* array_for(size_t index) const
+    {
+        return descend(array_for_visitor<T>(), index);
+    }
+
+    T& get_mut(edit_t e, size_t idx)
+    {
+        auto tail_off = tail_offset();
+        if (idx >= tail_off) {
+            ensure_mutable_tail(e, size - tail_off);
+            return tail->leaf()[idx & mask<BL>];
+        } else {
+            return make_regular_sub_pos(root, shift, tail_off)
+                .visit(get_mut_visitor<node_t>{}, idx, e, &root);
+        }
+    }
+
+    const T& get(size_t index) const
+    {
+        return descend(get_visitor<T>(), index);
+    }
+
+    const T& get_check(size_t index) const
+    {
+        if (index >= size)
+            throw std::out_of_range{"index out of range"};
+        return descend(get_visitor<T>(), index);
+    }
+
+    const T& front() const { return get(0); }
+
+    const T& back() const { return tail->leaf()[(size - 1) & mask<BL>]; }
+
+    template <typename FnT>
+    void update_mut(edit_t e, size_t idx, FnT&& fn)
+    {
+        auto& elem = get_mut(e, idx);
+        elem       = std::forward<FnT>(fn)(std::move(elem));
+    }
+
+    template <typename FnT>
+    rbtree update(size_t idx, FnT&& fn) const
+    {
+        auto tail_off = tail_offset();
+        if (idx >= tail_off) {
+            auto tail_size = size - tail_off;
+            auto new_tail =
+                make_leaf_sub_pos(tail, tail_size)
+                    .visit(update_visitor<node_t>{}, idx - tail_off, fn);
+            return {size, shift, root->inc(), new_tail};
+        } else {
+            auto new_root = make_regular_sub_pos(root, shift, tail_off)
+                                .visit(update_visitor<node_t>{}, idx, fn);
+            return {size, shift, new_root, tail->inc()};
+        }
+    }
+
+    void assoc_mut(edit_t e, size_t idx, T value)
+    {
+        update_mut(e, idx, [&](auto&&) { return std::move(value); });
+    }
+
+    rbtree assoc(size_t idx, T value) const
+    {
+        return update(idx, [&](auto&&) { return std::move(value); });
+    }
+
+    rbtree take(size_t new_size) const
+    {
+        auto tail_off = tail_offset();
+        if (new_size == 0) {
+            return empty();
+        } else if (new_size >= size) {
+            return *this;
+        } else if (new_size > tail_off) {
+            auto new_tail = node_t::copy_leaf(tail, new_size - tail_off);
+            return {new_size, shift, root->inc(), new_tail};
+        } else {
+            using std::get;
+            auto l = new_size - 1;
+            auto v = slice_right_visitor<node_t>();
+            auto r = make_regular_sub_pos(root, shift, tail_off).visit(v, l);
+            auto new_shift = get<0>(r);
+            auto new_root  = get<1>(r);
+            auto new_tail  = get<3>(r);
+            if (new_root) {
+                IMMER_ASSERT_TAGGED(new_root->compute_shift() == get<0>(r));
+                assert(new_root->check(new_shift, new_size - get<2>(r)));
+                return {new_size, new_shift, new_root, new_tail};
+            } else {
+                return {new_size, BL, empty().root->inc(), new_tail};
+            }
+        }
+    }
+
+    void take_mut(edit_t e, size_t new_size)
+    {
+        auto tail_off = tail_offset();
+        if (new_size == 0) {
+            // todo: more efficient?
+            *this = empty();
+        } else if (new_size >= size) {
+            return;
+        } else if (new_size > tail_off) {
+            auto ts    = size - tail_off;
+            auto newts = new_size - tail_off;
+            if (tail->can_mutate(e)) {
+                destroy_n(tail->leaf() + newts, ts - newts);
+            } else {
+                auto new_tail = node_t::copy_leaf_e(e, tail, newts);
+                dec_leaf(tail, ts);
+                tail = new_tail;
+            }
+            size = new_size;
+            return;
+        } else {
+            using std::get;
+            auto l = new_size - 1;
+            auto v = slice_right_mut_visitor<node_t>();
+            auto r = make_regular_sub_pos(root, shift, tail_off).visit(v, l, e);
+            auto new_shift = get<0>(r);
+            auto new_root  = get<1>(r);
+            auto new_tail  = get<3>(r);
+            if (new_root) {
+                root  = new_root;
+                shift = new_shift;
+            } else {
+                root  = empty().root->inc();
+                shift = BL;
+            }
+            dec_leaf(tail, size - tail_off);
+            size = new_size;
+            tail = new_tail;
+            return;
+        }
+    }
+
+    bool check_tree() const
+    {
+#if IMMER_DEBUG_DEEP_CHECK
+        assert(shift >= BL);
+        assert(tail_offset() <= size);
+        assert(check_root());
+        assert(check_tail());
+#endif
+        return true;
+    }
+
+    bool check_tail() const
+    {
+#if IMMER_DEBUG_DEEP_CHECK
+        if (tail_size() > 0)
+            assert(tail->check(0, tail_size()));
+#endif
+        return true;
+    }
+
+    bool check_root() const
+    {
+#if IMMER_DEBUG_DEEP_CHECK
+        if (tail_offset() > 0)
+            assert(root->check(shift, tail_offset()));
+        else {
+            IMMER_ASSERT_TAGGED(root->kind() == node_t::kind_t::inner);
+            assert(shift == BL);
+        }
+#endif
+        return true;
+    }
+};
+
+} // namespace rbts
+} // namespace detail
+} // namespace immer
diff --git a/immer/detail/rbts/rbtree_iterator.hpp b/immer/detail/rbts/rbtree_iterator.hpp
new file mode 100644
index 000000000000..90613b10b98e
--- /dev/null
+++ b/immer/detail/rbts/rbtree_iterator.hpp
@@ -0,0 +1,99 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/detail/iterator_facade.hpp>
+#include <immer/detail/rbts/rbtree.hpp>
+
+namespace immer {
+namespace detail {
+namespace rbts {
+
+template <typename T, typename MP, bits_t B, bits_t BL>
+struct rbtree_iterator
+    : iterator_facade<rbtree_iterator<T, MP, B, BL>,
+                      std::random_access_iterator_tag,
+                      T,
+                      const T&,
+                      std::ptrdiff_t,
+                      const T*>
+{
+    using tree_t = rbtree<T, MP, B, BL>;
+
+    struct end_t
+    {};
+
+    rbtree_iterator() = default;
+
+    rbtree_iterator(const tree_t& v)
+        : v_{&v}
+        , i_{0}
+        , base_{~size_t{}}
+        , curr_{nullptr}
+    {}
+
+    rbtree_iterator(const tree_t& v, end_t)
+        : v_{&v}
+        , i_{v.size}
+        , base_{~size_t{}}
+        , curr_{nullptr}
+    {}
+
+    const tree_t& impl() const { return *v_; }
+    size_t index() const { return i_; }
+
+private:
+    friend iterator_core_access;
+
+    const tree_t* v_;
+    size_t i_;
+    mutable size_t base_;
+    mutable const T* curr_ = nullptr;
+
+    void increment()
+    {
+        assert(i_ < v_->size);
+        ++i_;
+    }
+
+    void decrement()
+    {
+        assert(i_ > 0);
+        --i_;
+    }
+
+    void advance(std::ptrdiff_t n)
+    {
+        assert(n <= 0 || i_ + static_cast<size_t>(n) <= v_->size);
+        assert(n >= 0 || static_cast<size_t>(-n) <= i_);
+        i_ += n;
+    }
+
+    bool equal(const rbtree_iterator& other) const { return i_ == other.i_; }
+
+    std::ptrdiff_t distance_to(const rbtree_iterator& other) const
+    {
+        return other.i_ > i_ ? static_cast<std::ptrdiff_t>(other.i_ - i_)
+                             : -static_cast<std::ptrdiff_t>(i_ - other.i_);
+    }
+
+    const T& dereference() const
+    {
+        auto base = i_ & ~mask<BL>;
+        if (base_ != base) {
+            base_ = base;
+            curr_ = v_->array_for(i_);
+        }
+        return curr_[i_ & mask<BL>];
+    }
+};
+
+} // namespace rbts
+} // namespace detail
+} // namespace immer
diff --git a/immer/detail/rbts/rrbtree.hpp b/immer/detail/rbts/rrbtree.hpp
new file mode 100644
index 000000000000..7bf59e6e92b1
--- /dev/null
+++ b/immer/detail/rbts/rrbtree.hpp
@@ -0,0 +1,1396 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/config.hpp>
+#include <immer/detail/rbts/node.hpp>
+#include <immer/detail/rbts/operations.hpp>
+#include <immer/detail/rbts/position.hpp>
+
+#include <immer/detail/type_traits.hpp>
+
+#include <cassert>
+#include <memory>
+#include <numeric>
+
+namespace immer {
+namespace detail {
+namespace rbts {
+
+// Iterator over an rrbtree; only forward declared here because
+// equals() below instantiates it (full definition lives elsewhere).
+template <typename T, typename MemoryPolicy, bits_t B, bits_t BL>
+struct rrbtree_iterator;
+
+// Relaxed radix balanced tree.  Inner nodes may carry a "relaxed"
+// table of cumulative child sizes (see node_t::relaxed()), which the
+// slicing/concatenation operations below rely on.  B is the number of
+// branching bits of inner nodes and BL that of leaves.  Nodes are
+// shared between trees via the MemoryPolicy's refcounting.
+template <typename T, typename MemoryPolicy, bits_t B, bits_t BL>
+struct rrbtree
+{
+    using node_t  = node<T, MemoryPolicy, B, BL>;
+    using edit_t  = typename node_t::edit_t;
+    using owner_t = typename MemoryPolicy::transience_t::owner;
+
+    size_t size;   // total element count, tail included
+    shift_t shift; // bit shift applied to an index at the root level
+    node_t* root;  // tree holding elements before tail_offset()
+    node_t* tail;  // rightmost leaf, kept outside the tree for fast push
+
+    // Canonical empty tree: a function-local static with a zero-size
+    // inner root and an empty leaf tail, shared via refcounting
+    // (callers inc() its nodes before adopting them).
+    static const rrbtree& empty()
+    {
+        static const rrbtree empty_{
+            0, BL, node_t::make_inner_n(0u), node_t::make_leaf_n(0u)};
+        return empty_;
+    }
+
+    // Builds a tree from an initializer list by transiently pushing
+    // each element under a fresh owner token.
+    template <typename U>
+    static auto from_initializer_list(std::initializer_list<U> values)
+    {
+        auto e      = owner_t{};
+        auto result = rrbtree{empty()};
+        for (auto&& v : values)
+            result.push_back_mut(e, v);
+        return result;
+    }
+
+    // Builds a tree from an iterator/sentinel pair (constrained by
+    // compatible_sentinel_v), transiently pushing each element under a
+    // fresh owner token.
+    template <typename Iter,
+              typename Sent,
+              std::enable_if_t<compatible_sentinel_v<Iter, Sent>, bool> = true>
+    static auto from_range(Iter first, Sent last)
+    {
+        auto e      = owner_t{};
+        auto result = rrbtree{empty()};
+        for (; first != last; ++first)
+            result.push_back_mut(e, *first);
+        return result;
+    }
+
+    // Builds a tree containing n copies of v.
+    static auto from_fill(size_t n, T v)
+    {
+        auto e      = owner_t{};
+        auto result = rrbtree{empty()};
+        while (n-- > 0)
+            result.push_back_mut(e, v);
+        return result;
+    }
+
+    // Assembles a tree from its parts, taking ownership of the given
+    // nodes (no refcount increment).  Invariants are verified with
+    // check_tree() in debug builds only.
+    rrbtree(size_t sz, shift_t sh, node_t* r, node_t* t)
+        : size{sz}
+        , shift{sh}
+        , root{r}
+        , tail{t}
+    {
+        assert(check_tree());
+    }
+
+    // Copy: share structure with the source by bumping the refcounts
+    // of root and tail.
+    rrbtree(const rrbtree& other)
+        : rrbtree{other.size, other.shift, other.root, other.tail}
+    {
+        inc();
+    }
+
+    // Move: start as the empty tree and steal the source's nodes,
+    // leaving it empty (but valid and destructible).
+    rrbtree(rrbtree&& other)
+        : rrbtree{empty()}
+    {
+        swap(*this, other);
+    }
+
+    // Copy-and-swap assignment: strongly exception safe, and the old
+    // nodes are released when `next` goes out of scope.
+    rrbtree& operator=(const rrbtree& other)
+    {
+        auto next{other};
+        swap(*this, next);
+        return *this;
+    }
+
+    // Move assignment by swap; the source ends up owning our old
+    // nodes and releases them in its destructor.
+    rrbtree& operator=(rrbtree&& other)
+    {
+        swap(*this, other);
+        return *this;
+    }
+
+    // Member-wise swap; never throws in practice (POD members only).
+    friend void swap(rrbtree& x, rrbtree& y)
+    {
+        using std::swap;
+        swap(x.size, y.size);
+        swap(x.shift, y.shift);
+        swap(x.root, y.root);
+        swap(x.tail, y.tail);
+    }
+
+    ~rrbtree() { dec(); }
+
+    // Bumps the refcount of both owned top-level nodes (used by the
+    // copy constructor; children are shared implicitly).
+    void inc() const
+    {
+        root->inc();
+        tail->inc();
+    }
+
+    // Releases the whole tree by visiting every node with dec_visitor.
+    void dec() const { traverse(dec_visitor()); }
+
+    // Number of elements currently stored in the tail leaf.
+    auto tail_size() const { return size - tail_offset(); }
+
+    // Index of the first element stored in the tail.  For a relaxed
+    // root this is the last entry of its cumulative-size table; for a
+    // regular root it is (size - 1) rounded down to a multiple of the
+    // leaf capacity; zero when the tree is empty.
+    auto tail_offset() const
+    {
+        auto r = root->relaxed();
+        assert(r == nullptr || r->d.count);
+        return r ? r->d.sizes[r->d.count - 1]
+                 : size ? (size - 1) & ~mask<BL>
+                        /* otherwise */ : 0;
+    }
+
+    // Visits the whole tree: first the inner tree (relaxed-aware),
+    // then the tail.  Empty parts are still visited through the
+    // dedicated empty positions so visitors see every node.
+    template <typename Visitor, typename... Args>
+    void traverse(Visitor v, Args&&... args) const
+    {
+        auto tail_off  = tail_offset();
+        auto tail_size = size - tail_off;
+
+        if (tail_off)
+            visit_maybe_relaxed_sub(root, shift, tail_off, v, args...);
+        else
+            make_empty_regular_pos(root).visit(v, args...);
+
+        if (tail_size)
+            make_leaf_sub_pos(tail, tail_size).visit(v, args...);
+        else
+            make_empty_leaf_pos(tail).visit(v, args...);
+    }
+
+    // Visits only elements in [first, last), splitting the range
+    // between the inner tree and the tail; tail indices are passed to
+    // the visitor relative to the start of the tail leaf.
+    template <typename Visitor, typename... Args>
+    void traverse(Visitor v, size_t first, size_t last, Args&&... args) const
+    {
+        auto tail_off  = tail_offset();
+        auto tail_size = size - tail_off;
+
+        if (first < tail_off)
+            visit_maybe_relaxed_sub(root,
+                                    shift,
+                                    tail_off,
+                                    v,
+                                    first,
+                                    last < tail_off ? last : tail_off,
+                                    args...);
+        if (last > tail_off)
+            make_leaf_sub_pos(tail, tail_size)
+                .visit(v,
+                       first > tail_off ? first - tail_off : 0,
+                       last - tail_off,
+                       args...);
+    }
+
+    // Like traverse(), but for predicate visitors: short-circuits via
+    // && and returns false as soon as a visit returns false.
+    template <typename Visitor, typename... Args>
+    bool traverse_p(Visitor v, Args&&... args) const
+    {
+        auto tail_off  = tail_offset();
+        auto tail_size = size - tail_off;
+        return (tail_off
+                    ? visit_maybe_relaxed_sub(root, shift, tail_off, v, args...)
+                    : make_empty_regular_pos(root).visit(v, args...)) &&
+               (tail_size ? make_leaf_sub_pos(tail, tail_size).visit(v, args...)
+                          : make_empty_leaf_pos(tail).visit(v, args...));
+    }
+
+    // Ranged predicate traversal over [first, last): visits the inner
+    // tree part and then the tail part, short-circuiting when a visit
+    // returns false; parts outside the range count as true.
+    template <typename Visitor, typename... Args>
+    bool traverse_p(Visitor v, size_t first, size_t last, Args&&... args) const
+    {
+        auto tail_off  = tail_offset();
+        auto tail_size = size - tail_off;
+        return (first < tail_off
+                    ? visit_maybe_relaxed_sub(root,
+                                              shift,
+                                              tail_off,
+                                              v,
+                                              first,
+                                              last < tail_off ? last : tail_off,
+                                              args...)
+                    : true) &&
+               (last > tail_off
+                    ? make_leaf_sub_pos(tail, tail_size)
+                          .visit(v,
+                                 first > tail_off ? first - tail_off : 0,
+                                 last - tail_off,
+                                 args...)
+                    : true);
+    }
+
+    // Descends to the single element at idx, dispatching to the tail
+    // leaf (with a tail-relative index) or down the inner tree.
+    template <typename Visitor>
+    decltype(auto) descend(Visitor v, size_t idx) const
+    {
+        auto tail_off = tail_offset();
+        return idx >= tail_off
+                   ? make_leaf_descent_pos(tail).visit(v, idx - tail_off)
+                   : visit_maybe_relaxed_descent(root, shift, v, idx);
+    }
+
+    // Invokes fn on each contiguous chunk of elements (leaf by leaf).
+    template <typename Fn>
+    void for_each_chunk(Fn&& fn) const
+    {
+        traverse(for_each_chunk_visitor{}, std::forward<Fn>(fn));
+    }
+
+    // Chunked iteration restricted to the index range [first, last).
+    template <typename Fn>
+    void for_each_chunk(size_t first, size_t last, Fn&& fn) const
+    {
+        traverse(for_each_chunk_i_visitor{}, first, last, std::forward<Fn>(fn));
+    }
+
+    // Predicate variant: stops early when fn returns false; the
+    // overall result is whether every chunk returned true.
+    template <typename Fn>
+    bool for_each_chunk_p(Fn&& fn) const
+    {
+        return traverse_p(for_each_chunk_p_visitor{}, std::forward<Fn>(fn));
+    }
+
+    // Ranged predicate variant over [first, last).
+    template <typename Fn>
+    bool for_each_chunk_p(size_t first, size_t last, Fn&& fn) const
+    {
+        return traverse_p(
+            for_each_chunk_p_i_visitor{}, first, last, std::forward<Fn>(fn));
+    }
+
+    // Structural element-wise equality with another tree of the same
+    // type.  Compares the inner-tree portions first (walking the
+    // deeper tree while iterating the other), then the tails, whose
+    // boundaries may not be aligned between the two trees.
+    bool equals(const rrbtree& other) const
+    {
+        using iter_t = rrbtree_iterator<T, MemoryPolicy, B, BL>;
+        if (size != other.size)
+            return false;
+        if (size == 0)
+            return true;
+        auto tail_off       = tail_offset();
+        auto tail_off_other = other.tail_offset();
+        // compare trees
+        if (tail_off > 0 && tail_off_other > 0) {
+            // other.shift != shift is a theoretical possibility for
+            // relaxed trees that sadly we haven't managed to exercise
+            // in tests yet...
+            if (other.shift >= shift) {
+                if (!visit_maybe_relaxed_sub(other.root,
+                                             other.shift,
+                                             tail_off_other,
+                                             equals_visitor::rrb{},
+                                             iter_t{other},
+                                             root,
+                                             shift,
+                                             tail_off))
+                    return false;
+            } else {
+                if (!visit_maybe_relaxed_sub(root,
+                                             shift,
+                                             tail_off,
+                                             equals_visitor::rrb{},
+                                             iter_t{*this},
+                                             other.root,
+                                             other.shift,
+                                             tail_off_other))
+                    return false;
+            }
+        }
+        // compare the tails: aligned tails compare leaf vs leaf;
+        // otherwise our tail is matched against the other tree's tail
+        // suffix or against an iterator over its inner tree.
+        return tail_off == tail_off_other
+                   ? make_leaf_sub_pos(tail, tail_size())
+                         .visit(equals_visitor{}, other.tail)
+                   : tail_off > tail_off_other
+                         ? std::equal(tail->leaf(),
+                                      tail->leaf() + (size - tail_off),
+                                      other.tail->leaf() +
+                                          (tail_off - tail_off_other))
+                         /* otherwise */
+                         : std::equal(tail->leaf(),
+                                      tail->leaf() + (size - tail_off),
+                                      iter_t{other} + tail_off);
+    }
+
+    // Pushes a tail leaf into a tree, returning the new root and its
+    // shift; does not modify *this.  When the root is full a new level
+    // is grown above it.  Exception safety is manual: any node built
+    // here is deleted if a later allocation throws.
+    std::tuple<shift_t, node_t*> push_tail(node_t* root,
+                                           shift_t shift,
+                                           size_t size,
+                                           node_t* tail,
+                                           count_t tail_size) const
+    {
+        if (auto r = root->relaxed()) {
+            // relaxed root: the visitor returns nullptr when the tail
+            // does not fit anywhere under the current root...
+            auto new_root =
+                make_relaxed_pos(root, shift, r)
+                    .visit(push_tail_visitor<node_t>{}, tail, tail_size);
+            if (new_root)
+                return std::make_tuple(shift, new_root);
+            else {
+                // ...in which case grow a new two-child relaxed root
+                // with an explicit cumulative size table.
+                auto new_root = node_t::make_inner_r_n(2);
+                try {
+                    auto new_path        = node_t::make_path(shift, tail);
+                    new_root->inner()[0] = root->inc();
+                    new_root->inner()[1] = new_path;
+                    new_root->relaxed()->d.sizes[0] = size;
+                    new_root->relaxed()->d.sizes[1] = size + tail_size;
+                    new_root->relaxed()->d.count    = 2u;
+                } catch (...) {
+                    node_t::delete_inner_r(new_root, 2);
+                    throw;
+                }
+                return std::make_tuple(shift + B, new_root);
+            }
+        } else if (size == size_t{branches<B>} << shift) {
+            // regular root completely full: grow a new regular root
+            // over the old root and a fresh path down to the tail.
+            auto new_root = node_t::make_inner_n(2);
+            try {
+                auto new_path        = node_t::make_path(shift, tail);
+                new_root->inner()[0] = root->inc();
+                new_root->inner()[1] = new_path;
+            } catch (...) {
+                node_t::delete_inner(new_root, 2);
+                throw;
+            }
+            return std::make_tuple(shift + B, new_root);
+        } else if (size) {
+            // regular root with room: let the visitor thread the tail
+            // into the rightmost spine.
+            auto new_root = make_regular_sub_pos(root, shift, size)
+                                .visit(push_tail_visitor<node_t>{}, tail);
+            return std::make_tuple(shift, new_root);
+        } else {
+            // empty tree: the new root is just a path down to the tail.
+            return std::make_tuple(shift, node_t::make_path(shift, tail));
+        }
+    }
+
+    // In-place version of push_tail() for transients: mutates nodes
+    // owned by edit token e, updating this->root and this->shift
+    // directly.  Mirrors the four cases of push_tail().
+    void
+    push_tail_mut(edit_t e, size_t tail_off, node_t* tail, count_t tail_size)
+    {
+        if (auto r = root->relaxed()) {
+            auto new_root =
+                make_relaxed_pos(root, shift, r)
+                    .visit(push_tail_mut_visitor<node_t>{}, e, tail, tail_size);
+            if (new_root) {
+                root = new_root;
+            } else {
+                // no room under the relaxed root: grow a new level
+                // tagged with e (no inc() of the old root -- ownership
+                // is transferred into the new root).
+                auto new_root = node_t::make_inner_r_e(e);
+                try {
+                    auto new_path        = node_t::make_path_e(e, shift, tail);
+                    new_root->inner()[0] = root;
+                    new_root->inner()[1] = new_path;
+                    new_root->relaxed()->d.sizes[0] = tail_off;
+                    new_root->relaxed()->d.sizes[1] = tail_off + tail_size;
+                    new_root->relaxed()->d.count    = 2u;
+                    root                            = new_root;
+                    shift += B;
+                } catch (...) {
+                    node_t::delete_inner_r_e(new_root);
+                    throw;
+                }
+            }
+        } else if (tail_off == size_t{branches<B>} << shift) {
+            // regular root full: grow a new regular level.
+            auto new_root = node_t::make_inner_e(e);
+            try {
+                auto new_path        = node_t::make_path_e(e, shift, tail);
+                new_root->inner()[0] = root;
+                new_root->inner()[1] = new_path;
+                root                 = new_root;
+                shift += B;
+            } catch (...) {
+                node_t::delete_inner_e(new_root);
+                throw;
+            }
+        } else if (tail_off) {
+            // regular root with room: mutate the rightmost spine.
+            auto new_root =
+                make_regular_sub_pos(root, shift, tail_off)
+                    .visit(push_tail_mut_visitor<node_t>{}, e, tail);
+            root = new_root;
+        } else {
+            // empty tree: replace the empty root with a path to tail.
+            auto new_root = node_t::make_path_e(e, shift, tail);
+            dec_empty_regular(root);
+            root = new_root;
+        }
+    }
+
+    // Copy-on-write for the tail: if the tail is not exclusively owned
+    // by edit token e, replace it with a mutable copy of its first n
+    // elements and release the shared one.
+    void ensure_mutable_tail(edit_t e, count_t n)
+    {
+        if (!tail->can_mutate(e)) {
+            auto new_tail = node_t::copy_leaf_e(e, tail, n);
+            dec_leaf(tail, n);
+            tail = new_tail;
+        }
+    }
+
+    // Transient push_back: appends in place under edit token e.  If
+    // the tail has room the value is constructed directly into it;
+    // otherwise the full tail is pushed into the tree and a fresh
+    // one-element tail takes its place.
+    void push_back_mut(edit_t e, T value)
+    {
+        auto ts = tail_size();
+        if (ts < branches<BL>) {
+            ensure_mutable_tail(e, ts);
+            // placement-new into the first free slot of the tail.
+            new (&tail->leaf()[ts]) T{std::move(value)};
+        } else {
+            using std::get;
+            auto new_tail = node_t::make_leaf_e(e, std::move(value));
+            auto tail_off = tail_offset();
+            try {
+                push_tail_mut(e, tail_off, tail, ts);
+                tail = new_tail;
+            } catch (...) {
+                node_t::delete_leaf(new_tail, 1u);
+                throw;
+            }
+        }
+        ++size;
+    }
+
+    // Persistent push_back: returns a new tree sharing all structure
+    // with this one.  Either copies the tail with one extra element,
+    // or pushes the full tail into the tree and starts a new tail.
+    rrbtree push_back(T value) const
+    {
+        auto ts = tail_size();
+        if (ts < branches<BL>) {
+            auto new_tail =
+                node_t::copy_leaf_emplace(tail, ts, std::move(value));
+            return {size + 1, shift, root->inc(), new_tail};
+        } else {
+            using std::get;
+            auto new_tail = node_t::make_leaf_n(1u, std::move(value));
+            auto tail_off = tail_offset();
+            try {
+                auto new_root =
+                    push_tail(root, shift, tail_off, tail, size - tail_off);
+                // the old tail is now also referenced from new_root.
+                tail->inc();
+                return {size + 1, get<0>(new_root), get<1>(new_root), new_tail};
+            } catch (...) {
+                node_t::delete_leaf(new_tail, 1u);
+                throw;
+            }
+        }
+    }
+
+    // Returns the contiguous leaf region containing idx as a tuple of
+    // (pointer to leaf data, first index covered, one-past-last index).
+    std::tuple<const T*, size_t, size_t> region_for(size_t idx) const
+    {
+        using std::get;
+        auto tail_off = tail_offset();
+        if (idx >= tail_off) {
+            return std::make_tuple(tail->leaf(), tail_off, size);
+        } else {
+            // visitor yields (data, offset-within-leaf, leaf count);
+            // translate those back into absolute indices.
+            auto subs = visit_maybe_relaxed_sub(
+                root, shift, tail_off, region_for_visitor<T>(), idx);
+            auto first = idx - get<1>(subs);
+            auto end   = first + get<2>(subs);
+            return std::make_tuple(get<0>(subs), first, end);
+        }
+    }
+
+    // Mutable access for transients: returns a reference to the
+    // element at idx, copy-on-writing any node along the path that is
+    // not exclusively owned by edit token e (the visitor may replace
+    // root through the &root out-parameter).
+    T& get_mut(edit_t e, size_t idx)
+    {
+        auto tail_off = tail_offset();
+        if (idx >= tail_off) {
+            ensure_mutable_tail(e, size - tail_off);
+            return tail->leaf()[(idx - tail_off) & mask<BL>];
+        } else {
+            return visit_maybe_relaxed_sub(root,
+                                           shift,
+                                           tail_off,
+                                           get_mut_visitor<node_t>{},
+                                           idx,
+                                           e,
+                                           &root);
+        }
+    }
+
+    // Unchecked element access (undefined for index >= size).
+    const T& get(size_t index) const
+    {
+        return descend(get_visitor<T>(), index);
+    }
+
+    // Bounds-checked element access; throws std::out_of_range.
+    const T& get_check(size_t index) const
+    {
+        if (index >= size)
+            throw std::out_of_range{"out of range"};
+        return descend(get_visitor<T>(), index);
+    }
+
+    // First element; undefined on an empty tree.
+    const T& front() const { return get(0); }
+
+    // Last element; undefined on an empty tree (size - 1 wraps).
+    const T& back() const { return get(size - 1); }
+
+    // Transient update: replaces the element at idx with
+    // fn(std::move(old)) in place, under edit token e.
+    template <typename FnT>
+    void update_mut(edit_t e, size_t idx, FnT&& fn)
+    {
+        auto& elem = get_mut(e, idx);
+        elem       = std::forward<FnT>(fn)(std::move(elem));
+    }
+
+    // Persistent update: returns a new tree where the element at idx
+    // is fn(old).  Only the path to the touched leaf (or the tail) is
+    // copied; everything else is shared via inc().
+    template <typename FnT>
+    rrbtree update(size_t idx, FnT&& fn) const
+    {
+        auto tail_off = tail_offset();
+        if (idx >= tail_off) {
+            auto tail_size = size - tail_off;
+            auto new_tail =
+                make_leaf_sub_pos(tail, tail_size)
+                    .visit(update_visitor<node_t>{}, idx - tail_off, fn);
+            return {size, shift, root->inc(), new_tail};
+        } else {
+            auto new_root = visit_maybe_relaxed_sub(
+                root, shift, tail_off, update_visitor<node_t>{}, idx, fn);
+            return {size, shift, new_root, tail->inc()};
+        }
+    }
+
+    // Transient assoc: overwrite the element at idx with value.
+    void assoc_mut(edit_t e, size_t idx, T value)
+    {
+        update_mut(e, idx, [&](auto&&) { return std::move(value); });
+    }
+
+    // Persistent assoc: new tree with the element at idx replaced.
+    rrbtree assoc(size_t idx, T value) const
+    {
+        return update(idx, [&](auto&&) { return std::move(value); });
+    }
+
+    // Transient take: truncates the tree in place to its first
+    // new_size elements under edit token e.
+    void take_mut(edit_t e, size_t new_size)
+    {
+        auto tail_off = tail_offset();
+        if (new_size == 0) {
+            *this = empty();
+        } else if (new_size >= size) {
+            // nothing to remove.
+            return;
+        } else if (new_size > tail_off) {
+            // cut happens inside the tail: destroy the surplus
+            // elements if we own the tail, else copy the kept prefix.
+            auto ts    = size - tail_off;
+            auto newts = new_size - tail_off;
+            if (tail->can_mutate(e)) {
+                destroy_n(tail->leaf() + newts, ts - newts);
+            } else {
+                auto new_tail = node_t::copy_leaf_e(e, tail, newts);
+                dec_leaf(tail, ts);
+                tail = new_tail;
+            }
+            size = new_size;
+            return;
+        } else {
+            // cut happens inside the inner tree: slice it on the
+            // right; the visitor also yields the new tail leaf.
+            using std::get;
+            auto l = new_size - 1;
+            auto v = slice_right_mut_visitor<node_t>();
+            auto r = visit_maybe_relaxed_sub(root, shift, tail_off, v, l, e);
+            auto new_shift = get<0>(r);
+            auto new_root  = get<1>(r);
+            auto new_tail  = get<3>(r);
+            if (new_root) {
+                root  = new_root;
+                shift = new_shift;
+            } else {
+                // everything left fits in the tail: fall back to the
+                // shared empty root.
+                root  = empty().root->inc();
+                shift = BL;
+            }
+            dec_leaf(tail, size - tail_off);
+            size = new_size;
+            tail = new_tail;
+            return;
+        }
+    }
+
+    // Persistent take: returns a new tree with the first new_size
+    // elements, sharing structure with this one.
+    rrbtree take(size_t new_size) const
+    {
+        auto tail_off = tail_offset();
+        if (new_size == 0) {
+            return empty();
+        } else if (new_size >= size) {
+            return *this;
+        } else if (new_size > tail_off) {
+            // cut inside the tail: copy the kept prefix of the tail.
+            auto new_tail = node_t::copy_leaf(tail, new_size - tail_off);
+            return {new_size, shift, root->inc(), new_tail};
+        } else {
+            // cut inside the inner tree: slice right; the visitor
+            // yields (shift, root, taken, tail).
+            using std::get;
+            auto l = new_size - 1;
+            auto v = slice_right_visitor<node_t>();
+            auto r = visit_maybe_relaxed_sub(root, shift, tail_off, v, l);
+            auto new_shift = get<0>(r);
+            auto new_root  = get<1>(r);
+            auto new_tail  = get<3>(r);
+            if (new_root) {
+                IMMER_ASSERT_TAGGED(new_root->compute_shift() == get<0>(r));
+                assert(new_root->check(new_shift, new_size - get<2>(r)));
+                return {new_size, new_shift, new_root, new_tail};
+            } else {
+                // remainder fits in the tail alone.
+                return {new_size, BL, empty().root->inc(), new_tail};
+            }
+        }
+    }
+
+    // Transient drop: removes the first elems elements in place under
+    // edit token e.
+    void drop_mut(edit_t e, size_t elems)
+    {
+        using std::get;
+        auto tail_off = tail_offset();
+        if (elems == 0) {
+            return;
+        } else if (elems >= size) {
+            *this = empty();
+        } else if (elems == tail_off) {
+            // exactly the whole inner tree is dropped: keep the tail,
+            // replace the root with the shared empty root.
+            dec_inner(root, shift, tail_off);
+            shift = BL;
+            root  = empty().root->inc();
+            size -= elems;
+            return;
+        } else if (elems > tail_off) {
+            // cut lands inside the tail: slice the tail on the left
+            // and discard the inner tree entirely.
+            auto v = slice_left_mut_visitor<node_t>();
+            tail   = get<1>(make_leaf_sub_pos(tail, size - tail_off)
+                              .visit(v, elems - tail_off, e));
+            if (root != empty().root) {
+                dec_inner(root, shift, tail_off);
+                shift = BL;
+                root  = empty().root->inc();
+            }
+            size -= elems;
+            return;
+        } else {
+            // cut lands inside the inner tree: slice it on the left.
+            auto v = slice_left_mut_visitor<node_t>();
+            auto r =
+                visit_maybe_relaxed_sub(root, shift, tail_off, v, elems, e);
+            shift = get<0>(r);
+            root  = get<1>(r);
+            size -= elems;
+            return;
+        }
+    }
+
+    // Persistent drop: returns a new tree without the first elems
+    // elements, sharing structure with this one.
+    rrbtree drop(size_t elems) const
+    {
+        if (elems == 0) {
+            return *this;
+        } else if (elems >= size) {
+            return empty();
+        } else if (elems == tail_offset()) {
+            // the whole inner tree is dropped: keep only the tail.
+            return {size - elems, BL, empty().root->inc(), tail->inc()};
+        } else if (elems > tail_offset()) {
+            // cut inside the tail: copy its kept suffix.
+            auto tail_off = tail_offset();
+            auto new_tail =
+                node_t::copy_leaf(tail, elems - tail_off, size - tail_off);
+            return {size - elems, BL, empty().root->inc(), new_tail};
+        } else {
+            // cut inside the inner tree: slice it on the left.
+            using std::get;
+            auto v = slice_left_visitor<node_t>();
+            auto r =
+                visit_maybe_relaxed_sub(root, shift, tail_offset(), v, elems);
+            auto new_root  = get<1>(r);
+            auto new_shift = get<0>(r);
+            return {size - elems, new_shift, new_root, tail->inc()};
+        }
+        // unreachable: every branch above returns.
+        return *this;
+    }
+
+    // Persistent concatenation: returns a new tree with r's elements
+    // appended after this one's.  When r has no inner tree its tail is
+    // folded into ours (push_back-like); otherwise the two inner trees
+    // are merged with concat_trees().  Exception cleanup of partially
+    // built leaves is manual.
+    rrbtree concat(const rrbtree& r) const
+    {
+        assert(r.size < (std::numeric_limits<size_t>::max() - size));
+        using std::get;
+        if (size == 0)
+            return r;
+        else if (r.size == 0)
+            return *this;
+        else if (r.tail_offset() == 0) {
+            // just concat the tail, similar to push_back
+            auto tail_offst = tail_offset();
+            auto tail_size  = size - tail_offst;
+            if (tail_size == branches<BL>) {
+                // our tail is full: push it into the tree and adopt
+                // r's tail as the new tail.
+                auto new_root =
+                    push_tail(root, shift, tail_offst, tail, tail_size);
+                tail->inc();
+                return {size + r.size,
+                        get<0>(new_root),
+                        get<1>(new_root),
+                        r.tail->inc()};
+            } else if (tail_size + r.size <= branches<BL>) {
+                // both tails fit into a single leaf.
+                auto new_tail =
+                    node_t::copy_leaf(tail, tail_size, r.tail, r.size);
+                return {size + r.size, shift, root->inc(), new_tail};
+            } else {
+                // fill our tail to capacity, push it, and keep the
+                // remainder of r's tail as the new tail.
+                auto remaining = branches<BL> - tail_size;
+                auto add_tail =
+                    node_t::copy_leaf(tail, tail_size, r.tail, remaining);
+                try {
+                    auto new_tail =
+                        node_t::copy_leaf(r.tail, remaining, r.size);
+                    try {
+                        auto new_root = push_tail(
+                            root, shift, tail_offst, add_tail, branches<BL>);
+                        return {size + r.size,
+                                get<0>(new_root),
+                                get<1>(new_root),
+                                new_tail};
+                    } catch (...) {
+                        node_t::delete_leaf(new_tail, r.size - remaining);
+                        throw;
+                    }
+                } catch (...) {
+                    node_t::delete_leaf(add_tail, branches<BL>);
+                    throw;
+                }
+            }
+        } else if (tail_offset() == 0) {
+            // we have no inner tree: merge our tail with r's tree.
+            auto tail_offst = tail_offset();
+            auto tail_size  = size - tail_offst;
+            auto concated =
+                concat_trees(tail, tail_size, r.root, r.shift, r.tail_offset());
+            auto new_shift = concated.shift();
+            auto new_root  = concated.node();
+            IMMER_ASSERT_TAGGED(new_shift == new_root->compute_shift());
+            assert(new_root->check(new_shift, size + r.tail_offset()));
+            return {size + r.size, new_shift, new_root, r.tail->inc()};
+        } else {
+            // general case: merge both inner trees (including our
+            // tail) and adopt r's tail.
+            auto tail_offst = tail_offset();
+            auto tail_size  = size - tail_offst;
+            auto concated   = concat_trees(root,
+                                         shift,
+                                         tail_offst,
+                                         tail,
+                                         tail_size,
+                                         r.root,
+                                         r.shift,
+                                         r.tail_offset());
+            auto new_shift  = concated.shift();
+            auto new_root   = concated.node();
+            IMMER_ASSERT_TAGGED(new_shift == new_root->compute_shift());
+            assert(new_root->check(new_shift, size + r.tail_offset()));
+            return {size + r.size, new_shift, new_root, r.tail->inc()};
+        }
+    }
+
+    // Transient concatenation is only possible when the edit token
+    // actually carries state (a non-empty type); with an empty token
+    // we fall back to the persistent concat_trees() below.
+    constexpr static bool supports_transient_concat =
+        !std::is_empty<edit_t>::value;
+
+    // Transient concatenation mutating the left operand: appends r
+    // (read-only) onto l under l's edit token el.  Mirrors concat()'s
+    // case analysis, mutating in place where ownership allows.
+    friend void concat_mut_l(rrbtree& l, edit_t el, const rrbtree& r)
+    {
+        assert(&l != &r);
+        assert(r.size < (std::numeric_limits<size_t>::max() - l.size));
+        using std::get;
+        if (l.size == 0)
+            l = r;
+        else if (r.size == 0)
+            return;
+        else if (r.tail_offset() == 0) {
+            // just concat the tail, similar to push_back
+            auto tail_offst = l.tail_offset();
+            auto tail_size  = l.size - tail_offst;
+            if (tail_size == branches<BL>) {
+                // l's tail is full: push it into l's tree, adopt r's.
+                l.push_tail_mut(el, tail_offst, l.tail, tail_size);
+                l.tail = r.tail->inc();
+                l.size += r.size;
+                return;
+            } else if (tail_size + r.size <= branches<BL>) {
+                // both tails fit in one leaf: append r's elements into
+                // l's (mutable) tail.
+                l.ensure_mutable_tail(el, tail_size);
+                std::uninitialized_copy(r.tail->leaf(),
+                                        r.tail->leaf() + r.size,
+                                        l.tail->leaf() + tail_size);
+                l.size += r.size;
+                return;
+            } else {
+                // fill l's tail, push it, keep the rest of r's tail.
+                auto remaining = branches<BL> - tail_size;
+                l.ensure_mutable_tail(el, tail_size);
+                std::uninitialized_copy(r.tail->leaf(),
+                                        r.tail->leaf() + remaining,
+                                        l.tail->leaf() + tail_size);
+                try {
+                    auto new_tail =
+                        node_t::copy_leaf_e(el, r.tail, remaining, r.size);
+                    try {
+                        l.push_tail_mut(el, tail_offst, l.tail, branches<BL>);
+                        l.tail = new_tail;
+                        l.size += r.size;
+                        return;
+                    } catch (...) {
+                        node_t::delete_leaf(new_tail, r.size - remaining);
+                        throw;
+                    }
+                } catch (...) {
+                    // NOTE(review): the elements being destroyed on
+                    // this cleanup path were constructed into
+                    // l.tail->leaf() + tail_size above, yet this
+                    // destroys r.tail->leaf() + tail_size -- looks
+                    // like the wrong leaf; verify against upstream.
+                    destroy_n(r.tail->leaf() + tail_size, remaining);
+                    throw;
+                }
+            }
+        } else if (l.tail_offset() == 0) {
+            if (supports_transient_concat) {
+                // merge l's tail (noone owns the left tree) with r's
+                // tree in place under el.
+                auto tail_offst = l.tail_offset();
+                auto tail_size  = l.size - tail_offst;
+                auto concated =
+                    concat_trees_mut(el,
+                                     el,
+                                     l.tail,
+                                     tail_size,
+                                     MemoryPolicy::transience_t::noone,
+                                     r.root,
+                                     r.shift,
+                                     r.tail_offset());
+                IMMER_ASSERT_TAGGED(concated.shift() ==
+                                    concated.node()->compute_shift());
+                assert(concated.node()->check(concated.shift(),
+                                              l.size + r.tail_offset()));
+                l.size += r.size;
+                l.shift = concated.shift();
+                l.root  = concated.node();
+                // NOTE(review): r.tail is adopted without an inc()
+                // here, unlike the persistent branch below which does
+                // r.tail->inc(); presumably the transience machinery
+                // accounts for ownership -- verify against upstream.
+                l.tail  = r.tail;
+            } else {
+                auto tail_offst = l.tail_offset();
+                auto tail_size  = l.size - tail_offst;
+                auto concated   = concat_trees(
+                    l.tail, tail_size, r.root, r.shift, r.tail_offset());
+                l = {l.size + r.size,
+                     concated.shift(),
+                     concated.node(),
+                     r.tail->inc()};
+                return;
+            }
+        } else {
+            if (supports_transient_concat) {
+                // general case: merge both inner trees in place.
+                auto tail_offst = l.tail_offset();
+                auto tail_size  = l.size - tail_offst;
+                auto concated =
+                    concat_trees_mut(el,
+                                     el,
+                                     l.root,
+                                     l.shift,
+                                     tail_offst,
+                                     l.tail,
+                                     tail_size,
+                                     MemoryPolicy::transience_t::noone,
+                                     r.root,
+                                     r.shift,
+                                     r.tail_offset());
+                IMMER_ASSERT_TAGGED(concated.shift() ==
+                                    concated.node()->compute_shift());
+                assert(concated.node()->check(concated.shift(),
+                                              l.size + r.tail_offset()));
+                l.size += r.size;
+                l.shift = concated.shift();
+                l.root  = concated.node();
+                // NOTE(review): see ownership note on l.tail above.
+                l.tail  = r.tail;
+            } else {
+                auto tail_offst = l.tail_offset();
+                auto tail_size  = l.size - tail_offst;
+                auto concated   = concat_trees(l.root,
+                                             l.shift,
+                                             tail_offst,
+                                             l.tail,
+                                             tail_size,
+                                             r.root,
+                                             r.shift,
+                                             r.tail_offset());
+                l               = {l.size + r.size,
+                     concated.shift(),
+                     concated.node(),
+                     r.tail->inc()};
+            }
+        }
+    }
+
+    // Mutably concatenates const `l` onto the front of transient `r`,
+    // leaving the result in `r`.  Only nodes tagged with the edit token
+    // `er` may be mutated in place; `l` is never modified.
+    friend void concat_mut_r(const rrbtree& l, rrbtree& r, edit_t er)
+    {
+        assert(&l != &r);
+        assert(r.size < (std::numeric_limits<size_t>::max() - l.size));
+        using std::get;
+        if (r.size == 0)
+            // `l` is const, so this can only ever be a copy; the former
+            // `std::move(l)` was a no-op cast on a const operand that
+            // selected the copy assignment anyway
+            // (clang-tidy: performance-move-const-arg).
+            r = l;
+        else if (l.size == 0)
+            return;
+        else if (r.tail_offset() == 0) {
+            // `r` is tail-only: just concat the tail, similar to push_back
+            auto tail_offst = l.tail_offset();
+            auto tail_size  = l.size - tail_offst;
+            if (tail_size == branches<BL>) {
+                // this could be improved by making sure that the
+                // newly created nodes as part of the `push_tail()`
+                // are tagged with `er`
+                auto res =
+                    l.push_tail(l.root, l.shift, tail_offst, l.tail, tail_size);
+                l.tail->inc(); // note: leak if mutably concatenated
+                               // with itself, but this is forbidden
+                               // by the interface
+                r = {l.size + r.size, get<0>(res), get<1>(res), r.tail->inc()};
+                return;
+            } else if (tail_size + r.size <= branches<BL>) {
+                // doing this in an exception-safe, mutating way is very
+                // tricky while potential performance gains are
+                // minimal (we need to move every element of the right
+                // tail anyway to make space for the left tail)
+                //
+                // we could however improve this by at least moving the
+                // elements of the right tail...
+                auto new_tail =
+                    node_t::copy_leaf(l.tail, tail_size, r.tail, r.size);
+                r = {l.size + r.size, l.shift, l.root->inc(), new_tail};
+                return;
+            } else {
+                // like the immutable version
+                auto remaining = branches<BL> - tail_size;
+                auto add_tail  = node_t::copy_leaf_e(
+                    er, l.tail, tail_size, r.tail, remaining);
+                try {
+                    auto new_tail =
+                        node_t::copy_leaf_e(er, r.tail, remaining, r.size);
+                    try {
+                        // this could be improved by making sure that the
+                        // newly created nodes as part of the `push_tail()`
+                        // are tagged with `er`
+                        auto new_root = l.push_tail(l.root,
+                                                    l.shift,
+                                                    tail_offst,
+                                                    add_tail,
+                                                    branches<BL>);
+                        r             = {l.size + r.size,
+                             get<0>(new_root),
+                             get<1>(new_root),
+                             new_tail};
+                        return;
+                    } catch (...) {
+                        node_t::delete_leaf(new_tail, r.size - remaining);
+                        throw;
+                    }
+                } catch (...) {
+                    node_t::delete_leaf(add_tail, branches<BL>);
+                    throw;
+                }
+                return;
+            }
+        } else if (l.tail_offset() == 0) {
+            // `l` is tail-only
+            if (supports_transient_concat) {
+                auto tail_offst = l.tail_offset();
+                auto tail_size  = l.size - tail_offst;
+                // `l`'s side is tagged with the `noone` token so its
+                // nodes are never mutated in place (`l` is const)
+                auto concated =
+                    concat_trees_mut(er,
+                                     MemoryPolicy::transience_t::noone,
+                                     l.tail,
+                                     tail_size,
+                                     er,
+                                     r.root,
+                                     r.shift,
+                                     r.tail_offset());
+                IMMER_ASSERT_TAGGED(concated.shift() ==
+                                    concated.node()->compute_shift());
+                assert(concated.node()->check(concated.shift(),
+                                              l.size + r.tail_offset()));
+                r.size += l.size;
+                r.shift = concated.shift();
+                r.root  = concated.node();
+            } else {
+                auto tail_offst = l.tail_offset();
+                auto tail_size  = l.size - tail_offst;
+                auto concated   = concat_trees(
+                    l.tail, tail_size, r.root, r.shift, r.tail_offset());
+                r = {l.size + r.size,
+                     concated.shift(),
+                     concated.node(),
+                     r.tail->inc()};
+                return;
+            }
+        } else {
+            // general case: concatenate both tries
+            if (supports_transient_concat) {
+                auto tail_offst = l.tail_offset();
+                auto tail_size  = l.size - tail_offst;
+                auto concated =
+                    concat_trees_mut(er,
+                                     MemoryPolicy::transience_t::noone,
+                                     l.root,
+                                     l.shift,
+                                     tail_offst,
+                                     l.tail,
+                                     tail_size,
+                                     er,
+                                     r.root,
+                                     r.shift,
+                                     r.tail_offset());
+                IMMER_ASSERT_TAGGED(concated.shift() ==
+                                    concated.node()->compute_shift());
+                assert(concated.node()->check(concated.shift(),
+                                              l.size + r.tail_offset()));
+                r.size += l.size;
+                r.shift = concated.shift();
+                r.root  = concated.node();
+                return;
+            } else {
+                auto tail_offst = l.tail_offset();
+                auto tail_size  = l.size - tail_offst;
+                auto concated   = concat_trees(l.root,
+                                             l.shift,
+                                             tail_offst,
+                                             l.tail,
+                                             tail_size,
+                                             r.root,
+                                             r.shift,
+                                             r.tail_offset());
+                r               = {l.size + r.size,
+                     concated.shift(),
+                     concated.node(),
+                     r.tail->inc()};
+                return;
+            }
+        }
+    }
+
+    // Mutably concatenates `r` onto the back of `l`, leaving the result
+    // in `l`.  Both trees are transients: nodes tagged with `el` (resp.
+    // `er`) may be mutated in place.  When the transient concatenation
+    // path steals `r`'s nodes, `r` is hard_reset() to the empty tree so
+    // it does not release them again.
+    friend void concat_mut_lr_l(rrbtree& l, edit_t el, rrbtree& r, edit_t er)
+    {
+        assert(&l != &r);
+        assert(r.size < (std::numeric_limits<size_t>::max() - l.size));
+        using std::get;
+        if (l.size == 0)
+            l = r;
+        else if (r.size == 0)
+            return;
+        else if (r.tail_offset() == 0) {
+            // `r` is tail-only: just concat the tail, similar to push_back
+            auto tail_offst = l.tail_offset();
+            auto tail_size  = l.size - tail_offst;
+            if (tail_size == branches<BL>) {
+                // left tail is full: push it down into the trie and
+                // adopt `r`'s tail as the new tail
+                l.push_tail_mut(el, tail_offst, l.tail, tail_size);
+                l.tail = r.tail->inc();
+                l.size += r.size;
+                return;
+            } else if (tail_size + r.size <= branches<BL>) {
+                // both tails fit into one leaf: append `r`'s elements
+                // into `l`'s (mutable) tail, moving when we own `r`'s tail
+                l.ensure_mutable_tail(el, tail_size);
+                if (r.tail->can_mutate(er))
+                    detail::uninitialized_move(r.tail->leaf(),
+                                               r.tail->leaf() + r.size,
+                                               l.tail->leaf() + tail_size);
+                else
+                    std::uninitialized_copy(r.tail->leaf(),
+                                            r.tail->leaf() + r.size,
+                                            l.tail->leaf() + tail_size);
+                l.size += r.size;
+                return;
+            } else {
+                // fill `l`'s tail to capacity, push it down, and keep
+                // the spill-over of `r`'s tail as the new tail
+                auto remaining = branches<BL> - tail_size;
+                l.ensure_mutable_tail(el, tail_size);
+                if (r.tail->can_mutate(er))
+                    detail::uninitialized_move(r.tail->leaf(),
+                                               r.tail->leaf() + remaining,
+                                               l.tail->leaf() + tail_size);
+                else
+                    std::uninitialized_copy(r.tail->leaf(),
+                                            r.tail->leaf() + remaining,
+                                            l.tail->leaf() + tail_size);
+                try {
+                    auto new_tail =
+                        node_t::copy_leaf_e(el, r.tail, remaining, r.size);
+                    try {
+                        l.push_tail_mut(el, tail_offst, l.tail, branches<BL>);
+                        l.tail = new_tail;
+                        l.size += r.size;
+                        return;
+                    } catch (...) {
+                        node_t::delete_leaf(new_tail, r.size - remaining);
+                        throw;
+                    }
+                } catch (...) {
+                    // NOTE(review): the `remaining` elements constructed
+                    // above live at `l.tail->leaf() + tail_size` (the
+                    // destination of the move/copy); destroying
+                    // `r.tail->leaf() + tail_size` here looks like the
+                    // wrong buffer -- confirm against upstream.
+                    destroy_n(r.tail->leaf() + tail_size, remaining);
+                    throw;
+                }
+            }
+        } else if (l.tail_offset() == 0) {
+            // `l` is tail-only
+            if (supports_transient_concat) {
+                auto tail_offst = l.tail_offset();
+                auto tail_size  = l.size - tail_offst;
+                auto concated   = concat_trees_mut(el,
+                                                 el,
+                                                 l.tail,
+                                                 tail_size,
+                                                 er,
+                                                 r.root,
+                                                 r.shift,
+                                                 r.tail_offset());
+                IMMER_ASSERT_TAGGED(concated.shift() ==
+                                    concated.node()->compute_shift());
+                assert(concated.node()->check(concated.shift(),
+                                              l.size + r.tail_offset()));
+                l.size += r.size;
+                l.shift = concated.shift();
+                l.root  = concated.node();
+                // steal `r`'s tail without inc(); `r` is emptied below
+                // so it will not release it
+                l.tail  = r.tail;
+                r.hard_reset();
+            } else {
+                auto tail_offst = l.tail_offset();
+                auto tail_size  = l.size - tail_offst;
+                auto concated   = concat_trees(
+                    l.tail, tail_size, r.root, r.shift, r.tail_offset());
+                l = {l.size + r.size,
+                     concated.shift(),
+                     concated.node(),
+                     r.tail->inc()};
+                return;
+            }
+        } else {
+            // general case: concatenate both tries
+            if (supports_transient_concat) {
+                auto tail_offst = l.tail_offset();
+                auto tail_size  = l.size - tail_offst;
+                auto concated   = concat_trees_mut(el,
+                                                 el,
+                                                 l.root,
+                                                 l.shift,
+                                                 tail_offst,
+                                                 l.tail,
+                                                 tail_size,
+                                                 er,
+                                                 r.root,
+                                                 r.shift,
+                                                 r.tail_offset());
+                IMMER_ASSERT_TAGGED(concated.shift() ==
+                                    concated.node()->compute_shift());
+                assert(concated.node()->check(concated.shift(),
+                                              l.size + r.tail_offset()));
+                l.size += r.size;
+                l.shift = concated.shift();
+                l.root  = concated.node();
+                // steal `r`'s tail without inc(); `r` is emptied below
+                l.tail  = r.tail;
+                r.hard_reset();
+            } else {
+                auto tail_offst = l.tail_offset();
+                auto tail_size  = l.size - tail_offst;
+                auto concated   = concat_trees(l.root,
+                                             l.shift,
+                                             tail_offst,
+                                             l.tail,
+                                             tail_size,
+                                             r.root,
+                                             r.shift,
+                                             r.tail_offset());
+                l               = {l.size + r.size,
+                     concated.shift(),
+                     concated.node(),
+                     r.tail->inc()};
+            }
+        }
+    }
+
+    // Mutably concatenates `l` onto the front of `r`, leaving the result
+    // in `r`.  Both trees are transients: nodes tagged with `el` (resp.
+    // `er`) may be mutated in place.  When the transient concatenation
+    // path consumes `l`'s nodes, `l` is hard_reset() to the empty tree
+    // so it does not release them again.
+    friend void concat_mut_lr_r(rrbtree& l, edit_t el, rrbtree& r, edit_t er)
+    {
+        assert(&l != &r);
+        assert(r.size < (std::numeric_limits<size_t>::max() - l.size));
+        using std::get;
+        if (r.size == 0)
+            r = l;
+        else if (l.size == 0)
+            return;
+        else if (r.tail_offset() == 0) {
+            // `r` is tail-only: just concat the tail, similar to push_back
+            auto tail_offst = l.tail_offset();
+            auto tail_size  = l.size - tail_offst;
+            if (tail_size == branches<BL>) {
+                // this could be improved by making sure that the
+                // newly created nodes as part of the `push_tail()`
+                // are tagged with `er`
+                auto res =
+                    l.push_tail(l.root, l.shift, tail_offst, l.tail, tail_size);
+                // NOTE(review): unlike concat_mut_r, `l.tail` is not
+                // inc()'d after push_tail() even though `l` still
+                // references it -- confirm the ownership accounting.
+                r = {l.size + r.size, get<0>(res), get<1>(res), r.tail->inc()};
+                return;
+            } else if (tail_size + r.size <= branches<BL>) {
+                // doing this in an exception-safe, mutating way is very
+                // tricky while potential performance gains are
+                // minimal (we need to move every element of the right
+                // tail anyway to make space for the left tail)
+                //
+                // we could however improve this by at least moving the
+                // elements of the mutable tails...
+                auto new_tail =
+                    node_t::copy_leaf(l.tail, tail_size, r.tail, r.size);
+                r = {l.size + r.size, l.shift, l.root->inc(), new_tail};
+                return;
+            } else {
+                // like the immutable version.
+                // we could improve this also by moving elements
+                // instead of just copying them
+                auto remaining = branches<BL> - tail_size;
+                auto add_tail  = node_t::copy_leaf_e(
+                    er, l.tail, tail_size, r.tail, remaining);
+                try {
+                    auto new_tail =
+                        node_t::copy_leaf_e(er, r.tail, remaining, r.size);
+                    try {
+                        // this could be improved by making sure that the
+                        // newly created nodes as part of the `push_tail()`
+                        // are tagged with `er`
+                        auto new_root = l.push_tail(l.root,
+                                                    l.shift,
+                                                    tail_offst,
+                                                    add_tail,
+                                                    branches<BL>);
+                        r             = {l.size + r.size,
+                             get<0>(new_root),
+                             get<1>(new_root),
+                             new_tail};
+                        return;
+                    } catch (...) {
+                        node_t::delete_leaf(new_tail, r.size - remaining);
+                        throw;
+                    }
+                } catch (...) {
+                    node_t::delete_leaf(add_tail, branches<BL>);
+                    throw;
+                }
+                return;
+            }
+        } else if (l.tail_offset() == 0) {
+            // `l` is tail-only
+            if (supports_transient_concat) {
+                auto tail_offst = l.tail_offset();
+                auto tail_size  = l.size - tail_offst;
+                auto concated   = concat_trees_mut(er,
+                                                 el,
+                                                 l.tail,
+                                                 tail_size,
+                                                 er,
+                                                 r.root,
+                                                 r.shift,
+                                                 r.tail_offset());
+                IMMER_ASSERT_TAGGED(concated.shift() ==
+                                    concated.node()->compute_shift());
+                assert(concated.node()->check(concated.shift(),
+                                              l.size + r.tail_offset()));
+                r.size += l.size;
+                r.shift = concated.shift();
+                r.root  = concated.node();
+                // the concatenation took over `l`'s nodes; empty `l` so
+                // they are not released again (see hard_reset())
+                l.hard_reset();
+            } else {
+                auto tail_offst = l.tail_offset();
+                auto tail_size  = l.size - tail_offst;
+                auto concated   = concat_trees(
+                    l.tail, tail_size, r.root, r.shift, r.tail_offset());
+                r = {l.size + r.size,
+                     concated.shift(),
+                     concated.node(),
+                     r.tail->inc()};
+                return;
+            }
+        } else {
+            // general case: concatenate both tries
+            if (supports_transient_concat) {
+                auto tail_offst = l.tail_offset();
+                auto tail_size  = l.size - tail_offst;
+                auto concated   = concat_trees_mut(er,
+                                                 el,
+                                                 l.root,
+                                                 l.shift,
+                                                 tail_offst,
+                                                 l.tail,
+                                                 tail_size,
+                                                 er,
+                                                 r.root,
+                                                 r.shift,
+                                                 r.tail_offset());
+                IMMER_ASSERT_TAGGED(concated.shift() ==
+                                    concated.node()->compute_shift());
+                assert(concated.node()->check(concated.shift(),
+                                              l.size + r.tail_offset()));
+                r.size += l.size;
+                r.shift = concated.shift();
+                r.root  = concated.node();
+                // the concatenation took over `l`'s nodes; empty `l` so
+                // they are not released again (see hard_reset())
+                l.hard_reset();
+            } else {
+                auto tail_offst = l.tail_offset();
+                auto tail_size  = l.size - tail_offst;
+                auto concated   = concat_trees(l.root,
+                                             l.shift,
+                                             tail_offst,
+                                             l.tail,
+                                             tail_size,
+                                             r.root,
+                                             r.shift,
+                                             r.tail_offset());
+                r               = {l.size + r.size,
+                     concated.shift(),
+                     concated.node(),
+                     r.tail->inc()};
+            }
+        }
+    }
+
+    // Resets this tree to the canonical empty tree *without* releasing
+    // the current root/tail pointers.  Used by the concat_mut_* family
+    // after a transient concatenation has taken over this tree's nodes
+    // (they were transferred without an extra inc()), so releasing them
+    // here would be incorrect.
+    void hard_reset()
+    {
+        assert(supports_transient_concat);
+        auto&& empty_ = empty();
+        size          = empty_.size;
+        shift         = empty_.shift;
+        root          = empty_.root;
+        tail          = empty_.tail;
+    }
+
+    // Validates basic structural invariants (shift range, tail bounds);
+    // with IMMER_DEBUG_DEEP_CHECK it also recursively validates the root
+    // trie and the tail.  Always returns true so it can be used inside
+    // assert().
+    bool check_tree() const
+    {
+        assert(shift <= sizeof(size_t) * 8 - BL);
+        assert(shift >= BL);
+        assert(tail_offset() <= size);
+        assert(tail_size() <= branches<BL>);
+#if IMMER_DEBUG_DEEP_CHECK
+        assert(check_root());
+        assert(check_tail());
+#endif
+        return true;
+    }
+
+    // Deep-checks the tail leaf when IMMER_DEBUG_DEEP_CHECK is enabled;
+    // otherwise a no-op.  Returns true for use inside assert().
+    bool check_tail() const
+    {
+#if IMMER_DEBUG_DEEP_CHECK
+        if (tail_size() > 0)
+            assert(tail->check(endshift<B, BL>, tail_size()));
+#endif
+        return true;
+    }
+
+    // Deep-checks the root trie when IMMER_DEBUG_DEEP_CHECK is enabled:
+    // a non-empty trie must validate recursively; an empty trie must be
+    // an inner node at the lowest shift.  Returns true for use inside
+    // assert().
+    bool check_root() const
+    {
+#if IMMER_DEBUG_DEEP_CHECK
+        if (tail_offset() > 0)
+            assert(root->check(shift, tail_offset()));
+        else {
+            IMMER_ASSERT_TAGGED(root->kind() == node_t::kind_t::inner);
+            assert(shift == BL);
+        }
+#endif
+        return true;
+    }
+
+#if IMMER_DEBUG_PRINT
+    // Dumps a human-readable representation of the whole tree (size,
+    // shift, root trie and tail) to `out`.  Only compiled when
+    // IMMER_DEBUG_PRINT is enabled.
+    void debug_print(std::ostream& out) const
+    {
+        out << "--" << std::endl
+            << "{" << std::endl
+            << "  size  = " << size << std::endl
+            << "  shift = " << shift << std::endl
+            << "  root  = " << std::endl;
+        debug_print_node(out, root, shift, tail_offset());
+        out << "  tail  = " << std::endl;
+        debug_print_node(out, tail, endshift<B, BL>, tail_size());
+        out << "}" << std::endl;
+    }
+
+    // Writes `indent` space characters to `out` to align nested output.
+    void debug_print_indent(std::ostream& out, unsigned indent) const
+    {
+        for (auto n = indent; n > 0; --n)
+            out << ' ';
+    }
+
+    // Recursively pretty-prints the subtree rooted at `node`, which is
+    // at level `shift` and holds `size` elements, indented by `indent`
+    // columns.  Leaves print as '-', relaxed inner nodes (those carrying
+    // a size table) as '#', and regular inner nodes as '+'.
+    void debug_print_node(std::ostream& out,
+                          node_t* node,
+                          shift_t shift,
+                          size_t size,
+                          unsigned indent = 8) const
+    {
+        const auto indent_step = 4;
+
+        if (shift == endshift<B, BL>) {
+            // leaf: print the elements directly
+            debug_print_indent(out, indent);
+            out << "- {" << size << "} "
+                << pretty_print_array(node->leaf(), size) << std::endl;
+        } else if (auto r = node->relaxed()) {
+            // relaxed node: child sizes come from the cumulative size
+            // table, so each child's size is the delta to the previous
+            auto count = r->d.count;
+            debug_print_indent(out, indent);
+            out << "# {" << size << "} "
+                << pretty_print_array(r->d.sizes, r->d.count) << std::endl;
+            auto last_size = size_t{};
+            for (auto i = count_t{}; i < count; ++i) {
+                debug_print_node(out,
+                                 node->inner()[i],
+                                 shift - B,
+                                 r->d.sizes[i] - last_size,
+                                 indent + indent_step);
+                last_size = r->d.sizes[i];
+            }
+        } else {
+            // regular node: all children are full except possibly the last
+            debug_print_indent(out, indent);
+            out << "+ {" << size << "}" << std::endl;
+            // number of children = ceil(size / 2^shift)
+            auto count =
+                (size >> shift) + (size - ((size >> shift) << shift) > 0);
+            if (count) {
+                for (auto i = count_t{}; i < count - 1; ++i)
+                    debug_print_node(out,
+                                     node->inner()[i],
+                                     shift - B,
+                                     1 << shift,
+                                     indent + indent_step);
+                debug_print_node(out,
+                                 node->inner()[count - 1],
+                                 shift - B,
+                                 size - ((count - 1) << shift),
+                                 indent + indent_step);
+            }
+        }
+    }
+#endif // IMMER_DEBUG_PRINT
+};
+
+} // namespace rbts
+} // namespace detail
+} // namespace immer
diff --git a/immer/detail/rbts/rrbtree_iterator.hpp b/immer/detail/rbts/rrbtree_iterator.hpp
new file mode 100644
index 000000000000..af967774e7ef
--- /dev/null
+++ b/immer/detail/rbts/rrbtree_iterator.hpp
@@ -0,0 +1,98 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/detail/iterator_facade.hpp>
+#include <immer/detail/rbts/rrbtree.hpp>
+
+namespace immer {
+namespace detail {
+namespace rbts {
+
+// Random-access iterator over an rrbtree<T, MP, B, BL>.
+//
+// The position is tracked as a plain index `i_` into the tree; element
+// access goes through `region_for()`, which yields the contiguous leaf
+// chunk containing the index.  That chunk is cached in `curr_` and only
+// refreshed when a dereference falls outside of it.
+template <typename T, typename MP, bits_t B, bits_t BL>
+struct rrbtree_iterator
+    : iterator_facade<rrbtree_iterator<T, MP, B, BL>,
+                      std::random_access_iterator_tag,
+                      T,
+                      const T&,
+                      std::ptrdiff_t,
+                      const T*>
+{
+    using tree_t   = rrbtree<T, MP, B, BL>;
+    // (element pointer, region begin index, region end index)
+    using region_t = std::tuple<const T*, size_t, size_t>;
+
+    // Tag type selecting the past-the-end constructor.
+    struct end_t
+    {};
+
+    const tree_t& impl() const { return *v_; }
+    size_t index() const { return i_; }
+
+    // NOTE(review): leaves `v_`/`i_` uninitialized; such a singular
+    // iterator must not be used before being assigned to.
+    rrbtree_iterator() = default;
+
+    // Iterator to the first element of `v`.  The cached region starts
+    // as an impossible range so the first dereference fetches a real one.
+    rrbtree_iterator(const tree_t& v)
+        : v_{&v}
+        , i_{0}
+        , curr_{nullptr, ~size_t{}, ~size_t{}}
+    {}
+
+    // Past-the-end iterator for `v`.
+    rrbtree_iterator(const tree_t& v, end_t)
+        : v_{&v}
+        , i_{v.size}
+        , curr_{nullptr, ~size_t{}, ~size_t{}}
+    {}
+
+private:
+    friend iterator_core_access;
+
+    const tree_t* v_;
+    size_t i_;
+    // Cached leaf region; mutable because it is refreshed from the
+    // const dereference().
+    mutable region_t curr_;
+
+    void increment()
+    {
+        using std::get;
+        assert(i_ < v_->size);
+        ++i_;
+    }
+
+    void decrement()
+    {
+        using std::get;
+        assert(i_ > 0);
+        --i_;
+    }
+
+    void advance(std::ptrdiff_t n)
+    {
+        using std::get;
+        assert(n <= 0 || i_ + static_cast<size_t>(n) <= v_->size);
+        assert(n >= 0 || static_cast<size_t>(-n) <= i_);
+        i_ += n;
+    }
+
+    bool equal(const rrbtree_iterator& other) const { return i_ == other.i_; }
+
+    std::ptrdiff_t distance_to(const rrbtree_iterator& other) const
+    {
+        return other.i_ > i_ ? static_cast<std::ptrdiff_t>(other.i_ - i_)
+                             : -static_cast<std::ptrdiff_t>(i_ - other.i_);
+    }
+
+    const T& dereference() const
+    {
+        using std::get;
+        // refresh the cached region if `i_` fell outside of it
+        if (i_ < get<1>(curr_) || i_ >= get<2>(curr_))
+            curr_ = v_->region_for(i_);
+        return get<0>(curr_)[i_ - get<1>(curr_)];
+    }
+};
+
+} // namespace rbts
+} // namespace detail
+} // namespace immer
diff --git a/immer/detail/rbts/visitor.hpp b/immer/detail/rbts/visitor.hpp
new file mode 100644
index 000000000000..38cd030c4a9f
--- /dev/null
+++ b/immer/detail/rbts/visitor.hpp
@@ -0,0 +1,56 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/config.hpp>
+
+#include <tuple>
+#include <utility>
+
+namespace immer {
+namespace detail {
+namespace rbts {
+
+// CRTP base class for tree-node visitors.
+//
+// The visit_* handlers form a refinement chain: visit_relaxed and
+// visit_regular fall back to visit_inner; visit_inner and visit_leaf
+// fall back to visit_node.  A derived visitor overrides only the most
+// specific handlers it needs; reaching the base visit_node means the
+// visitor is incomplete, hence IMMER_UNREACHABLE.
+template <typename Deriv>
+struct visitor_base
+{
+    // Catch-all: must be overridden by Deriv if any fallback reaches it.
+    template <typename... Args>
+    static decltype(auto) visit_node(Args&&... args)
+    {
+        IMMER_UNREACHABLE;
+    }
+
+    // Relaxed inner nodes (those with a size table) default to
+    // visit_inner.
+    template <typename... Args>
+    static decltype(auto) visit_relaxed(Args&&... args)
+    {
+        return Deriv::visit_inner(std::forward<Args>(args)...);
+    }
+
+    // Regular (fully packed) inner nodes default to visit_inner.
+    template <typename... Args>
+    static decltype(auto) visit_regular(Args&&... args)
+    {
+        return Deriv::visit_inner(std::forward<Args>(args)...);
+    }
+
+    // Any inner node defaults to the generic visit_node.
+    template <typename... Args>
+    static decltype(auto) visit_inner(Args&&... args)
+    {
+        return Deriv::visit_node(std::forward<Args>(args)...);
+    }
+
+    // Leaf nodes default to the generic visit_node.
+    template <typename... Args>
+    static decltype(auto) visit_leaf(Args&&... args)
+    {
+        return Deriv::visit_node(std::forward<Args>(args)...);
+    }
+};
+
+} // namespace rbts
+} // namespace detail
+} // namespace immer
diff --git a/immer/detail/ref_count_base.hpp b/immer/detail/ref_count_base.hpp
new file mode 100644
index 000000000000..28f2e46a99f9
--- /dev/null
+++ b/immer/detail/ref_count_base.hpp
@@ -0,0 +1,36 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <atomic>
+
+namespace immer {
+namespace detail {
+
+// CRTP base class providing an intrusive, atomic reference count for
+// use with boost-style intrusive pointers; the friend functions are
+// found via ADL on `Deriv*`.
+template <typename Deriv>
+struct ref_count_base
+{
+    // Mutable so that const objects can still be retained and released.
+    mutable std::atomic<int> ref_count{0};
+
+    friend void intrusive_ptr_add_ref(const Deriv* x)
+    {
+        // Incrementing only needs atomicity, not ordering: no decision
+        // is made based on the incremented value.
+        x->ref_count.fetch_add(1, std::memory_order_relaxed);
+    }
+
+    friend void intrusive_ptr_release(const Deriv* x)
+    {
+        // Release on the decrement publishes this thread's writes to the
+        // object; the acquire fence before deletion synchronizes with
+        // decrements from other threads so the destructor observes all
+        // of their writes.
+        if (x->ref_count.fetch_sub(1, std::memory_order_release) == 1) {
+            std::atomic_thread_fence(std::memory_order_acquire);
+            delete x;
+        }
+    }
+};
+
+} /* namespace detail */
+} /* namespace immer */
diff --git a/immer/detail/type_traits.hpp b/immer/detail/type_traits.hpp
new file mode 100644
index 000000000000..afb2652c55a8
--- /dev/null
+++ b/immer/detail/type_traits.hpp
@@ -0,0 +1,223 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <algorithm>
+#include <iterator>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+namespace immer {
+namespace detail {
+
+template <typename... Ts>
+struct make_void
+{
+    using type = void;
+};
+
+template <typename... Ts>
+using void_t = typename make_void<Ts...>::type;
+
+template <typename T, typename = void>
+struct is_dereferenceable : std::false_type
+{};
+
+template <typename T>
+struct is_dereferenceable<T, void_t<decltype(*(std::declval<T&>()))>>
+    : std::true_type
+{};
+
+template <typename T>
+constexpr bool is_dereferenceable_v = is_dereferenceable<T>::value;
+
+template <typename T, typename U = T, typename = void>
+struct is_equality_comparable : std::false_type
+{};
+
+template <typename T, typename U>
+struct is_equality_comparable<
+    T,
+    U,
+    std::enable_if_t<std::is_same<bool,
+                                  decltype(std::declval<T&>() ==
+                                           std::declval<U&>())>::value>>
+    : std::true_type
+{};
+
+template <typename T, typename U = T>
+constexpr bool is_equality_comparable_v = is_equality_comparable<T, U>::value;
+
+template <typename T, typename U = T, typename = void>
+struct is_inequality_comparable : std::false_type
+{};
+
+template <typename T, typename U>
+struct is_inequality_comparable<
+    T,
+    U,
+    std::enable_if_t<std::is_same<bool,
+                                  decltype(std::declval<T&>() !=
+                                           std::declval<U&>())>::value>>
+    : std::true_type
+{};
+
+template <typename T, typename U = T>
+constexpr bool is_inequality_comparable_v =
+    is_inequality_comparable<T, U>::value;
+
+template <typename T, typename = void>
+struct is_preincrementable : std::false_type
+{};
+
+template <typename T>
+struct is_preincrementable<
+    T,
+    std::enable_if_t<std::is_same<T&, decltype(++(std::declval<T&>()))>::value>>
+    : std::true_type
+{};
+
+template <typename T>
+constexpr bool is_preincrementable_v = is_preincrementable<T>::value;
+
+template <typename T, typename U = T, typename = void>
+struct is_subtractable : std::false_type
+{};
+
+template <typename T, typename U>
+struct is_subtractable<
+    T,
+    U,
+    void_t<decltype(std::declval<T&>() - std::declval<U&>())>> : std::true_type
+{};
+
+template <typename T, typename U = T>
+constexpr bool is_subtractable_v = is_subtractable<T, U>::value;
+
+namespace swappable {
+
+using std::swap;
+
+template <typename T, typename U, typename = void>
+struct with : std::false_type
+{};
+
+// Does not account for non-referenceable types
+template <typename T, typename U>
+struct with<T,
+            U,
+            void_t<decltype(swap(std::declval<T&>(), std::declval<U&>())),
+                   decltype(swap(std::declval<U&>(), std::declval<T&>()))>>
+    : std::true_type
+{};
+
+} // namespace swappable
+
+template <typename T, typename U>
+using is_swappable_with = swappable::with<T, U>;
+
+template <typename T>
+using is_swappable = is_swappable_with<T, T>;
+
+template <typename T>
+constexpr bool is_swappable_v = is_swappable_with<T&, T&>::value;
+
+template <typename T, typename = void>
+struct is_iterator : std::false_type
+{};
+
+// See https://en.cppreference.com/w/cpp/named_req/Iterator
+template <typename T>
+struct is_iterator<
+    T,
+    void_t<
+        std::enable_if_t<is_preincrementable_v<T> &&
+                         is_dereferenceable_v<T>
+                         // accounts for non-referenceable types
+                         && std::is_copy_constructible<T>::value &&
+                         std::is_copy_assignable<T>::value &&
+                         std::is_destructible<T>::value && is_swappable_v<T>>,
+        typename std::iterator_traits<T>::value_type,
+        typename std::iterator_traits<T>::difference_type,
+        typename std::iterator_traits<T>::reference,
+        typename std::iterator_traits<T>::pointer,
+        typename std::iterator_traits<T>::iterator_category>> : std::true_type
+{};
+
+template <typename T>
+constexpr bool is_iterator_v = is_iterator<T>::value;
+
+template <typename T, typename U, typename = void>
+struct compatible_sentinel : std::false_type
+{};
+
+template <typename T, typename U>
+struct compatible_sentinel<
+    T,
+    U,
+    std::enable_if_t<is_iterator_v<T> && is_equality_comparable_v<T, U> &&
+                     is_inequality_comparable_v<T, U>>> : std::true_type
+{};
+
+template <typename T, typename U>
+constexpr bool compatible_sentinel_v = compatible_sentinel<T, U>::value;
+
+template <typename T, typename = void>
+struct is_forward_iterator : std::false_type
+{};
+
+template <typename T>
+struct is_forward_iterator<
+    T,
+    std::enable_if_t<is_iterator_v<T> &&
+                     std::is_base_of<std::forward_iterator_tag,
+                                     typename std::iterator_traits<
+                                         T>::iterator_category>::value>>
+    : std::true_type
+{};
+
+template <typename T>
+constexpr bool is_forward_iterator_v = is_forward_iterator<T>::value;
+
+template <typename T, typename U, typename = void>
+struct std_distance_supports : std::false_type
+{};
+
+template <typename T, typename U>
+struct std_distance_supports<
+    T,
+    U,
+    void_t<decltype(std::distance(std::declval<T>(), std::declval<U>()))>>
+    : std::true_type
+{};
+
+template <typename T, typename U>
+constexpr bool std_distance_supports_v = std_distance_supports<T, U>::value;
+
+template <typename T, typename U, typename V, typename = void>
+struct std_uninitialized_copy_supports : std::false_type
+{};
+
+template <typename T, typename U, typename V>
+struct std_uninitialized_copy_supports<
+    T,
+    U,
+    V,
+    void_t<decltype(std::uninitialized_copy(
+        std::declval<T>(), std::declval<U>(), std::declval<V>()))>>
+    : std::true_type
+{};
+
+template <typename T, typename U, typename V>
+constexpr bool std_uninitialized_copy_supports_v =
+    std_uninitialized_copy_supports<T, U, V>::value;
+
+} // namespace detail
+} // namespace immer
diff --git a/immer/detail/util.hpp b/immer/detail/util.hpp
new file mode 100644
index 000000000000..fb2a520fc78f
--- /dev/null
+++ b/immer/detail/util.hpp
@@ -0,0 +1,258 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/config.hpp>
+
+#include <cstddef>
+#include <memory>
+#include <new>
+#include <type_traits>
+
+#include <immer/detail/type_traits.hpp>
+
+#if defined(_MSC_VER)
+#include <intrin.h> // for __lzcnt*
+#endif
+
+namespace immer {
+namespace detail {
+
+template <typename T>
+using aligned_storage_for =
+    typename std::aligned_storage<sizeof(T), alignof(T)>::type;
+
+template <typename T>
+T& auto_const_cast(const T& x)
+{
+    return const_cast<T&>(x);
+}
+template <typename T>
+T&& auto_const_cast(const T&& x)
+{
+    return const_cast<T&&>(std::move(x));
+}
+
+template <typename Iter1, typename Iter2>
+auto uninitialized_move(Iter1 in1, Iter1 in2, Iter2 out)
+{
+    return std::uninitialized_copy(
+        std::make_move_iterator(in1), std::make_move_iterator(in2), out);
+}
+
+template <class T>
+void destroy(T* first, T* last)
+{
+    for (; first != last; ++first)
+        first->~T();
+}
+
+template <class T, class Size>
+void destroy_n(T* p, Size n)
+{
+    auto e = p + n;
+    for (; p != e; ++p)
+        p->~T();
+}
+
+template <typename Heap, typename T, typename... Args>
+T* make(Args&&... args)
+{
+    auto ptr = Heap::allocate(sizeof(T));
+    try {
+        return new (ptr) T{std::forward<Args>(args)...};
+    } catch (...) {
+        Heap::deallocate(sizeof(T), ptr);
+        throw;
+    }
+}
+
+struct not_supported_t
+{};
+struct empty_t
+{};
+
+template <typename T>
+struct exact_t
+{
+    T value;
+    exact_t(T v)
+        : value{v} {};
+};
+
+template <typename T>
+inline constexpr auto clz_(T) -> not_supported_t
+{
+    IMMER_UNREACHABLE;
+    return {};
+}
+#if defined(_MSC_VER)
+// inline auto clz_(unsigned short x) { return __lzcnt16(x); }
+// inline auto clz_(unsigned int x) { return __lzcnt(x); }
+// inline auto clz_(unsigned __int64 x) { return __lzcnt64(x); }
+#else
+inline constexpr auto clz_(unsigned int x) { return __builtin_clz(x); }
+inline constexpr auto clz_(unsigned long x) { return __builtin_clzl(x); }
+inline constexpr auto clz_(unsigned long long x) { return __builtin_clzll(x); }
+#endif
+
+template <typename T>
+inline constexpr T log2_aux(T x, T r = 0)
+{
+    return x <= 1 ? r : log2_aux(x >> 1, r + 1);
+}
+
+template <typename T>
+inline constexpr auto log2(T x) -> std::
+    enable_if_t<!std::is_same<decltype(clz_(x)), not_supported_t>::value, T>
+{
+    return x == 0 ? 0 : sizeof(std::size_t) * 8 - 1 - clz_(x);
+}
+
+template <typename T>
+inline constexpr auto log2(T x)
+    -> std::enable_if_t<std::is_same<decltype(clz_(x)), not_supported_t>::value,
+                        T>
+{
+    return log2_aux(x);
+}
+
+template <bool b, typename F>
+auto static_if(F&& f) -> std::enable_if_t<b>
+{
+    std::forward<F>(f)(empty_t{});
+}
+template <bool b, typename F>
+auto static_if(F&& f) -> std::enable_if_t<!b>
+{}
+
+template <bool b, typename R = void, typename F1, typename F2>
+auto static_if(F1&& f1, F2&& f2) -> std::enable_if_t<b, R>
+{
+    return std::forward<F1>(f1)(empty_t{});
+}
+template <bool b, typename R = void, typename F1, typename F2>
+auto static_if(F1&& f1, F2&& f2) -> std::enable_if_t<!b, R>
+{
+    return std::forward<F2>(f2)(empty_t{});
+}
+
+template <typename T, T value>
+struct constantly
+{
+    template <typename... Args>
+    T operator()(Args&&...) const
+    {
+        return value;
+    }
+};
+
+/*!
+ * An alias to `std::distance`
+ */
+template <typename Iterator,
+          typename Sentinel,
+          std::enable_if_t<detail::std_distance_supports_v<Iterator, Sentinel>,
+                           bool> = true>
+typename std::iterator_traits<Iterator>::difference_type
+distance(Iterator first, Sentinel last)
+{
+    return std::distance(first, last);
+}
+
+/*!
+ * Equivalent of the `std::distance` applied to the sentinel-delimited
+ * forward range @f$ [first, last) @f$
+ */
+template <typename Iterator,
+          typename Sentinel,
+          std::enable_if_t<
+              (!detail::std_distance_supports_v<Iterator, Sentinel>) &&detail::
+                      is_forward_iterator_v<Iterator> &&
+                  detail::compatible_sentinel_v<Iterator, Sentinel> &&
+                  (!detail::is_subtractable_v<Sentinel, Iterator>),
+              bool> = true>
+typename std::iterator_traits<Iterator>::difference_type
+distance(Iterator first, Sentinel last)
+{
+    std::size_t result = 0;
+    while (first != last) {
+        ++first;
+        ++result;
+    }
+    return result;
+}
+
+/*!
+ * Equivalent of the `std::distance` applied to the sentinel-delimited
+ * random access range @f$ [first, last) @f$
+ */
+template <typename Iterator,
+          typename Sentinel,
+          std::enable_if_t<
+              (!detail::std_distance_supports_v<Iterator, Sentinel>) &&detail::
+                      is_forward_iterator_v<Iterator> &&
+                  detail::compatible_sentinel_v<Iterator, Sentinel> &&
+                  detail::is_subtractable_v<Sentinel, Iterator>,
+              bool> = true>
+typename std::iterator_traits<Iterator>::difference_type
+distance(Iterator first, Sentinel last)
+{
+    return last - first;
+}
+
+/*!
+ * An alias to `std::uninitialized_copy`
+ */
+template <
+    typename Iterator,
+    typename Sentinel,
+    typename SinkIter,
+    std::enable_if_t<
+        detail::std_uninitialized_copy_supports_v<Iterator, Sentinel, SinkIter>,
+        bool> = true>
+SinkIter uninitialized_copy(Iterator first, Sentinel last, SinkIter d_first)
+{
+    return std::uninitialized_copy(first, last, d_first);
+}
+
+/*!
+ * Equivalent of the `std::uninitialized_copy` applied to the
+ * sentinel-delimited forward range @f$ [first, last) @f$
+ */
+template <typename SourceIter,
+          typename Sent,
+          typename SinkIter,
+          std::enable_if_t<
+              (!detail::std_uninitialized_copy_supports_v<SourceIter,
+                                                          Sent,
+                                                          SinkIter>) &&detail::
+                      compatible_sentinel_v<SourceIter, Sent> &&
+                  detail::is_forward_iterator_v<SinkIter>,
+              bool> = true>
+SinkIter uninitialized_copy(SourceIter first, Sent last, SinkIter d_first)
+{
+    auto current = d_first;
+    try {
+        while (first != last) {
+            *current++ = *first;
+            ++first;
+        }
+    } catch (...) {
+        using Value = typename std::iterator_traits<SinkIter>::value_type;
+        for (; d_first != current; ++d_first) {
+            d_first->~Value();
+        }
+        throw;
+    }
+    return current;
+}
+
+} // namespace detail
+} // namespace immer
diff --git a/immer/experimental/detail/dvektor_impl.hpp b/immer/experimental/detail/dvektor_impl.hpp
new file mode 100644
index 000000000000..81dbbc59f5f3
--- /dev/null
+++ b/immer/experimental/detail/dvektor_impl.hpp
@@ -0,0 +1,498 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/heap/heap_policy.hpp>
+#include <immer/refcount/enable_intrusive_ptr.hpp>
+#include <immer/refcount/refcount_policy.hpp>
+
+#include <boost/intrusive_ptr.hpp>
+#include <boost/iterator/iterator_facade.hpp>
+#include <boost/smart_ptr/intrusive_ref_counter.hpp>
+
+#include <cassert>
+#include <limits>
+
+namespace immer {
+namespace detail {
+namespace dvektor {
+
+constexpr auto fast_log2(std::size_t x)
+{
+    return x == 0 ? 0 : sizeof(std::size_t) * 8 - 1 - __builtin_clzl(x);
+}
+
+template <int B, typename T = std::size_t>
+constexpr T branches = T{1} << B;
+
+template <int B, typename T = std::size_t>
+constexpr T mask = branches<B, T> - 1;
+
+template <int B, typename T = std::size_t>
+constexpr auto
+    max_depth = fast_log2(std::numeric_limits<std::size_t>::max()) / B;
+
+template <typename T, int B, typename MP>
+struct node;
+
+template <typename T, int B, typename MP>
+using node_ptr = boost::intrusive_ptr<node<T, B, MP>>;
+
+template <typename T, int B>
+using leaf_node = std::array<T, 1 << B>;
+
+template <typename T, int B, typename MP>
+using inner_node = std::array<node_ptr<T, B, MP>, 1 << B>;
+
+template <typename T, int B, typename MP>
+struct node
+    : enable_intrusive_ptr<node<T, B, MP>, typename MP::refcount>
+    , enable_optimized_heap_policy<node<T, B, MP>, typename MP::heap>
+{
+    using leaf_node_t  = leaf_node<T, B>;
+    using inner_node_t = inner_node<T, B, MP>;
+
+    enum
+    {
+        leaf_kind,
+        inner_kind
+    } kind;
+
+    union data_t
+    {
+        leaf_node_t leaf;
+        inner_node_t inner;
+        data_t(leaf_node_t n)
+            : leaf(std::move(n))
+        {}
+        data_t(inner_node_t n)
+            : inner(std::move(n))
+        {}
+        ~data_t() {}
+    } data;
+
+    ~node()
+    {
+        switch (kind) {
+        case leaf_kind:
+            data.leaf.~leaf_node_t();
+            break;
+        case inner_kind:
+            data.inner.~inner_node_t();
+            break;
+        }
+    }
+
+    node(leaf_node<T, B> n)
+        : kind{leaf_kind}
+        , data{std::move(n)}
+    {}
+
+    node(inner_node<T, B, MP> n)
+        : kind{inner_kind}
+        , data{std::move(n)}
+    {}
+
+    inner_node_t& inner() &
+    {
+        assert(kind == inner_kind);
+        return data.inner;
+    }
+    const inner_node_t& inner() const&
+    {
+        assert(kind == inner_kind);
+        return data.inner;
+    }
+    inner_node_t&& inner() &&
+    {
+        assert(kind == inner_kind);
+        return std::move(data.inner);
+    }
+
+    leaf_node_t& leaf() &
+    {
+        assert(kind == leaf_kind);
+        return data.leaf;
+    }
+    const leaf_node_t& leaf() const&
+    {
+        assert(kind == leaf_kind);
+        return data.leaf;
+    }
+    leaf_node_t&& leaf() &&
+    {
+        assert(kind == leaf_kind);
+        return std::move(data.leaf);
+    }
+};
+
+template <typename T, int B, typename MP, typename... Ts>
+auto make_node(Ts&&... xs) -> boost::intrusive_ptr<node<T, B, MP>>
+{
+    return new node<T, B, MP>(std::forward<Ts>(xs)...);
+}
+
+template <typename T, int B, typename MP>
+struct ref
+{
+    using inner_t    = inner_node<T, B, MP>;
+    using leaf_t     = leaf_node<T, B>;
+    using node_t     = node<T, B, MP>;
+    using node_ptr_t = node_ptr<T, B, MP>;
+
+    unsigned depth;
+    std::array<node_ptr_t, max_depth<B>> display;
+
+    template <typename... Ts>
+    static auto make_node(Ts&&... xs)
+    {
+        return dvektor::make_node<T, B, MP>(std::forward<Ts>(xs)...);
+    }
+
+    const T& get_elem(std::size_t index, std::size_t xr) const
+    {
+        auto display_idx = fast_log2(xr) / B;
+        auto node        = display[display_idx].get();
+        auto shift       = display_idx * B;
+        while (display_idx--) {
+            node = node->inner()[(index >> shift) & mask<B>].get();
+            shift -= B;
+        }
+        return node->leaf()[index & mask<B>];
+    }
+
+    node_ptr_t null_slot_and_copy_inner(node_ptr_t& node, std::size_t idx)
+    {
+        auto& n = node->inner();
+        auto x  = node_ptr_t{};
+        x.swap(n[idx]);
+        return copy_of_inner(x);
+    }
+
+    node_ptr_t null_slot_and_copy_leaf(node_ptr_t& node, std::size_t idx)
+    {
+        auto& n = node->inner();
+        auto x  = node_ptr_t{};
+        x.swap(n[idx]);
+        return copy_of_leaf(x);
+    }
+
+    node_ptr_t copy_of_inner(const node_ptr_t& n)
+    {
+        return make_node(n->inner());
+    }
+
+    node_ptr_t copy_of_leaf(const node_ptr_t& n)
+    {
+        return make_node(n->leaf());
+    }
+
+    void stabilize(std::size_t index)
+    {
+        auto shift = B;
+        for (auto i = 1u; i < depth; ++i) {
+            display[i] = copy_of_inner(display[i]);
+            display[i]->inner()[(index >> shift) & mask<B>] = display[i - 1];
+            shift += B;
+        }
+    }
+
+    void goto_pos_writable_from_clean(std::size_t old_index,
+                                      std::size_t index,
+                                      std::size_t xr)
+    {
+        assert(depth);
+        auto d = depth - 1;
+        if (d == 0) {
+            display[0] = copy_of_leaf(display[0]);
+        } else {
+            IMMER_UNREACHABLE;
+            display[d] = copy_of_inner(display[d]);
+            auto shift = B * d;
+            while (--d) {
+                display[d] = null_slot_and_copy_inner(
+                    display[d + 1], (index >> shift) & mask<B>);
+                shift -= B;
+            }
+            display[0] =
+                null_slot_and_copy_leaf(display[1], (index >> B) & mask<B>);
+        }
+    }
+
+    void goto_pos_writable_from_dirty(std::size_t old_index,
+                                      std::size_t new_index,
+                                      std::size_t xr)
+    {
+        assert(depth);
+        if (xr < (1 << B)) {
+            display[0] = copy_of_leaf(display[0]);
+        } else {
+            auto display_idx = fast_log2(xr) / B;
+            auto shift       = B;
+            for (auto i = 1u; i <= display_idx; ++i) {
+                display[i] = copy_of_inner(display[i]);
+                display[i]->inner()[(old_index >> shift) & mask<B>] =
+                    display[i - 1];
+                shift += B;
+            }
+            for (auto i = display_idx - 1; i > 0; --i) {
+                shift -= B;
+                display[i] = null_slot_and_copy_inner(
+                    display[i + 1], (new_index >> shift) & mask<B>);
+            }
+            display[0] =
+                null_slot_and_copy_leaf(display[1], (new_index >> B) & mask<B>);
+        }
+    }
+
+    void goto_fresh_pos_writable_from_clean(std::size_t old_index,
+                                            std::size_t new_index,
+                                            std::size_t xr)
+    {
+        auto display_idx = fast_log2(xr) / B;
+        if (display_idx > 0) {
+            auto shift = display_idx * B;
+            if (display_idx == depth) {
+                display[display_idx] = make_node(inner_t{});
+                display[display_idx]->inner()[(old_index >> shift) & mask<B>] =
+                    display[display_idx - 1];
+                ++depth;
+            }
+            while (--display_idx) {
+                auto node = display[display_idx + 1]
+                                ->inner()[(new_index >> shift) & mask<B>];
+                display[display_idx] =
+                    node ? std::move(node) : make_node(inner_t{});
+            }
+            display[0] = make_node(leaf_t{});
+        }
+    }
+
+    void goto_fresh_pos_writable_from_dirty(std::size_t old_index,
+                                            std::size_t new_index,
+                                            std::size_t xr)
+    {
+        stabilize(old_index);
+        goto_fresh_pos_writable_from_clean(old_index, new_index, xr);
+    }
+
+    void goto_next_block_start(std::size_t index, std::size_t xr)
+    {
+        auto display_idx = fast_log2(xr) / B;
+        auto shift       = display_idx * B;
+        if (display_idx > 0) {
+            display[display_idx - 1] =
+                display[display_idx]->inner()[(index >> shift) & mask<B>];
+            while (--display_idx)
+                display[display_idx - 1] = display[display_idx]->inner()[0];
+        }
+    }
+
+    void goto_pos(std::size_t index, std::size_t xr)
+    {
+        auto display_idx = fast_log2(xr) / B;
+        auto shift       = display_idx * B;
+        if (display_idx) {
+            do {
+                display[display_idx - 1] =
+                    display[display_idx]->inner()[(index >> shift) & mask<B>];
+                shift -= B;
+            } while (--display_idx);
+        }
+    }
+};
+
+template <typename T, int B, typename MP>
+struct impl
+{
+    using inner_t    = inner_node<T, B, MP>;
+    using leaf_t     = leaf_node<T, B>;
+    using node_t     = node<T, B, MP>;
+    using node_ptr_t = node_ptr<T, B, MP>;
+    using ref_t      = ref<T, B, MP>;
+
+    std::size_t size;
+    std::size_t focus;
+    bool dirty;
+    ref_t p;
+
+    template <typename... Ts>
+    static auto make_node(Ts&&... xs)
+    {
+        return dvektor::make_node<T, B, MP>(std::forward<Ts>(xs)...);
+    }
+
+    void goto_pos_writable(std::size_t old_index,
+                           std::size_t new_index,
+                           std::size_t xr)
+    {
+        if (dirty) {
+            p.goto_pos_writable_from_dirty(old_index, new_index, xr);
+        } else {
+            p.goto_pos_writable_from_clean(old_index, new_index, xr);
+            dirty = true;
+        }
+    }
+
+    void goto_fresh_pos_writable(std::size_t old_index,
+                                 std::size_t new_index,
+                                 std::size_t xr)
+    {
+        if (dirty) {
+            p.goto_fresh_pos_writable_from_dirty(old_index, new_index, xr);
+        } else {
+            p.goto_fresh_pos_writable_from_clean(old_index, new_index, xr);
+            dirty = true;
+        }
+    }
+
+    impl push_back(T value) const
+    {
+        if (size) {
+            auto block_index = size & ~mask<B>;
+            auto lo          = size & mask<B>;
+            if (size != block_index) {
+                auto s = impl{size + 1, block_index, dirty, p};
+                s.goto_pos_writable(focus, block_index, focus ^ block_index);
+                s.p.display[0]->leaf()[lo] = std::move(value);
+                return s;
+            } else {
+                auto s = impl{size + 1, block_index, dirty, p};
+                s.goto_fresh_pos_writable(
+                    focus, block_index, focus ^ block_index);
+                s.p.display[0]->leaf()[lo] = std::move(value);
+                return s;
+            }
+        } else {
+            return impl{
+                1, 0, false, {1, {{make_node(leaf_t{{std::move(value)}})}}}};
+        }
+    }
+
+    const T& get(std::size_t index) const
+    {
+        return p.get_elem(index, index ^ focus);
+    }
+
+    template <typename FnT>
+    impl update(std::size_t idx, FnT&& fn) const
+    {
+        auto s = impl{size, idx, dirty, p};
+        s.goto_pos_writable(focus, idx, focus ^ idx);
+        auto& v = s.p.display[0]->leaf()[idx & mask<B>];
+        v       = fn(std::move(v));
+        return s;
+    }
+
+    impl assoc(std::size_t idx, T value) const
+    {
+        return update(idx, [&](auto&&) { return std::move(value); });
+    }
+};
+
+template <typename T, int B, typename MP>
+const impl<T, B, MP> empty = {0, 0, false, ref<T, B, MP>{1, {}}};
+
+template <typename T, int B, typename MP>
+struct iterator
+    : boost::iterator_facade<iterator<T, B, MP>,
+                             T,
+                             boost::random_access_traversal_tag,
+                             const T&>
+{
+    struct end_t
+    {};
+
+    iterator() = default;
+
+    iterator(const impl<T, B, MP>& v)
+        : p_{v.p}
+        , i_{0}
+        , base_{0}
+    {
+        if (v.dirty)
+            p_.stabilize(v.focus);
+        p_.goto_pos(0, 0 ^ v.focus);
+        curr_ = p_.display[0]->leaf().begin();
+    }
+
+    iterator(const impl<T, B, MP>& v, end_t)
+        : p_{v.p}
+        , i_{v.size}
+        , base_{(v.size - 1) & ~mask<B>}
+    {
+        if (v.dirty)
+            p_.stabilize(v.focus);
+        p_.goto_pos(base_, base_ ^ v.focus);
+        curr_ = p_.display[0]->leaf().begin() + (i_ - base_);
+    }
+
+private:
+    friend class boost::iterator_core_access;
+    using leaf_iterator = typename leaf_node<T, B>::const_iterator;
+
+    ref<T, B, MP> p_;
+    std::size_t i_;
+    std::size_t base_;
+    leaf_iterator curr_;
+
+    void increment()
+    {
+        ++i_;
+        if (i_ - base_ < branches<B>) {
+            ++curr_;
+        } else {
+            auto new_base = base_ + branches<B>;
+            p_.goto_next_block_start(new_base, base_ ^ new_base);
+            base_ = new_base;
+            curr_ = p_.display[0]->leaf().begin();
+        }
+    }
+
+    void decrement()
+    {
+        assert(i_ > 0);
+        --i_;
+        if (i_ >= base_) {
+            --curr_;
+        } else {
+            auto new_base = base_ - branches<B>;
+            p_.goto_pos(new_base, base_ ^ new_base);
+            base_ = new_base;
+            curr_ = std::prev(p_.display[0]->leaf().end());
+        }
+    }
+
+    void advance(std::ptrdiff_t n)
+    {
+        i_ += n;
+        if (i_ <= base_ && i_ - base_ < branches<B>) {
+            curr_ += n;
+        } else {
+            auto new_base = i_ & ~mask<B>;
+            p_.goto_pos(new_base, base_ ^ new_base);
+            base_ = new_base;
+            curr_ = p_.display[0]->leaf().begin() + (i_ - base_);
+        }
+    }
+
+    bool equal(const iterator& other) const { return i_ == other.i_; }
+
+    std::ptrdiff_t distance_to(const iterator& other) const
+    {
+        return other.i_ > i_ ? static_cast<std::ptrdiff_t>(other.i_ - i_)
+                             : -static_cast<std::ptrdiff_t>(i_ - other.i_);
+    }
+
+    const T& dereference() const { return *curr_; }
+};
+
+} /* namespace dvektor */
+} /* namespace detail */
+} /* namespace immer */
diff --git a/immer/experimental/dvektor.hpp b/immer/experimental/dvektor.hpp
new file mode 100644
index 000000000000..e809e749d5df
--- /dev/null
+++ b/immer/experimental/dvektor.hpp
@@ -0,0 +1,69 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/experimental/detail/dvektor_impl.hpp>
+
+#include <immer/memory_policy.hpp>
+
+namespace immer {
+
+template <typename T, int B = 5, typename MemoryPolicy = default_memory_policy>
+class dvektor
+{
+    using impl_t = detail::dvektor::impl<T, B, MemoryPolicy>;
+
+public:
+    using value_type      = T;
+    using reference       = const T&;
+    using size_type       = std::size_t;
+    using difference_type = std::ptrdiff_t;
+    using const_reference = const T&;
+
+    using iterator         = detail::dvektor::iterator<T, B, MemoryPolicy>;
+    using const_iterator   = iterator;
+    using reverse_iterator = std::reverse_iterator<iterator>;
+
+    dvektor() = default;
+
+    iterator begin() const { return {impl_}; }
+    iterator end() const { return {impl_, typename iterator::end_t{}}; }
+
+    reverse_iterator rbegin() const { return reverse_iterator{end()}; }
+    reverse_iterator rend() const { return reverse_iterator{begin()}; }
+
+    std::size_t size() const { return impl_.size; }
+    bool empty() const { return impl_.size == 0; }
+
+    reference operator[](size_type index) const { return impl_.get(index); }
+
+    dvektor push_back(value_type value) const
+    {
+        return {impl_.push_back(std::move(value))};
+    }
+
+    dvektor assoc(std::size_t idx, value_type value) const
+    {
+        return {impl_.assoc(idx, std::move(value))};
+    }
+
+    template <typename FnT>
+    dvektor update(std::size_t idx, FnT&& fn) const
+    {
+        return {impl_.update(idx, std::forward<FnT>(fn))};
+    }
+
+private:
+    dvektor(impl_t impl)
+        : impl_(std::move(impl))
+    {}
+    impl_t impl_ = detail::dvektor::empty<T, B, MemoryPolicy>;
+};
+
+} // namespace immer
diff --git a/immer/flex_vector.hpp b/immer/flex_vector.hpp
new file mode 100644
index 000000000000..d03c3f7e4585
--- /dev/null
+++ b/immer/flex_vector.hpp
@@ -0,0 +1,608 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/detail/rbts/rrbtree.hpp>
+#include <immer/detail/rbts/rrbtree_iterator.hpp>
+#include <immer/memory_policy.hpp>
+
+namespace immer {
+
+template <typename T,
+          typename MP,
+          detail::rbts::bits_t B,
+          detail::rbts::bits_t BL>
+class vector;
+
+template <typename T,
+          typename MP,
+          detail::rbts::bits_t B,
+          detail::rbts::bits_t BL>
+class flex_vector_transient;
+
+/*!
+ * Immutable sequential container supporting random access,
+ * structural sharing and efficient concatenation and slicing.
+ *
+ * @tparam T The type of the values to be stored in the container.
+ * @tparam MemoryPolicy Memory management policy. See @ref
+ *         memory_policy.
+ *
+ * @rst
+ *
+ * This container is very similar to `vector`_ but also supports
+ * :math:`O(log(size))` *concatenation*, *slicing* and *insertion* at
+ * any point. Its performance characteristics are almost identical
+ * until one of these operations is performed.  After that,
+ * performance is degraded by a constant factor that usually oscillates
+ * in the range :math:`[1, 2)` depending on the operation and the
+ * amount of flexible operations that have been performed.
+ *
+ * .. tip:: A `vector`_ can be converted to a `flex_vector`_ in
+ *    constant time without any allocation.  This is so because the
+ *    internal structure of a *vector* is a strict subset of the
+ *    internal structure of a *flexible vector*.  You can take
+ *    advantage of this property by creating normal vectors as long as
+ *    the flexible operations are not needed, and convert later in
+ *    your processing pipeline once and if these are needed.
+ *
+ * @endrst
+ */
+template <typename T,
+          typename MemoryPolicy  = default_memory_policy,
+          detail::rbts::bits_t B = default_bits,
+          detail::rbts::bits_t BL =
+              detail::rbts::derive_bits_leaf<T, MemoryPolicy, B>>
+class flex_vector
+{
+    using impl_t = detail::rbts::rrbtree<T, MemoryPolicy, B, BL>;
+
+    using move_t =
+        std::integral_constant<bool, MemoryPolicy::use_transient_rvalues>;
+
+public:
+    static constexpr auto bits      = B;
+    static constexpr auto bits_leaf = BL;
+    using memory_policy             = MemoryPolicy;
+
+    using value_type      = T;
+    using reference       = const T&;
+    using size_type       = detail::rbts::size_t;
+    using difference_type = std::ptrdiff_t;
+    using const_reference = const T&;
+
+    using iterator = detail::rbts::rrbtree_iterator<T, MemoryPolicy, B, BL>;
+    using const_iterator   = iterator;
+    using reverse_iterator = std::reverse_iterator<iterator>;
+
+    using transient_type = flex_vector_transient<T, MemoryPolicy, B, BL>;
+
+    /*!
+     * Default constructor.  It creates a flex_vector of `size() == 0`.
+     * It does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    flex_vector() = default;
+
+    /*!
+     * Constructs a flex_vector containing the elements in `values`.
+     */
+    flex_vector(std::initializer_list<T> values)
+        : impl_{impl_t::from_initializer_list(values)}
+    {}
+
+    /*!
+     * Constructs a flex_vector containing the elements in the range
+     * defined by the input iterator `first` and range sentinel `last`.
+     */
+    template <typename Iter,
+              typename Sent,
+              std::enable_if_t<detail::compatible_sentinel_v<Iter, Sent>,
+                               bool> = true>
+    flex_vector(Iter first, Sent last)
+        : impl_{impl_t::from_range(first, last)}
+    {}
+
+    /*!
+     * Constructs a flex_vector containing the element `v` repeated `n`
+     * times.
+     */
+    flex_vector(size_type n, T v = {})
+        : impl_{impl_t::from_fill(n, v)}
+    {}
+
+    /*!
+     * Converting constructor.  It creates a flex_vector with the same
+     * contents as `v`.  It does not allocate memory and is
+     * @f$ O(1) @f$.
+     */
+    flex_vector(vector<T, MemoryPolicy, B, BL> v)
+        : impl_{v.impl_.size,
+                v.impl_.shift,
+                v.impl_.root->inc(),
+                v.impl_.tail->inc()}
+    {}
+
+    /*!
+     * Returns an iterator pointing at the first element of the
+     * collection. It does not allocate memory and its complexity is
+     * @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD iterator begin() const { return {impl_}; }
+
+    /*!
+     * Returns an iterator pointing just after the last element of the
+     * collection. It does not allocate and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD iterator end() const
+    {
+        return {impl_, typename iterator::end_t{}};
+    }
+
+    /*!
+     * Returns an iterator that traverses the collection backwards,
+     * pointing at the first element of the reversed collection. It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD reverse_iterator rbegin() const
+    {
+        return reverse_iterator{end()};
+    }
+
+    /*!
+     * Returns an iterator that traverses the collection backwards,
+     * pointing after the last element of the reversed collection. It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD reverse_iterator rend() const
+    {
+        return reverse_iterator{begin()};
+    }
+
+    /*!
+     * Returns the number of elements in the container.  It does
+     * not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD size_type size() const { return impl_.size; }
+
+    /*!
+     * Returns `true` if there are no elements in the container.  It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD bool empty() const { return impl_.size == 0; }
+
+    /*!
+     * Access the last element.
+     */
+    IMMER_NODISCARD const T& back() const { return impl_.back(); }
+
+    /*!
+     * Access the first element.
+     */
+    IMMER_NODISCARD const T& front() const { return impl_.front(); }
+
+    /*!
+     * Returns a `const` reference to the element at position `index`.
+     * It is undefined when @f$ index \geq size() @f$.  It does not
+     * allocate memory and its complexity is *effectively* @f$ O(1)
+     * @f$.
+     */
+    IMMER_NODISCARD reference operator[](size_type index) const
+    {
+        return impl_.get(index);
+    }
+
+    /*!
+     * Returns a `const` reference to the element at position
+     * `index`. It throws an `std::out_of_range` exception when @f$
+     * index \geq size() @f$.  It does not allocate memory and its
+     * complexity is *effectively* @f$ O(1) @f$.
+     */
+    reference at(size_type index) const { return impl_.get_check(index); }
+
+    /*!
+     * Returns whether the vectors are equal.
+     */
+    IMMER_NODISCARD bool operator==(const flex_vector& other) const
+    {
+        return impl_.equals(other.impl_);
+    }
+    IMMER_NODISCARD bool operator!=(const flex_vector& other) const
+    {
+        return !(*this == other);
+    }
+
+    /*!
+     * Returns a flex_vector with `value` inserted at the end.  It may
+     * allocate memory and its complexity is *effectively* @f$ O(1) @f$.
+     *
+     * @rst
+     *
+     * **Example**
+     *   .. literalinclude:: ../example/flex-vector/flex-vector.cpp
+     *      :language: c++
+     *      :dedent: 8
+     *      :start-after: push-back/start
+     *      :end-before:  push-back/end
+     *
+     * @endrst
+     */
+    IMMER_NODISCARD flex_vector push_back(value_type value) const&
+    {
+        return impl_.push_back(std::move(value));
+    }
+
+    IMMER_NODISCARD decltype(auto) push_back(value_type value) &&
+    {
+        return push_back_move(move_t{}, std::move(value));
+    }
+
+    /*!
+     * Returns a flex_vector with `value` inserted at the front.  It may
+     * allocate memory and its complexity is @f$ O(log(size)) @f$.
+     *
+     * @rst
+     *
+     * **Example**
+     *   .. literalinclude:: ../example/flex-vector/flex-vector.cpp
+     *      :language: c++
+     *      :dedent: 8
+     *      :start-after: push-front/start
+     *      :end-before:  push-front/end
+     *
+     * @endrst
+     */
+    IMMER_NODISCARD flex_vector push_front(value_type value) const
+    {
+        return flex_vector{}.push_back(value) + *this;
+    }
+
+    /*!
+     * Returns a flex_vector containing value `value` at position `index`.
+     * Undefined for `index >= size()`.
+     * It may allocate memory and its complexity is
+     * *effectively* @f$ O(1) @f$.
+     *
+     * @rst
+     *
+     * **Example**
+     *   .. literalinclude:: ../example/flex-vector/flex-vector.cpp
+     *      :language: c++
+     *      :dedent: 8
+     *      :start-after: set/start
+     *      :end-before:  set/end
+     *
+     * @endrst
+     */
+    IMMER_NODISCARD flex_vector set(size_type index, value_type value) const&
+    {
+        return impl_.assoc(index, std::move(value));
+    }
+
+    IMMER_NODISCARD decltype(auto) set(size_type index, value_type value) &&
+    {
+        return set_move(move_t{}, index, std::move(value));
+    }
+
+    /*!
+     * Returns a vector containing the result of the expression
+     * `fn((*this)[idx])` at position `idx`.
+     * Undefined for `index >= size()`.
+     * It may allocate memory and its complexity is
+     * *effectively* @f$ O(1) @f$.
+     *
+     * @rst
+     *
+     * **Example**
+     *   .. literalinclude:: ../example/flex-vector/flex-vector.cpp
+     *      :language: c++
+     *      :dedent: 8
+     *      :start-after: update/start
+     *      :end-before:  update/end
+     *
+     * @endrst
+
+     */
+    template <typename FnT>
+    IMMER_NODISCARD flex_vector update(size_type index, FnT&& fn) const&
+    {
+        return impl_.update(index, std::forward<FnT>(fn));
+    }
+
+    template <typename FnT>
+    IMMER_NODISCARD decltype(auto) update(size_type index, FnT&& fn) &&
+    {
+        return update_move(move_t{}, index, std::forward<FnT>(fn));
+    }
+
+    /*!
+     * Returns a vector containing only the first `min(elems, size())`
+     * elements. It may allocate memory and its complexity is
+     * *effectively* @f$ O(1) @f$.
+     *
+     * @rst
+     *
+     * **Example**
+     *   .. literalinclude:: ../example/flex-vector/flex-vector.cpp
+     *      :language: c++
+     *      :dedent: 8
+     *      :start-after: take/start
+     *      :end-before:  take/end
+     *
+     * @endrst
+     */
+    IMMER_NODISCARD flex_vector take(size_type elems) const&
+    {
+        return impl_.take(elems);
+    }
+
+    IMMER_NODISCARD decltype(auto) take(size_type elems) &&
+    {
+        return take_move(move_t{}, elems);
+    }
+
+    /*!
+     * Returns a vector without the first `min(elems, size())`
+     * elements. It may allocate memory and its complexity is
+     * *effectively* @f$ O(1) @f$.
+     *
+     * @rst
+     *
+     * **Example**
+     *   .. literalinclude:: ../example/flex-vector/flex-vector.cpp
+     *      :language: c++
+     *      :dedent: 8
+     *      :start-after: drop/start
+     *      :end-before:  drop/end
+     *
+     * @endrst
+     */
+    IMMER_NODISCARD flex_vector drop(size_type elems) const&
+    {
+        return impl_.drop(elems);
+    }
+
+    IMMER_NODISCARD decltype(auto) drop(size_type elems) &&
+    {
+        return drop_move(move_t{}, elems);
+    }
+
+    /*!
+     * Concatenation operator. Returns a flex_vector with the contents
+     * of `l` followed by those of `r`.  It may allocate memory
+     * and its complexity is @f$ O(log(max(size_r, size_l))) @f$
+     *
+     * @rst
+     *
+     * **Example**
+     *   .. literalinclude:: ../example/flex-vector/flex-vector.cpp
+     *      :language: c++
+     *      :dedent: 8
+     *      :start-after: concat/start
+     *      :end-before:  concat/end
+     *
+     * @endrst
+     */
+    IMMER_NODISCARD friend flex_vector operator+(const flex_vector& l,
+                                                 const flex_vector& r)
+    {
+        return l.impl_.concat(r.impl_);
+    }
+
+    IMMER_NODISCARD friend decltype(auto) operator+(flex_vector&& l,
+                                                    const flex_vector& r)
+    {
+        return concat_move(move_t{}, std::move(l), r);
+    }
+
+    IMMER_NODISCARD friend decltype(auto) operator+(const flex_vector& l,
+                                                    flex_vector&& r)
+    {
+        return concat_move(move_t{}, l, std::move(r));
+    }
+
+    IMMER_NODISCARD friend decltype(auto) operator+(flex_vector&& l,
+                                                    flex_vector&& r)
+    {
+        return concat_move(move_t{}, std::move(l), std::move(r));
+    }
+
+    /*!
+     * Returns a flex_vector with the `value` inserted at index
+     * `pos`. It may allocate memory and its complexity is @f$
+     * O(log(size)) @f$
+     *
+     * @rst
+     *
+     * **Example**
+     *   .. literalinclude:: ../example/flex-vector/flex-vector.cpp
+     *      :language: c++
+     *      :dedent: 8
+     *      :start-after: insert/start
+     *      :end-before:  insert/end
+     *
+     * @endrst
+     */
+    IMMER_NODISCARD flex_vector insert(size_type pos, T value) const&
+    {
+        return take(pos).push_back(std::move(value)) + drop(pos);
+    }
+    IMMER_NODISCARD decltype(auto) insert(size_type pos, T value) &&
+    {
+        using std::move;
+        auto rs = drop(pos);
+        return std::move(*this).take(pos).push_back(std::move(value)) +
+               std::move(rs);
+    }
+
+    IMMER_NODISCARD flex_vector insert(size_type pos, flex_vector value) const&
+    {
+        return take(pos) + std::move(value) + drop(pos);
+    }
+    IMMER_NODISCARD decltype(auto) insert(size_type pos, flex_vector value) &&
+    {
+        using std::move;
+        auto rs = drop(pos);
+        return std::move(*this).take(pos) + std::move(value) + std::move(rs);
+    }
+
+    /*!
+     * Returns a flex_vector without the element at index `pos`. It
+     * may allocate memory and its complexity is @f$ O(log(size)) @f$
+     *
+     * @rst
+     *
+     * **Example**
+     *   .. literalinclude:: ../example/flex-vector/flex-vector.cpp
+     *      :language: c++
+     *      :dedent: 8
+     *      :start-after: erase/start
+     *      :end-before:  erase/end
+     *
+     * @endrst
+     */
+    IMMER_NODISCARD flex_vector erase(size_type pos) const&
+    {
+        return take(pos) + drop(pos + 1);
+    }
+    IMMER_NODISCARD decltype(auto) erase(size_type pos) &&
+    {
+        auto rs = drop(pos + 1);
+        return std::move(*this).take(pos) + std::move(rs);
+    }
+
+    IMMER_NODISCARD flex_vector erase(size_type pos, size_type lpos) const&
+    {
+        return lpos > pos ? take(pos) + drop(lpos) : *this;
+    }
+    IMMER_NODISCARD decltype(auto) erase(size_type pos, size_type lpos) &&
+    {
+        if (lpos > pos) {
+            auto rs = drop(lpos);
+            return std::move(*this).take(pos) + std::move(rs);
+        } else {
+            return std::move(*this);
+        }
+    }
+
+    /*!
+     * Returns a @a transient form of this container, an
+     * `immer::flex_vector_transient`.
+     */
+    IMMER_NODISCARD transient_type transient() const&
+    {
+        return transient_type{impl_};
+    }
+    IMMER_NODISCARD transient_type transient() &&
+    {
+        return transient_type{std::move(impl_)};
+    }
+
+    // Semi-private
+    const impl_t& impl() const { return impl_; }
+
+#if IMMER_DEBUG_PRINT
+    void debug_print(std::ostream& out = std::cerr) const
+    {
+        impl_.debug_print(out);
+    }
+#endif
+
+private:
+    friend transient_type;
+
+    flex_vector(impl_t impl)
+        : impl_(std::move(impl))
+    {
+#if IMMER_DEBUG_PRINT
+        // force the compiler to generate debug_print, so we can call
+        // it from a debugger
+        [](volatile auto) {}(&flex_vector::debug_print);
+#endif
+    }
+
+    flex_vector&& push_back_move(std::true_type, value_type value)
+    {
+        impl_.push_back_mut({}, std::move(value));
+        return std::move(*this);
+    }
+    flex_vector push_back_move(std::false_type, value_type value)
+    {
+        return impl_.push_back(std::move(value));
+    }
+
+    flex_vector&& set_move(std::true_type, size_type index, value_type value)
+    {
+        impl_.assoc_mut({}, index, std::move(value));
+        return std::move(*this);
+    }
+    flex_vector set_move(std::false_type, size_type index, value_type value)
+    {
+        return impl_.assoc(index, std::move(value));
+    }
+
+    template <typename Fn>
+    flex_vector&& update_move(std::true_type, size_type index, Fn&& fn)
+    {
+        impl_.update_mut({}, index, std::forward<Fn>(fn));
+        return std::move(*this);
+    }
+    template <typename Fn>
+    flex_vector update_move(std::false_type, size_type index, Fn&& fn)
+    {
+        return impl_.update(index, std::forward<Fn>(fn));
+    }
+
+    flex_vector&& take_move(std::true_type, size_type elems)
+    {
+        impl_.take_mut({}, elems);
+        return std::move(*this);
+    }
+    flex_vector take_move(std::false_type, size_type elems)
+    {
+        return impl_.take(elems);
+    }
+
+    flex_vector&& drop_move(std::true_type, size_type elems)
+    {
+        impl_.drop_mut({}, elems);
+        return std::move(*this);
+    }
+    flex_vector drop_move(std::false_type, size_type elems)
+    {
+        return impl_.drop(elems);
+    }
+
+    static flex_vector&&
+    concat_move(std::true_type, flex_vector&& l, const flex_vector& r)
+    {
+        concat_mut_l(l.impl_, {}, r.impl_);
+        return std::move(l);
+    }
+    static flex_vector&&
+    concat_move(std::true_type, const flex_vector& l, flex_vector&& r)
+    {
+        concat_mut_r(l.impl_, r.impl_, {});
+        return std::move(r);
+    }
+    static flex_vector&&
+    concat_move(std::true_type, flex_vector&& l, flex_vector&& r)
+    {
+        concat_mut_lr_l(l.impl_, {}, r.impl_, {});
+        return std::move(l);
+    }
+    static flex_vector
+    concat_move(std::false_type, const flex_vector& l, const flex_vector& r)
+    {
+        return l.impl_.concat(r.impl_);
+    }
+
+    impl_t impl_ = impl_t::empty();
+};
+
+} // namespace immer
diff --git a/immer/flex_vector_transient.hpp b/immer/flex_vector_transient.hpp
new file mode 100644
index 000000000000..fe0a6a534e10
--- /dev/null
+++ b/immer/flex_vector_transient.hpp
@@ -0,0 +1,251 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/detail/rbts/rrbtree.hpp>
+#include <immer/detail/rbts/rrbtree_iterator.hpp>
+#include <immer/memory_policy.hpp>
+
+namespace immer {
+
+template <typename T,
+          typename MemoryPolicy,
+          detail::rbts::bits_t B,
+          detail::rbts::bits_t BL>
+class flex_vector;
+
+template <typename T,
+          typename MemoryPolicy,
+          detail::rbts::bits_t B,
+          detail::rbts::bits_t BL>
+class vector_transient;
+
+/*!
+ * Mutable version of `immer::flex_vector`.
+ *
+ * @rst
+ *
+ * Refer to :doc:`transients` to learn more about when and how to use
+ * the mutable versions of immutable containers.
+ *
+ * @endrst
+ */
+template <typename T,
+          typename MemoryPolicy  = default_memory_policy,
+          detail::rbts::bits_t B = default_bits,
+          detail::rbts::bits_t BL =
+              detail::rbts::derive_bits_leaf<T, MemoryPolicy, B>>
+class flex_vector_transient : MemoryPolicy::transience_t::owner
+{
+    using impl_t  = detail::rbts::rrbtree<T, MemoryPolicy, B, BL>;
+    using base_t  = typename MemoryPolicy::transience_t::owner;
+    using owner_t = typename MemoryPolicy::transience_t::owner;
+
+public:
+    static constexpr auto bits      = B;
+    static constexpr auto bits_leaf = BL;
+    using memory_policy             = MemoryPolicy;
+
+    using value_type      = T;
+    using reference       = const T&;
+    using size_type       = detail::rbts::size_t;
+    using difference_type = std::ptrdiff_t;
+    using const_reference = const T&;
+
+    using iterator = detail::rbts::rrbtree_iterator<T, MemoryPolicy, B, BL>;
+    using const_iterator   = iterator;
+    using reverse_iterator = std::reverse_iterator<iterator>;
+
+    using persistent_type = flex_vector<T, MemoryPolicy, B, BL>;
+
+    /*!
+     * Default constructor.  It creates a flex_vector of `size() == 0`.  It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    flex_vector_transient() = default;
+
+    /*!
+     * Converting constructor.  It creates a flex_vector with the same
+     * contents as `v`.  It does not allocate memory and is
+     * @f$ O(1) @f$.
+     */
+    flex_vector_transient(vector_transient<T, MemoryPolicy, B, BL> v)
+        : base_t{std::move(static_cast<base_t&>(v))}
+        , impl_{v.impl_.size,
+                v.impl_.shift,
+                v.impl_.root->inc(),
+                v.impl_.tail->inc()}
+    {}
+
+    /*!
+     * Returns an iterator pointing at the first element of the
+     * collection. It does not allocate memory and its complexity is
+     * @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD iterator begin() const { return {impl_}; }
+
+    /*!
+     * Returns an iterator pointing just after the last element of the
+     * collection. It does not allocate and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD iterator end() const
+    {
+        return {impl_, typename iterator::end_t{}};
+    }
+
+    /*!
+     * Returns an iterator that traverses the collection backwards,
+     * pointing at the first element of the reversed collection. It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD reverse_iterator rbegin() const
+    {
+        return reverse_iterator{end()};
+    }
+
+    /*!
+     * Returns an iterator that traverses the collection backwards,
+     * pointing after the last element of the reversed collection. It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD reverse_iterator rend() const
+    {
+        return reverse_iterator{begin()};
+    }
+
+    /*!
+     * Returns the number of elements in the container.  It does
+     * not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD size_type size() const { return impl_.size; }
+
+    /*!
+     * Returns `true` if there are no elements in the container.  It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD bool empty() const { return impl_.size == 0; }
+
+    /*!
+     * Returns a `const` reference to the element at position `index`.
+     * It is undefined when @f$ index \geq size() @f$.  It does not
+     * allocate memory and its complexity is *effectively* @f$ O(1)
+     * @f$.
+     */
+    reference operator[](size_type index) const { return impl_.get(index); }
+
+    /*!
+     * Returns a `const` reference to the element at position
+     * `index`. It throws an `std::out_of_range` exception when @f$
+     * index \geq size() @f$.  It does not allocate memory and its
+     * complexity is *effectively* @f$ O(1) @f$.
+     */
+    reference at(size_type index) const { return impl_.get_check(index); }
+
+    /*!
+     * Inserts `value` at the end.  It may allocate memory and its
+     * complexity is *effectively* @f$ O(1) @f$.
+     */
+    void push_back(value_type value)
+    {
+        impl_.push_back_mut(*this, std::move(value));
+    }
+
+    /*!
+     * Sets to the value `value` at position `idx`.
+     * Undefined for `index >= size()`.
+     * It may allocate memory and its complexity is
+     * *effectively* @f$ O(1) @f$.
+     */
+    void set(size_type index, value_type value)
+    {
+        impl_.assoc_mut(*this, index, std::move(value));
+    }
+
+    /*!
+     * Updates the vector to contain the result of the expression
+     * `fn((*this)[idx])` at position `idx`.
+     * Undefined for `index >= size()`.
+     * It may allocate memory and its complexity is
+     * *effectively* @f$ O(1) @f$.
+     */
+    template <typename FnT>
+    void update(size_type index, FnT&& fn)
+    {
+        impl_.update_mut(*this, index, std::forward<FnT>(fn));
+    }
+
+    /*!
+     * Resizes the vector to only contain the first `min(elems, size())`
+     * elements. It may allocate memory and its complexity is
+     * *effectively* @f$ O(1) @f$.
+     */
+    void take(size_type elems) { impl_.take_mut(*this, elems); }
+
+    /*!
+     * Removes the first `min(elems, size())`
+     * elements. It may allocate memory and its complexity is
+     * *effectively* @f$ O(1) @f$.
+     */
+    void drop(size_type elems) { impl_.drop_mut(*this, elems); }
+
+    /*!
+     * Appends the contents of the `r` at the end.  It may allocate
+     * memory and its complexity is:
+     * @f$ O(log(max(size_r, size_l))) @f$
+     */
+    void append(flex_vector_transient& r)
+    {
+        r.owner_t::operator=(owner_t{});
+        concat_mut_l(impl_, *this, r.impl_);
+    }
+    void append(flex_vector_transient&& r)
+    {
+        concat_mut_lr_l(impl_, *this, r.impl_, r);
+    }
+
+    /*!
+     * Prepends the contents of the `l` at the beginning.  It may
+     * allocate memory and its complexity is:
+     * @f$ O(log(max(size_r, size_l))) @f$
+     */
+    void prepend(flex_vector_transient& l)
+    {
+        l.owner_t::operator=(owner_t{});
+        concat_mut_r(l.impl_, impl_, *this);
+    }
+    void prepend(flex_vector_transient&& l)
+    {
+        concat_mut_lr_r(l.impl_, l, impl_, *this);
+    }
+
+    /*!
+     * Returns an @a immutable form of this container, an
+     * `immer::flex_vector`.
+     */
+    IMMER_NODISCARD persistent_type persistent() &
+    {
+        this->owner_t::operator=(owner_t{});
+        return persistent_type{impl_};
+    }
+    IMMER_NODISCARD persistent_type persistent() &&
+    {
+        return persistent_type{std::move(impl_)};
+    }
+
+private:
+    friend persistent_type;
+
+    flex_vector_transient(impl_t impl)
+        : impl_(std::move(impl))
+    {}
+
+    impl_t impl_ = impl_t::empty();
+};
+
+} // namespace immer
diff --git a/immer/heap/cpp_heap.hpp b/immer/heap/cpp_heap.hpp
new file mode 100644
index 000000000000..cd129b406bea
--- /dev/null
+++ b/immer/heap/cpp_heap.hpp
@@ -0,0 +1,41 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <memory>
+
+namespace immer {
+
+/*!
+ * A heap that uses `operator new` and `operator delete`.
+ */
+struct cpp_heap
+{
+    /*!
+     * Returns a pointer to a memory region of size `size`, if the
+     * allocation was successful, and throws otherwise.
+     */
+    template <typename... Tags>
+    static void* allocate(std::size_t size, Tags...)
+    {
+        return ::operator new(size);
+    }
+
+    /*!
+     * Releases a memory region `data` that was previously returned by
+     * `allocate`.  One must not use nor deallocate again a memory
+     * region once it has been deallocated.
+     */
+    static void deallocate(std::size_t size, void* data)
+    {
+        ::operator delete(data);
+    }
+};
+
+} // namespace immer
diff --git a/immer/heap/debug_size_heap.hpp b/immer/heap/debug_size_heap.hpp
new file mode 100644
index 000000000000..d5288c646f8d
--- /dev/null
+++ b/immer/heap/debug_size_heap.hpp
@@ -0,0 +1,69 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/config.hpp>
+#include <immer/heap/identity_heap.hpp>
+
+#include <cassert>
+#include <cstddef>
+#include <type_traits>
+#include <memory>
+
+namespace immer {
+
+#if IMMER_ENABLE_DEBUG_SIZE_HEAP
+
+/*!
+ * A heap that in debug mode ensures that the sizes for allocation and
+ * deallocation do match.
+ */
+template <typename Base>
+struct debug_size_heap
+{
+#if defined(__MINGW32__) && !defined(__MINGW64__)
+    // There is a bug in MinGW 32bit:
+    // https://sourceforge.net/p/mingw-w64/bugs/778/ It causes different
+    // versions of std::max_align_t to be defined, depending on inclusion order
+    // of stddef.h and stdint.h. As we have no control over the inclusion order
+    // here (as it might be set in stone by the outside world), we can't easily
+    // pin it to one of both versions of std::max_align_t. This means, we have
+    // to hardcode extra_size for MinGW 32bit builds until the mentioned bug is
+    // fixed.
+    constexpr static auto extra_size = 8;
+#else
+    constexpr static auto extra_size = sizeof(
+        std::aligned_storage_t<sizeof(std::size_t), alignof(std::max_align_t)>);
+#endif
+
+    template <typename... Tags>
+    static void* allocate(std::size_t size, Tags... tags)
+    {
+        auto p = (std::size_t*) Base::allocate(size + extra_size, tags...);
+        new (p) std::size_t{size};
+        return ((char*) p) + extra_size;
+    }
+
+    template <typename... Tags>
+    static void deallocate(std::size_t size, void* data, Tags... tags)
+    {
+        auto p = (std::size_t*) (((char*) data) - extra_size);
+        assert(*p == size);
+        Base::deallocate(size + extra_size, p, tags...);
+    }
+};
+
+#else // IMMER_ENABLE_DEBUG_SIZE_HEAP
+
+template <typename Base>
+using debug_size_heap = identity_heap<Base>;
+
+#endif // !IMMER_ENABLE_DEBUG_SIZE_HEAP
+
+} // namespace immer
diff --git a/immer/heap/free_list_heap.hpp b/immer/heap/free_list_heap.hpp
new file mode 100644
index 000000000000..dc25b10184a1
--- /dev/null
+++ b/immer/heap/free_list_heap.hpp
@@ -0,0 +1,83 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/heap/free_list_node.hpp>
+#include <immer/heap/with_data.hpp>
+
+#include <atomic>
+#include <cassert>
+
+namespace immer {
+
+/*!
+ * Adaptor that does not release the memory to the parent heap but
+ * instead it keeps the memory in a thread-safe global free list. Must
+ * be preceded by a `with_data<free_list_node, ...>` heap adaptor.
+ *
+ * @tparam Size Maximum size of the objects to be allocated.
+ * @tparam Base Type of the parent heap.
+ */
+template <std::size_t Size, std::size_t Limit, typename Base>
+struct free_list_heap : Base
+{
+    using base_t = Base;
+
+    template <typename... Tags>
+    static void* allocate(std::size_t size, Tags...)
+    {
+        assert(size <= sizeof(free_list_node) + Size);
+        assert(size >= sizeof(free_list_node));
+
+        free_list_node* n;
+        do {
+            n = head().data;
+            if (!n) {
+                auto p = base_t::allocate(Size + sizeof(free_list_node));
+                return static_cast<free_list_node*>(p);
+            }
+        } while (!head().data.compare_exchange_weak(n, n->next));
+        head().count.fetch_sub(1u, std::memory_order_relaxed);
+        return n;
+    }
+
+    template <typename... Tags>
+    static void deallocate(std::size_t size, void* data, Tags...)
+    {
+        assert(size <= sizeof(free_list_node) + Size);
+        assert(size >= sizeof(free_list_node));
+
+        // we use relaxed, because we are fine with temporarily having
+        // a few more/less buffers in free list
+        if (head().count.load(std::memory_order_relaxed) >= Limit) {
+            base_t::deallocate(Size + sizeof(free_list_node), data);
+        } else {
+            auto n = static_cast<free_list_node*>(data);
+            do {
+                n->next = head().data;
+            } while (!head().data.compare_exchange_weak(n->next, n));
+            head().count.fetch_add(1u, std::memory_order_relaxed);
+        }
+    }
+
+private:
+    struct head_t
+    {
+        std::atomic<free_list_node*> data;
+        std::atomic<std::size_t> count;
+    };
+
+    static head_t& head()
+    {
+        static head_t head_{{nullptr}, {0}};
+        return head_;
+    }
+};
+
+} // namespace immer
diff --git a/immer/heap/free_list_node.hpp b/immer/heap/free_list_node.hpp
new file mode 100644
index 000000000000..acab4779aa43
--- /dev/null
+++ b/immer/heap/free_list_node.hpp
@@ -0,0 +1,24 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/heap/with_data.hpp>
+
+namespace immer {
+
+struct free_list_node
+{
+    free_list_node* next;
+};
+
+template <typename Base>
+struct with_free_list_node : with_data<free_list_node, Base>
+{};
+
+} // namespace immer
diff --git a/immer/heap/gc_heap.hpp b/immer/heap/gc_heap.hpp
new file mode 100644
index 000000000000..8494bd26694f
--- /dev/null
+++ b/immer/heap/gc_heap.hpp
@@ -0,0 +1,127 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/config.hpp>
+#include <immer/heap/tags.hpp>
+
+#if IMMER_HAS_LIBGC
+#include <gc/gc.h>
+#else
+#error "Using garbage collection requires libgc"
+#endif
+
+#include <cstdlib>
+#include <memory>
+
+namespace immer {
+
+#ifdef __APPLE__
+#define IMMER_GC_REQUIRE_INIT 1
+#else
+#define IMMER_GC_REQUIRE_INIT 0
+#endif
+
+#if IMMER_GC_REQUIRE_INIT
+
+namespace detail {
+
+template <int Dummy = 0>
+struct gc_initializer
+{
+    gc_initializer() { GC_init(); }
+    static gc_initializer init;
+};
+
+template <int D>
+gc_initializer<D> gc_initializer<D>::init{};
+
+inline void gc_initializer_guard()
+{
+    static gc_initializer<> init_ = gc_initializer<>::init;
+    (void) init_;
+}
+
+} // namespace detail
+
+#define IMMER_GC_INIT_GUARD_ ::immer::detail::gc_initializer_guard()
+
+#else
+
+#define IMMER_GC_INIT_GUARD_
+
+#endif // IMMER_GC_REQUIRE_INIT
+
+/*!
+ * Heap that uses a tracing garbage collector.
+ *
+ * @rst
+ *
+ * This heap uses the `Boehm's conservative garbage collector`_ under
+ * the hood.  This is a tracing garbage collector that automatically
+ * reclaims unused memory.  Thus, there is no need to call
+ * ``deallocate()`` in order to release memory.
+ *
+ * .. admonition:: Dependencies
+ *    :class: tip
+ *
+ *    In order to use this header file, you need to make sure that
+ *    Boehm's ``libgc`` is in your include path and link to its binary
+ *    library.
+ *
+ * .. caution:: Memory that is allocated with the standard ``malloc``
+ *    and ``free`` is not visible to ``libgc`` when it is looking for
+ *    references.  This means that if, let's say, you store a
+ *    :cpp:class:`immer::vector` using a ``gc_heap`` inside a
+ *    ``std::vector`` that uses a standard allocator, the memory of
+ *    the former might be released automatically at unexpected times
+ *    causing crashes.
+ *
+ * .. caution:: When using a ``gc_heap`` in combination with immutable
+ *    containers, the destructors of the contained objects will never
+ *    be called.  It is ok to store containers inside containers as
+ *    long as all of them use a ``gc_heap`` consistently, but storing
+ *    other kinds of objects with relevant destructors
+ *    (e.g. containers with reference counting or other kinds of
+ *    *resource handles*) might cause memory leaks and other problems.
+ *
+ * .. _boehm's conservative garbage collector: https://github.com/ivmai/bdwgc
+ *
+ * @endrst
+ */
+class gc_heap
+{
+public:
+    static void* allocate(std::size_t n)
+    {
+        IMMER_GC_INIT_GUARD_;
+        auto p = GC_malloc(n);
+        if (IMMER_UNLIKELY(!p))
+            throw std::bad_alloc{};
+        return p;
+    }
+
+    static void* allocate(std::size_t n, norefs_tag)
+    {
+        IMMER_GC_INIT_GUARD_;
+        auto p = GC_malloc_atomic(n);
+        if (IMMER_UNLIKELY(!p))
+            throw std::bad_alloc{};
+        return p;
+    }
+
+    static void deallocate(std::size_t, void* data) { GC_free(data); }
+
+    static void deallocate(std::size_t, void* data, norefs_tag)
+    {
+        GC_free(data);
+    }
+};
+
+} // namespace immer
diff --git a/immer/heap/heap_policy.hpp b/immer/heap/heap_policy.hpp
new file mode 100644
index 000000000000..582c113f334f
--- /dev/null
+++ b/immer/heap/heap_policy.hpp
@@ -0,0 +1,141 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/config.hpp>
+#include <immer/heap/debug_size_heap.hpp>
+#include <immer/heap/free_list_heap.hpp>
+#include <immer/heap/split_heap.hpp>
+#include <immer/heap/thread_local_free_list_heap.hpp>
+
+#include <algorithm>
+#include <cstdlib>
+
+namespace immer {
+
+/*!
+ * Heap policy that unconditionally uses its `Heap` argument.
+ */
+template <typename Heap>
+struct heap_policy
+{
+    using type = Heap;
+
+    template <std::size_t>
+    struct optimized
+    {
+        using type = Heap;
+    };
+};
+
+template <typename Deriv, typename HeapPolicy>
+struct enable_optimized_heap_policy
+{
+    static void* operator new(std::size_t size)
+    {
+        using heap_type =
+            typename HeapPolicy ::template optimized<sizeof(Deriv)>::type;
+
+        return heap_type::allocate(size);
+    }
+
+    static void operator delete(void* data, std::size_t size)
+    {
+        using heap_type =
+            typename HeapPolicy ::template optimized<sizeof(Deriv)>::type;
+
+        heap_type::deallocate(size, data);
+    }
+};
+
+/*!
+ * Heap policy that returns a heap with a free list of objects
+ * of `max_size = max(Sizes...)` on top of an underlying `Heap`.  Note
+ * these two properties of the resulting heap:
+ *
+ * - Allocating an object that is bigger than `max_size` may trigger
+ *   *undefined behavior*.
+ *
+ * - Allocating an object of size less than `max_size` still
+ *   returns an object of `max_size`.
+ *
+ * Basically, this heap will always return objects of `max_size`.
+ * When an object is freed, it does not directly invoke `std::free`,
+ * but it keeps the object in a global linked list instead.  When a
+ * new object is requested, it does not need to call `std::malloc` but
+ * it can directly pop and return the other object from the global
+ * list, a much faster operation.
+ *
+ * This actually creates a hierarchy with two free lists:
+ *
+ * - A `thread_local` free list is used first.  It does not need any
+ *   kind of synchronization and is very fast.  When the thread
+ *   finishes, its contents are returned to the next free list.
+ *
+ * - A global free list using lock-free access via atomics.
+ *
+ * @tparam Heap Heap to be used when the free list is empty.
+ *
+ * @rst
+ *
+ * .. tip:: For many applications that use immutable data structures
+ *    significantly, this is actually the best heap policy, and it
+ *    might become the default in the future.
+ *
+ *    Note that most our data structures internally use trees with the
+ *    same big branching factors.  This means that all *vectors*,
+ *    *maps*, etc. can just allocate elements from the same free-list
+ *    optimized heap.  Not only does this lower the allocation time,
+ *    but also makes up for more efficient *cache utilization*.  When
+ *    a new node is needed, there are high chances the allocator will
+ *    return a node that was just accessed.  When batches of immutable
+ *    updates are made, this can make a significant difference.
+ *
+ * @endrst
+ */
+template <typename Heap, std::size_t Limit = default_free_list_size>
+struct free_list_heap_policy
+{
+    using type = debug_size_heap<Heap>;
+
+    template <std::size_t Size>
+    struct optimized
+    {
+        using type =
+            split_heap<Size,
+                       with_free_list_node<thread_local_free_list_heap<
+                           Size,
+                           Limit,
+                           free_list_heap<Size, Limit, debug_size_heap<Heap>>>>,
+                       debug_size_heap<Heap>>;
+    };
+};
+
+/*!
+ * Similar to @ref free_list_heap_policy, but it assumes no
+ * multi-threading, so a single global free list with no concurrency
+ * checks is used.
+ */
+template <typename Heap, std::size_t Limit = default_free_list_size>
+struct unsafe_free_list_heap_policy
+{
+    using type = Heap;
+
+    template <std::size_t Size>
+    struct optimized
+    {
+        using type = split_heap<
+            Size,
+            with_free_list_node<
+                unsafe_free_list_heap<Size, Limit, debug_size_heap<Heap>>>,
+            debug_size_heap<Heap>>;
+    };
+};
+
+} // namespace immer
diff --git a/immer/heap/identity_heap.hpp b/immer/heap/identity_heap.hpp
new file mode 100644
index 000000000000..032cb3f221d0
--- /dev/null
+++ b/immer/heap/identity_heap.hpp
@@ -0,0 +1,34 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <cstdlib>
+
+namespace immer {
+
+/*!
+ * A heap that simply passes on to the parent heap.
+ */
+template <typename Base>
+struct identity_heap : Base
+{
+    template <typename... Tags>
+    static void* allocate(std::size_t size, Tags... tags)
+    {
+        return Base::allocate(size, tags...);
+    }
+
+    template <typename... Tags>
+    static void deallocate(std::size_t size, void* data, Tags... tags)
+    {
+        Base::deallocate(size, data, tags...);
+    }
+};
+
+} // namespace immer
diff --git a/immer/heap/malloc_heap.hpp b/immer/heap/malloc_heap.hpp
new file mode 100644
index 000000000000..a0074d17c0fb
--- /dev/null
+++ b/immer/heap/malloc_heap.hpp
@@ -0,0 +1,44 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/config.hpp>
+
+#include <cstdlib>
+#include <memory>
+
+namespace immer {
+
+/*!
+ * A heap that uses `std::malloc` and `std::free` to manage memory.
+ */
+struct malloc_heap
+{
+    /*!
+     * Returns a pointer to a memory region of size `size`, if the
+     * allocation was successful and throws `std::bad_alloc` otherwise.
+     */
+    template <typename... Tags>
+    static void* allocate(std::size_t size, Tags...)
+    {
+        auto p = std::malloc(size);
+        if (IMMER_UNLIKELY(!p))
+            throw std::bad_alloc{};
+        return p;
+    }
+
+    /*!
+     * Releases a memory region `data` that was previously returned by
+     * `allocate`.  One must not use nor deallocate again a memory
+     * region that once it has been deallocated.
+     */
+    static void deallocate(std::size_t, void* data) { std::free(data); }
+};
+
+} // namespace immer
diff --git a/immer/heap/split_heap.hpp b/immer/heap/split_heap.hpp
new file mode 100644
index 000000000000..37272d30ecc1
--- /dev/null
+++ b/immer/heap/split_heap.hpp
@@ -0,0 +1,40 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <atomic>
+#include <cassert>
+
+namespace immer {
+
+/*!
+ * Adaptor that uses `SmallHeap` for allocations that are smaller or
+ * equal to `Size` and `BigHeap` otherwise.
+ */
+template <std::size_t Size, typename SmallHeap, typename BigHeap>
+struct split_heap
+{
+    template <typename... Tags>
+    static void* allocate(std::size_t size, Tags... tags)
+    {
+        return size <= Size ? SmallHeap::allocate(size, tags...)
+                            : BigHeap::allocate(size, tags...);
+    }
+
+    template <typename... Tags>
+    static void deallocate(std::size_t size, void* data, Tags... tags)
+    {
+        if (size <= Size)
+            SmallHeap::deallocate(size, data, tags...);
+        else
+            BigHeap::deallocate(size, data, tags...);
+    }
+};
+
+} // namespace immer
diff --git a/immer/heap/tags.hpp b/immer/heap/tags.hpp
new file mode 100644
index 000000000000..d1ce48d05c89
--- /dev/null
+++ b/immer/heap/tags.hpp
@@ -0,0 +1,16 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+namespace immer {
+
+struct norefs_tag
+{};
+
+} // namespace immer
diff --git a/immer/heap/thread_local_free_list_heap.hpp b/immer/heap/thread_local_free_list_heap.hpp
new file mode 100644
index 000000000000..9f7f48f43f6c
--- /dev/null
+++ b/immer/heap/thread_local_free_list_heap.hpp
@@ -0,0 +1,55 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/heap/unsafe_free_list_heap.hpp>
+
+namespace immer {
+namespace detail {
+
+template <typename Heap>
+struct thread_local_free_list_storage
+{
+    struct head_t
+    {
+        free_list_node* data;
+        std::size_t count;
+
+        ~head_t() { Heap::clear(); }
+    };
+
+    static head_t& head()
+    {
+        thread_local static head_t head_{nullptr, 0};
+        return head_;
+    }
+};
+
+} // namespace detail
+
+/*!
+ * Adaptor that does not release the memory to the parent heap but
+ * instead it keeps the memory in a `thread_local` global free
+ * list. Must be preceded by a `with_data<free_list_node, ...>` heap
+ * adaptor.  When the current thread finishes, the memory is returned
+ * to the parent heap.
+ *
+ * @tparam Size  Maximum size of the objects to be allocated.
+ * @tparam Limit Maximum number of elements to keep in the free list.
+ * @tparam Base  Type of the parent heap.
+ */
+template <std::size_t Size, std::size_t Limit, typename Base>
+struct thread_local_free_list_heap
+    : detail::unsafe_free_list_heap_impl<detail::thread_local_free_list_storage,
+                                         Size,
+                                         Limit,
+                                         Base>
+{};
+
+} // namespace immer
diff --git a/immer/heap/unsafe_free_list_heap.hpp b/immer/heap/unsafe_free_list_heap.hpp
new file mode 100644
index 000000000000..942f07802476
--- /dev/null
+++ b/immer/heap/unsafe_free_list_heap.hpp
@@ -0,0 +1,109 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <cassert>
+#include <immer/config.hpp>
+#include <immer/heap/free_list_node.hpp>
+
+namespace immer {
+namespace detail {
+
+template <typename Heap>
+struct unsafe_free_list_storage
+{
+    struct head_t
+    {
+        free_list_node* data;
+        std::size_t count;
+    };
+
+    static head_t& head()
+    {
+        static head_t head_{nullptr, 0};
+        return head_;
+    }
+};
+
+template <template <class> class Storage,
+          std::size_t Size,
+          std::size_t Limit,
+          typename Base>
+class unsafe_free_list_heap_impl : Base
+{
+    using storage = Storage<unsafe_free_list_heap_impl>;
+
+public:
+    using base_t = Base;
+
+    template <typename... Tags>
+    static void* allocate(std::size_t size, Tags...)
+    {
+        assert(size <= sizeof(free_list_node) + Size);
+        assert(size >= sizeof(free_list_node));
+
+        auto n = storage::head().data;
+        if (!n) {
+            auto p = base_t::allocate(Size + sizeof(free_list_node));
+            return static_cast<free_list_node*>(p);
+        }
+        --storage::head().count;
+        storage::head().data = n->next;
+        return n;
+    }
+
+    template <typename... Tags>
+    static void deallocate(std::size_t size, void* data, Tags...)
+    {
+        assert(size <= sizeof(free_list_node) + Size);
+        assert(size >= sizeof(free_list_node));
+
+        if (storage::head().count >= Limit)
+            base_t::deallocate(Size + sizeof(free_list_node), data);
+        else {
+            auto n               = static_cast<free_list_node*>(data);
+            n->next              = storage::head().data;
+            storage::head().data = n;
+            ++storage::head().count;
+        }
+    }
+
+    static void clear()
+    {
+        while (storage::head().data) {
+            auto n = storage::head().data->next;
+            base_t::deallocate(Size + sizeof(free_list_node),
+                               storage::head().data);
+            storage::head().data = n;
+            --storage::head().count;
+        }
+    }
+};
+
+} // namespace detail
+
+/*!
+ * Adaptor that does not release the memory to the parent heap but
+ * instead it keeps the memory in a global free list that **is not
+ * thread-safe**. Must be preceded by a `with_data<free_list_node,
+ * ...>` heap adaptor.
+ *
+ * @tparam Size  Maximum size of the objects to be allocated.
+ * @tparam Limit Maximum number of elements to keep in the free list.
+ * @tparam Base  Type of the parent heap.
+ */
+template <std::size_t Size, std::size_t Limit, typename Base>
+struct unsafe_free_list_heap
+    : detail::unsafe_free_list_heap_impl<detail::unsafe_free_list_storage,
+                                         Size,
+                                         Limit,
+                                         Base>
+{};
+
+} // namespace immer
diff --git a/immer/heap/with_data.hpp b/immer/heap/with_data.hpp
new file mode 100644
index 000000000000..1e8c2abb082e
--- /dev/null
+++ b/immer/heap/with_data.hpp
@@ -0,0 +1,43 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <cstdio>
+
+namespace immer {
+
+/*!
+ * Appends a default constructed extra object of type `T` immediately
+ * *before* the requested region.
+ *
+ * @tparam T Type of the appended data.
+ * @tparam Base Type of the parent heap.
+ */
+template <typename T, typename Base>
+struct with_data : Base
+{
+    using base_t = Base;
+
+    template <typename... Tags>
+    static void* allocate(std::size_t size, Tags... tags)
+    {
+        auto p = base_t::allocate(size + sizeof(T), tags...);
+        return new (p) T{} + 1;
+    }
+
+    template <typename... Tags>
+    static void deallocate(std::size_t size, void* p, Tags... tags)
+    {
+        auto dp = static_cast<T*>(p) - 1;
+        dp->~T();
+        base_t::deallocate(size + sizeof(T), dp, tags...);
+    }
+};
+
+} // namespace immer
diff --git a/immer/map.hpp b/immer/map.hpp
new file mode 100644
index 000000000000..58a84d2de939
--- /dev/null
+++ b/immer/map.hpp
@@ -0,0 +1,342 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/detail/hamts/champ.hpp>
+#include <immer/detail/hamts/champ_iterator.hpp>
+#include <immer/memory_policy.hpp>
+
+#include <functional>
+
+namespace immer {
+
+template <typename K,
+          typename T,
+          typename Hash,
+          typename Equal,
+          typename MemoryPolicy,
+          detail::hamts::bits_t B>
+class map_transient;
+
+/*!
+ * Immutable unordered mapping of values from type `K` to type `T`.
+ *
+ * @tparam K    The type of the keys.
+ * @tparam T    The type of the values to be stored in the container.
+ * @tparam Hash The type of a function object capable of hashing
+ *              values of type `T`.
+ * @tparam Equal The type of a function object capable of comparing
+ *              values of type `T`.
+ * @tparam MemoryPolicy Memory management policy. See @ref
+ *              memory_policy.
+ *
+ * @rst
+ *
+ * This container provides a good trade-off between cache locality,
+ * search, update performance and structural sharing.  It does so by
+ * storing the data in contiguous chunks of :math:`2^{B}` elements.
+ * When storing big objects, the size of these contiguous chunks can
+ * become too big, damaging performance.  If this is measured to be
+ * problematic for a specific use-case, it can be solved by using a
+ * `immer::box` to wrap the type `T`.
+ *
+ * **Example**
+ *   .. literalinclude:: ../example/map/intro.cpp
+ *      :language: c++
+ *      :start-after: intro/start
+ *      :end-before:  intro/end
+ *
+ * @endrst
+ *
+ */
+template <typename K,
+          typename T,
+          typename Hash           = std::hash<K>,
+          typename Equal          = std::equal_to<K>,
+          typename MemoryPolicy   = default_memory_policy,
+          detail::hamts::bits_t B = default_bits>
+class map
+{
+    using value_t = std::pair<K, T>;
+
+    struct project_value
+    {
+        const T& operator()(const value_t& v) const noexcept
+        {
+            return v.second;
+        }
+    };
+
+    struct project_value_ptr
+    {
+        const T* operator()(const value_t& v) const noexcept
+        {
+            return &v.second;
+        }
+    };
+
+    struct combine_value
+    {
+        template <typename Kf, typename Tf>
+        value_t operator()(Kf&& k, Tf&& v) const
+        {
+            return {std::forward<Kf>(k), std::forward<Tf>(v)};
+        }
+    };
+
+    struct default_value
+    {
+        const T& operator()() const
+        {
+            static T v{};
+            return v;
+        }
+    };
+
+    struct error_value
+    {
+        const T& operator()() const
+        {
+            throw std::out_of_range{"key not found"};
+        }
+    };
+
+    struct hash_key
+    {
+        auto operator()(const value_t& v) { return Hash{}(v.first); }
+
+        auto operator()(const K& v) { return Hash{}(v); }
+    };
+
+    struct equal_key
+    {
+        auto operator()(const value_t& a, const value_t& b)
+        {
+            return Equal{}(a.first, b.first);
+        }
+
+        auto operator()(const value_t& a, const K& b)
+        {
+            return Equal{}(a.first, b);
+        }
+    };
+
+    struct equal_value
+    {
+        auto operator()(const value_t& a, const value_t& b)
+        {
+            return Equal{}(a.first, b.first) && a.second == b.second;
+        }
+    };
+
+    using impl_t =
+        detail::hamts::champ<value_t, hash_key, equal_key, MemoryPolicy, B>;
+
+public:
+    using key_type        = K;
+    using mapped_type     = T;
+    using value_type      = std::pair<K, T>;
+    using size_type       = detail::hamts::size_t;
+    using diference_type  = std::ptrdiff_t;
+    using hasher          = Hash;
+    using key_equal       = Equal;
+    using reference       = const value_type&;
+    using const_reference = const value_type&;
+
+    using iterator = detail::hamts::
+        champ_iterator<value_t, hash_key, equal_key, MemoryPolicy, B>;
+    using const_iterator = iterator;
+
+    using transient_type = map_transient<K, T, Hash, Equal, MemoryPolicy, B>;
+
+    /*!
+     * Default constructor.  It creates a set of `size() == 0`.  It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    map() = default;
+
+    /*!
+     * Returns an iterator pointing at the first element of the
+     * collection. It does not allocate memory and its complexity is
+     * @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD iterator begin() const { return {impl_}; }
+
+    /*!
+     * Returns an iterator pointing just after the last element of the
+     * collection. It does not allocate and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD iterator end() const
+    {
+        return {impl_, typename iterator::end_t{}};
+    }
+
+    /*!
+     * Returns the number of elements in the container.  It does
+     * not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD size_type size() const { return impl_.size; }
+
+    /*!
+     * Returns `true` if there are no elements in the container.  It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD bool empty() const { return impl_.size == 0; }
+
+    /*!
+     * Returns `1` when the key `k` is contained in the map or `0`
+     * otherwise. It won't allocate memory and its complexity is
+     * *effectively* @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD size_type count(const K& k) const
+    {
+        return impl_.template get<detail::constantly<size_type, 1>,
+                                  detail::constantly<size_type, 0>>(k);
+    }
+
+    /*!
+     * Returns a `const` reference to the values associated to the key
+     * `k`.  If the key is not contained in the map, it returns a
+     * default constructed value.  It does not allocate memory and its
+     * complexity is *effectively* @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD const T& operator[](const K& k) const
+    {
+        return impl_.template get<project_value, default_value>(k);
+    }
+
+    /*!
+     * Returns a `const` reference to the values associated to the key
+     * `k`.  If the key is not contained in the map, throws an
+     * `std::out_of_range` error.  It does not allocate memory and its
+     * complexity is *effectively* @f$ O(1) @f$.
+     */
+    const T& at(const K& k) const
+    {
+        return impl_.template get<project_value, error_value>(k);
+    }
+
+    /*!
+     * Returns a pointer to the value associated with the key `k`.  If
+     * the key is not contained in the map, a `nullptr` is returned.
+     * It does not allocate memory and its complexity is *effectively*
+     * @f$ O(1) @f$.
+     *
+     * @rst
+     *
+     * .. admonition:: Why doesn't this function return an iterator?
+     *
+     *   Associative containers from the C++ standard library provide a
+     *   ``find`` method that returns an iterator pointing to the
+     *   element in the container or ``end()`` when the key is missing.
+     *   In the case of an unordered container, the only meaningful
+     *   thing one may do with it is to compare it with the end, to
+ *   test if the find was successful, and dereference it.  This
+     *   comparison is cumbersome compared to testing for a non-empty
+     *   optional value.  Furthermore, for an immutable container,
+     *   returning an iterator would have some additional performance
+     *   cost, with no benefits otherwise.
+     *
+     *   In our opinion, this function should return a
+     *   ``std::optional<const T&>`` but this construction is not valid
+     *   in any current standard.  As a compromise we return a
+     *   pointer, which has similar syntactic properties yet it is
+ *   unfortunately unnecessarily unrestricted.
+     *
+     * @endrst
+     */
+    IMMER_NODISCARD const T* find(const K& k) const
+    {
+        return impl_.template get<project_value_ptr,
+                                  detail::constantly<const T*, nullptr>>(k);
+    }
+
+    /*!
+     * Returns whether the sets are equal.
+     */
+    IMMER_NODISCARD bool operator==(const map& other) const
+    {
+        return impl_.template equals<equal_value>(other.impl_);
+    }
+    IMMER_NODISCARD bool operator!=(const map& other) const
+    {
+        return !(*this == other);
+    }
+
+    /*!
+     * Returns a map containing the association `value`.  If the key is
+     * already in the map, it replaces its association in the map.
+     * It may allocate memory and its complexity is *effectively* @f$
+     * O(1) @f$.
+     */
+    IMMER_NODISCARD map insert(value_type value) const
+    {
+        return impl_.add(std::move(value));
+    }
+
+    /*!
+     * Returns a map containing the association `(k, v)`.  If the key
+     * is already in the map, it replaces its association in the map.
+     * It may allocate memory and its complexity is *effectively* @f$
+     * O(1) @f$.
+     */
+    IMMER_NODISCARD map set(key_type k, mapped_type v) const
+    {
+        return impl_.add({std::move(k), std::move(v)});
+    }
+
+    /*!
+     * Returns a map replacing the association `(k, v)` by the new
+     * association `(k, fn(v))`, where `v` is the
+     * currently associated value for `k` in the map or a default
+     * constructed value otherwise. It may allocate memory
+     * and its complexity is *effectively* @f$ O(1) @f$.
+     */
+    template <typename Fn>
+    IMMER_NODISCARD map update(key_type k, Fn&& fn) const
+    {
+        return impl_
+            .template update<project_value, default_value, combine_value>(
+                std::move(k), std::forward<Fn>(fn));
+    }
+
+    /*!
+     * Returns a map without the key `k`.  If the key is not
+     * associated in the map it returns the same map.  It may allocate
+     * memory and its complexity is *effectively* @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD map erase(const K& k) const { return impl_.sub(k); }
+
+    /*!
+     * Returns a @a transient form of this container, a
+     * `immer::map_transient`.
+     */
+    IMMER_NODISCARD transient_type transient() const&
+    {
+        return transient_type{impl_};
+    }
+    IMMER_NODISCARD transient_type transient() &&
+    {
+        return transient_type{std::move(impl_)};
+    }
+
+    // Semi-private
+    const impl_t& impl() const { return impl_; }
+
+private:
+    friend transient_type;
+
+    map(impl_t impl)
+        : impl_(std::move(impl))
+    {}
+
+    impl_t impl_ = impl_t::empty();
+};
+
+} // namespace immer
diff --git a/immer/map_transient.hpp b/immer/map_transient.hpp
new file mode 100644
index 000000000000..8821c5d12b56
--- /dev/null
+++ b/immer/map_transient.hpp
@@ -0,0 +1,41 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/detail/hamts/champ.hpp>
+#include <immer/memory_policy.hpp>
+
+#include <functional>
+
+namespace immer {
+
+/*!
+ * @rst
+ *
+ * .. admonition:: Become a sponsor!
+ *    :class: danger
+ *
+ *    This component is planned but it has **not been implemented yet**.
+ *
+ *    Transients can critically improve the performance of applications
+ *    intensively using ``set`` and ``map``. If you are working for an
+ *    organization using the library in a commercial project, please consider
+ *    **sponsoring this work**: juanpe@sinusoid.al
+ *
+ * @endrst
+ */
+template <typename K,
+          typename T,
+          typename Hash           = std::hash<K>,
+          typename Equal          = std::equal_to<K>,
+          typename MemoryPolicy   = default_memory_policy,
+          detail::hamts::bits_t B = default_bits>
+class map_transient;
+
+} // namespace immer
diff --git a/immer/memory_policy.hpp b/immer/memory_policy.hpp
new file mode 100644
index 000000000000..b4f665bf621c
--- /dev/null
+++ b/immer/memory_policy.hpp
@@ -0,0 +1,135 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/heap/cpp_heap.hpp>
+#include <immer/heap/heap_policy.hpp>
+#include <immer/refcount/no_refcount_policy.hpp>
+#include <immer/refcount/refcount_policy.hpp>
+#include <immer/refcount/unsafe_refcount_policy.hpp>
+#include <immer/transience/gc_transience_policy.hpp>
+#include <immer/transience/no_transience_policy.hpp>
+#include <type_traits>
+
+namespace immer {
+
+/*!
+ * Metafunction that returns the best *transience policy* to use for a
+ * given *refcount policy*.
+ */
+template <typename RefcountPolicy>
+struct get_transience_policy
+    : std::conditional<std::is_same<RefcountPolicy, no_refcount_policy>::value,
+                       gc_transience_policy,
+                       no_transience_policy>
+{};
+
+template <typename T>
+using get_transience_policy_t = typename get_transience_policy<T>::type;
+
+/*!
+ * Metafunction that returns wether to *prefer fewer bigger objects*
+ * to use for a given *heap policy*.
+ */
+template <typename HeapPolicy>
+struct get_prefer_fewer_bigger_objects
+    : std::integral_constant<
+          bool,
+          std::is_same<HeapPolicy, heap_policy<cpp_heap>>::value>
+{};
+
+template <typename T>
+constexpr auto get_prefer_fewer_bigger_objects_v =
+    get_prefer_fewer_bigger_objects<T>::value;
+
+/*!
+ * Metafunction that returns wether to use *transient R-Values*
+ * for a given *refcount policy*.
+ */
+template <typename RefcountPolicy>
+struct get_use_transient_rvalues
+    : std::integral_constant<
+          bool,
+          !std::is_same<RefcountPolicy, no_refcount_policy>::value>
+{};
+
+template <typename T>
+constexpr auto get_use_transient_rvalues_v =
+    get_use_transient_rvalues<T>::value;
+
+/*!
+ * This is a default implementation of a *memory policy*.  A memory
+ * policy is just a bag of other policies plus some flags with hints
+ * to the user about the best way to use these strategies.
+ *
+ * @tparam HeapPolicy A *heap policy*, for example, @ref heap_policy.
+ * @tparam RefcountPolicy A *reference counting policy*, for example,
+ *         @ref refcount_policy.
+ * @tparam TransiencePolicy A *transience policy*, for example,
+ *         @ref no_transience_policy.
+ * @tparam PreferFewerBiggerObjects Boolean flag indicating whether
+ *         the user should prefer to allocate memory in bigger chungs
+ *         --e.g. by putting various objects in the same memory
+ *         region-- or not.
+ * @tparam UseTransientRValues Boolean flag indicating whether
+ *         immutable containers should try to modify contents in-place
+ *         when manipulating an r-value reference.
+ */
+template <typename HeapPolicy,
+          typename RefcountPolicy,
+          typename TransiencePolicy = get_transience_policy_t<RefcountPolicy>,
+          bool PreferFewerBiggerObjects =
+              get_prefer_fewer_bigger_objects_v<HeapPolicy>,
+          bool UseTransientRValues =
+              get_use_transient_rvalues_v<RefcountPolicy>>
+struct memory_policy
+{
+    using heap       = HeapPolicy;
+    using refcount   = RefcountPolicy;
+    using transience = TransiencePolicy;
+
+    static constexpr bool prefer_fewer_bigger_objects =
+        PreferFewerBiggerObjects;
+
+    static constexpr bool use_transient_rvalues = UseTransientRValues;
+
+    using transience_t = typename transience::template apply<heap>::type;
+};
+
+/*!
+ * The default *heap policy* just uses the standard heap with a
+ * @ref free_list_heap_policy.  If `IMMER_NO_FREE_LIST` is defined to `1`
+ * then it just uses the standard heap.
+ */
+#if IMMER_NO_FREE_LIST
+using default_heap_policy = heap_policy<debug_size_heap<cpp_heap>>;
+#else
+#if IMMER_NO_THREAD_SAFETY
+using default_heap_policy     = unsafe_free_list_heap_policy<cpp_heap>;
+#else
+using default_heap_policy = free_list_heap_policy<cpp_heap>;
+#endif
+#endif
+
+/*!
+ * By default we use thread safe reference counting.
+ */
+#if IMMER_NO_THREAD_SAFETY
+using default_refcount_policy = unsafe_refcount_policy;
+#else
+using default_refcount_policy = refcount_policy;
+#endif
+
+/*!
+ * The default memory policy.
+ */
+using default_memory_policy =
+    memory_policy<default_heap_policy, default_refcount_policy>;
+
+} // namespace immer
diff --git a/immer/refcount/enable_intrusive_ptr.hpp b/immer/refcount/enable_intrusive_ptr.hpp
new file mode 100644
index 000000000000..1185a219fd9b
--- /dev/null
+++ b/immer/refcount/enable_intrusive_ptr.hpp
@@ -0,0 +1,37 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/refcount/no_refcount_policy.hpp>
+
+namespace immer {
+
+template <typename Deriv, typename RefcountPolicy>
+class enable_intrusive_ptr
+{
+    mutable RefcountPolicy refcount_data_;
+
+public:
+    enable_intrusive_ptr()
+        : refcount_data_{disowned{}}
+    {}
+
+    friend void intrusive_ptr_add_ref(const Deriv* x)
+    {
+        x->refcount_data_.inc();
+    }
+
+    friend void intrusive_ptr_release(const Deriv* x)
+    {
+        if (x->refcount_data_.dec())
+            delete x;
+    }
+};
+
+} // namespace immer
diff --git a/immer/refcount/no_refcount_policy.hpp b/immer/refcount/no_refcount_policy.hpp
new file mode 100644
index 000000000000..24f9d489f54d
--- /dev/null
+++ b/immer/refcount/no_refcount_policy.hpp
@@ -0,0 +1,45 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+namespace immer {
+
+struct disowned
+{};
+
+struct no_spinlock
+{
+    bool try_lock() { return true; }
+    void lock() {}
+    void unlock() {}
+
+    struct scoped_lock
+    {
+        scoped_lock(no_spinlock&) {}
+    };
+};
+
+/*!
+ * Disables reference counting, to be used with an alternative garbage
+ * collection strategy like a `gc_heap`.
+ */
+struct no_refcount_policy
+{
+    using spinlock_type = no_spinlock;
+
+    no_refcount_policy(){};
+    no_refcount_policy(disowned) {}
+
+    void inc() {}
+    bool dec() { return false; }
+    void dec_unsafe() {}
+    bool unique() { return false; }
+};
+
+} // namespace immer
diff --git a/immer/refcount/refcount_policy.hpp b/immer/refcount/refcount_policy.hpp
new file mode 100644
index 000000000000..a7a282cd13a7
--- /dev/null
+++ b/immer/refcount/refcount_policy.hpp
@@ -0,0 +1,101 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/refcount/no_refcount_policy.hpp>
+
+#include <atomic>
+#include <cassert>
+#include <thread>
+#include <utility>
+
+// This has been shamelessly copied from boost...
+#if defined(_MSC_VER) && _MSC_VER >= 1310 &&                                   \
+    (defined(_M_IX86) || defined(_M_X64)) && !defined(__c2__)
+extern "C" void _mm_pause();
+#define IMMER_SMT_PAUSE _mm_pause()
+#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+#define IMMER_SMT_PAUSE __asm__ __volatile__("rep; nop" : : : "memory")
+#endif
+
+namespace immer {
+
+// This is an atomic spinlock similar to the one used by boost to provide
+// "atomic" shared_ptr operations.  It also does not differ much from the one
+// from libc++ or libstdc++...
+struct spinlock
+{
+    std::atomic_flag v_{};
+
+    bool try_lock() { return !v_.test_and_set(std::memory_order_acquire); }
+
+    void lock()
+    {
+        for (auto k = 0u; !try_lock(); ++k) {
+            if (k < 4)
+                continue;
+#ifdef IMMER_SMT_PAUSE
+            else if (k < 16)
+                IMMER_SMT_PAUSE;
+#endif
+            else
+                std::this_thread::yield();
+        }
+    }
+
+    void unlock() { v_.clear(std::memory_order_release); }
+
+    struct scoped_lock
+    {
+        scoped_lock(const scoped_lock&) = delete;
+        scoped_lock& operator=(const scoped_lock&) = delete;
+
+        explicit scoped_lock(spinlock& sp)
+            : sp_{sp}
+        {
+            sp.lock();
+        }
+
+        ~scoped_lock() { sp_.unlock(); }
+
+    private:
+        spinlock& sp_;
+    };
+};
+
+/*!
+ * A reference counting policy implemented using an *atomic* `int`
+ * count.  It is **thread-safe**.
+ */
+struct refcount_policy
+{
+    using spinlock_type = spinlock;
+
+    mutable std::atomic<int> refcount;
+
+    refcount_policy()
+        : refcount{1} {};
+    refcount_policy(disowned)
+        : refcount{0}
+    {}
+
+    void inc() { refcount.fetch_add(1, std::memory_order_relaxed); }
+
+    bool dec() { return 1 == refcount.fetch_sub(1, std::memory_order_acq_rel); }
+
+    void dec_unsafe()
+    {
+        assert(refcount.load() > 1);
+        refcount.fetch_sub(1, std::memory_order_relaxed);
+    }
+
+    bool unique() { return refcount == 1; }
+};
+
+} // namespace immer
diff --git a/immer/refcount/unsafe_refcount_policy.hpp b/immer/refcount/unsafe_refcount_policy.hpp
new file mode 100644
index 000000000000..bcf24578de27
--- /dev/null
+++ b/immer/refcount/unsafe_refcount_policy.hpp
@@ -0,0 +1,40 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/refcount/no_refcount_policy.hpp>
+
+#include <atomic>
+#include <utility>
+
+namespace immer {
+
+/*!
+ * A reference counting policy implemented using a raw `int` count.
+ * It is **not thread-safe**.
+ */
+struct unsafe_refcount_policy
+{
+    using spinlock_type = no_spinlock;
+
+    mutable int refcount;
+
+    unsafe_refcount_policy()
+        : refcount{1} {};
+    unsafe_refcount_policy(disowned)
+        : refcount{0}
+    {}
+
+    void inc() { ++refcount; }
+    bool dec() { return --refcount == 0; }
+    void dec_unsafe() { --refcount; }
+    bool unique() { return refcount == 1; }
+};
+
+} // namespace immer
diff --git a/immer/set.hpp b/immer/set.hpp
new file mode 100644
index 000000000000..a152ac955334
--- /dev/null
+++ b/immer/set.hpp
@@ -0,0 +1,198 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/detail/hamts/champ.hpp>
+#include <immer/detail/hamts/champ_iterator.hpp>
+#include <immer/memory_policy.hpp>
+
+#include <functional>
+
+namespace immer {
+
+template <typename T,
+          typename Hash,
+          typename Equal,
+          typename MemoryPolicy,
+          detail::hamts::bits_t B>
+class set_transient;
+
+/*!
+ * Immutable set representing an unordered bag of values.
+ *
+ * @tparam T    The type of the values to be stored in the container.
+ * @tparam Hash The type of a function object capable of hashing
+ *              values of type `T`.
+ * @tparam Equal The type of a function object capable of comparing
+ *              values of type `T`.
+ * @tparam MemoryPolicy Memory management policy. See @ref
+ *              memory_policy.
+ *
+ * @rst
+ *
+ * This container provides a good trade-off between cache locality,
+ * membership checks, update performance and structural sharing.  It
+ * does so by storing the data in contiguous chunks of :math:`2^{B}`
+ * elements.  When storing big objects, the size of these contiguous
+ * chunks can become too big, damaging performance.  If this is
+ * measured to be problematic for a specific use-case, it can be
+ * solved by using a `immer::box` to wrap the type `T`.
+ *
+ * **Example**
+ *   .. literalinclude:: ../example/set/intro.cpp
+ *      :language: c++
+ *      :start-after: intro/start
+ *      :end-before:  intro/end
+ *
+ * @endrst
+ *
+ */
+template <typename T,
+          typename Hash           = std::hash<T>,
+          typename Equal          = std::equal_to<T>,
+          typename MemoryPolicy   = default_memory_policy,
+          detail::hamts::bits_t B = default_bits>
+class set
+{
+    using impl_t = detail::hamts::champ<T, Hash, Equal, MemoryPolicy, B>;
+
+    struct project_value_ptr
+    {
+        const T* operator()(const T& v) const noexcept { return &v; }
+    };
+
+public:
+    using key_type        = T;
+    using value_type      = T;
+    using size_type       = detail::hamts::size_t;
+    using diference_type  = std::ptrdiff_t;
+    using hasher          = Hash;
+    using key_equal       = Equal;
+    using reference       = const T&;
+    using const_reference = const T&;
+
+    using iterator =
+        detail::hamts::champ_iterator<T, Hash, Equal, MemoryPolicy, B>;
+    using const_iterator = iterator;
+
+    using transient_type = set_transient<T, Hash, Equal, MemoryPolicy, B>;
+
+    /*!
+     * Default constructor.  It creates a set of `size() == 0`.  It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    set() = default;
+
+    /*!
+     * Returns an iterator pointing at the first element of the
+     * collection. It does not allocate memory and its complexity is
+     * @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD iterator begin() const { return {impl_}; }
+
+    /*!
+     * Returns an iterator pointing just after the last element of the
+     * collection. It does not allocate and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD iterator end() const
+    {
+        return {impl_, typename iterator::end_t{}};
+    }
+
+    /*!
+     * Returns the number of elements in the container.  It does
+     * not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD size_type size() const { return impl_.size; }
+
+    /*!
+     * Returns `true` if there are no elements in the container.  It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD bool empty() const { return impl_.size == 0; }
+
+    /*!
+     * Returns `1` when `value` is contained in the set or `0`
+     * otherwise. It won't allocate memory and its complexity is
+     * *effectively* @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD size_type count(const T& value) const
+    {
+        return impl_.template get<detail::constantly<size_type, 1>,
+                                  detail::constantly<size_type, 0>>(value);
+    }
+
+    /*!
+     * Returns a pointer to the value if `value` is contained in the
+     * set, or nullptr otherwise.
+     * It does not allocate memory and its complexity is *effectively*
+     * @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD const T* find(const T& value) const
+    {
+        return impl_.template get<project_value_ptr,
+                                  detail::constantly<const T*, nullptr>>(value);
+    }
+
+    /*!
+     * Returns whether the sets are equal.
+     */
+    IMMER_NODISCARD bool operator==(const set& other) const
+    {
+        return impl_.equals(other.impl_);
+    }
+    IMMER_NODISCARD bool operator!=(const set& other) const
+    {
+        return !(*this == other);
+    }
+
+    /*!
+     * Returns a set containing `value`.  If the `value` is already in
+     * the set, it returns the same set.  It may allocate memory and
+     * its complexity is *effectively* @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD set insert(T value) const
+    {
+        return impl_.add(std::move(value));
+    }
+
+    /*!
+     * Returns a set without `value`.  If the `value` is not in the
+     * set it returns the same set.  It may allocate memory and its
+     * complexity is *effectively* @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD set erase(const T& value) const { return impl_.sub(value); }
+
+    /*!
+     * Returns an @a transient form of this container, a
+     * `immer::set_transient`.
+     */
+    IMMER_NODISCARD transient_type transient() const&
+    {
+        return transient_type{impl_};
+    }
+    IMMER_NODISCARD transient_type transient() &&
+    {
+        return transient_type{std::move(impl_)};
+    }
+
+    // Semi-private
+    const impl_t& impl() const { return impl_; }
+
+private:
+    friend transient_type;
+
+    set(impl_t impl)
+        : impl_(std::move(impl))
+    {}
+
+    impl_t impl_ = impl_t::empty();
+};
+
+} // namespace immer
diff --git a/immer/set_transient.hpp b/immer/set_transient.hpp
new file mode 100644
index 000000000000..cd0f652b95f6
--- /dev/null
+++ b/immer/set_transient.hpp
@@ -0,0 +1,40 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/detail/hamts/champ.hpp>
+#include <immer/memory_policy.hpp>
+
+#include <functional>
+
+namespace immer {
+
+/*!
+ * @rst
+ *
+ * .. admonition:: Become a sponsor!
+ *    :class: danger
+ *
+ *    This component is planned but it has **not been implemented yet**.
+ *
+ *    Transiens can critically improve the performance of applications
+ *    intensively using ``set`` and ``map``. If you are working for an
+ *    organization using the library in a commercial project, please consider
+ *    **sponsoring this work**: juanpe@sinusoid.al
+ *
+ * @endrst
+ */
+template <typename T,
+          typename Hash           = std::hash<T>,
+          typename Equal          = std::equal_to<T>,
+          typename MemoryPolicy   = default_memory_policy,
+          detail::hamts::bits_t B = default_bits>
+class set_transient;
+
+} // namespace immer
diff --git a/immer/transience/gc_transience_policy.hpp b/immer/transience/gc_transience_policy.hpp
new file mode 100644
index 000000000000..d3c30efa1cf7
--- /dev/null
+++ b/immer/transience/gc_transience_policy.hpp
@@ -0,0 +1,110 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/heap/tags.hpp>
+
+#include <atomic>
+#include <memory>
+#include <utility>
+
+namespace immer {
+
+/*!
+ * Provides transience ownership tracking when a *tracing garbage
+ * collector* is used instead of reference counting.
+ *
+ * @rst
+ *
+ * .. warning:: Using this policy without an allocation scheme that
+ *    includes automatic tracing garbage collection may cause memory
+ *    leaks.
+ *
+ * @endrst
+ */
+struct gc_transience_policy
+{
+    template <typename HeapPolicy>
+    struct apply
+    {
+        struct type
+        {
+            using heap_ = typename HeapPolicy::type;
+
+            struct edit
+            {
+                void* v;
+                edit() = delete;
+                bool operator==(edit x) const { return v == x.v; }
+                bool operator!=(edit x) const { return v != x.v; }
+            };
+
+            struct owner
+            {
+                void* make_token_()
+                {
+                    return heap_::allocate(1, norefs_tag{});
+                };
+
+                mutable std::atomic<void*> token_;
+
+                operator edit() { return {token_}; }
+
+                owner()
+                    : token_{make_token_()}
+                {}
+                owner(const owner& o)
+                    : token_{make_token_()}
+                {
+                    o.token_ = make_token_();
+                }
+                owner(owner&& o) noexcept
+                    : token_{o.token_.load()}
+                {}
+                owner& operator=(const owner& o)
+                {
+                    o.token_ = make_token_();
+                    token_   = make_token_();
+                    return *this;
+                }
+                owner& operator=(owner&& o) noexcept
+                {
+                    token_ = o.token_.load();
+                    return *this;
+                }
+            };
+
+            struct ownee
+            {
+                edit token_{nullptr};
+
+                ownee& operator=(edit e)
+                {
+                    assert(e != noone);
+                    // This would be a nice safety plug but it sadly
+                    // does not hold during transient concatenation.
+                    // assert(token_ == e || token_ == edit{nullptr});
+                    token_ = e;
+                    return *this;
+                }
+
+                bool can_mutate(edit t) const { return token_ == t; }
+                bool owned() const { return token_ != edit{nullptr}; }
+            };
+
+            static owner noone;
+        };
+    };
+};
+
+template <typename HP>
+typename gc_transience_policy::apply<HP>::type::owner
+    gc_transience_policy::apply<HP>::type::noone = {};
+
+} // namespace immer
diff --git a/immer/transience/no_transience_policy.hpp b/immer/transience/no_transience_policy.hpp
new file mode 100644
index 000000000000..2f87df7a39b0
--- /dev/null
+++ b/immer/transience/no_transience_policy.hpp
@@ -0,0 +1,48 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+namespace immer {
+
+/*!
+ * Disables any special *transience* tracking.  To be used when
+ * *reference counting* is available instead.
+ */
+struct no_transience_policy
+{
+    template <typename>
+    struct apply
+    {
+        struct type
+        {
+            struct edit
+            {};
+
+            struct owner
+            {
+                operator edit() const { return {}; }
+            };
+
+            struct ownee
+            {
+                ownee& operator=(edit) { return *this; };
+                bool can_mutate(edit) const { return false; }
+                bool owned() const { return false; }
+            };
+
+            static owner noone;
+        };
+    };
+};
+
+template <typename HP>
+typename no_transience_policy::apply<HP>::type::owner
+    no_transience_policy::apply<HP>::type::noone = {};
+
+} // namespace immer
diff --git a/immer/vector.hpp b/immer/vector.hpp
new file mode 100644
index 000000000000..4f1a148ccd00
--- /dev/null
+++ b/immer/vector.hpp
@@ -0,0 +1,412 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/detail/rbts/rbtree.hpp>
+#include <immer/detail/rbts/rbtree_iterator.hpp>
+#include <immer/memory_policy.hpp>
+
+#if IMMER_DEBUG_PRINT
+#include <immer/flex_vector.hpp>
+#endif
+
+namespace immer {
+
+template <typename T,
+          typename MemoryPolicy,
+          detail::rbts::bits_t B,
+          detail::rbts::bits_t BL>
+class flex_vector;
+
+template <typename T,
+          typename MemoryPolicy,
+          detail::rbts::bits_t B,
+          detail::rbts::bits_t BL>
+class vector_transient;
+
+/*!
+ * Immutable sequential container supporting both random access and
+ * structural sharing.
+ *
+ * @tparam T The type of the values to be stored in the container.
+ * @tparam MemoryPolicy Memory management policy. See @ref
+ *         memory_policy.
+ *
+ * @rst
+ *
+ * This cotainer provides a good trade-off between cache locality,
+ * random access, update performance and structural sharing.  It does
+ * so by storing the data in contiguous chunks of :math:`2^{BL}`
+ * elements.  By default, when ``sizeof(T) == sizeof(void*)`` then
+ * :math:`B=BL=5`, such that data would be stored in contiguous
+ * chunks of :math:`32` elements.
+ *
+ * You may learn more about the meaning and implications of ``B`` and
+ * ``BL`` parameters in the :doc:`implementation` section.
+ *
+ * .. note:: In several methods we say that their complexity is
+ *    *effectively* :math:`O(...)`. Do not confuse this with the word
+ *    *amortized*, which has a very different meaning.  In this
+ *    context, *effective* means that while the
+ *    mathematically rigurous
+ *    complexity might be higher, for all practical matters the
+ *    provided complexity is more useful to think about the actual
+ *    cost of the operation.
+ *
+ * **Example**
+ *   .. literalinclude:: ../example/vector/intro.cpp
+ *      :language: c++
+ *      :start-after: intro/start
+ *      :end-before:  intro/end
+ *
+ * @endrst
+ */
+template <typename T,
+          typename MemoryPolicy  = default_memory_policy,
+          detail::rbts::bits_t B = default_bits,
+          detail::rbts::bits_t BL =
+              detail::rbts::derive_bits_leaf<T, MemoryPolicy, B>>
+class vector
+{
+    using impl_t = detail::rbts::rbtree<T, MemoryPolicy, B, BL>;
+    using flex_t = flex_vector<T, MemoryPolicy, B, BL>;
+
+    using move_t =
+        std::integral_constant<bool, MemoryPolicy::use_transient_rvalues>;
+
+public:
+    static constexpr auto bits      = B;
+    static constexpr auto bits_leaf = BL;
+    using memory_policy             = MemoryPolicy;
+
+    using value_type      = T;
+    using reference       = const T&;
+    using size_type       = detail::rbts::size_t;
+    using difference_type = std::ptrdiff_t;
+    using const_reference = const T&;
+
+    using iterator = detail::rbts::rbtree_iterator<T, MemoryPolicy, B, BL>;
+    using const_iterator   = iterator;
+    using reverse_iterator = std::reverse_iterator<iterator>;
+
+    using transient_type = vector_transient<T, MemoryPolicy, B, BL>;
+
+    /*!
+     * Default constructor.  It creates a vector of `size() == 0`.  It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    vector() = default;
+
+    /*!
+     * Constructs a vector containing the elements in `values`.
+     */
+    vector(std::initializer_list<T> values)
+        : impl_{impl_t::from_initializer_list(values)}
+    {}
+
+    /*!
+     * Constructs a vector containing the elements in the range
+     * defined by the input iterator `first` and range sentinel `last`.
+     */
+    template <typename Iter,
+              typename Sent,
+              std::enable_if_t<detail::compatible_sentinel_v<Iter, Sent>,
+                               bool> = true>
+    vector(Iter first, Sent last)
+        : impl_{impl_t::from_range(first, last)}
+    {}
+
+    /*!
+     * Constructs a vector containing the element `val` repeated `n`
+     * times.
+     */
+    vector(size_type n, T v = {})
+        : impl_{impl_t::from_fill(n, v)}
+    {}
+
+    /*!
+     * Returns an iterator pointing at the first element of the
+     * collection. It does not allocate memory and its complexity is
+     * @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD iterator begin() const { return {impl_}; }
+
+    /*!
+     * Returns an iterator pointing just after the last element of the
+     * collection. It does not allocate and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD iterator end() const
+    {
+        return {impl_, typename iterator::end_t{}};
+    }
+
+    /*!
+     * Returns an iterator that traverses the collection backwards,
+     * pointing at the first element of the reversed collection. It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD reverse_iterator rbegin() const
+    {
+        return reverse_iterator{end()};
+    }
+
+    /*!
+     * Returns an iterator that traverses the collection backwards,
+     * pointing after the last element of the reversed collection. It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD reverse_iterator rend() const
+    {
+        return reverse_iterator{begin()};
+    }
+
+    /*!
+     * Returns the number of elements in the container.  It does
+     * not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD size_type size() const { return impl_.size; }
+
+    /*!
+     * Returns `true` if there are no elements in the container.  It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD bool empty() const { return impl_.size == 0; }
+
+    /*!
+     * Access the last element.
+     */
+    IMMER_NODISCARD const T& back() const { return impl_.back(); }
+
+    /*!
+     * Access the first element.
+     */
+    IMMER_NODISCARD const T& front() const { return impl_.front(); }
+
+    /*!
+     * Returns a `const` reference to the element at position `index`.
+     * It is undefined when @f$ 0 index \geq size() @f$.  It does not
+     * allocate memory and its complexity is *effectively* @f$ O(1)
+     * @f$.
+     */
+    IMMER_NODISCARD reference operator[](size_type index) const
+    {
+        return impl_.get(index);
+    }
+
+    /*!
+     * Returns a `const` reference to the element at position
+     * `index`. It throws an `std::out_of_range` exception when @f$
+     * index \geq size() @f$.  It does not allocate memory and its
+     * complexity is *effectively* @f$ O(1) @f$.
+     */
+    reference at(size_type index) const { return impl_.get_check(index); }
+
+    /*!
+     * Returns whether the vectors are equal.
+     */
+    IMMER_NODISCARD bool operator==(const vector& other) const
+    {
+        return impl_.equals(other.impl_);
+    }
+    IMMER_NODISCARD bool operator!=(const vector& other) const
+    {
+        return !(*this == other);
+    }
+
+    /*!
+     * Returns a vector with `value` inserted at the end.  It may
+     * allocate memory and its complexity is *effectively* @f$ O(1) @f$.
+     *
+     * @rst
+     *
+     * **Example**
+     *   .. literalinclude:: ../example/vector/vector.cpp
+     *      :language: c++
+     *      :dedent: 8
+     *      :start-after: push-back/start
+     *      :end-before:  push-back/end
+     *
+     * @endrst
+     */
+    IMMER_NODISCARD vector push_back(value_type value) const&
+    {
+        return impl_.push_back(std::move(value));
+    }
+
+    IMMER_NODISCARD decltype(auto) push_back(value_type value) &&
+    {
+        return push_back_move(move_t{}, std::move(value));
+    }
+
+    /*!
+     * Returns a vector containing value `value` at position `idx`.
+     * Undefined for `index >= size()`.
+     * It may allocate memory and its complexity is
+     * *effectively* @f$ O(1) @f$.
+     *
+     * @rst
+     *
+     * **Example**
+     *   .. literalinclude:: ../example/vector/vector.cpp
+     *      :language: c++
+     *      :dedent: 8
+     *      :start-after: set/start
+     *      :end-before:  set/end
+     *
+     * @endrst
+     */
+    IMMER_NODISCARD vector set(size_type index, value_type value) const&
+    {
+        return impl_.assoc(index, std::move(value));
+    }
+
+    IMMER_NODISCARD decltype(auto) set(size_type index, value_type value) &&
+    {
+        return set_move(move_t{}, index, std::move(value));
+    }
+
+    /*!
+     * Returns a vector containing the result of the expression
+     * `fn((*this)[idx])` at position `idx`.
+     * Undefined for `0 >= size()`.
+     * It may allocate memory and its complexity is
+     * *effectively* @f$ O(1) @f$.
+     *
+     * @rst
+     *
+     * **Example**
+     *   .. literalinclude:: ../example/vector/vector.cpp
+     *      :language: c++
+     *      :dedent: 8
+     *      :start-after: update/start
+     *      :end-before:  update/end
+     *
+     * @endrst
+     */
+    template <typename FnT>
+    IMMER_NODISCARD vector update(size_type index, FnT&& fn) const&
+    {
+        return impl_.update(index, std::forward<FnT>(fn));
+    }
+
+    template <typename FnT>
+    IMMER_NODISCARD decltype(auto) update(size_type index, FnT&& fn) &&
+    {
+        return update_move(move_t{}, index, std::forward<FnT>(fn));
+    }
+
+    /*!
+     * Returns a vector containing only the first `min(elems, size())`
+     * elements. It may allocate memory and its complexity is
+     * *effectively* @f$ O(1) @f$.
+     *
+     * @rst
+     *
+     * **Example**
+     *   .. literalinclude:: ../example/vector/vector.cpp
+     *      :language: c++
+     *      :dedent: 8
+     *      :start-after: take/start
+     *      :end-before:  take/end
+     *
+     * @endrst
+     */
+    IMMER_NODISCARD vector take(size_type elems) const&
+    {
+        return impl_.take(elems);
+    }
+
+    IMMER_NODISCARD decltype(auto) take(size_type elems) &&
+    {
+        return take_move(move_t{}, elems);
+    }
+
+    /*!
+     * Returns an @a transient form of this container, an
+     * `immer::vector_transient`.
+     */
+    IMMER_NODISCARD transient_type transient() const&
+    {
+        return transient_type{impl_};
+    }
+    IMMER_NODISCARD transient_type transient() &&
+    {
+        return transient_type{std::move(impl_)};
+    }
+
+    // Semi-private
+    const impl_t& impl() const { return impl_; }
+
+#if IMMER_DEBUG_PRINT
+    void debug_print(std::ostream& out = std::cerr) const
+    {
+        flex_t{*this}.debug_print(out);
+    }
+#endif
+
+private:
+    friend flex_t;
+    friend transient_type;
+
+    vector(impl_t impl)
+        : impl_(std::move(impl))
+    {
+#if IMMER_DEBUG_PRINT
+        // force the compiler to generate debug_print, so we can call
+        // it from a debugger
+        [](volatile auto) {}(&vector::debug_print);
+#endif
+    }
+
+    vector&& push_back_move(std::true_type, value_type value)
+    {
+        impl_.push_back_mut({}, std::move(value));
+        return std::move(*this);
+    }
+    vector push_back_move(std::false_type, value_type value)
+    {
+        return impl_.push_back(std::move(value));
+    }
+
+    vector&& set_move(std::true_type, size_type index, value_type value)
+    {
+        impl_.assoc_mut({}, index, std::move(value));
+        return std::move(*this);
+    }
+    vector set_move(std::false_type, size_type index, value_type value)
+    {
+        return impl_.assoc(index, std::move(value));
+    }
+
+    template <typename Fn>
+    vector&& update_move(std::true_type, size_type index, Fn&& fn)
+    {
+        impl_.update_mut({}, index, std::forward<Fn>(fn));
+        return std::move(*this);
+    }
+    template <typename Fn>
+    vector update_move(std::false_type, size_type index, Fn&& fn)
+    {
+        return impl_.update(index, std::forward<Fn>(fn));
+    }
+
+    vector&& take_move(std::true_type, size_type elems)
+    {
+        impl_.take_mut({}, elems);
+        return std::move(*this);
+    }
+    vector take_move(std::false_type, size_type elems)
+    {
+        return impl_.take(elems);
+    }
+
+    impl_t impl_ = impl_t::empty();
+};
+
+} // namespace immer
diff --git a/immer/vector_transient.hpp b/immer/vector_transient.hpp
new file mode 100644
index 000000000000..4d648cab07db
--- /dev/null
+++ b/immer/vector_transient.hpp
@@ -0,0 +1,203 @@
+//
+// immer: immutable data structures for C++
+// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
+//
+// This software is distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
+//
+
+#pragma once
+
+#include <immer/detail/rbts/rbtree.hpp>
+#include <immer/detail/rbts/rbtree_iterator.hpp>
+#include <immer/memory_policy.hpp>
+
+namespace immer {
+
+template <typename T,
+          typename MemoryPolicy,
+          detail::rbts::bits_t B,
+          detail::rbts::bits_t BL>
+class vector;
+
+template <typename T,
+          typename MemoryPolicy,
+          detail::rbts::bits_t B,
+          detail::rbts::bits_t BL>
+class flex_vector_transient;
+
+/*!
+ * Mutable version of `immer::vector`.
+ *
+ * @rst
+ *
+ * Refer to :doc:`transients` to learn more about when and how to use
+ * the mutable versions of immutable containers.
+ *
+ * @endrst
+ */
+template <typename T,
+          typename MemoryPolicy  = default_memory_policy,
+          detail::rbts::bits_t B = default_bits,
+          detail::rbts::bits_t BL =
+              detail::rbts::derive_bits_leaf<T, MemoryPolicy, B>>
+class vector_transient : MemoryPolicy::transience_t::owner
+{
+    using impl_t  = detail::rbts::rbtree<T, MemoryPolicy, B, BL>;
+    using flex_t  = flex_vector_transient<T, MemoryPolicy, B, BL>;
+    using owner_t = typename MemoryPolicy::transience_t::owner;
+
+public:
+    static constexpr auto bits      = B;
+    static constexpr auto bits_leaf = BL;
+    using memory_policy             = MemoryPolicy;
+
+    using value_type      = T;
+    using reference       = const T&;
+    using size_type       = detail::rbts::size_t;
+    using difference_type = std::ptrdiff_t;
+    using const_reference = const T&;
+
+    using iterator = detail::rbts::rbtree_iterator<T, MemoryPolicy, B, BL>;
+    using const_iterator   = iterator;
+    using reverse_iterator = std::reverse_iterator<iterator>;
+
+    using persistent_type = vector<T, MemoryPolicy, B, BL>;
+
+    /*!
+     * Default constructor.  It creates a mutable vector of `size() ==
+     * 0`.  It does not allocate memory and its complexity is
+     * @f$ O(1) @f$.
+     */
+    vector_transient() = default;
+
+    /*!
+     * Returns an iterator pointing at the first element of the
+     * collection. It does not allocate memory and its complexity is
+     * @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD iterator begin() const { return {impl_}; }
+
+    /*!
+     * Returns an iterator pointing just after the last element of the
+     * collection. It does not allocate and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD iterator end() const
+    {
+        return {impl_, typename iterator::end_t{}};
+    }
+
+    /*!
+     * Returns an iterator that traverses the collection backwards,
+     * pointing at the first element of the reversed collection. It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD reverse_iterator rbegin() const
+    {
+        return reverse_iterator{end()};
+    }
+
+    /*!
+     * Returns an iterator that traverses the collection backwards,
+     * pointing after the last element of the reversed collection. It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD reverse_iterator rend() const
+    {
+        return reverse_iterator{begin()};
+    }
+
+    /*!
+     * Returns the number of elements in the container.  It does
+     * not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD size_type size() const { return impl_.size; }
+
+    /*!
+     * Returns `true` if there are no elements in the container.  It
+     * does not allocate memory and its complexity is @f$ O(1) @f$.
+     */
+    IMMER_NODISCARD bool empty() const { return impl_.size == 0; }
+
+    /*!
+     * Returns a `const` reference to the element at position `index`.
+     * It is undefined when @f$ index \geq size() @f$.  It does not
+     * allocate memory and its complexity is *effectively* @f$ O(1)
+     * @f$.
+     */
+    reference operator[](size_type index) const { return impl_.get(index); }
+
+    /*!
+     * Returns a `const` reference to the element at position
+     * `index`. It throws an `std::out_of_range` exception when @f$
+     * index \geq size() @f$.  It does not allocate memory and its
+     * complexity is *effectively* @f$ O(1) @f$.
+     */
+    reference at(size_type index) const { return impl_.get_check(index); }
+
+    /*!
+     * Inserts `value` at the end.  It may allocate memory and its
+     * complexity is *effectively* @f$ O(1) @f$.
+     */
+    void push_back(value_type value)
+    {
+        impl_.push_back_mut(*this, std::move(value));
+    }
+
+    /*!
+     * Sets to the value `value` at position `idx`.
+     * Undefined for `index >= size()`.
+     * It may allocate memory and its complexity is
+     * *effectively* @f$ O(1) @f$.
+     */
+    void set(size_type index, value_type value)
+    {
+        impl_.assoc_mut(*this, index, std::move(value));
+    }
+
+    /*!
+     * Updates the vector to contain the result of the expression
+     * `fn((*this)[idx])` at position `idx`.
+     * Undefined for `index >= size()`.
+     * It may allocate memory and its complexity is
+     * *effectively* @f$ O(1) @f$.
+     */
+    template <typename FnT>
+    void update(size_type index, FnT&& fn)
+    {
+        impl_.update_mut(*this, index, std::forward<FnT>(fn));
+    }
+
+    /*!
+     * Resizes the vector to only contain the first `min(elems, size())`
+     * elements. It may allocate memory and its complexity is
+     * *effectively* @f$ O(1) @f$.
+     */
+    void take(size_type elems) { impl_.take_mut(*this, elems); }
+
+    /*!
+     * Returns an @a immutable form of this container, an
+     * `immer::vector`.
+     */
+    IMMER_NODISCARD persistent_type persistent() &
+    {
+        this->owner_t::operator=(owner_t{}); // reset ownership token: later mutations of *this must not alias the returned persistent value
+        return persistent_type{impl_};
+    }
+    IMMER_NODISCARD persistent_type persistent() && // rvalue overload: *this is expiring, so the tree can be stolen without resetting the owner
+    {
+        return persistent_type{std::move(impl_)};
+    }
+
+private:
+    friend flex_t;
+    friend persistent_type;
+
+    vector_transient(impl_t impl)
+        : impl_(std::move(impl))
+    {}
+
+    impl_t impl_ = impl_t::empty(); // underlying radix-balanced tree; default-constructed transient shares the empty singleton
+};
+
+} // namespace immer