path: root/third_party/immer/immer/heap/free_list_heap.hpp
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//

#pragma once

#include <immer/heap/free_list_node.hpp>
#include <immer/heap/with_data.hpp>

#include <atomic>
#include <cassert>

namespace immer {

/*!
 * Adaptor that does not release memory back to the parent heap but
 * instead keeps it in a thread-safe global free list. Must be
 * preceded by a `with_data<free_list_node, ...>` heap adaptor.
 *
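 * A minimal composition sketch (the names, the `64`/`1024` parameters
 * and the `malloc_heap` base are illustrative choices, not mandated by
 * this header):
 *
 *     #include <immer/heap/free_list_heap.hpp>
 *     #include <immer/heap/malloc_heap.hpp>
 *     #include <immer/heap/with_data.hpp>
 *
 *     // Cache blocks of up to 64 bytes, keeping at most 1024 of them.
 *     using example_heap = immer::with_data<
 *         immer::free_list_node,
 *         immer::free_list_heap<64, 1024, immer::malloc_heap>>;
 *
 *     void example()
 *     {
 *         void* p = example_heap::allocate(64);
 *         example_heap::deallocate(64, p);
 *     }
 *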
 * @tparam Size Maximum size of the objects to be allocated.
 * @tparam Limit Maximum number of nodes to keep in the free list;
 *         further deallocations go back to the parent heap.
 * @tparam Base Type of the parent heap.
 */
template <std::size_t Size, std::size_t Limit, typename Base>
struct free_list_heap : Base
{
    using base_t = Base;

    template <typename... Tags>
    static void* allocate(std::size_t size, Tags...)
    {
        assert(size <= sizeof(free_list_node) + Size);
        assert(size >= sizeof(free_list_node));

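        // Try to pop a node from the global free list (a lock-free stack
        // pop); if the list is empty, fall back to the parent heap.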
        free_list_node* n;
        do {
            n = head().data;
            if (!n) {
                auto p = base_t::allocate(Size + sizeof(free_list_node));
                return static_cast<free_list_node*>(p);
            }
        } while (!head().data.compare_exchange_weak(n, n->next));
        head().count.fetch_sub(1u, std::memory_order_relaxed);
        return n;
    }

    template <typename... Tags>
    static void deallocate(std::size_t size, void* data, Tags...)
    {
        assert(size <= sizeof(free_list_node) + Size);
        assert(size >= sizeof(free_list_node));

        // We use relaxed ordering here because it is fine to temporarily
        // have a few more or fewer buffers in the free list than `Limit`.
        if (head().count.load(std::memory_order_relaxed) >= Limit) {
            base_t::deallocate(Size + sizeof(free_list_node), data);
        } else {
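            // Push the node back onto the global free list (a lock-free
            // stack push).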
            auto n = static_cast<free_list_node*>(data);
            do {
                n->next = head().data;
            } while (!head().data.compare_exchange_weak(n->next, n));
            head().count.fetch_add(1u, std::memory_order_relaxed);
        }
    }

private:
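    // Shared free-list state: `data` is the head of an intrusive stack of
    // cached nodes and `count` is an approximate node count used to
    // enforce `Limit`.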
    struct head_t
    {
        std::atomic<free_list_node*> data;
        std::atomic<std::size_t> count;
    };

    static head_t& head()
    {
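        // One free list shared by all threads per (Size, Limit, Base)
        // instantiation; the function-local static is initialized lazily
        // and thread-safely.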
        static head_t head_{{nullptr}, {0}};
        return head_;
    }
};

} // namespace immer