third_party/immer/immer/refcount/refcount_policy.hpp
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//

#pragma once

#include <immer/refcount/no_refcount_policy.hpp>

#include <atomic>
#include <cassert>
#include <thread>
#include <utility>

// This has been shamelessly copied from boost...
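// IMMER_SMT_PAUSE emits a CPU pause hint (x86 "pause", encoded as `rep; nop`)
// that relaxes the processor inside spin-wait loops; on compilers and
// architectures where it is not defined, lock() falls back to plain spinning
// and yielding.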
#if defined(_MSC_VER) && _MSC_VER >= 1310 &&                                   \
    (defined(_M_IX86) || defined(_M_X64)) && !defined(__c2__)
extern "C" void _mm_pause();
#define IMMER_SMT_PAUSE _mm_pause()
#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define IMMER_SMT_PAUSE __asm__ __volatile__("rep; nop" : : : "memory")
#endif

namespace immer {

// This is an atomic spinlock similar to the one used by boost to provide
// "atomic" shared_ptr operations.  It does not differ much from the ones in
// libc++ or libstdc++...
struct spinlock
{
    std::atomic_flag v_{};

    bool try_lock() { return !v_.test_and_set(std::memory_order_acquire); }

    void lock()
    {
        // Staged backoff: spin a few iterations, then emit CPU pause hints
        // where available, and finally yield to the scheduler.
        for (auto k = 0u; !try_lock(); ++k) {
            if (k < 4)
                continue;
#ifdef IMMER_SMT_PAUSE
            else if (k < 16)
                IMMER_SMT_PAUSE;
#endif
            else
                std::this_thread::yield();
        }
    }

    void unlock() { v_.clear(std::memory_order_release); }

    struct scoped_lock
    {
        scoped_lock(const scoped_lock&) = delete;
        scoped_lock& operator=(const scoped_lock&) = delete;

        explicit scoped_lock(spinlock& sp)
            : sp_{sp}
        {
            sp.lock();
        }

        ~scoped_lock() { sp_.unlock(); }

    private:
        spinlock& sp_;
    };
};
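
// A minimal usage sketch (illustrative only; update_shared_state is a
// hypothetical caller): the RAII guard acquires the lock on construction
// and releases it when the scope exits, so an early return cannot leak
// the lock.
//
//     void update_shared_state(immer::spinlock& lock)
//     {
//         immer::spinlock::scoped_lock guard{lock}; // lock() in constructor
//         // ... critical section ...
//     }                                             // unlock() in destructor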

/*!
 * A reference counting policy implemented using an *atomic* `int`
 * count.  It is **thread-safe**.
 */
struct refcount_policy
{
    using spinlock_type = spinlock;

    mutable std::atomic<int> refcount;

    refcount_policy()
        : refcount{1}
    {}
    refcount_policy(disowned)
        : refcount{0}
    {}

    // Taking an additional reference needs no synchronization, so relaxed
    // ordering is enough.
    void inc() { refcount.fetch_add(1, std::memory_order_relaxed); }

    // Returns true when this was the last reference; acquire-release
    // ordering makes all prior writes visible to the thread that will
    // destroy the object.
    bool dec() { return 1 == refcount.fetch_sub(1, std::memory_order_acq_rel); }

    // Decrement when the caller knows it does not hold the last reference,
    // so no synchronization with a potential destructor is needed.
    void dec_unsafe()
    {
        assert(refcount.load() > 1);
        refcount.fetch_sub(1, std::memory_order_relaxed);
    }

    // Whether this is the only reference to the object.
    bool unique() { return refcount == 1; }
};
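
// A minimal usage sketch (illustrative only; node, retain, and release are
// hypothetical): an intrusively counted object embeds the policy, and the
// caller that sees dec() return true performs the deallocation.
//
//     struct node : immer::refcount_policy
//     {
//         int payload;
//     };
//
//     void retain(node* p) { p->inc(); }
//     void release(node* p)
//     {
//         if (p->dec()) // true: count went from 1 to 0
//             delete p;
//     }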

} // namespace immer