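/* Implementation of the ThreadPool declared in thread-pool.hh: work items
   added with enqueue() are executed both by lazily started worker threads
   and by the thread that calls process(); process() blocks until the queue
   is drained and rethrows the first exception thrown by a work item. */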
#include "thread-pool.hh"
#include "affinity.hh"
#include "glog/logging.h"
namespace nix {
ThreadPool::ThreadPool(size_t _maxThreads) : maxThreads(_maxThreads) {
  restoreAffinity();  // FIXME
  if (!maxThreads) {
    maxThreads = std::thread::hardware_concurrency();
    if (!maxThreads) {
      maxThreads = 1;
    }
  }
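  /* The caller of process() also executes work items, so the pool itself
     spawns at most maxThreads - 1 std::threads; hence the count logged
     below. */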
  DLOG(INFO) << "starting pool of " << maxThreads - 1 << " threads";
}

ThreadPool::~ThreadPool() { shutdown(); }

void ThreadPool::shutdown() {
  std::vector<std::thread> workers;
  {
    auto state(state_.lock());
    quit = true;
    std::swap(workers, state->workers);
  }

  if (workers.empty()) {
    return;
  }

  DLOG(INFO) << "reaping " << workers.size() << " worker threads";

  work.notify_all();

  for (auto& thr : workers) {
    thr.join();
  }
}

void ThreadPool::enqueue(const work_t& t) {
  auto state(state_.lock());
  if (quit) {
    throw ThreadPoolShutDown(
        "cannot enqueue a work item while the thread pool is shutting down");
  }
  state->pending.push(t);
  /* Note: process() also executes items, so count it as a worker. */
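  /* A new thread is started only when the pending items outnumber the
     current executors (workers plus the process() caller) and the total,
     counting that caller, would still not exceed maxThreads; e.g. with
     maxThreads == 4 at most three std::threads are ever created. */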
  if (state->pending.size() > state->workers.size() + 1 &&
      state->workers.size() + 1 < maxThreads)
    state->workers.emplace_back(&ThreadPool::doWork, this, false);
  work.notify_one();
}

void ThreadPool::process() {
  state_.lock()->draining = true;

  /* Do work until no more work is pending or active. */
  try {
    doWork(true);

    auto state(state_.lock());

    assert(quit);

    if (state->exception) {
      std::rethrow_exception(state->exception);
    }
  } catch (...) {
    /* In the exceptional case, some workers may still be
       active. They may be referencing the stack frame of the
       caller. So wait for them to finish. (~ThreadPool also does
       this, but it might be destroyed after objects referenced by
       the work item lambdas.) */
    shutdown();
    throw;
  }
}

void ThreadPool::doWork(bool mainThread) {
  if (!mainThread) {
    interruptCheck = [&]() { return (bool)quit; };
  }

  bool didWork = false;
  std::exception_ptr exc;

  while (true) {
    work_t w;
    {
      auto state(state_.lock());

      if (didWork) {
        assert(state->active);
        state->active--;

        if (exc) {
          if (!state->exception) {
            state->exception = exc;
            // Tell the other workers to quit.
            quit = true;
            work.notify_all();
          } else {
            /* Print the exception, since we can't
               propagate it. */
            try {
              std::rethrow_exception(exc);
            } catch (std::exception& e) {
              if (!dynamic_cast<Interrupted*>(&e) &&
                  !dynamic_cast<ThreadPoolShutDown*>(&e))
                ignoreException();
            } catch (...) {
            }
          }
        }
      }

      /* Wait until a work item is available or we're asked to
         quit. */
      while (true) {
        if (quit) {
          return;
        }
        if (!state->pending.empty()) {
          break;
        }
        /* If there are no active or pending items, and the
           main thread is running process(), then no new items
           can be added. So exit. */
        if (!state->active && state->draining) {
          quit = true;
          work.notify_all();
          return;
        }
        state.wait(work);
      }

      w = std::move(state->pending.front());
      state->pending.pop();
      state->active++;
    }

    try {
      w();
    } catch (...) {
      exc = std::current_exception();
    }

    didWork = true;
  }
}

} // namespace nix
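
/* Minimal usage sketch (illustrative only, not part of this file). It
   assumes work_t is a std::function<void()>-like callable type declared in
   thread-pool.hh:

     nix::ThreadPool pool(4);  // up to 4 executors, including the caller
     std::atomic<int> sum{0};
     for (int i = 0; i < 100; ++i) {
       pool.enqueue([&sum, i]() { sum += i; });
     }
     pool.process();  // runs items here and on the workers; rethrows the
                      // first exception raised by any work item
*/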