const std = @import("std");
const builtin = @import("builtin");
const ThreadPool = @This();

mutex: std.Thread.Mutex = .{},
cond: std.Thread.Condition = .{},
run_queue: RunQueue = .{},
is_running: bool = true,
allocator: std.mem.Allocator,
threads: []std.Thread,

const RunQueue = std.SinglyLinkedList(Runnable);
const Runnable = struct {
    runFn: RunProto,
};

const RunProto = switch (builtin.zig_backend) {
    .stage1 => fn (*Runnable) void,
    else => *const fn (*Runnable) void,
};
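
/// Initializes the pool in place, spawning one worker thread per logical
/// CPU (at least one). In single-threaded builds no threads are created
/// and `spawn` runs tasks inline. On error, any threads already spawned
/// are shut down and joined.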
pub fn init(self: *ThreadPool, allocator: std.mem.Allocator) !void {
    self.* = .{
        .allocator = allocator,
        .threads = &[_]std.Thread{},
    };

    if (builtin.single_threaded) {
        return;
    }

    const thread_count = std.math.max(1, std.Thread.getCpuCount() catch 1);
    self.threads = try allocator.alloc(std.Thread, thread_count);
    errdefer allocator.free(self.threads);

    // kill and join any threads we spawned previously on error.
    var spawned: usize = 0;
    errdefer self.join(spawned);

    for (self.threads) |*thread| {
        thread.* = try std.Thread.spawn(.{}, worker, .{self});
        spawned += 1;
    }
}
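
/// Shuts the pool down: signals every worker to exit, waits for them to
/// finish any queued tasks, and frees the thread slice.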
pub fn deinit(self: *ThreadPool) void {
    self.join(self.threads.len); // kill and join all threads.
    self.* = undefined;
}

fn join(self: *ThreadPool, spawned: usize) void {
    {
        self.mutex.lock();
        defer self.mutex.unlock();

        // ensure future worker threads exit the dequeue loop
        self.is_running = false;
    }

    // wake up any sleeping threads (this can be done outside the mutex)
    // then wait for all the threads we know are spawned to complete.
    self.cond.broadcast();
    for (self.threads[0..spawned]) |thread| {
        thread.join();
    }

    self.allocator.free(self.threads);
}
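
/// Schedules `func` to be called with `args` on a worker thread. The
/// arguments are copied into a heap-allocated closure that destroys itself
/// after the call completes. In single-threaded builds, `func` is invoked
/// directly on the calling thread.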
pub fn spawn(self: *ThreadPool, comptime func: anytype, args: anytype) !void {
    if (builtin.single_threaded) {
        @call(.{}, func, args);
        return;
    }

    const Args = @TypeOf(args);
    const Closure = struct {
        arguments: Args,
        pool: *ThreadPool,
        run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } },

        fn runFn(runnable: *Runnable) void {
            const run_node = @fieldParentPtr(RunQueue.Node, "data", runnable);
            const closure = @fieldParentPtr(@This(), "run_node", run_node);
            @call(.{}, func, closure.arguments);

            // The thread pool's allocator is protected by the mutex.
            const mutex = &closure.pool.mutex;
            mutex.lock();
            defer mutex.unlock();

            closure.pool.allocator.destroy(closure);
        }
    };

    {
        self.mutex.lock();
        defer self.mutex.unlock();

        const closure = try self.allocator.create(Closure);
        closure.* = .{
            .arguments = args,
            .pool = self,
        };

        self.run_queue.prepend(&closure.run_node);
    }

    // Notify waiting threads outside the lock to try and keep the critical section small.
    self.cond.signal();
}
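
/// Worker loop: drain the run queue, releasing the mutex around each task,
/// then sleep on the condition variable until more work arrives or the
/// pool stops running.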
fn worker(self: *ThreadPool) void {
    self.mutex.lock();
    defer self.mutex.unlock();

    while (true) {
        while (self.run_queue.popFirst()) |run_node| {
            // Temporarily unlock the mutex in order to execute the run_node
            self.mutex.unlock();
            defer self.mutex.lock();

            const runFn = run_node.data.runFn;
            runFn(&run_node.data);
        }

        // Stop executing instead of waiting if the thread pool is no longer running.
        if (self.is_running) {
            self.cond.wait(&self.mutex);
        } else {
            break;
        }
    }
}
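
// A minimal usage sketch, not part of the original file: it assumes this
// file is the root struct of `ThreadPool` and a compiler contemporary with
// the pre-0.11 builtin syntax used above. `incrementBy` and the test name
// are illustrative. Since workers drain the run queue before exiting, every
// spawned task has finished once `deinit` returns.
fn incrementBy(amount: u32, total: *u32, lock: *std.Thread.Mutex) void {
    lock.lock();
    defer lock.unlock();
    total.* += amount;
}

test "ThreadPool: spawn tasks and shut down" {
    var pool: ThreadPool = undefined;
    try pool.init(std.testing.allocator);

    var total: u32 = 0;
    var lock: std.Thread.Mutex = .{};

    const amount: u32 = 1;
    var i: usize = 0;
    while (i < 8) : (i += 1) {
        try pool.spawn(incrementBy, .{ amount, &total, &lock });
    }

    // deinit drains the queue and joins every worker before returning.
    pool.deinit();
    try std.testing.expectEqual(@as(u32, 8), total);
}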