path: root/src/ThreadPool.zig
author     Andrew Kelley <andrew@ziglang.org>   2021-11-09 18:27:12 -0700
committer  Andrew Kelley <andrew@ziglang.org>   2021-11-09 18:31:03 -0700
commit     008b0ec5e58fc7e31f3b989868a7d1ea4df3f41d (patch)
tree       99374a7b0f5dc0bf56fc5daff702eecb7758d4f5 /src/ThreadPool.zig
parent     65e518e8e8ab74b276c5a284caebfad4e5aa502c (diff)
download   zig-008b0ec5e58fc7e31f3b989868a7d1ea4df3f41d.tar.gz
           zig-008b0ec5e58fc7e31f3b989868a7d1ea4df3f41d.zip
std.Thread.Mutex: change API to lock() and unlock()
This is a breaking change. Before, usage looked like this:

```zig
const held = mutex.acquire();
defer held.release();
```

Now it looks like this:

```zig
mutex.lock();
defer mutex.unlock();
```

The `Held` type was an idea to make mutexes slightly safer by making it harder to forget to release an acquired lock. However, it ultimately caused more problems than it solved, particularly when data structures needed to store a held mutex. Simplify everything by reducing the API down to the primitives: lock() and unlock().

Closes #8051
Closes #8246
Closes #10105
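For callers migrating to the new API, here is a minimal sketch (the `Counter` type and `main` driver are hypothetical, not part of this commit) of guarding shared state with the lock()/unlock() primitives:

```zig
const std = @import("std");

/// Hypothetical example type: a value guarded by the new-style mutex.
const Counter = struct {
    mutex: std.Thread.Mutex = .{},
    value: u64 = 0,

    fn incr(self: *Counter) void {
        // Old API: const held = self.mutex.acquire(); defer held.release();
        self.mutex.lock();
        defer self.mutex.unlock();
        self.value += 1;
    }
};

pub fn main() void {
    var counter = Counter{};
    counter.incr();
    std.debug.print("value = {}\n", .{counter.value});
}
```

Since lock() and unlock() are plain methods on the mutex, a data structure only needs to store the `std.Thread.Mutex` field itself; there is no `Held` value to keep alive across scopes.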
Diffstat (limited to 'src/ThreadPool.zig')
-rw-r--r--  src/ThreadPool.zig  31
1 file changed, 17 insertions, 14 deletions
diff --git a/src/ThreadPool.zig b/src/ThreadPool.zig
index 4198e062bb..a3ba3c6115 100644
--- a/src/ThreadPool.zig
+++ b/src/ThreadPool.zig
@@ -7,7 +7,7 @@ const std = @import("std");
const builtin = @import("builtin");
const ThreadPool = @This();
-lock: std.Thread.Mutex = .{},
+mutex: std.Thread.Mutex = .{},
is_running: bool = true,
allocator: *std.mem.Allocator,
workers: []Worker,
@@ -28,26 +28,28 @@ const Worker = struct {
    idle_node: IdleQueue.Node,
    fn run(worker: *Worker) void {
+        const pool = worker.pool;
+
        while (true) {
-            const held = worker.pool.lock.acquire();
+            pool.mutex.lock();
-            if (worker.pool.run_queue.popFirst()) |run_node| {
-                held.release();
+            if (pool.run_queue.popFirst()) |run_node| {
+                pool.mutex.unlock();
                (run_node.data.runFn)(&run_node.data);
                continue;
            }
-            if (worker.pool.is_running) {
+            if (pool.is_running) {
                worker.idle_node.data.reset();
-                worker.pool.idle_queue.prepend(&worker.idle_node);
-                held.release();
+                pool.idle_queue.prepend(&worker.idle_node);
+                pool.mutex.unlock();
                worker.idle_node.data.wait();
                continue;
            }
-            held.release();
+            pool.mutex.unlock();
            return;
        }
    }
@@ -88,8 +90,8 @@ fn destroyWorkers(self: *ThreadPool, spawned: usize) void {
pub fn deinit(self: *ThreadPool) void {
    {
-        const held = self.lock.acquire();
-        defer held.release();
+        self.mutex.lock();
+        defer self.mutex.unlock();
        self.is_running = false;
        while (self.idle_queue.popFirst()) |idle_node|
@@ -117,14 +119,15 @@ pub fn spawn(self: *ThreadPool, comptime func: anytype, args: anytype) !void {
            const closure = @fieldParentPtr(@This(), "run_node", run_node);
            @call(.{}, func, closure.arguments);
-            const held = closure.pool.lock.acquire();
-            defer held.release();
+            const mutex = &closure.pool.mutex;
+            mutex.lock();
+            defer mutex.unlock();
            closure.pool.allocator.destroy(closure);
        }
    };
-    const held = self.lock.acquire();
-    defer held.release();
+    self.mutex.lock();
+    defer self.mutex.unlock();
    const closure = try self.allocator.create(Closure);
    closure.* = .{