path: root/src/ThreadSafeQueue.zig
author     mlugg <mlugg@mlugg.co.uk>  2025-05-29 05:38:55 +0100
committer  mlugg <mlugg@mlugg.co.uk>  2025-06-12 13:55:40 +0100
commit     9eb400ef19391261a3b61129d8665602c89959c5 (patch)
tree       fc7046857c3271294a8ebfcd462ece05b0be5f46 /src/ThreadSafeQueue.zig
parent     66d15d9d0974e1b493b717cf02deb435ebd13858 (diff)
download   zig-9eb400ef19391261a3b61129d8665602c89959c5.tar.gz
           zig-9eb400ef19391261a3b61129d8665602c89959c5.zip
compiler: rework backend pipeline to separate codegen and link
The idea here is that instead of the linker calling into codegen, codegen should run before we touch the linker; once MIR is produced, it is sent to the linker. Aside from simplifying the call graph (by preventing N linkers from each calling into M codegen backends!), this has the huge benefit that codegen can be parallelized separately from linking. The threading model can look like this:

* 1 semantic analysis thread, which generates AIR
* N codegen threads, which process AIR into MIR
* 1 linker thread, which emits MIR to the binary

The codegen threads are also responsible for `Air.Legalize` and `Air.Liveness`; it is more efficient to do this work there than to block the main thread on a trivially parallel task.

I have repurposed the `Zcu.Feature.separate_thread` backend feature to indicate support for this 1:N:1 threading pattern. This commit makes the C backend support the feature, since it was relatively easy to divorce from `link.C`: it just required eliminating some shared buffers. Other backends don't currently support the feature; in fact, they don't even compile -- the next few commits will fix them back up.
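Reduced to its essentials, that 1:N:1 fan-out/fan-in shape might look like the sketch below. This is only an illustration: `Air`, `Mir`, and `codegenWorker` are placeholder stand-ins, not the compiler's real types or functions.

const std = @import("std");

// Placeholder stand-ins for the compiler's real AIR and MIR types.
const Air = struct { id: u32 };
const Mir = struct { id: u32 };

// One of the N codegen workers: turns its slice of AIR into MIR.
fn codegenWorker(inputs: []const Air, outputs: []Mir) void {
    for (inputs, outputs) |air, *mir| mir.* = .{ .id = air.id };
}

pub fn main() !void {
    const air = [4]Air{ .{ .id = 0 }, .{ .id = 1 }, .{ .id = 2 }, .{ .id = 3 } };
    var mir: [4]Mir = undefined;

    // Fan out: N (here, 2) codegen threads, each owning a disjoint slice.
    const t0 = try std.Thread.spawn(.{}, codegenWorker, .{ air[0..2], mir[0..2] });
    const t1 = try std.Thread.spawn(.{}, codegenWorker, .{ air[2..4], mir[2..4] });
    t0.join();
    t1.join();

    // Fan in: a single "linker" consumes the MIR.
    for (mir) |m| std.debug.print("emit MIR for {d}\n", .{m.id});
}

A real pipeline would stream results to the linker thread as they are produced rather than joining on fixed slices; the fixed arrays here only keep the sketch short.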
Diffstat (limited to 'src/ThreadSafeQueue.zig')
-rw-r--r--  src/ThreadSafeQueue.zig  72
1 file changed, 0 insertions, 72 deletions
diff --git a/src/ThreadSafeQueue.zig b/src/ThreadSafeQueue.zig
deleted file mode 100644
index 74bbdc418f..0000000000
--- a/src/ThreadSafeQueue.zig
+++ /dev/null
@@ -1,72 +0,0 @@
-const std = @import("std");
-const assert = std.debug.assert;
-const Allocator = std.mem.Allocator;
-
-pub fn ThreadSafeQueue(comptime T: type) type {
- return struct {
- worker_owned: std.ArrayListUnmanaged(T),
- /// Protected by `mutex`.
- shared: std.ArrayListUnmanaged(T),
- mutex: std.Thread.Mutex,
- state: State,
-
- const Self = @This();
-
- pub const State = enum { wait, run };
-
- pub const empty: Self = .{
- .worker_owned = .empty,
- .shared = .empty,
- .mutex = .{},
- .state = .wait,
- };
-
- pub fn deinit(self: *Self, gpa: Allocator) void {
- self.worker_owned.deinit(gpa);
- self.shared.deinit(gpa);
- self.* = undefined;
- }
-
- /// Must be called from the worker thread.
- pub fn check(self: *Self) ?[]T {
- assert(self.worker_owned.items.len == 0);
- {
- self.mutex.lock();
- defer self.mutex.unlock();
- assert(self.state == .run);
- if (self.shared.items.len == 0) {
- self.state = .wait;
- return null;
- }
- std.mem.swap(std.ArrayListUnmanaged(T), &self.worker_owned, &self.shared);
- }
- const result = self.worker_owned.items;
- self.worker_owned.clearRetainingCapacity();
- return result;
- }
-
- /// Adds items to the queue, returning true if and only if the worker
- /// thread is waiting. Thread-safe.
- /// Not safe to call from the worker thread.
- pub fn enqueue(self: *Self, gpa: Allocator, items: []const T) error{OutOfMemory}!bool {
- self.mutex.lock();
- defer self.mutex.unlock();
- try self.shared.appendSlice(gpa, items);
- return switch (self.state) {
- .run => false,
- .wait => {
- self.state = .run;
- return true;
- },
- };
- }
-
- /// Safe only to call exactly once when initially starting the worker.
- pub fn start(self: *Self) bool {
- assert(self.state == .wait);
- if (self.shared.items.len == 0) return false;
- self.state = .run;
- return true;
- }
- };
-}
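For context on what is being deleted, the wait/run protocol above can be exercised with a small, hypothetical driver like the one below. `Job`, `workerLoop`, and the producer code are illustrative only and assume the sketch sits next to `ThreadSafeQueue.zig`; they are not taken from the compiler.

const std = @import("std");
const ThreadSafeQueue = @import("ThreadSafeQueue.zig").ThreadSafeQueue;

// Hypothetical work-item type; the compiler used its own job types.
const Job = u32;

// Worker thread: drains the queue until `check` reports it empty, at
// which point the queue flips back to the `.wait` state and the loop ends.
fn workerLoop(queue: *ThreadSafeQueue(Job)) void {
    while (queue.check()) |jobs| {
        for (jobs) |job| std.debug.print("processing job {d}\n", .{job});
    }
}

pub fn main() !void {
    const gpa = std.heap.page_allocator;

    var queue: ThreadSafeQueue(Job) = .empty;
    defer queue.deinit(gpa);

    // Producer side: `enqueue` returns true iff the worker was waiting,
    // which means the caller is responsible for (re)starting it.
    if (try queue.enqueue(gpa, &.{ 1, 2, 3 })) {
        const worker = try std.Thread.spawn(.{}, workerLoop, .{&queue});
        worker.join();
    }
}

The key property is the handoff: `enqueue` returns true exactly when the worker had gone idle, so the producer knows it must start or wake the worker, while `check` swaps the shared list into the worker-owned one so items are processed without holding the mutex.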