| | | |
|---|---|---|
| author | Isaac Freund <mail@isaacfreund.com> | 2025-08-25 20:30:20 +0200 |
| committer | Isaac Freund <mail@isaacfreund.com> | 2025-08-26 09:39:09 +0200 |
| commit | 6d4dbf05effa3afeb650aeea17683d5de4e6429c (patch) | |
| tree | 29458c85d0cb2cd28a48c2c7f0c47d07222f978b /src | |
| parent | 3e77317261e3da50ac55be0c14bc00192ee93166 (diff) | |
| download | zig-6d4dbf05effa3afeb650aeea17683d5de4e6429c.tar.gz zig-6d4dbf05effa3afeb650aeea17683d5de4e6429c.zip | |
Compilation: use std.Deque
And delete DeprecatedLinearFifo from the source tree.
Diffstat (limited to 'src')
| -rw-r--r-- | src/Compilation.zig | 43 |
| -rw-r--r-- | src/deprecated.zig | 169 |
2 files changed, 21 insertions, 191 deletions
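For readers following the API change, here is a minimal sketch (not part of the commit) of what the migration looks like at a call site. The `std.Deque` methods used below are exactly the ones that appear in the diff that follows; the `Job` struct and the test harness are invented for illustration.

```zig
const std = @import("std");

// `Job` is a stand-in for Compilation's job type; only the queue calls are
// taken from the diff below.
const Job = struct { id: u32 };

test "queue API after the std.Deque migration" {
    const gpa = std.testing.allocator;

    // Old: `DeprecatedLinearFifo(Job).init(gpa)` stored the allocator inside
    // the queue. New: `std.Deque(Job)` starts out as `.empty` and takes the
    // allocator on each call, which is why create() can just `@splat(.empty)`.
    var queue: std.Deque(Job) = .empty;
    defer queue.deinit(gpa);

    // writeItem(job)               -> pushBack(gpa, job)
    // ensureUnusedCapacity(n)      -> ensureUnusedCapacity(gpa, n)
    // writeItemAssumeCapacity(job) -> pushBackAssumeCapacity(job)
    // readItem()                   -> popFront()
    try queue.pushBack(gpa, .{ .id = 1 });
    try queue.ensureUnusedCapacity(gpa, 1);
    queue.pushBackAssumeCapacity(.{ .id = 2 });

    var next_id: u32 = 1;
    while (queue.popFront()) |job| : (next_id += 1) {
        try std.testing.expectEqual(next_id, job.id);
    }
}
```

The main behavioural difference visible in the diff is that the queue no longer stores its allocator: every growing or freeing call takes `gpa` explicitly, so each queue field can be initialized with the `.empty` decl literal.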
diff --git a/src/Compilation.zig b/src/Compilation.zig
index db15eee954..d2b123b8dc 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -45,8 +45,6 @@ const Builtin = @import("Builtin.zig");
 const LlvmObject = @import("codegen/llvm.zig").Object;
 const dev = @import("dev.zig");
 
-const DeprecatedLinearFifo = @import("deprecated.zig").LinearFifo;
-
 pub const Config = @import("Compilation/Config.zig");
 
 /// General-purpose allocator. Used for both temporary and long-term storage.
@@ -124,20 +122,21 @@ work_queues: [
         }
         break :len len;
     }
-]DeprecatedLinearFifo(Job),
+]std.Deque(Job),
 
 /// These jobs are to invoke the Clang compiler to create an object file, which
 /// gets linked with the Compilation.
-c_object_work_queue: DeprecatedLinearFifo(*CObject),
+c_object_work_queue: std.Deque(*CObject),
 
 /// These jobs are to invoke the RC compiler to create a compiled resource file (.res), which
 /// gets linked with the Compilation.
-win32_resource_work_queue: if (dev.env.supports(.win32_resource)) DeprecatedLinearFifo(*Win32Resource) else struct {
-    pub fn ensureUnusedCapacity(_: @This(), _: u0) error{}!void {}
-    pub fn readItem(_: @This()) ?noreturn {
+win32_resource_work_queue: if (dev.env.supports(.win32_resource)) std.Deque(*Win32Resource) else struct {
+    pub const empty: @This() = .{};
+    pub fn ensureUnusedCapacity(_: @This(), _: Allocator, _: u0) error{}!void {}
+    pub fn popFront(_: @This()) ?noreturn {
         return null;
     }
-    pub fn deinit(_: @This()) void {}
+    pub fn deinit(_: @This(), _: Allocator) void {}
 },
 
 /// The ErrorMsg memory is owned by the `CObject`, using Compilation's general purpose allocator.
@@ -2231,9 +2230,9 @@ pub fn create(gpa: Allocator, arena: Allocator, diag: *CreateDiagnostic, options
         .root_mod = options.root_mod,
         .config = options.config,
         .dirs = options.dirs,
-        .work_queues = @splat(.init(gpa)),
-        .c_object_work_queue = .init(gpa),
-        .win32_resource_work_queue = if (dev.env.supports(.win32_resource)) .init(gpa) else .{},
+        .work_queues = @splat(.empty),
+        .c_object_work_queue = .empty,
+        .win32_resource_work_queue = .empty,
         .c_source_files = options.c_source_files,
         .rc_source_files = options.rc_source_files,
         .cache_parent = cache,
@@ -2699,9 +2698,9 @@ pub fn destroy(comp: *Compilation) void {
     if (comp.zcu) |zcu| zcu.deinit();
     comp.cache_use.deinit();
 
-    for (&comp.work_queues) |*work_queue| work_queue.deinit();
-    comp.c_object_work_queue.deinit();
-    comp.win32_resource_work_queue.deinit();
+    for (&comp.work_queues) |*work_queue| work_queue.deinit(gpa);
+    comp.c_object_work_queue.deinit(gpa);
+    comp.win32_resource_work_queue.deinit(gpa);
 
     for (comp.windows_libs.keys()) |windows_lib| gpa.free(windows_lib);
     comp.windows_libs.deinit(gpa);
@@ -3016,17 +3015,17 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
 
     // For compiling C objects, we rely on the cache hash system to avoid duplicating work.
    // Add a Job for each C object.
-    try comp.c_object_work_queue.ensureUnusedCapacity(comp.c_object_table.count());
+    try comp.c_object_work_queue.ensureUnusedCapacity(gpa, comp.c_object_table.count());
     for (comp.c_object_table.keys()) |c_object| {
-        comp.c_object_work_queue.writeItemAssumeCapacity(c_object);
+        comp.c_object_work_queue.pushBackAssumeCapacity(c_object);
         try comp.appendFileSystemInput(try .fromUnresolved(arena, comp.dirs, &.{c_object.src.src_path}));
     }
 
     // For compiling Win32 resources, we rely on the cache hash system to avoid duplicating work.
     // Add a Job for each Win32 resource file.
-    try comp.win32_resource_work_queue.ensureUnusedCapacity(comp.win32_resource_table.count());
+    try comp.win32_resource_work_queue.ensureUnusedCapacity(gpa, comp.win32_resource_table.count());
     for (comp.win32_resource_table.keys()) |win32_resource| {
-        comp.win32_resource_work_queue.writeItemAssumeCapacity(win32_resource);
+        comp.win32_resource_work_queue.pushBackAssumeCapacity(win32_resource);
         switch (win32_resource.src) {
             .rc => |f| {
                 try comp.appendFileSystemInput(try .fromUnresolved(arena, comp.dirs, &.{f.src_path}));
@@ -4869,14 +4868,14 @@ fn performAllTheWork(
         }
     }
 
-    while (comp.c_object_work_queue.readItem()) |c_object| {
+    while (comp.c_object_work_queue.popFront()) |c_object| {
         comp.link_task_queue.startPrelinkItem();
         comp.thread_pool.spawnWg(&comp.link_task_wait_group, workerUpdateCObject, .{
             comp, c_object, main_progress_node,
         });
     }
 
-    while (comp.win32_resource_work_queue.readItem()) |win32_resource| {
+    while (comp.win32_resource_work_queue.popFront()) |win32_resource| {
         comp.link_task_queue.startPrelinkItem();
         comp.thread_pool.spawnWg(&comp.link_task_wait_group, workerUpdateWin32Resource, .{
             comp, win32_resource, main_progress_node,
@@ -4996,7 +4995,7 @@ fn performAllTheWork(
     }
 
     work: while (true) {
-        for (&comp.work_queues) |*work_queue| if (work_queue.readItem()) |job| {
+        for (&comp.work_queues) |*work_queue| if (work_queue.popFront()) |job| {
            try processOneJob(@intFromEnum(Zcu.PerThread.Id.main), comp, job);
            continue :work;
         };
@@ -5025,7 +5024,7 @@ fn performAllTheWork(
 }
 const JobError = Allocator.Error;
 pub fn queueJob(comp: *Compilation, job: Job) !void {
-    try comp.work_queues[Job.stage(job)].writeItem(job);
+    try comp.work_queues[Job.stage(job)].pushBack(comp.gpa, job);
 }
 
 pub fn queueJobs(comp: *Compilation, jobs: []const Job) !void {
diff --git a/src/deprecated.zig b/src/deprecated.zig
deleted file mode 100644
index 68c712b3b1..0000000000
--- a/src/deprecated.zig
+++ /dev/null
@@ -1,169 +0,0 @@
-//! Deprecated. Stop using this API
-
-const std = @import("std");
-const math = std.math;
-const mem = std.mem;
-const Allocator = mem.Allocator;
-const assert = std.debug.assert;
-const testing = std.testing;
-
-pub fn LinearFifo(comptime T: type) type {
-    return struct {
-        allocator: Allocator,
-        buf: []T,
-        head: usize,
-        count: usize,
-
-        const Self = @This();
-
-        pub fn init(allocator: Allocator) Self {
-            return .{
-                .allocator = allocator,
-                .buf = &.{},
-                .head = 0,
-                .count = 0,
-            };
-        }
-
-        pub fn deinit(self: *Self) void {
-            self.allocator.free(self.buf);
-            self.* = undefined;
-        }
-
-        pub fn realign(self: *Self) void {
-            if (self.buf.len - self.head >= self.count) {
-                mem.copyForwards(T, self.buf[0..self.count], self.buf[self.head..][0..self.count]);
-                self.head = 0;
-            } else {
-                var tmp: [4096 / 2 / @sizeOf(T)]T = undefined;
-
-                while (self.head != 0) {
-                    const n = @min(self.head, tmp.len);
-                    const m = self.buf.len - n;
-                    @memcpy(tmp[0..n], self.buf[0..n]);
-                    mem.copyForwards(T, self.buf[0..m], self.buf[n..][0..m]);
-                    @memcpy(self.buf[m..][0..n], tmp[0..n]);
-                    self.head -= n;
-                }
-            }
-            { // set unused area to undefined
-                const unused = mem.sliceAsBytes(self.buf[self.count..]);
-                @memset(unused, undefined);
-            }
-        }
-
-        /// Ensure that the buffer can fit at least `size` items
-        pub fn ensureTotalCapacity(self: *Self, size: usize) !void {
-            if (self.buf.len >= size) return;
-            self.realign();
-            const new_size = math.ceilPowerOfTwo(usize, size) catch return error.OutOfMemory;
-            self.buf = try self.allocator.realloc(self.buf, new_size);
-        }
-
-        /// Makes sure at least `size` items are unused
-        pub fn ensureUnusedCapacity(self: *Self, size: usize) error{OutOfMemory}!void {
-            if (self.writableLength() >= size) return;
-
-            return try self.ensureTotalCapacity(math.add(usize, self.count, size) catch return error.OutOfMemory);
-        }
-
-        /// Returns a writable slice from the 'read' end of the fifo
-        fn readableSliceMut(self: Self, offset: usize) []T {
-            if (offset > self.count) return &[_]T{};
-
-            var start = self.head + offset;
-            if (start >= self.buf.len) {
-                start -= self.buf.len;
-                return self.buf[start .. start + (self.count - offset)];
-            } else {
-                const end = @min(self.head + self.count, self.buf.len);
-                return self.buf[start..end];
-            }
-        }
-
-        /// Discard first `count` items in the fifo
-        pub fn discard(self: *Self, count: usize) void {
-            assert(count <= self.count);
-            { // set old range to undefined. Note: may be wrapped around
-                const slice = self.readableSliceMut(0);
-                if (slice.len >= count) {
-                    const unused = mem.sliceAsBytes(slice[0..count]);
-                    @memset(unused, undefined);
-                } else {
-                    const unused = mem.sliceAsBytes(slice[0..]);
-                    @memset(unused, undefined);
-                    const unused2 = mem.sliceAsBytes(self.readableSliceMut(slice.len)[0 .. count - slice.len]);
-                    @memset(unused2, undefined);
-                }
-            }
-            var head = self.head + count;
-            // Note it is safe to do a wrapping subtract as
-            // bitwise & with all 1s is a noop
-            head &= self.buf.len -% 1;
-            self.head = head;
-            self.count -= count;
-        }
-
-        /// Read the next item from the fifo
-        pub fn readItem(self: *Self) ?T {
-            if (self.count == 0) return null;
-
-            const c = self.buf[self.head];
-            self.discard(1);
-            return c;
-        }
-
-        /// Returns number of items available in fifo
-        pub fn writableLength(self: Self) usize {
-            return self.buf.len - self.count;
-        }
-
-        /// Returns the first section of writable buffer.
-        /// Note that this may be of length 0
-        pub fn writableSlice(self: Self, offset: usize) []T {
-            if (offset > self.buf.len) return &[_]T{};
-
-            const tail = self.head + offset + self.count;
-            if (tail < self.buf.len) {
-                return self.buf[tail..];
-            } else {
-                return self.buf[tail - self.buf.len ..][0 .. self.writableLength() - offset];
-            }
-        }
-
-        /// Update the tail location of the buffer (usually follows use of writable/writableWithSize)
-        pub fn update(self: *Self, count: usize) void {
-            assert(self.count + count <= self.buf.len);
-            self.count += count;
-        }
-
-        /// Appends the data in `src` to the fifo.
-        /// You must have ensured there is enough space.
-        pub fn writeAssumeCapacity(self: *Self, src: []const T) void {
-            assert(self.writableLength() >= src.len);
-
-            var src_left = src;
-            while (src_left.len > 0) {
-                const writable_slice = self.writableSlice(0);
-                assert(writable_slice.len != 0);
-                const n = @min(writable_slice.len, src_left.len);
-                @memcpy(writable_slice[0..n], src_left[0..n]);
-                self.update(n);
-                src_left = src_left[n..];
-            }
-        }
-
-        /// Write a single item to the fifo
-        pub fn writeItem(self: *Self, item: T) !void {
-            try self.ensureUnusedCapacity(1);
-            return self.writeItemAssumeCapacity(item);
-        }
-
-        pub fn writeItemAssumeCapacity(self: *Self, item: T) void {
-            var tail = self.head + self.count;
-            tail &= self.buf.len - 1;
-            self.buf[tail] = item;
-            self.update(1);
-        }
-    };
-}
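A note on the deleted ring buffer: `writeItemAssumeCapacity` and `discard` wrap their indices with a bitwise AND (`tail &= self.buf.len - 1`) rather than a modulo. That is only sound because `ensureTotalCapacity` always grows the buffer to a power of two via `math.ceilPowerOfTwo`, and the wrapping subtract (`-% 1`) in `discard` turns the zero-length case into an all-ones mask instead of an underflow. A small, self-contained sketch of those two properties (the capacity and index values are made up for illustration):

```zig
const std = @import("std");

test "power-of-two capacity lets & replace %" {
    // With a power-of-two length, `index & (len - 1)` equals `index % len`,
    // which is the invariant the deleted LinearFifo relied on.
    const len: usize = 8; // example capacity; always a power of two
    var index: usize = 0;
    while (index < 3 * len) : (index += 1) {
        try std.testing.expectEqual(index % len, index & (len - 1));
    }

    // The zero-length case: `len -% 1` wraps to all ones, so masking becomes
    // a no-op instead of a safety-checked underflow (the comment in discard()).
    const empty_len: usize = 0;
    const head: usize = 0;
    try std.testing.expectEqual(@as(usize, 0), head & (empty_len -% 1));
}
```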
