author     Andrew Kelley <andrew@ziglang.org>  2025-12-08 15:23:18 -0800
committer  Andrew Kelley <andrew@ziglang.org>  2025-12-23 22:15:08 -0800
commit     6f46570958af8ae27308eb4a9470e05f33aaa522 (patch)
tree       19aec2fa52364c78ffa9a9d8dc14d335f664fe06 /src
parent     181ac08459f8d4001c504330ee66037135e56908 (diff)
link.MachO: update parallel hasher to std.Io
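
The refactor removes ParallelHasher's allocator and io fields, so callers no
longer construct an instance; the generic type becomes a plain namespace and
both dependencies are passed per call. A minimal before/after sketch of the
call shape (variable names illustrative, drawn from the hunks below):

    // Before: an instance carrying allocator and io.
    var hasher = Hasher(Sha256){ .allocator = gpa, .io = io };
    try hasher.hash(file, slots, .{ .chunk_size = page_size });

    // After: a stateless call; gpa and io travel as arguments.
    try ParallelHasher(Sha256).hash(gpa, io, file, slots, .{
        .chunk_size = page_size,
    });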
Diffstat (limited to 'src')
-rw-r--r--  src/link/MachO/CodeSignature.zig | 17
-rw-r--r--  src/link/MachO/hasher.zig        | 24
-rw-r--r--  src/link/MachO/uuid.zig          | 16
3 files changed, 27 insertions(+), 30 deletions(-)
diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig
index 814faf234a..ec516d4af0 100644
--- a/src/link/MachO/CodeSignature.zig
+++ b/src/link/MachO/CodeSignature.zig
@@ -12,7 +12,7 @@ const Sha256 = std.crypto.hash.sha2.Sha256;
const Allocator = std.mem.Allocator;
const trace = @import("../../tracy.zig").trace;
-const Hasher = @import("hasher.zig").ParallelHasher;
+const ParallelHasher = @import("hasher.zig").ParallelHasher;
const MachO = @import("../MachO.zig");
const hash_size = Sha256.digest_length;
@@ -268,7 +268,9 @@ pub fn writeAdhocSignature(
const tracy = trace(@src());
defer tracy.end();
- const allocator = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
var header: macho.SuperBlob = .{
.magic = macho.CSMAGIC_EMBEDDED_SIGNATURE,
@@ -276,7 +278,7 @@ pub fn writeAdhocSignature(
.count = 0,
};
- var blobs = std.array_list.Managed(Blob).init(allocator);
+ var blobs = std.array_list.Managed(Blob).init(gpa);
defer blobs.deinit();
self.code_directory.inner.execSegBase = opts.exec_seg_base;
@@ -286,13 +288,12 @@ pub fn writeAdhocSignature(
const total_pages = @as(u32, @intCast(mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size));
- try self.code_directory.code_slots.ensureTotalCapacityPrecise(allocator, total_pages);
+ try self.code_directory.code_slots.ensureTotalCapacityPrecise(gpa, total_pages);
self.code_directory.code_slots.items.len = total_pages;
self.code_directory.inner.nCodeSlots = total_pages;
// Calculate hash for each page (in file) and write it to the buffer
- var hasher = Hasher(Sha256){ .allocator = allocator, .io = macho_file.base.comp.io };
- try hasher.hash(opts.file, self.code_directory.code_slots.items, .{
+ try ParallelHasher(Sha256).hash(gpa, io, opts.file, self.code_directory.code_slots.items, .{
.chunk_size = self.page_size,
.max_file_size = opts.file_size,
});
@@ -304,7 +305,7 @@ pub fn writeAdhocSignature(
var hash: [hash_size]u8 = undefined;
if (self.requirements) |*req| {
- var a: std.Io.Writer.Allocating = .init(allocator);
+ var a: std.Io.Writer.Allocating = .init(gpa);
defer a.deinit();
try req.write(&a.writer);
Sha256.hash(a.written(), &hash, .{});
@@ -316,7 +317,7 @@ pub fn writeAdhocSignature(
}
if (self.entitlements) |*ents| {
- var a: std.Io.Writer.Allocating = .init(allocator);
+ var a: std.Io.Writer.Allocating = .init(gpa);
defer a.deinit();
try ents.write(&a.writer);
Sha256.hash(a.written(), &hash, .{});
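
The requirements and entitlements blobs share one hashing pattern: serialize
the blob into an allocating writer, then hash the accumulated bytes. A
condensed sketch of that pattern, assuming a blob type exposing a
write(*std.Io.Writer) method as in this file:

    var a: std.Io.Writer.Allocating = .init(gpa);
    defer a.deinit();
    try blob.write(&a.writer);

    var hash: [Sha256.digest_length]u8 = undefined;
    Sha256.hash(a.written(), &hash, .{});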
diff --git a/src/link/MachO/hasher.zig b/src/link/MachO/hasher.zig
index 8cf53071c8..2e0129d240 100644
--- a/src/link/MachO/hasher.zig
+++ b/src/link/MachO/hasher.zig
@@ -1,5 +1,6 @@
const std = @import("std");
const Io = std.Io;
+const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const trace = @import("../../tracy.zig").trace;
@@ -8,20 +9,15 @@ pub fn ParallelHasher(comptime Hasher: type) type {
const hash_size = Hasher.digest_length;
return struct {
- allocator: Allocator,
- io: std.Io,
-
- pub fn hash(self: Self, file: Io.File, out: [][hash_size]u8, opts: struct {
+        pub fn hash(gpa: Allocator, io: Io, file: Io.File, out: [][hash_size]u8, opts: struct {
chunk_size: u64 = 0x4000,
max_file_size: ?u64 = null,
}) !void {
const tracy = trace(@src());
defer tracy.end();
- const io = self.io;
-
const file_size = blk: {
- const file_size = opts.max_file_size orelse try file.getEndPos();
+ const file_size = opts.max_file_size orelse try file.length(io);
break :blk std.math.cast(usize, file_size) orelse return error.Overflow;
};
const chunk_size = std.math.cast(usize, opts.chunk_size) orelse return error.Overflow;
@@ -29,12 +25,12 @@ pub fn ParallelHasher(comptime Hasher: type) type {
-            const buffer = try self.allocator.alloc(u8, chunk_size * out.len);
-            defer self.allocator.free(buffer);
-            const results = try self.allocator.alloc(Io.File.PReadError!usize, out.len);
-            defer self.allocator.free(results);
+            const buffer = try gpa.alloc(u8, chunk_size * out.len);
+            defer gpa.free(buffer);
+            const results = try gpa.alloc(Io.File.ReadPositionalError!usize, out.len);
+            defer gpa.free(results);
{
- var group: std.Io.Group = .init;
- errdefer group.cancel(io);
+ var group: Io.Group = .init;
+ defer group.cancel(io);
for (out, results, 0..) |*out_buf, *result, i| {
const fstart = i * chunk_size;
@@ -42,7 +38,8 @@
file_size - fstart
else
chunk_size;
             group.async(io, worker, .{
+                io,
file,
fstart,
buffer[fstart..][0..fsize],
@@ -60,12 +56,11 @@
        fn worker(
+            io: Io,
            file: Io.File,
            fstart: usize,
buffer: []u8,
out: *[hash_size]u8,
- err: *Io.File.PReadError!usize,
+ err: *Io.File.ReadPositionalError!usize,
) void {
- const tracy = trace(@src());
- defer tracy.end();
- err.* = file.preadAll(buffer, fstart);
+            err.* = file.readPositionalAll(io, buffer, fstart);
Hasher.hash(buffer, out, .{});
}
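
The fan-out shape is unchanged: one task per chunk, each reading its slice of
the file at a fixed offset and hashing it into its own output slot, with
per-chunk read errors parked in results for inspection afterwards. A condensed
sketch of the loop, assuming the Io.Group API used above and that the elided
tail of hash() waits on the group and re-raises any stored error:

    var group: Io.Group = .init;
    defer group.cancel(io);
    for (out, results, 0..) |*out_buf, *result, i| {
        const fstart = i * chunk_size;
        const fsize = @min(chunk_size, file_size - fstart);
        group.async(io, worker, .{ io, file, fstart, buffer[fstart..][0..fsize], out_buf, result });
    }
    group.wait(io); // assumed counterpart to cancel(io)
    for (results) |result| _ = try result; // surface any chunk's read error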
diff --git a/src/link/MachO/uuid.zig b/src/link/MachO/uuid.zig
index 4d8eac7523..a75799d01e 100644
--- a/src/link/MachO/uuid.zig
+++ b/src/link/MachO/uuid.zig
@@ -4,7 +4,7 @@ const Md5 = std.crypto.hash.Md5;
const trace = @import("../../tracy.zig").trace;
const Compilation = @import("../../Compilation.zig");
-const Hasher = @import("hasher.zig").ParallelHasher;
+const ParallelHasher = @import("hasher.zig").ParallelHasher;
/// Calculates Md5 hash of each chunk in parallel and then hashes all Md5 hashes to produce
/// the final digest.
@@ -16,21 +16,23 @@ pub fn calcUuid(comp: *const Compilation, file: Io.File, file_size: u64, out: *[
const tracy = trace(@src());
defer tracy.end();
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const chunk_size: usize = 1024 * 1024;
const num_chunks: usize = std.math.cast(usize, @divTrunc(file_size, chunk_size)) orelse return error.Overflow;
const actual_num_chunks = if (@rem(file_size, chunk_size) > 0) num_chunks + 1 else num_chunks;
- const hashes = try comp.gpa.alloc([Md5.digest_length]u8, actual_num_chunks);
- defer comp.gpa.free(hashes);
+ const hashes = try gpa.alloc([Md5.digest_length]u8, actual_num_chunks);
+ defer gpa.free(hashes);
- var hasher = Hasher(Md5){ .allocator = comp.gpa, .io = comp.io };
- try hasher.hash(file, hashes, .{
+ try ParallelHasher(Md5).hash(gpa, io, file, hashes, .{
.chunk_size = chunk_size,
.max_file_size = file_size,
});
- const final_buffer = try comp.gpa.alloc(u8, actual_num_chunks * Md5.digest_length);
- defer comp.gpa.free(final_buffer);
+ const final_buffer = try gpa.alloc(u8, actual_num_chunks * Md5.digest_length);
+ defer gpa.free(final_buffer);
for (hashes, 0..) |hash, i| {
@memcpy(final_buffer[i * Md5.digest_length ..][0..Md5.digest_length], &hash);
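
For the UUID itself, calcUuid rounds the chunk count up when the file size is
not a multiple of the 1 MiB chunk size (e.g. a file of 2 MiB + 1 byte gives
num_chunks = 2 and actual_num_chunks = 3), hashes every chunk in parallel, and
then hashes the concatenation of the per-chunk digests. A sketch of the elided
tail of the function, assuming the conventional RFC 4122 fixup for an
MD5-based (version 3) UUID; the exact bit-twiddling in the real file is not
shown in this hunk:

    Md5.hash(final_buffer, out, .{});
    // Stamp the version (3, MD5-based) and variant bits so the digest is a
    // well-formed UUID; assumed detail, not taken from the diff above.
    out[6] = (out[6] & 0x0F) | 0x30;
    out[8] = (out[8] & 0x3F) | 0x80;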