diff options
| field | value | date |
|---|---|---|
| author | Jakub Konka <kubkon@jakubkonka.com> | 2023-06-19 10:29:14 +0200 |
| committer | Jakub Konka <kubkon@jakubkonka.com> | 2023-06-19 10:29:39 +0200 |
| commit | b3a2ab3fedfc2e3e15f9024c7334a1f53d9aa7c5 (patch) | |
| tree | c462d0b3d43f3c59a7a8d31164c56305e598f2de | |
| parent | 423d7b848b1953173df99fde1f83166dc68c2a2c (diff) | |
| download | zig-b3a2ab3fedfc2e3e15f9024c7334a1f53d9aa7c5.tar.gz zig-b3a2ab3fedfc2e3e15f9024c7334a1f53d9aa7c5.zip | |
macho: extract parallel hasher into a generic helper struct
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | CMakeLists.txt | 1 |
| -rw-r--r-- | src/link/MachO/CodeSignature.zig | 65 |
| -rw-r--r-- | src/link/MachO/hasher.zig | 60 |

3 files changed, 67 insertions(+), 59 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt index 16c7dd3d0e..7a5726bdc5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -594,6 +594,7 @@ set(ZIG_STAGE2_SOURCES "${CMAKE_SOURCE_DIR}/src/link/MachO/dead_strip.zig" "${CMAKE_SOURCE_DIR}/src/link/MachO/eh_frame.zig" "${CMAKE_SOURCE_DIR}/src/link/MachO/fat.zig" + "${CMAKE_SOURCE_DIR}/src/link/MachO/hasher.zig" "${CMAKE_SOURCE_DIR}/src/link/MachO/load_commands.zig" "${CMAKE_SOURCE_DIR}/src/link/MachO/thunks.zig" "${CMAKE_SOURCE_DIR}/src/link/MachO/zld.zig" diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig index 02511dbe29..84c5b49362 100644 --- a/src/link/MachO/CodeSignature.zig +++ b/src/link/MachO/CodeSignature.zig @@ -7,11 +7,10 @@ const log = std.log.scoped(.link); const macho = std.macho; const mem = std.mem; const testing = std.testing; -const ThreadPool = std.Thread.Pool; -const WaitGroup = std.Thread.WaitGroup; const Allocator = mem.Allocator; const Compilation = @import("../../Compilation.zig"); +const Hasher = @import("hasher.zig").ParallelHasher; const Sha256 = std.crypto.hash.sha2.Sha256; const hash_size = Sha256.digest_length; @@ -289,7 +288,11 @@ pub fn writeAdhocSignature( self.code_directory.inner.nCodeSlots = total_pages; // Calculate hash for each page (in file) and write it to the buffer - try self.parallelHash(gpa, comp.thread_pool, opts.file, opts.file_size); + var hasher = Hasher(Sha256){}; + try hasher.hash(gpa, comp.thread_pool, opts.file, self.code_directory.code_slots.items, .{ + .chunk_size = self.page_size, + .max_file_size = opts.file_size, + }); try blobs.append(.{ .code_directory = &self.code_directory }); header.length += @sizeOf(macho.BlobIndex); @@ -348,62 +351,6 @@ pub fn writeAdhocSignature( } } -fn parallelHash( - self: *CodeSignature, - gpa: Allocator, - pool: *ThreadPool, - file: fs.File, - file_size: u32, -) !void { - var wg: WaitGroup = .{}; - - const total_num_chunks = mem.alignForward(usize, file_size, self.page_size) / self.page_size; - 
assert(self.code_directory.code_slots.items.len >= total_num_chunks); - - const buffer = try gpa.alloc(u8, self.page_size * total_num_chunks); - defer gpa.free(buffer); - - const results = try gpa.alloc(fs.File.PReadError!usize, total_num_chunks); - defer gpa.free(results); - - { - wg.reset(); - defer wg.wait(); - - var i: usize = 0; - while (i < total_num_chunks) : (i += 1) { - const fstart = i * self.page_size; - const fsize = if (fstart + self.page_size > file_size) - file_size - fstart - else - self.page_size; - wg.start(); - try pool.spawn(worker, .{ - file, - fstart, - buffer[fstart..][0..fsize], - &self.code_directory.code_slots.items[i], - &results[i], - &wg, - }); - } - } - for (results) |result| _ = try result; -} - -fn worker( - file: fs.File, - fstart: usize, - buffer: []u8, - out: *[hash_size]u8, - err: *fs.File.PReadError!usize, - wg: *WaitGroup, -) void { - defer wg.finish(); - err.* = file.preadAll(buffer, fstart); - Sha256.hash(buffer, out, .{}); -} - pub fn size(self: CodeSignature) u32 { var ssize: u32 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) + self.code_directory.size(); if (self.requirements) |req| { diff --git a/src/link/MachO/hasher.zig b/src/link/MachO/hasher.zig new file mode 100644 index 0000000000..d7bf6888b5 --- /dev/null +++ b/src/link/MachO/hasher.zig @@ -0,0 +1,60 @@ +const std = @import("std"); +const assert = std.debug.assert; +const fs = std.fs; +const mem = std.mem; + +const Allocator = mem.Allocator; +const ThreadPool = std.Thread.Pool; +const WaitGroup = std.Thread.WaitGroup; + +pub fn ParallelHasher(comptime Hasher: type) type { + const hash_size = Hasher.digest_length; + + return struct { + pub fn hash(self: @This(), gpa: Allocator, pool: *ThreadPool, file: fs.File, out: [][hash_size]u8, opts: struct { + chunk_size: u16 = 0x4000, + max_file_size: ?u64 = null, + }) !void { + _ = self; + + var wg: WaitGroup = .{}; + + const file_size = opts.max_file_size orelse try file.getEndPos(); + const total_num_chunks = 
mem.alignForward(u64, file_size, opts.chunk_size) / opts.chunk_size; + assert(out.len >= total_num_chunks); + + const buffer = try gpa.alloc(u8, opts.chunk_size * total_num_chunks); + defer gpa.free(buffer); + + const results = try gpa.alloc(fs.File.PReadError!usize, total_num_chunks); + defer gpa.free(results); + + { + wg.reset(); + defer wg.wait(); + + var i: usize = 0; + while (i < total_num_chunks) : (i += 1) { + const fstart = i * opts.chunk_size; + const fsize = if (fstart + opts.chunk_size > file_size) file_size - fstart else opts.chunk_size; + wg.start(); + try pool.spawn(worker, .{ file, fstart, buffer[fstart..][0..fsize], &out[i], &results[i], &wg }); + } + } + for (results) |result| _ = try result; + } + + fn worker( + file: fs.File, + fstart: usize, + buffer: []u8, + out: *[hash_size]u8, + err: *fs.File.PReadError!usize, + wg: *WaitGroup, + ) void { + defer wg.finish(); + err.* = file.preadAll(buffer, fstart); + Hasher.hash(buffer, out, .{}); + } + }; +} |
