| | | |
|---|---|---|
| author | Andrew Kelley <andrewrk@noreply.codeberg.org> | 2025-12-27 14:10:46 +0100 |
| committer | Andrew Kelley <andrewrk@noreply.codeberg.org> | 2025-12-27 14:10:46 +0100 |
| commit | e55e6b5528bb2f01de242fcf32b172e244e98e74 (patch) | |
| tree | 3a5eb3193d3d192c54ab0c2b7295a7f21861c27e /src/link | |
| parent | c3f2de5e519926eb0029062fe8e782a6f9df9c05 (diff) | |
| parent | 60a1ba0a8f3517356fa2941462f002a7f580545b (diff) | |
| download | zig-e55e6b5528bb2f01de242fcf32b172e244e98e74.tar.gz | zig-e55e6b5528bb2f01de242fcf32b172e244e98e74.zip |
Merge pull request 'std: migrate all `fs` APIs to `Io`' (#30232) from std.Io-fs into master
Reviewed-on: https://codeberg.org/ziglang/zig/pulls/30232
Diffstat (limited to 'src/link')
31 files changed, 966 insertions, 733 deletions
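Every hunk below applies the same mechanical change: file-system calls that previously went through `std.fs.File` implicitly now go through `std.Io.File` and take an explicit `io: std.Io` handle (`comp.io` in the linker), with the positional operations renamed along the way (`preadAll` → `readPositionalAll`, `pwriteAll` → `writePositionalAll`, `setEndPos` → `setLength`, and `.mode` → `.permissions` in `createFile` options). A minimal before/after sketch of that pattern — `writeHeader` is a hypothetical helper, while the `Io.Dir`/`Io.File` calls are the ones this commit migrates to:

```zig
const std = @import("std");
const Io = std.Io;

// Hypothetical helper illustrating the migrated call shapes; every call
// that used to touch the file system implicitly now threads `io` through.
fn writeHeader(io: Io, dir: Io.Dir, sub_path: []const u8, header: []const u8, file_size: u64) !void {
    // before: dir.createFile(sub_path, .{ .truncate = false })
    const file = try dir.createFile(io, sub_path, .{ .truncate = false });
    // before: file.close()
    defer file.close(io);

    // before: file.setEndPos(file_size)
    try file.setLength(io, file_size);
    // before: file.pwriteAll(header, 0)
    try file.writePositionalAll(io, header, 0);
}
```

Error reporting is modernized in the same sweep: `diags.fail("...: {s}", .{@errorName(err)})` becomes the shorter `{t}` format specifier, which prints the error tag name directly.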
diff --git a/src/link/C.zig b/src/link/C.zig index ce48e85851..93e771ebfc 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -124,6 +124,7 @@ pub fn createEmpty( emit: Path, options: link.File.OpenOptions, ) !*C { + const io = comp.io; const target = &comp.root_mod.resolved_target.result; assert(target.ofmt == .c); const optimize_mode = comp.root_mod.optimize_mode; @@ -135,11 +136,11 @@ pub fn createEmpty( assert(!use_lld); assert(!use_llvm); - const file = try emit.root_dir.handle.createFile(emit.sub_path, .{ + const file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{ // Truncation is done on `flush`. .truncate = false, }); - errdefer file.close(); + errdefer file.close(io); const c_file = try arena.create(C); @@ -370,6 +371,7 @@ pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.P const comp = self.base.comp; const diags = &comp.link_diags; const gpa = comp.gpa; + const io = comp.io; const zcu = self.base.comp.zcu.?; const ip = &zcu.intern_pool; const pt: Zcu.PerThread = .activate(zcu, tid); @@ -507,8 +509,8 @@ pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.P }, self.getString(av_block.code)); const file = self.base.file.?; - file.setEndPos(f.file_size) catch |err| return diags.fail("failed to allocate file: {s}", .{@errorName(err)}); - var fw = file.writer(&.{}); + file.setLength(io, f.file_size) catch |err| return diags.fail("failed to allocate file: {t}", .{err}); + var fw = file.writer(io, &.{}); var w = &fw.interface; w.writeVecAll(f.all_buffers.items) catch |err| switch (err) { error.WriteFailed => return diags.fail("failed to write to '{f}': {s}", .{ @@ -763,6 +765,7 @@ pub fn flushEmitH(zcu: *Zcu) !void { if (true) return; // emit-h is regressed const emit_h = zcu.emit_h orelse return; + const io = zcu.comp.io; // We collect a list of buffers to write, and write them all at once with pwritev 😎 const num_buffers = emit_h.decl_table.count() + 1; @@ -790,14 +793,14 @@ pub fn flushEmitH(zcu: *Zcu) !void { } const directory = emit_h.loc.directory orelse zcu.comp.local_cache_directory; - const file = try directory.handle.createFile(emit_h.loc.basename, .{ + const file = try directory.handle.createFile(io, emit_h.loc.basename, .{ // We set the end position explicitly below; by not truncating the file, we possibly // make it easier on the file system by doing 1 reallocation instead of two. 
.truncate = false, }); - defer file.close(); + defer file.close(io); - try file.setEndPos(file_size); + try file.setLength(io, file_size); try file.pwritevAll(all_buffers.items, 0); } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index f33e0ccdea..03b757f5b4 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1,3 +1,23 @@ +const Coff = @This(); + +const builtin = @import("builtin"); +const native_endian = builtin.cpu.arch.endian(); + +const std = @import("std"); +const Io = std.Io; +const assert = std.debug.assert; +const log = std.log.scoped(.link); + +const codegen = @import("../codegen.zig"); +const Compilation = @import("../Compilation.zig"); +const InternPool = @import("../InternPool.zig"); +const link = @import("../link.zig"); +const MappedFile = @import("MappedFile.zig"); +const target_util = @import("../target.zig"); +const Type = @import("../Type.zig"); +const Value = @import("../Value.zig"); +const Zcu = @import("../Zcu.zig"); + base: link.File, mf: MappedFile, nodes: std.MultiArrayList(Node), @@ -631,12 +651,14 @@ fn create( else => return error.UnsupportedCOFFArchitecture, }; + const io = comp.io; + const coff = try arena.create(Coff); - const file = try path.root_dir.handle.adaptToNewApi().createFile(comp.io, path.sub_path, .{ + const file = try path.root_dir.handle.createFile(io, path.sub_path, .{ .read = true, - .mode = link.File.determineMode(comp.config.output_mode, comp.config.link_mode), + .permissions = link.File.determinePermissions(comp.config.output_mode, comp.config.link_mode), }); - errdefer file.close(comp.io); + errdefer file.close(io); coff.* = .{ .base = .{ .tag = .coff2, @@ -644,14 +666,14 @@ fn create( .comp = comp, .emit = path, - .file = .adaptFromNewApi(file), + .file = file, .gc_sections = false, .print_gc_sections = false, .build_id = .none, .allow_shlib_undefined = false, .stack_size = 0, }, - .mf = try .init(file, comp.gpa), + .mf = try .init(file, comp.gpa, io), .nodes = .empty, .import_table = .{ .ni = .none, @@ -1727,22 +1749,20 @@ pub fn flush( const comp = coff.base.comp; if (comp.compiler_rt_dyn_lib) |crt_file| { const gpa = comp.gpa; + const io = comp.io; const compiler_rt_sub_path = try std.fs.path.join(gpa, &.{ std.fs.path.dirname(coff.base.emit.sub_path) orelse "", std.fs.path.basename(crt_file.full_object_path.sub_path), }); defer gpa.free(compiler_rt_sub_path); - crt_file.full_object_path.root_dir.handle.copyFile( + std.Io.Dir.copyFile( + crt_file.full_object_path.root_dir.handle, crt_file.full_object_path.sub_path, coff.base.emit.root_dir.handle, compiler_rt_sub_path, + io, .{}, - ) catch |err| switch (err) { - else => |e| return comp.link_diags.fail("Copy '{s}' failed: {s}", .{ - compiler_rt_sub_path, - @errorName(e), - }), - }; + ) catch |err| return comp.link_diags.fail("copy '{s}' failed: {t}", .{ compiler_rt_sub_path, err }); } } @@ -2358,10 +2378,16 @@ pub fn deleteExport(coff: *Coff, exported: Zcu.Exported, name: InternPool.NullTe _ = name; } -pub fn dump(coff: *Coff, tid: Zcu.PerThread.Id) void { - const w, _ = std.debug.lockStderrWriter(&.{}); - defer std.debug.unlockStderrWriter(); - coff.printNode(tid, w, .root, 0) catch {}; +pub fn dump(coff: *Coff, tid: Zcu.PerThread.Id) Io.Cancelable!void { + const comp = coff.base.comp; + const io = comp.io; + var buffer: [512]u8 = undefined; + const stderr = try io.lockStderr(&buffer, null); + defer io.unlockStderr(); + const w = &stderr.file_writer.interface; + coff.printNode(tid, w, .root, 0) catch |err| switch (err) { + error.WriteFailed => return stderr.err.?, + }; } pub fn 
printNode( @@ -2459,19 +2485,3 @@ pub fn printNode( } } } - -const assert = std.debug.assert; -const builtin = @import("builtin"); -const codegen = @import("../codegen.zig"); -const Compilation = @import("../Compilation.zig"); -const Coff = @This(); -const InternPool = @import("../InternPool.zig"); -const link = @import("../link.zig"); -const log = std.log.scoped(.link); -const MappedFile = @import("MappedFile.zig"); -const native_endian = builtin.cpu.arch.endian(); -const std = @import("std"); -const target_util = @import("../target.zig"); -const Type = @import("../Type.zig"); -const Value = @import("../Value.zig"); -const Zcu = @import("../Zcu.zig"); diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 95f4ca8bbd..0fda09e385 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -1,3 +1,24 @@ +const Dwarf = @This(); + +const std = @import("std"); +const Io = std.Io; +const Allocator = std.mem.Allocator; +const DW = std.dwarf; +const Zir = std.zig.Zir; +const assert = std.debug.assert; +const log = std.log.scoped(.dwarf); +const Writer = std.Io.Writer; + +const InternPool = @import("../InternPool.zig"); +const Module = @import("../Package.zig").Module; +const Type = @import("../Type.zig"); +const Value = @import("../Value.zig"); +const Zcu = @import("../Zcu.zig"); +const codegen = @import("../codegen.zig"); +const dev = @import("../dev.zig"); +const link = @import("../link.zig"); +const target_info = @import("../target.zig"); + gpa: Allocator, bin_file: *link.File, format: DW.Format, @@ -27,18 +48,18 @@ pub const UpdateError = error{ EndOfStream, Underflow, UnexpectedEndOfFile, + NonResizable, } || codegen.GenerateSymbolError || - std.fs.File.OpenError || - std.fs.File.SetEndPosError || - std.fs.File.CopyRangeError || - std.fs.File.PReadError || - std.fs.File.PWriteError; + Io.File.OpenError || + Io.File.LengthError || + Io.File.ReadPositionalError || + Io.File.WritePositionalError; pub const FlushError = UpdateError; pub const RelocError = - std.fs.File.PWriteError; + Io.File.PWriteError; pub const AddressSize = enum(u8) { @"32" = 4, @@ -135,11 +156,14 @@ const DebugInfo = struct { fn declAbbrevCode(debug_info: *DebugInfo, unit: Unit.Index, entry: Entry.Index) !AbbrevCode { const dwarf: *Dwarf = @fieldParentPtr("debug_info", debug_info); + const comp = dwarf.bin_file.comp; + const io = comp.io; const unit_ptr = debug_info.section.getUnit(unit); const entry_ptr = unit_ptr.getEntry(entry); if (entry_ptr.len < AbbrevCode.decl_bytes) return .null; var abbrev_code_buf: [AbbrevCode.decl_bytes]u8 = undefined; - if (try dwarf.getFile().?.preadAll( + if (try dwarf.getFile().?.readPositionalAll( + io, &abbrev_code_buf, debug_info.section.off(dwarf) + unit_ptr.off + unit_ptr.header_len + entry_ptr.off, ) != abbrev_code_buf.len) return error.InputOutput; @@ -619,13 +643,10 @@ const Unit = struct { fn move(unit: *Unit, sec: *Section, dwarf: *Dwarf, new_off: u32) UpdateError!void { if (unit.off == new_off) return; - const n = try dwarf.getFile().?.copyRangeAll( - sec.off(dwarf) + unit.off, - dwarf.getFile().?, - sec.off(dwarf) + new_off, - unit.len, - ); - if (n != unit.len) return error.InputOutput; + const comp = dwarf.bin_file.comp; + const io = comp.io; + const file = dwarf.getFile().?; + try link.File.copyRangeAll2(io, file, file, sec.off(dwarf) + unit.off, sec.off(dwarf) + new_off, unit.len); unit.off = new_off; } @@ -655,10 +676,14 @@ const Unit = struct { fn replaceHeader(unit: *Unit, sec: *Section, dwarf: *Dwarf, contents: []const u8) UpdateError!void { assert(contents.len == 
unit.header_len); - try dwarf.getFile().?.pwriteAll(contents, sec.off(dwarf) + unit.off); + const comp = dwarf.bin_file.comp; + const io = comp.io; + try dwarf.getFile().?.writePositionalAll(io, contents, sec.off(dwarf) + unit.off); } fn writeTrailer(unit: *Unit, sec: *Section, dwarf: *Dwarf) UpdateError!void { + const comp = dwarf.bin_file.comp; + const io = comp.io; const start = unit.off + unit.header_len + if (unit.last.unwrap()) |last_entry| end: { const last_entry_ptr = unit.getEntry(last_entry); break :end last_entry_ptr.off + last_entry_ptr.len; @@ -688,7 +713,7 @@ const Unit = struct { assert(fw.end == extended_op_bytes + op_len_bytes); fw.writeByte(DW.LNE.padding) catch unreachable; assert(fw.end >= unit.trailer_len and fw.end <= len); - return dwarf.getFile().?.pwriteAll(fw.buffered(), sec.off(dwarf) + start); + return dwarf.getFile().?.writePositionalAll(io, fw.buffered(), sec.off(dwarf) + start); } var trailer_aw: Writer.Allocating = try .initCapacity(dwarf.gpa, len); defer trailer_aw.deinit(); @@ -748,7 +773,7 @@ const Unit = struct { assert(tw.end == unit.trailer_len); tw.splatByteAll(fill_byte, len - unit.trailer_len) catch unreachable; assert(tw.end == len); - try dwarf.getFile().?.pwriteAll(trailer_aw.written(), sec.off(dwarf) + start); + try dwarf.getFile().?.writePositionalAll(io, trailer_aw.written(), sec.off(dwarf) + start); } fn resolveRelocs(unit: *Unit, sec: *Section, dwarf: *Dwarf) RelocError!void { @@ -834,6 +859,8 @@ const Entry = struct { dwarf: *Dwarf, ) (UpdateError || Writer.Error)!void { assert(entry.len > 0); + const comp = dwarf.bin_file.comp; + const io = comp.io; const start = entry.off + entry.len; if (sec == &dwarf.debug_frame.section) { const len = if (entry.next.unwrap()) |next_entry| @@ -843,11 +870,11 @@ const Entry = struct { var unit_len_buf: [8]u8 = undefined; const unit_len_bytes = unit_len_buf[0..dwarf.sectionOffsetBytes()]; dwarf.writeInt(unit_len_bytes, len - dwarf.unitLengthBytes()); - try dwarf.getFile().?.pwriteAll(unit_len_bytes, sec.off(dwarf) + unit.off + unit.header_len + entry.off); + try dwarf.getFile().?.writePositionalAll(io, unit_len_bytes, sec.off(dwarf) + unit.off + unit.header_len + entry.off); const buf = try dwarf.gpa.alloc(u8, len - entry.len); defer dwarf.gpa.free(buf); @memset(buf, DW.CFA.nop); - try dwarf.getFile().?.pwriteAll(buf, sec.off(dwarf) + unit.off + unit.header_len + start); + try dwarf.getFile().?.writePositionalAll(io, buf, sec.off(dwarf) + unit.off + unit.header_len + start); return; } const len = unit.getEntry(entry.next.unwrap() orelse return).off - start; @@ -906,7 +933,7 @@ const Entry = struct { }, } else assert(!sec.pad_entries_to_ideal and len == 0); assert(fw.end <= len); - try dwarf.getFile().?.pwriteAll(fw.buffered(), sec.off(dwarf) + unit.off + unit.header_len + start); + try dwarf.getFile().?.writePositionalAll(io, fw.buffered(), sec.off(dwarf) + unit.off + unit.header_len + start); } fn resize( @@ -949,11 +976,13 @@ const Entry = struct { fn replace(entry_ptr: *Entry, unit: *Unit, sec: *Section, dwarf: *Dwarf, contents: []const u8) UpdateError!void { assert(contents.len == entry_ptr.len); - try dwarf.getFile().?.pwriteAll(contents, sec.off(dwarf) + unit.off + unit.header_len + entry_ptr.off); + const comp = dwarf.bin_file.comp; + const io = comp.io; + try dwarf.getFile().?.writePositionalAll(io, contents, sec.off(dwarf) + unit.off + unit.header_len + entry_ptr.off); if (false) { const buf = try dwarf.gpa.alloc(u8, sec.len); defer dwarf.gpa.free(buf); - _ = try dwarf.getFile().?.preadAll(buf, 
sec.off(dwarf)); + _ = try dwarf.getFile().?.readPositionalAll(io, buf, sec.off(dwarf)); log.info("Section{{ .first = {}, .last = {}, .off = 0x{x}, .len = 0x{x} }}", .{ @intFromEnum(sec.first), @intFromEnum(sec.last), @@ -4682,6 +4711,8 @@ fn updateContainerTypeWriterError( } pub fn updateLineNumber(dwarf: *Dwarf, zcu: *Zcu, zir_index: InternPool.TrackedInst.Index) UpdateError!void { + const comp = dwarf.bin_file.comp; + const io = comp.io; const ip = &zcu.intern_pool; const inst_info = zir_index.resolveFull(ip).?; @@ -4701,7 +4732,7 @@ pub fn updateLineNumber(dwarf: *Dwarf, zcu: *Zcu, zir_index: InternPool.TrackedI const unit = dwarf.debug_info.section.getUnit(dwarf.getUnitIfExists(file.mod.?) orelse return); const entry = unit.getEntry(dwarf.decls.get(zir_index) orelse return); - try dwarf.getFile().?.pwriteAll(&line_buf, dwarf.debug_info.section.off(dwarf) + unit.off + unit.header_len + entry.off + DebugInfo.declEntryLineOff(dwarf)); + try dwarf.getFile().?.writePositionalAll(io, &line_buf, dwarf.debug_info.section.off(dwarf) + unit.off + unit.header_len + entry.off + DebugInfo.declEntryLineOff(dwarf)); } pub fn freeNav(dwarf: *Dwarf, nav_index: InternPool.Nav.Index) void { @@ -4738,6 +4769,8 @@ pub fn flush(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void { fn flushWriterError(dwarf: *Dwarf, pt: Zcu.PerThread) (FlushError || Writer.Error)!void { const zcu = pt.zcu; const ip = &zcu.intern_pool; + const comp = dwarf.bin_file.comp; + const io = comp.io; { const type_gop = try dwarf.types.getOrPut(dwarf.gpa, .anyerror_type); @@ -4957,7 +4990,7 @@ fn flushWriterError(dwarf: *Dwarf, pt: Zcu.PerThread) (FlushError || Writer.Erro if (dwarf.debug_str.section.dirty) { const contents = dwarf.debug_str.contents.items; try dwarf.debug_str.section.resize(dwarf, contents.len); - try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_str.section.off(dwarf)); + try dwarf.getFile().?.writePositionalAll(io, contents, dwarf.debug_str.section.off(dwarf)); dwarf.debug_str.section.dirty = false; } if (dwarf.debug_line.section.dirty) { @@ -5069,7 +5102,7 @@ fn flushWriterError(dwarf: *Dwarf, pt: Zcu.PerThread) (FlushError || Writer.Erro if (dwarf.debug_line_str.section.dirty) { const contents = dwarf.debug_line_str.contents.items; try dwarf.debug_line_str.section.resize(dwarf, contents.len); - try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_line_str.section.off(dwarf)); + try dwarf.getFile().?.writePositionalAll(io, contents, dwarf.debug_line_str.section.off(dwarf)); dwarf.debug_line_str.section.dirty = false; } if (dwarf.debug_loclists.section.dirty) { @@ -6350,7 +6383,7 @@ const AbbrevCode = enum { }); }; -fn getFile(dwarf: *Dwarf) ?std.fs.File { +fn getFile(dwarf: *Dwarf) ?Io.File { if (dwarf.bin_file.cast(.macho)) |macho_file| if (macho_file.d_sym) |*d_sym| return d_sym.file; return dwarf.bin_file.file; } @@ -6391,9 +6424,11 @@ fn writeInt(dwarf: *Dwarf, buf: []u8, int: u64) void { } fn resolveReloc(dwarf: *Dwarf, source: u64, target: u64, size: u32) RelocError!void { + const comp = dwarf.bin_file.comp; + const io = comp.io; var buf: [8]u8 = undefined; dwarf.writeInt(buf[0..size], target); - try dwarf.getFile().?.pwriteAll(buf[0..size], source); + try dwarf.getFile().?.writePositionalAll(io, buf[0..size], source); } fn unitLengthBytes(dwarf: *Dwarf) u32 { @@ -6429,21 +6464,3 @@ const force_incremental = false; inline fn incremental(dwarf: Dwarf) bool { return force_incremental or dwarf.bin_file.comp.config.incremental; } - -const Allocator = std.mem.Allocator; -const DW = std.dwarf; -const Dwarf = 
@This(); -const InternPool = @import("../InternPool.zig"); -const Module = @import("../Package.zig").Module; -const Type = @import("../Type.zig"); -const Value = @import("../Value.zig"); -const Zcu = @import("../Zcu.zig"); -const Zir = std.zig.Zir; -const assert = std.debug.assert; -const codegen = @import("../codegen.zig"); -const dev = @import("../dev.zig"); -const link = @import("../link.zig"); -const log = std.log.scoped(.dwarf); -const std = @import("std"); -const target_info = @import("../target.zig"); -const Writer = std.Io.Writer; diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 69acbe034b..85f37f88ce 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -313,12 +313,14 @@ pub fn createEmpty( const is_obj = output_mode == .Obj; const is_obj_or_ar = is_obj or (output_mode == .Lib and link_mode == .static); + const io = comp.io; + // What path should this ELF linker code output to? const sub_path = emit.sub_path; - self.base.file = try emit.root_dir.handle.createFile(sub_path, .{ + self.base.file = try emit.root_dir.handle.createFile(io, sub_path, .{ .truncate = true, .read = true, - .mode = link.File.determineMode(output_mode, link_mode), + .permissions = link.File.determinePermissions(output_mode, link_mode), }); const gpa = comp.gpa; @@ -406,10 +408,12 @@ pub fn open( } pub fn deinit(self: *Elf) void { - const gpa = self.base.comp.gpa; + const comp = self.base.comp; + const gpa = comp.gpa; + const io = comp.io; for (self.file_handles.items) |fh| { - fh.close(); + fh.close(io); } self.file_handles.deinit(gpa); @@ -483,6 +487,8 @@ pub fn getUavVAddr(self: *Elf, uav: InternPool.Index, reloc_info: link.File.Relo /// Returns end pos of collision, if any. fn detectAllocCollision(self: *Elf, start: u64, size: u64) !?u64 { + const comp = self.base.comp; + const io = comp.io; const small_ptr = self.ptr_width == .p32; const ehdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Ehdr) else @sizeOf(elf.Elf64_Ehdr); if (start < ehdr_size) @@ -522,7 +528,7 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) !?u64 { } } - if (at_end) try self.base.file.?.setEndPos(end); + if (at_end) try self.base.file.?.setLength(io, end); return null; } @@ -552,6 +558,8 @@ pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u64) !u64 { } pub fn growSection(self: *Elf, shdr_index: u32, needed_size: u64, min_alignment: u64) !void { + const comp = self.base.comp; + const io = comp.io; const shdr = &self.sections.items(.shdr)[shdr_index]; if (shdr.sh_type != elf.SHT_NOBITS) { @@ -574,18 +582,11 @@ pub fn growSection(self: *Elf, shdr_index: u32, needed_size: u64, min_alignment: new_offset, }); - const amt = try self.base.file.?.copyRangeAll( - shdr.sh_offset, - self.base.file.?, - new_offset, - existing_size, - ); - // TODO figure out what to about this error condition - how to communicate it up. 
- if (amt != existing_size) return error.InputOutput; + try self.base.copyRangeAll(shdr.sh_offset, new_offset, existing_size); shdr.sh_offset = new_offset; } else if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) { - try self.base.file.?.setEndPos(shdr.sh_offset + needed_size); + try self.base.file.?.setLength(io, shdr.sh_offset + needed_size); } } @@ -737,8 +738,8 @@ pub fn loadInput(self: *Elf, input: link.Input) !void { .res => unreachable, .dso_exact => @panic("TODO"), .object => |obj| try parseObject(self, obj), - .archive => |obj| try parseArchive(gpa, diags, &self.file_handles, &self.files, target, debug_fmt_strip, default_sym_version, &self.objects, obj, is_static_lib), - .dso => |dso| try parseDso(gpa, diags, dso, &self.shared_objects, &self.files, target), + .archive => |obj| try parseArchive(gpa, io, diags, &self.file_handles, &self.files, target, debug_fmt_strip, default_sym_version, &self.objects, obj, is_static_lib), + .dso => |dso| try parseDso(gpa, io, diags, dso, &self.shared_objects, &self.files, target), } } @@ -747,9 +748,10 @@ pub fn flush(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std defer tracy.end(); const comp = self.base.comp; + const io = comp.io; const diags = &comp.link_diags; - if (comp.verbose_link) Compilation.dump_argv(self.dump_argv_list.items); + if (comp.verbose_link) try Compilation.dumpArgv(io, self.dump_argv_list.items); const sub_prog_node = prog_node.start("ELF Flush", 0); defer sub_prog_node.end(); @@ -757,7 +759,7 @@ pub fn flush(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std return flushInner(self, arena, tid) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.LinkFailure => return error.LinkFailure, - else => |e| return diags.fail("ELF flush failed: {s}", .{@errorName(e)}), + else => |e| return diags.fail("ELF flush failed: {t}", .{e}), }; } @@ -1047,9 +1049,11 @@ fn dumpArgvInit(self: *Elf, arena: Allocator) !void { } pub fn openParseObjectReportingFailure(self: *Elf, path: Path) void { - const diags = &self.base.comp.link_diags; - const obj = link.openObject(path, false, false) catch |err| { - switch (diags.failParse(path, "failed to open object: {s}", .{@errorName(err)})) { + const comp = self.base.comp; + const io = comp.io; + const diags = &comp.link_diags; + const obj = link.openObject(io, path, false, false) catch |err| { + switch (diags.failParse(path, "failed to open object: {t}", .{err})) { error.LinkFailure => return, } }; @@ -1057,10 +1061,11 @@ pub fn openParseObjectReportingFailure(self: *Elf, path: Path) void { } fn parseObjectReportingFailure(self: *Elf, obj: link.Input.Object) void { - const diags = &self.base.comp.link_diags; + const comp = self.base.comp; + const diags = &comp.link_diags; self.parseObject(obj) catch |err| switch (err) { error.LinkFailure => return, // already reported - else => |e| diags.addParseError(obj.path, "failed to parse object: {s}", .{@errorName(e)}), + else => |e| diags.addParseError(obj.path, "failed to parse object: {t}", .{e}), }; } @@ -1068,10 +1073,12 @@ fn parseObject(self: *Elf, obj: link.Input.Object) !void { const tracy = trace(@src()); defer tracy.end(); - const gpa = self.base.comp.gpa; - const diags = &self.base.comp.link_diags; - const target = &self.base.comp.root_mod.resolved_target.result; - const debug_fmt_strip = self.base.comp.config.debug_format == .strip; + const comp = self.base.comp; + const io = comp.io; + const gpa = comp.gpa; + const diags = &comp.link_diags; + const target = 
&comp.root_mod.resolved_target.result; + const debug_fmt_strip = comp.config.debug_format == .strip; const default_sym_version = self.default_sym_version; const file_handles = &self.file_handles; @@ -1090,14 +1097,15 @@ fn parseObject(self: *Elf, obj: link.Input.Object) !void { try self.objects.append(gpa, index); const object = self.file(index).?.object; - try object.parseCommon(gpa, diags, obj.path, handle, target); + try object.parseCommon(gpa, io, diags, obj.path, handle, target); if (!self.base.isStaticLib()) { - try object.parse(gpa, diags, obj.path, handle, target, debug_fmt_strip, default_sym_version); + try object.parse(gpa, io, diags, obj.path, handle, target, debug_fmt_strip, default_sym_version); } } fn parseArchive( gpa: Allocator, + io: Io, diags: *Diags, file_handles: *std.ArrayList(File.Handle), files: *std.MultiArrayList(File.Entry), @@ -1112,7 +1120,7 @@ fn parseArchive( defer tracy.end(); const fh = try addFileHandle(gpa, file_handles, obj.file); - var archive = try Archive.parse(gpa, diags, file_handles, obj.path, fh); + var archive = try Archive.parse(gpa, io, diags, file_handles, obj.path, fh); defer archive.deinit(gpa); const init_alive = if (is_static_lib) true else obj.must_link; @@ -1123,15 +1131,16 @@ fn parseArchive( const object = &files.items(.data)[index].object; object.index = index; object.alive = init_alive; - try object.parseCommon(gpa, diags, obj.path, obj.file, target); + try object.parseCommon(gpa, io, diags, obj.path, obj.file, target); if (!is_static_lib) - try object.parse(gpa, diags, obj.path, obj.file, target, debug_fmt_strip, default_sym_version); + try object.parse(gpa, io, diags, obj.path, obj.file, target, debug_fmt_strip, default_sym_version); try objects.append(gpa, index); } } fn parseDso( gpa: Allocator, + io: Io, diags: *Diags, dso: link.Input.Dso, shared_objects: *std.StringArrayHashMapUnmanaged(File.Index), @@ -1143,8 +1152,8 @@ fn parseDso( const handle = dso.file; - const stat = Stat.fromFs(try handle.stat()); - var header = try SharedObject.parseHeader(gpa, diags, dso.path, handle, stat, target); + const stat = Stat.fromFs(try handle.stat(io)); + var header = try SharedObject.parseHeader(gpa, io, diags, dso.path, handle, stat, target); defer header.deinit(gpa); const soname = header.soname() orelse dso.path.basename(); @@ -1158,7 +1167,7 @@ fn parseDso( gop.value_ptr.* = index; - var parsed = try SharedObject.parse(gpa, &header, handle); + var parsed = try SharedObject.parse(gpa, io, &header, handle); errdefer parsed.deinit(gpa); const duped_path: Path = .{ @@ -2888,13 +2897,7 @@ pub fn allocateAllocSections(self: *Elf) !void { if (shdr.sh_offset > 0) { // Get size actually commited to the output file. 
const existing_size = self.sectionSize(shndx); - const amt = try self.base.file.?.copyRangeAll( - shdr.sh_offset, - self.base.file.?, - new_offset, - existing_size, - ); - if (amt != existing_size) return error.InputOutput; + try self.base.copyRangeAll(shdr.sh_offset, new_offset, existing_size); } shdr.sh_offset = new_offset; @@ -2930,13 +2933,7 @@ pub fn allocateNonAllocSections(self: *Elf) !void { if (shdr.sh_offset > 0) { const existing_size = self.sectionSize(@intCast(shndx)); - const amt = try self.base.file.?.copyRangeAll( - shdr.sh_offset, - self.base.file.?, - new_offset, - existing_size, - ); - if (amt != existing_size) return error.InputOutput; + try self.base.copyRangeAll(shdr.sh_offset, new_offset, existing_size); } shdr.sh_offset = new_offset; @@ -3649,7 +3646,7 @@ fn fileLookup(files: std.MultiArrayList(File.Entry), index: File.Index, zig_obje pub fn addFileHandle( gpa: Allocator, file_handles: *std.ArrayList(File.Handle), - handle: fs.File, + handle: Io.File, ) Allocator.Error!File.HandleIndex { try file_handles.append(gpa, handle); return @intCast(file_handles.items.len - 1); @@ -4066,10 +4063,10 @@ fn fmtDumpState(self: *Elf, writer: *std.Io.Writer) std.Io.Writer.Error!void { } /// Caller owns the memory. -pub fn preadAllAlloc(allocator: Allocator, handle: fs.File, offset: u64, size: u64) ![]u8 { +pub fn preadAllAlloc(allocator: Allocator, io: Io, io_file: Io.File, offset: u64, size: u64) ![]u8 { const buffer = try allocator.alloc(u8, math.cast(usize, size) orelse return error.Overflow); errdefer allocator.free(buffer); - const amt = try handle.preadAll(buffer, offset); + const amt = try io_file.readPositionalAll(io, buffer, offset); if (amt != size) return error.InputOutput; return buffer; } @@ -4435,16 +4432,17 @@ pub fn stringTableLookup(strtab: []const u8, off: u32) [:0]const u8 { pub fn pwriteAll(elf_file: *Elf, bytes: []const u8, offset: u64) error{LinkFailure}!void { const comp = elf_file.base.comp; + const io = comp.io; const diags = &comp.link_diags; - elf_file.base.file.?.pwriteAll(bytes, offset) catch |err| { - return diags.fail("failed to write: {s}", .{@errorName(err)}); - }; + elf_file.base.file.?.writePositionalAll(io, bytes, offset) catch |err| + return diags.fail("failed to write: {t}", .{err}); } -pub fn setEndPos(elf_file: *Elf, length: u64) error{LinkFailure}!void { +pub fn setLength(elf_file: *Elf, length: u64) error{LinkFailure}!void { const comp = elf_file.base.comp; + const io = comp.io; const diags = &comp.link_diags; - elf_file.base.file.?.setEndPos(length) catch |err| { + elf_file.base.file.?.setLength(io, length) catch |err| { return diags.fail("failed to set file end pos: {s}", .{@errorName(err)}); }; } @@ -4458,6 +4456,7 @@ pub fn cast(elf_file: *Elf, comptime T: type, x: anytype) error{LinkFailure}!T { } const std = @import("std"); +const Io = std.Io; const build_options = @import("build_options"); const builtin = @import("builtin"); const assert = std.debug.assert; diff --git a/src/link/Elf/Archive.zig b/src/link/Elf/Archive.zig index a9961bf8f9..14f2868956 100644 --- a/src/link/Elf/Archive.zig +++ b/src/link/Elf/Archive.zig @@ -1,3 +1,21 @@ +const Archive = @This(); + +const std = @import("std"); +const Io = std.Io; +const assert = std.debug.assert; +const elf = std.elf; +const fs = std.fs; +const log = std.log.scoped(.link); +const mem = std.mem; +const Path = std.Build.Cache.Path; +const Allocator = std.mem.Allocator; + +const Diags = @import("../../link.zig").Diags; +const Elf = @import("../Elf.zig"); +const File = 
@import("file.zig").File; +const Object = @import("Object.zig"); +const StringTable = @import("../StringTable.zig"); + objects: []const Object, /// '\n'-delimited strtab: []const u8, @@ -10,22 +28,23 @@ pub fn deinit(a: *Archive, gpa: Allocator) void { pub fn parse( gpa: Allocator, + io: Io, diags: *Diags, file_handles: *const std.ArrayList(File.Handle), path: Path, handle_index: File.HandleIndex, ) !Archive { - const handle = file_handles.items[handle_index]; + const file = file_handles.items[handle_index]; var pos: usize = 0; { var magic_buffer: [elf.ARMAG.len]u8 = undefined; - const n = try handle.preadAll(&magic_buffer, pos); + const n = try file.readPositionalAll(io, &magic_buffer, pos); if (n != magic_buffer.len) return error.BadMagic; if (!mem.eql(u8, &magic_buffer, elf.ARMAG)) return error.BadMagic; pos += magic_buffer.len; } - const size = (try handle.stat()).size; + const size = (try file.stat(io)).size; var objects: std.ArrayList(Object) = .empty; defer objects.deinit(gpa); @@ -36,7 +55,7 @@ pub fn parse( while (pos < size) { var hdr: elf.ar_hdr = undefined; { - const n = try handle.preadAll(mem.asBytes(&hdr), pos); + const n = try file.readPositionalAll(io, mem.asBytes(&hdr), pos); if (n != @sizeOf(elf.ar_hdr)) return error.UnexpectedEndOfFile; } pos += @sizeOf(elf.ar_hdr); @@ -53,7 +72,7 @@ pub fn parse( if (hdr.isSymtab() or hdr.isSymtab64()) continue; if (hdr.isStrtab()) { try strtab.resize(gpa, obj_size); - const amt = try handle.preadAll(strtab.items, pos); + const amt = try file.readPositionalAll(io, strtab.items, pos); if (amt != obj_size) return error.InputOutput; continue; } @@ -120,7 +139,7 @@ pub fn setArHdr(opts: struct { @memset(mem.asBytes(&hdr), 0x20); { - var writer: std.Io.Writer = .fixed(&hdr.ar_name); + var writer: Io.Writer = .fixed(&hdr.ar_name); switch (opts.name) { .symtab => writer.print("{s}", .{elf.SYM64NAME}) catch unreachable, .strtab => writer.print("//", .{}) catch unreachable, @@ -133,7 +152,7 @@ pub fn setArHdr(opts: struct { hdr.ar_gid[0] = '0'; hdr.ar_mode[0] = '0'; { - var writer: std.Io.Writer = .fixed(&hdr.ar_size); + var writer: Io.Writer = .fixed(&hdr.ar_size); writer.print("{d}", .{opts.size}) catch unreachable; } hdr.ar_fmag = elf.ARFMAG.*; @@ -206,7 +225,7 @@ pub const ArSymtab = struct { ar: ArSymtab, elf_file: *Elf, - fn default(f: Format, writer: *std.Io.Writer) std.Io.Writer.Error!void { + fn default(f: Format, writer: *Io.Writer) Io.Writer.Error!void { const ar = f.ar; const elf_file = f.elf_file; for (ar.symtab.items, 0..) |entry, i| { @@ -261,7 +280,7 @@ pub const ArStrtab = struct { try writer.writeAll(ar.buffer.items); } - pub fn format(ar: ArStrtab, writer: *std.Io.Writer) std.Io.Writer.Error!void { + pub fn format(ar: ArStrtab, writer: *Io.Writer) Io.Writer.Error!void { try writer.print("{f}", .{std.ascii.hexEscape(ar.buffer.items, .lower)}); } }; @@ -277,19 +296,3 @@ pub const ArState = struct { /// Total size of the contributing object (excludes ar_hdr). 
size: u64 = 0, }; - -const std = @import("std"); -const assert = std.debug.assert; -const elf = std.elf; -const fs = std.fs; -const log = std.log.scoped(.link); -const mem = std.mem; -const Path = std.Build.Cache.Path; -const Allocator = std.mem.Allocator; - -const Diags = @import("../../link.zig").Diags; -const Archive = @This(); -const Elf = @import("../Elf.zig"); -const File = @import("file.zig").File; -const Object = @import("Object.zig"); -const StringTable = @import("../StringTable.zig"); diff --git a/src/link/Elf/AtomList.zig b/src/link/Elf/AtomList.zig index 8fdf555115..9350f1a276 100644 --- a/src/link/Elf/AtomList.zig +++ b/src/link/Elf/AtomList.zig @@ -90,7 +90,9 @@ pub fn allocate(list: *AtomList, elf_file: *Elf) !void { } pub fn write(list: AtomList, buffer: *std.Io.Writer.Allocating, undefs: anytype, elf_file: *Elf) !void { - const gpa = elf_file.base.comp.gpa; + const comp = elf_file.base.comp; + const gpa = comp.gpa; + const io = comp.io; const osec = elf_file.sections.items(.shdr)[list.output_section_index]; assert(osec.sh_type != elf.SHT_NOBITS); assert(!list.dirty); @@ -121,12 +123,14 @@ pub fn write(list: AtomList, buffer: *std.Io.Writer.Allocating, undefs: anytype, try atom_ptr.resolveRelocsAlloc(elf_file, out_code); } - try elf_file.base.file.?.pwriteAll(buffer.written(), list.offset(elf_file)); + try elf_file.base.file.?.writePositionalAll(io, buffer.written(), list.offset(elf_file)); buffer.clearRetainingCapacity(); } pub fn writeRelocatable(list: AtomList, buffer: *std.array_list.Managed(u8), elf_file: *Elf) !void { - const gpa = elf_file.base.comp.gpa; + const comp = elf_file.base.comp; + const gpa = comp.gpa; + const io = comp.io; const osec = elf_file.sections.items(.shdr)[list.output_section_index]; assert(osec.sh_type != elf.SHT_NOBITS); @@ -152,7 +156,7 @@ pub fn writeRelocatable(list: AtomList, buffer: *std.array_list.Managed(u8), elf @memcpy(out_code, code); } - try elf_file.base.file.?.pwriteAll(buffer.items, list.offset(elf_file)); + try elf_file.base.file.?.writePositionalAll(io, buffer.items, list.offset(elf_file)); buffer.clearRetainingCapacity(); } diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index d51a82b266..ebdd1f2098 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -1,3 +1,30 @@ +const Object = @This(); + +const std = @import("std"); +const Io = std.Io; +const assert = std.debug.assert; +const eh_frame = @import("eh_frame.zig"); +const elf = std.elf; +const fs = std.fs; +const log = std.log.scoped(.link); +const math = std.math; +const mem = std.mem; +const Path = std.Build.Cache.Path; +const Allocator = std.mem.Allocator; + +const Diags = @import("../../link.zig").Diags; +const Archive = @import("Archive.zig"); +const Atom = @import("Atom.zig"); +const AtomList = @import("AtomList.zig"); +const Cie = eh_frame.Cie; +const Elf = @import("../Elf.zig"); +const Fde = eh_frame.Fde; +const File = @import("file.zig").File; +const Merge = @import("Merge.zig"); +const Symbol = @import("Symbol.zig"); +const Alignment = Atom.Alignment; +const riscv = @import("../riscv.zig"); + archive: ?InArchive = null, /// Archive files cannot contain subdirectories, so only the basename is needed /// for output. However, the full path is kept for error reporting. @@ -65,10 +92,11 @@ pub fn deinit(self: *Object, gpa: Allocator) void { pub fn parse( self: *Object, gpa: Allocator, + io: Io, diags: *Diags, /// For error reporting purposes only. 
path: Path, - handle: fs.File, + handle: Io.File, target: *const std.Target, debug_fmt_strip: bool, default_sym_version: elf.Versym, @@ -78,7 +106,7 @@ pub fn parse( // Allocate atom index 0 to null atom try self.atoms.append(gpa, .{ .extra_index = try self.addAtomExtra(gpa, .{}) }); - try self.initAtoms(gpa, diags, path, handle, debug_fmt_strip, target); + try self.initAtoms(gpa, io, diags, path, handle, debug_fmt_strip, target); try self.initSymbols(gpa, default_sym_version); for (self.shdrs.items, 0..) |shdr, i| { @@ -87,7 +115,7 @@ pub fn parse( if ((target.cpu.arch == .x86_64 and shdr.sh_type == elf.SHT_X86_64_UNWIND) or mem.eql(u8, self.getString(atom_ptr.name_offset), ".eh_frame")) { - try self.parseEhFrame(gpa, handle, @intCast(i), target); + try self.parseEhFrame(gpa, io, handle, @intCast(i), target); } } } @@ -95,15 +123,16 @@ pub fn parse( pub fn parseCommon( self: *Object, gpa: Allocator, + io: Io, diags: *Diags, path: Path, - handle: fs.File, + handle: Io.File, target: *const std.Target, ) !void { const offset = if (self.archive) |ar| ar.offset else 0; - const file_size = (try handle.stat()).size; + const file_size = (try handle.stat(io)).size; - const header_buffer = try Elf.preadAllAlloc(gpa, handle, offset, @sizeOf(elf.Elf64_Ehdr)); + const header_buffer = try Elf.preadAllAlloc(gpa, io, handle, offset, @sizeOf(elf.Elf64_Ehdr)); defer gpa.free(header_buffer); self.header = @as(*align(1) const elf.Elf64_Ehdr, @ptrCast(header_buffer)).*; if (!mem.eql(u8, self.header.?.e_ident[0..4], elf.MAGIC)) { @@ -127,7 +156,7 @@ pub fn parseCommon( return diags.failParse(path, "corrupt header: section header table extends past the end of file", .{}); } - const shdrs_buffer = try Elf.preadAllAlloc(gpa, handle, offset + shoff, shsize); + const shdrs_buffer = try Elf.preadAllAlloc(gpa, io, handle, offset + shoff, shsize); defer gpa.free(shdrs_buffer); const shdrs = @as([*]align(1) const elf.Elf64_Shdr, @ptrCast(shdrs_buffer.ptr))[0..shnum]; try self.shdrs.appendUnalignedSlice(gpa, shdrs); @@ -140,7 +169,7 @@ pub fn parseCommon( } } - const shstrtab = try self.preadShdrContentsAlloc(gpa, handle, self.header.?.e_shstrndx); + const shstrtab = try self.preadShdrContentsAlloc(gpa, io, handle, self.header.?.e_shstrndx); defer gpa.free(shstrtab); for (self.shdrs.items) |shdr| { if (shdr.sh_name >= shstrtab.len) { @@ -158,7 +187,7 @@ pub fn parseCommon( const shdr = self.shdrs.items[index]; self.first_global = shdr.sh_info; - const raw_symtab = try self.preadShdrContentsAlloc(gpa, handle, index); + const raw_symtab = try self.preadShdrContentsAlloc(gpa, io, handle, index); defer gpa.free(raw_symtab); const nsyms = math.divExact(usize, raw_symtab.len, @sizeOf(elf.Elf64_Sym)) catch { return diags.failParse(path, "symbol table not evenly divisible", .{}); @@ -166,7 +195,7 @@ pub fn parseCommon( const symtab = @as([*]align(1) const elf.Elf64_Sym, @ptrCast(raw_symtab.ptr))[0..nsyms]; const strtab_bias = @as(u32, @intCast(self.strtab.items.len)); - const strtab = try self.preadShdrContentsAlloc(gpa, handle, shdr.sh_link); + const strtab = try self.preadShdrContentsAlloc(gpa, io, handle, shdr.sh_link); defer gpa.free(strtab); try self.strtab.appendSlice(gpa, strtab); @@ -262,9 +291,10 @@ pub fn validateEFlags( fn initAtoms( self: *Object, gpa: Allocator, + io: Io, diags: *Diags, path: Path, - handle: fs.File, + handle: Io.File, debug_fmt_strip: bool, target: *const std.Target, ) !void { @@ -297,7 +327,7 @@ fn initAtoms( }; const shndx: u32 = @intCast(i); - const group_raw_data = try 
self.preadShdrContentsAlloc(gpa, handle, shndx); + const group_raw_data = try self.preadShdrContentsAlloc(gpa, io, handle, shndx); defer gpa.free(group_raw_data); const group_nmembers = math.divExact(usize, group_raw_data.len, @sizeOf(u32)) catch { return diags.failParse(path, "corrupt section group: not evenly divisible ", .{}); @@ -338,7 +368,7 @@ fn initAtoms( const shndx: u32 = @intCast(i); if (self.skipShdr(shndx, debug_fmt_strip)) continue; const size, const alignment = if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) blk: { - const data = try self.preadShdrContentsAlloc(gpa, handle, shndx); + const data = try self.preadShdrContentsAlloc(gpa, io, handle, shndx); defer gpa.free(data); const chdr = @as(*align(1) const elf.Elf64_Chdr, @ptrCast(data.ptr)).*; break :blk .{ chdr.ch_size, Alignment.fromNonzeroByteUnits(chdr.ch_addralign) }; @@ -359,7 +389,7 @@ fn initAtoms( elf.SHT_REL, elf.SHT_RELA => { const atom_index = self.atoms_indexes.items[shdr.sh_info]; if (self.atom(atom_index)) |atom_ptr| { - const relocs = try self.preadRelocsAlloc(gpa, handle, @intCast(i)); + const relocs = try self.preadRelocsAlloc(gpa, io, handle, @intCast(i)); defer gpa.free(relocs); atom_ptr.relocs_section_index = @intCast(i); const rel_index: u32 = @intCast(self.relocs.items.len); @@ -421,7 +451,8 @@ fn initSymbols( fn parseEhFrame( self: *Object, gpa: Allocator, - handle: fs.File, + io: Io, + handle: Io.File, shndx: u32, target: *const std.Target, ) !void { @@ -430,12 +461,12 @@ fn parseEhFrame( else => {}, } else null; - const raw = try self.preadShdrContentsAlloc(gpa, handle, shndx); + const raw = try self.preadShdrContentsAlloc(gpa, io, handle, shndx); defer gpa.free(raw); const data_start: u32 = @intCast(self.eh_frame_data.items.len); try self.eh_frame_data.appendSlice(gpa, raw); const relocs = if (relocs_shndx) |index| - try self.preadRelocsAlloc(gpa, handle, index) + try self.preadRelocsAlloc(gpa, io, handle, index) else &[0]elf.Elf64_Rela{}; defer gpa.free(relocs); @@ -1095,13 +1126,18 @@ pub fn updateArSymtab(self: Object, ar_symtab: *Archive.ArSymtab, elf_file: *Elf } pub fn updateArSize(self: *Object, elf_file: *Elf) !void { + const comp = elf_file.base.comp; + const io = comp.io; self.output_ar_state.size = if (self.archive) |ar| ar.size else size: { const handle = elf_file.fileHandle(self.file_handle); - break :size (try handle.stat()).size; + break :size (try handle.stat(io)).size; }; } pub fn writeAr(self: Object, elf_file: *Elf, writer: anytype) !void { + const comp = elf_file.base.comp; + const gpa = comp.gpa; + const io = comp.io; const size = std.math.cast(usize, self.output_ar_state.size) orelse return error.Overflow; const offset: u64 = if (self.archive) |ar| ar.offset else 0; const name = fs.path.basename(self.path.sub_path); @@ -1114,10 +1150,9 @@ pub fn writeAr(self: Object, elf_file: *Elf, writer: anytype) !void { }); try writer.writeAll(mem.asBytes(&hdr)); const handle = elf_file.fileHandle(self.file_handle); - const gpa = elf_file.base.comp.gpa; const data = try gpa.alloc(u8, size); defer gpa.free(data); - const amt = try handle.preadAll(data, offset); + const amt = try handle.readPositionalAll(io, data, offset); if (amt != size) return error.InputOutput; try writer.writeAll(data); } @@ -1190,11 +1225,12 @@ pub fn writeSymtab(self: *Object, elf_file: *Elf) void { /// Caller owns the memory. 
pub fn codeDecompressAlloc(self: *Object, elf_file: *Elf, atom_index: Atom.Index) ![]u8 { const comp = elf_file.base.comp; + const io = comp.io; const gpa = comp.gpa; const atom_ptr = self.atom(atom_index).?; const shdr = atom_ptr.inputShdr(elf_file); const handle = elf_file.fileHandle(self.file_handle); - const data = try self.preadShdrContentsAlloc(gpa, handle, atom_ptr.input_section_index); + const data = try self.preadShdrContentsAlloc(gpa, io, handle, atom_ptr.input_section_index); defer if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) gpa.free(data); if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) { @@ -1310,18 +1346,18 @@ fn addString(self: *Object, gpa: Allocator, str: []const u8) !u32 { } /// Caller owns the memory. -fn preadShdrContentsAlloc(self: Object, gpa: Allocator, handle: fs.File, index: u32) ![]u8 { +fn preadShdrContentsAlloc(self: Object, gpa: Allocator, io: Io, handle: Io.File, index: u32) ![]u8 { assert(index < self.shdrs.items.len); const offset = if (self.archive) |ar| ar.offset else 0; const shdr = self.shdrs.items[index]; const sh_offset = math.cast(u64, shdr.sh_offset) orelse return error.Overflow; const sh_size = math.cast(u64, shdr.sh_size) orelse return error.Overflow; - return Elf.preadAllAlloc(gpa, handle, offset + sh_offset, sh_size); + return Elf.preadAllAlloc(gpa, io, handle, offset + sh_offset, sh_size); } /// Caller owns the memory. -fn preadRelocsAlloc(self: Object, gpa: Allocator, handle: fs.File, shndx: u32) ![]align(1) const elf.Elf64_Rela { - const raw = try self.preadShdrContentsAlloc(gpa, handle, shndx); +fn preadRelocsAlloc(self: Object, gpa: Allocator, io: Io, handle: Io.File, shndx: u32) ![]align(1) const elf.Elf64_Rela { + const raw = try self.preadShdrContentsAlloc(gpa, io, handle, shndx); const num = @divExact(raw.len, @sizeOf(elf.Elf64_Rela)); return @as([*]align(1) const elf.Elf64_Rela, @ptrCast(raw.ptr))[0..num]; } @@ -1552,29 +1588,3 @@ const InArchive = struct { offset: u64, size: u32, }; - -const Object = @This(); - -const std = @import("std"); -const assert = std.debug.assert; -const eh_frame = @import("eh_frame.zig"); -const elf = std.elf; -const fs = std.fs; -const log = std.log.scoped(.link); -const math = std.math; -const mem = std.mem; -const Path = std.Build.Cache.Path; -const Allocator = std.mem.Allocator; - -const Diags = @import("../../link.zig").Diags; -const Archive = @import("Archive.zig"); -const Atom = @import("Atom.zig"); -const AtomList = @import("AtomList.zig"); -const Cie = eh_frame.Cie; -const Elf = @import("../Elf.zig"); -const Fde = eh_frame.Fde; -const File = @import("file.zig").File; -const Merge = @import("Merge.zig"); -const Symbol = @import("Symbol.zig"); -const Alignment = Atom.Alignment; -const riscv = @import("../riscv.zig"); diff --git a/src/link/Elf/SharedObject.zig b/src/link/Elf/SharedObject.zig index 1e17aa34a8..c97d53a862 100644 --- a/src/link/Elf/SharedObject.zig +++ b/src/link/Elf/SharedObject.zig @@ -1,3 +1,20 @@ +const SharedObject = @This(); + +const std = @import("std"); +const Io = std.Io; +const assert = std.debug.assert; +const elf = std.elf; +const log = std.log.scoped(.elf); +const mem = std.mem; +const Path = std.Build.Cache.Path; +const Stat = std.Build.Cache.File.Stat; +const Allocator = mem.Allocator; + +const Elf = @import("../Elf.zig"); +const File = @import("file.zig").File; +const Symbol = @import("Symbol.zig"); +const Diags = @import("../../link.zig").Diags; + path: Path, index: File.Index, @@ -92,16 +109,17 @@ pub const Parsed = struct { pub fn parseHeader( gpa: Allocator, + io: Io, 
diags: *Diags, file_path: Path, - fs_file: std.fs.File, + file: Io.File, stat: Stat, target: *const std.Target, ) !Header { var ehdr: elf.Elf64_Ehdr = undefined; { const buf = mem.asBytes(&ehdr); - const amt = try fs_file.preadAll(buf, 0); + const amt = try file.readPositionalAll(io, buf, 0); if (amt != buf.len) return error.UnexpectedEndOfFile; } if (!mem.eql(u8, ehdr.e_ident[0..4], "\x7fELF")) return error.BadMagic; @@ -118,7 +136,7 @@ pub fn parseHeader( errdefer gpa.free(sections); { const buf = mem.sliceAsBytes(sections); - const amt = try fs_file.preadAll(buf, shoff); + const amt = try file.readPositionalAll(io, buf, shoff); if (amt != buf.len) return error.UnexpectedEndOfFile; } @@ -143,7 +161,7 @@ pub fn parseHeader( const dynamic_table = try gpa.alloc(elf.Elf64_Dyn, n); errdefer gpa.free(dynamic_table); const buf = mem.sliceAsBytes(dynamic_table); - const amt = try fs_file.preadAll(buf, shdr.sh_offset); + const amt = try file.readPositionalAll(io, buf, shdr.sh_offset); if (amt != buf.len) return error.UnexpectedEndOfFile; break :dt dynamic_table; } else &.{}; @@ -158,7 +176,7 @@ pub fn parseHeader( const strtab_shdr = sections[dynsym_shdr.sh_link]; const n = std.math.cast(usize, strtab_shdr.sh_size) orelse return error.Overflow; const buf = try strtab.addManyAsSlice(gpa, n); - const amt = try fs_file.preadAll(buf, strtab_shdr.sh_offset); + const amt = try file.readPositionalAll(io, buf, strtab_shdr.sh_offset); if (amt != buf.len) return error.UnexpectedEndOfFile; } @@ -190,9 +208,10 @@ pub fn parseHeader( pub fn parse( gpa: Allocator, + io: Io, /// Moves resources from header. Caller may unconditionally deinit. header: *Header, - fs_file: std.fs.File, + file: Io.File, ) !Parsed { const symtab = if (header.dynsym_sect_index) |index| st: { const shdr = header.sections[index]; @@ -200,7 +219,7 @@ pub fn parse( const symtab = try gpa.alloc(elf.Elf64_Sym, n); errdefer gpa.free(symtab); const buf = mem.sliceAsBytes(symtab); - const amt = try fs_file.preadAll(buf, shdr.sh_offset); + const amt = try file.readPositionalAll(io, buf, shdr.sh_offset); if (amt != buf.len) return error.UnexpectedEndOfFile; break :st symtab; } else &.{}; @@ -211,7 +230,7 @@ pub fn parse( if (header.verdef_sect_index) |shndx| { const shdr = header.sections[shndx]; - const verdefs = try Elf.preadAllAlloc(gpa, fs_file, shdr.sh_offset, shdr.sh_size); + const verdefs = try Elf.preadAllAlloc(gpa, io, file, shdr.sh_offset, shdr.sh_size); defer gpa.free(verdefs); var offset: u32 = 0; @@ -237,7 +256,7 @@ pub fn parse( const versyms = try gpa.alloc(elf.Versym, symtab.len); errdefer gpa.free(versyms); const buf = mem.sliceAsBytes(versyms); - const amt = try fs_file.preadAll(buf, shdr.sh_offset); + const amt = try file.readPositionalAll(io, buf, shdr.sh_offset); if (amt != buf.len) return error.UnexpectedEndOfFile; break :vs versyms; } else &.{}; @@ -534,19 +553,3 @@ const Format = struct { } } }; - -const SharedObject = @This(); - -const std = @import("std"); -const assert = std.debug.assert; -const elf = std.elf; -const log = std.log.scoped(.elf); -const mem = std.mem; -const Path = std.Build.Cache.Path; -const Stat = std.Build.Cache.File.Stat; -const Allocator = mem.Allocator; - -const Elf = @import("../Elf.zig"); -const File = @import("file.zig").File; -const Symbol = @import("Symbol.zig"); -const Diags = @import("../../link.zig").Diags; diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 1450e3ab92..588b4e3fc3 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -740,7 
+740,9 @@ pub fn checkDuplicates(self: *ZigObject, dupes: anytype, elf_file: *Elf) error{O /// We need this so that we can write to an archive. /// TODO implement writing ZigObject data directly to a buffer instead. pub fn readFileContents(self: *ZigObject, elf_file: *Elf) !void { - const gpa = elf_file.base.comp.gpa; + const comp = elf_file.base.comp; + const gpa = comp.gpa; + const io = comp.io; const shsize: u64 = switch (elf_file.ptr_width) { .p32 => @sizeOf(elf.Elf32_Shdr), .p64 => @sizeOf(elf.Elf64_Shdr), @@ -753,7 +755,7 @@ pub fn readFileContents(self: *ZigObject, elf_file: *Elf) !void { const size = std.math.cast(usize, end_pos) orelse return error.Overflow; try self.data.resize(gpa, size); - const amt = try elf_file.base.file.?.preadAll(self.data.items, 0); + const amt = try elf_file.base.file.?.readPositionalAll(io, self.data.items, 0); if (amt != size) return error.InputOutput; } @@ -901,13 +903,15 @@ pub fn writeSymtab(self: ZigObject, elf_file: *Elf) void { /// Returns atom's code. /// Caller owns the memory. pub fn codeAlloc(self: *ZigObject, elf_file: *Elf, atom_index: Atom.Index) ![]u8 { - const gpa = elf_file.base.comp.gpa; + const comp = elf_file.base.comp; + const gpa = comp.gpa; + const io = comp.io; const atom_ptr = self.atom(atom_index).?; const file_offset = atom_ptr.offset(elf_file); const size = std.math.cast(usize, atom_ptr.size) orelse return error.Overflow; const code = try gpa.alloc(u8, size); errdefer gpa.free(code); - const amt = try elf_file.base.file.?.preadAll(code, file_offset); + const amt = try elf_file.base.file.?.readPositionalAll(io, code, file_offset); if (amt != code.len) { log.err("fetching code for {s} failed", .{atom_ptr.name(elf_file)}); return error.InputOutput; @@ -1365,6 +1369,8 @@ fn updateNavCode( ) link.File.UpdateNavError!void { const zcu = pt.zcu; const gpa = zcu.gpa; + const comp = elf_file.base.comp; + const io = comp.io; const ip = &zcu.intern_pool; const nav = ip.getNav(nav_index); @@ -1449,8 +1455,8 @@ fn updateNavCode( const shdr = elf_file.sections.items(.shdr)[shdr_index]; if (shdr.sh_type != elf.SHT_NOBITS) { const file_offset = atom_ptr.offset(elf_file); - elf_file.base.file.?.pwriteAll(code, file_offset) catch |err| - return elf_file.base.cgFail(nav_index, "failed to write to output file: {s}", .{@errorName(err)}); + elf_file.base.file.?.writePositionalAll(io, code, file_offset) catch |err| + return elf_file.base.cgFail(nav_index, "failed to write to output file: {t}", .{err}); log.debug("writing {f} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), file_offset, file_offset + code.len }); } } @@ -1467,6 +1473,8 @@ fn updateTlv( const zcu = pt.zcu; const ip = &zcu.intern_pool; const gpa = zcu.gpa; + const comp = elf_file.base.comp; + const io = comp.io; const nav = ip.getNav(nav_index); log.debug("updateTlv {f}({d})", .{ nav.fqn.fmt(ip), nav_index }); @@ -1503,8 +1511,8 @@ fn updateTlv( const shdr = elf_file.sections.items(.shdr)[shndx]; if (shdr.sh_type != elf.SHT_NOBITS) { const file_offset = atom_ptr.offset(elf_file); - elf_file.base.file.?.pwriteAll(code, file_offset) catch |err| - return elf_file.base.cgFail(nav_index, "failed to write to output file: {s}", .{@errorName(err)}); + elf_file.base.file.?.writePositionalAll(io, code, file_offset) catch |err| + return elf_file.base.cgFail(nav_index, "failed to write to output file: {t}", .{err}); log.debug("writing TLV {s} from 0x{x} to 0x{x}", .{ atom_ptr.name(elf_file), file_offset, @@ -2003,6 +2011,8 @@ fn trampolineSize(cpu_arch: std.Target.Cpu.Arch) u64 { } fn 
writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) !void { + const comp = elf_file.base.comp; + const io = comp.io; const atom_ptr = tr_sym.atom(elf_file).?; const fileoff = atom_ptr.offset(elf_file); const source_addr = tr_sym.address(.{}, elf_file); @@ -2012,7 +2022,7 @@ fn writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) !void { .x86_64 => try x86_64.writeTrampolineCode(source_addr, target_addr, &buf), else => @panic("TODO implement write trampoline for this CPU arch"), }; - try elf_file.base.file.?.pwriteAll(out, fileoff); + try elf_file.base.file.?.writePositionalAll(io, out, fileoff); if (elf_file.base.child_pid) |pid| { switch (builtin.os.tag) { diff --git a/src/link/Elf/file.zig b/src/link/Elf/file.zig index 50f5159d18..52d3c6e6f0 100644 --- a/src/link/Elf/file.zig +++ b/src/link/Elf/file.zig @@ -1,3 +1,20 @@ +const std = @import("std"); +const Io = std.Io; +const elf = std.elf; +const log = std.log.scoped(.link); +const Path = std.Build.Cache.Path; +const Allocator = std.mem.Allocator; + +const Archive = @import("Archive.zig"); +const Atom = @import("Atom.zig"); +const Cie = @import("eh_frame.zig").Cie; +const Elf = @import("../Elf.zig"); +const LinkerDefined = @import("LinkerDefined.zig"); +const Object = @import("Object.zig"); +const SharedObject = @import("SharedObject.zig"); +const Symbol = @import("Symbol.zig"); +const ZigObject = @import("ZigObject.zig"); + pub const File = union(enum) { zig_object: *ZigObject, linker_defined: *LinkerDefined, @@ -279,22 +296,6 @@ pub const File = union(enum) { shared_object: SharedObject, }; - pub const Handle = std.fs.File; + pub const Handle = Io.File; pub const HandleIndex = Index; }; - -const std = @import("std"); -const elf = std.elf; -const log = std.log.scoped(.link); -const Path = std.Build.Cache.Path; -const Allocator = std.mem.Allocator; - -const Archive = @import("Archive.zig"); -const Atom = @import("Atom.zig"); -const Cie = @import("eh_frame.zig").Cie; -const Elf = @import("../Elf.zig"); -const LinkerDefined = @import("LinkerDefined.zig"); -const Object = @import("Object.zig"); -const SharedObject = @import("SharedObject.zig"); -const Symbol = @import("Symbol.zig"); -const ZigObject = @import("ZigObject.zig"); diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig index 7adeecdcde..ec3ff252fb 100644 --- a/src/link/Elf/relocatable.zig +++ b/src/link/Elf/relocatable.zig @@ -1,5 +1,26 @@ +const std = @import("std"); +const assert = std.debug.assert; +const elf = std.elf; +const math = std.math; +const mem = std.mem; +const Path = std.Build.Cache.Path; +const log = std.log.scoped(.link); +const state_log = std.log.scoped(.link_state); + +const build_options = @import("build_options"); + +const eh_frame = @import("eh_frame.zig"); +const link = @import("../../link.zig"); +const Archive = @import("Archive.zig"); +const Compilation = @import("../../Compilation.zig"); +const Elf = @import("../Elf.zig"); +const File = @import("file.zig").File; +const Object = @import("Object.zig"); +const Symbol = @import("Symbol.zig"); + pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) !void { const gpa = comp.gpa; + const io = comp.io; const diags = &comp.link_diags; if (diags.hasErrors()) return error.LinkFailure; @@ -125,8 +146,8 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) !void { assert(writer.buffered().len == total_size); - try elf_file.base.file.?.setEndPos(total_size); - try elf_file.base.file.?.pwriteAll(writer.buffered(), 0); + try elf_file.base.file.?.setLength(io, 
total_size); + try elf_file.base.file.?.writePositionalAll(io, writer.buffered(), 0); if (diags.hasErrors()) return error.LinkFailure; } @@ -330,13 +351,7 @@ fn allocateAllocSections(elf_file: *Elf) !void { if (shdr.sh_offset > 0) { const existing_size = elf_file.sectionSize(@intCast(shndx)); - const amt = try elf_file.base.file.?.copyRangeAll( - shdr.sh_offset, - elf_file.base.file.?, - new_offset, - existing_size, - ); - if (amt != existing_size) return error.InputOutput; + try elf_file.base.copyRangeAll(shdr.sh_offset, new_offset, existing_size); } shdr.sh_offset = new_offset; @@ -360,7 +375,9 @@ fn writeAtoms(elf_file: *Elf) !void { } fn writeSyntheticSections(elf_file: *Elf) !void { - const gpa = elf_file.base.comp.gpa; + const comp = elf_file.base.comp; + const io = comp.io; + const gpa = comp.gpa; const slice = elf_file.sections.slice(); const SortRelocs = struct { @@ -397,7 +414,7 @@ fn writeSyntheticSections(elf_file: *Elf) !void { shdr.sh_offset + shdr.sh_size, }); - try elf_file.base.file.?.pwriteAll(@ptrCast(relocs.items), shdr.sh_offset); + try elf_file.base.file.?.writePositionalAll(io, @ptrCast(relocs.items), shdr.sh_offset); } if (elf_file.section_indexes.eh_frame) |shndx| { @@ -417,7 +434,7 @@ fn writeSyntheticSections(elf_file: *Elf) !void { shdr.sh_offset + sh_size, }); assert(writer.buffered().len == sh_size - existing_size); - try elf_file.base.file.?.pwriteAll(writer.buffered(), shdr.sh_offset + existing_size); + try elf_file.base.file.?.writePositionalAll(io, writer.buffered(), shdr.sh_offset + existing_size); } if (elf_file.section_indexes.eh_frame_rela) |shndx| { const shdr = slice.items(.shdr)[shndx]; @@ -435,7 +452,7 @@ fn writeSyntheticSections(elf_file: *Elf) !void { shdr.sh_offset, shdr.sh_offset + shdr.sh_size, }); - try elf_file.base.file.?.pwriteAll(@ptrCast(relocs.items), shdr.sh_offset); + try elf_file.base.file.?.writePositionalAll(io, @ptrCast(relocs.items), shdr.sh_offset); } try writeGroups(elf_file); @@ -444,7 +461,9 @@ fn writeSyntheticSections(elf_file: *Elf) !void { } fn writeGroups(elf_file: *Elf) !void { - const gpa = elf_file.base.comp.gpa; + const comp = elf_file.base.comp; + const io = comp.io; + const gpa = comp.gpa; for (elf_file.group_sections.items) |cgs| { const shdr = elf_file.sections.items(.shdr)[cgs.shndx]; const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow; @@ -457,25 +476,6 @@ fn writeGroups(elf_file: *Elf) !void { shdr.sh_offset, shdr.sh_offset + shdr.sh_size, }); - try elf_file.base.file.?.pwriteAll(writer.buffered(), shdr.sh_offset); + try elf_file.base.file.?.writePositionalAll(io, writer.buffered(), shdr.sh_offset); } } - -const assert = std.debug.assert; -const build_options = @import("build_options"); -const eh_frame = @import("eh_frame.zig"); -const elf = std.elf; -const link = @import("../../link.zig"); -const log = std.log.scoped(.link); -const math = std.math; -const mem = std.mem; -const state_log = std.log.scoped(.link_state); -const Path = std.Build.Cache.Path; -const std = @import("std"); - -const Archive = @import("Archive.zig"); -const Compilation = @import("../../Compilation.zig"); -const Elf = @import("../Elf.zig"); -const File = @import("file.zig").File; -const Object = @import("Object.zig"); -const Symbol = @import("Symbol.zig"); diff --git a/src/link/Elf2.zig b/src/link/Elf2.zig index 7d12ccedb2..bbdb439385 100644 --- a/src/link/Elf2.zig +++ b/src/link/Elf2.zig @@ -1,3 +1,23 @@ +const Elf = @This(); + +const builtin = @import("builtin"); +const native_endian = 
builtin.cpu.arch.endian(); + +const std = @import("std"); +const Io = std.Io; +const assert = std.debug.assert; +const log = std.log.scoped(.link); + +const codegen = @import("../codegen.zig"); +const Compilation = @import("../Compilation.zig"); +const InternPool = @import("../InternPool.zig"); +const link = @import("../link.zig"); +const MappedFile = @import("MappedFile.zig"); +const target_util = @import("../target.zig"); +const Type = @import("../Type.zig"); +const Value = @import("../Value.zig"); +const Zcu = @import("../Zcu.zig"); + base: link.File, options: link.File.OpenOptions, mf: MappedFile, @@ -908,6 +928,7 @@ fn create( path: std.Build.Cache.Path, options: link.File.OpenOptions, ) !*Elf { + const io = comp.io; const target = &comp.root_mod.resolved_target.result; assert(target.ofmt == .elf); const class: std.elf.CLASS = switch (target.ptrBitWidth()) { @@ -953,11 +974,11 @@ fn create( }; const elf = try arena.create(Elf); - const file = try path.root_dir.handle.adaptToNewApi().createFile(comp.io, path.sub_path, .{ + const file = try path.root_dir.handle.createFile(io, path.sub_path, .{ .read = true, - .mode = link.File.determineMode(comp.config.output_mode, comp.config.link_mode), + .permissions = link.File.determinePermissions(comp.config.output_mode, comp.config.link_mode), }); - errdefer file.close(comp.io); + errdefer file.close(io); elf.* = .{ .base = .{ .tag = .elf2, @@ -965,7 +986,7 @@ fn create( .comp = comp, .emit = path, - .file = .adaptFromNewApi(file), + .file = file, .gc_sections = false, .print_gc_sections = false, .build_id = .none, @@ -973,7 +994,7 @@ fn create( .stack_size = 0, }, .options = options, - .mf = try .init(file, comp.gpa), + .mf = try .init(file, comp.gpa, io), .ni = .{ .tls = .none, }, @@ -1973,8 +1994,8 @@ pub fn lazySymbol(elf: *Elf, lazy: link.File.LazySymbol) !Symbol.Index { return lazy_gop.value_ptr.*; } -pub fn loadInput(elf: *Elf, input: link.Input) (std.fs.File.Reader.SizeError || - std.Io.File.Reader.Error || MappedFile.Error || error{ EndOfStream, BadMagic, LinkFailure })!void { +pub fn loadInput(elf: *Elf, input: link.Input) (Io.File.Reader.SizeError || + Io.File.Reader.Error || MappedFile.Error || error{ EndOfStream, BadMagic, LinkFailure })!void { const io = elf.base.comp.io; var buf: [4096]u8 = undefined; switch (input) { @@ -2007,7 +2028,7 @@ pub fn loadInput(elf: *Elf, input: link.Input) (std.fs.File.Reader.SizeError || .dso_exact => |dso_exact| try elf.loadDsoExact(dso_exact.name), } } -fn loadArchive(elf: *Elf, path: std.Build.Cache.Path, fr: *std.Io.File.Reader) !void { +fn loadArchive(elf: *Elf, path: std.Build.Cache.Path, fr: *Io.File.Reader) !void { const comp = elf.base.comp; const gpa = comp.gpa; const diags = &comp.link_diags; @@ -2067,7 +2088,7 @@ fn loadObject( elf: *Elf, path: std.Build.Cache.Path, member: ?[]const u8, - fr: *std.Io.File.Reader, + fr: *Io.File.Reader, fl: MappedFile.Node.FileLocation, ) !void { const comp = elf.base.comp; @@ -2310,7 +2331,7 @@ fn loadObject( }, } } -fn loadDso(elf: *Elf, path: std.Build.Cache.Path, fr: *std.Io.File.Reader) !void { +fn loadDso(elf: *Elf, path: std.Build.Cache.Path, fr: *Io.File.Reader) !void { const comp = elf.base.comp; const diags = &comp.link_diags; const r = &fr.interface; @@ -3305,12 +3326,13 @@ fn flushInputSection(elf: *Elf, isi: Node.InputSectionIndex) !void { const file_loc = isi.fileLocation(elf); if (file_loc.size == 0) return; const comp = elf.base.comp; + const io = comp.io; const gpa = comp.gpa; const ii = isi.input(elf); const path = ii.path(elf); - const 
file = try path.root_dir.handle.adaptToNewApi().openFile(comp.io, path.sub_path, .{}); - defer file.close(comp.io); - var fr = file.reader(comp.io, &.{}); + const file = try path.root_dir.handle.openFile(io, path.sub_path, .{}); + defer file.close(io); + var fr = file.reader(io, &.{}); try fr.seekTo(file_loc.offset); var nw: MappedFile.Node.Writer = undefined; const si = isi.symbol(elf); @@ -3707,10 +3729,16 @@ pub fn deleteExport(elf: *Elf, exported: Zcu.Exported, name: InternPool.NullTerm _ = name; } -pub fn dump(elf: *Elf, tid: Zcu.PerThread.Id) void { - const w, _ = std.debug.lockStderrWriter(&.{}); - defer std.debug.unlockStderrWriter(); - elf.printNode(tid, w, .root, 0) catch {}; +pub fn dump(elf: *Elf, tid: Zcu.PerThread.Id) Io.Cancelable!void { + const comp = elf.base.comp; + const io = comp.io; + var buffer: [512]u8 = undefined; + const stderr = try io.lockStderr(&buffer, null); + defer io.unlockStderr(); + const w = &stderr.file_writer.interface; + elf.printNode(tid, w, .root, 0) catch |err| switch (err) { + error.WriteFailed => return stderr.err.?, + }; } pub fn printNode( @@ -3822,19 +3850,3 @@ pub fn printNode( try w.writeByte('\n'); } } - -const assert = std.debug.assert; -const builtin = @import("builtin"); -const codegen = @import("../codegen.zig"); -const Compilation = @import("../Compilation.zig"); -const Elf = @This(); -const InternPool = @import("../InternPool.zig"); -const link = @import("../link.zig"); -const log = std.log.scoped(.link); -const MappedFile = @import("MappedFile.zig"); -const native_endian = builtin.cpu.arch.endian(); -const std = @import("std"); -const target_util = @import("../target.zig"); -const Type = @import("../Type.zig"); -const Value = @import("../Value.zig"); -const Zcu = @import("../Zcu.zig"); diff --git a/src/link/Lld.zig b/src/link/Lld.zig index 2345090482..b2a0f6e396 100644 --- a/src/link/Lld.zig +++ b/src/link/Lld.zig @@ -359,6 +359,7 @@ fn linkAsArchive(lld: *Lld, arena: Allocator) !void { fn coffLink(lld: *Lld, arena: Allocator) !void { const comp = lld.base.comp; const gpa = comp.gpa; + const io = comp.io; const base = &lld.base; const coff = &lld.ofmt.coff; @@ -400,11 +401,12 @@ fn coffLink(lld: *Lld, arena: Allocator) !void { // regarding eliding redundant object -> object transformations.
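For reference, the change applied just below (and repeated in `elfLink` and `wasmLink` further down): `std.fs.Dir.copyFile` becomes `Io.Dir.copyFile`, with the `Io` handle threaded in as the second-to-last argument, ahead of the options struct. A minimal sketch of the new call shape; `copyEmittedObject` and the `src_`/`dst_` names are illustrative, not identifiers from this change:

    fn copyEmittedObject(io: Io, src_dir: Io.Dir, src_sub: []const u8, dst_dir: Io.Dir, dst_sub: []const u8) !void {
        // (src dir, src path, dest dir, dest path, io, options) -- same order as the call sites below.
        try Io.Dir.copyFile(src_dir, src_sub, dst_dir, dst_sub, io, .{});
    }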
return error.NoObjectsToLink; }; - try std.fs.Dir.copyFile( + try Io.Dir.copyFile( the_object_path.root_dir.handle, the_object_path.sub_path, directory.handle, base.emit.sub_path, + io, .{}, ); } else { @@ -718,13 +720,13 @@ fn coffLink(lld: *Lld, arena: Allocator) !void { argv.appendAssumeCapacity(try crt_file.full_object_path.toString(arena)); continue; } - if (try findLib(arena, lib_basename, coff.lib_directories)) |full_path| { + if (try findLib(arena, io, lib_basename, coff.lib_directories)) |full_path| { argv.appendAssumeCapacity(full_path); continue; } if (target.abi.isGnu()) { const fallback_name = try allocPrint(arena, "lib{s}.dll.a", .{key}); - if (try findLib(arena, fallback_name, coff.lib_directories)) |full_path| { + if (try findLib(arena, io, fallback_name, coff.lib_directories)) |full_path| { argv.appendAssumeCapacity(full_path); continue; } @@ -741,9 +743,9 @@ fn coffLink(lld: *Lld, arena: Allocator) !void { try spawnLld(comp, arena, argv.items); } } -fn findLib(arena: Allocator, name: []const u8, lib_directories: []const Cache.Directory) !?[]const u8 { +fn findLib(arena: Allocator, io: Io, name: []const u8, lib_directories: []const Cache.Directory) !?[]const u8 { for (lib_directories) |lib_directory| { - lib_directory.handle.access(name, .{}) catch |err| switch (err) { + lib_directory.handle.access(io, name, .{}) catch |err| switch (err) { error.FileNotFound => continue, else => |e| return e, }; @@ -755,6 +757,7 @@ fn findLib(arena: Allocator, name: []const u8, lib_directories: []const Cache.Di fn elfLink(lld: *Lld, arena: Allocator) !void { const comp = lld.base.comp; const gpa = comp.gpa; + const io = comp.io; const diags = &comp.link_diags; const base = &lld.base; const elf = &lld.ofmt.elf; @@ -816,11 +819,12 @@ fn elfLink(lld: *Lld, arena: Allocator) !void { // regarding eliding redundant object -> object transformations. return error.NoObjectsToLink; }; - try std.fs.Dir.copyFile( + try Io.Dir.copyFile( the_object_path.root_dir.handle, the_object_path.sub_path, directory.handle, base.emit.sub_path, + io, .{}, ); } else { @@ -1326,6 +1330,7 @@ fn getLDMOption(target: *const std.Target) ?[]const u8 { } fn wasmLink(lld: *Lld, arena: Allocator) !void { const comp = lld.base.comp; + const diags = &comp.link_diags; const shared_memory = comp.config.shared_memory; const export_memory = comp.config.export_memory; const import_memory = comp.config.import_memory; @@ -1334,6 +1339,7 @@ fn wasmLink(lld: *Lld, arena: Allocator) !void { const wasm = &lld.ofmt.wasm; const gpa = comp.gpa; + const io = comp.io; const directory = base.emit.root_dir; // Just an alias to make it shorter to type. const full_out_path = try directory.join(arena, &[_][]const u8{base.emit.sub_path}); @@ -1371,11 +1377,12 @@ fn wasmLink(lld: *Lld, arena: Allocator) !void { // regarding eliding redundant object -> object transformations. return error.NoObjectsToLink; }; - try fs.Dir.copyFile( + try Io.Dir.copyFile( the_object_path.root_dir.handle, the_object_path.sub_path, directory.handle, base.emit.sub_path, + io, .{}, ); } else { @@ -1565,27 +1572,23 @@ fn wasmLink(lld: *Lld, arena: Allocator) !void { // is not the case, it means we will get "exec format error" when trying to run // it, and then can react to that in the same way as trying to run an ELF file // from a foreign CPU architecture. 
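The wasm branch below swaps the raw `std.posix.fchmodat` for the portable `Io.Dir.setFilePermissions`, which also resolves the old TODO about error reporting: the failure now flows into link diagnostics instead of escaping as a bare error code. A sketch of the bare call shape, assuming only an `io: Io` and the output path in scope:

    // chmod does not consult the umask, so -rwxr--r-- (0o744) is a safe conservative choice.
    try Io.Dir.cwd().setFilePermissions(io, full_out_path, .fromMode(0o744), .{});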
- if (fs.has_executable_bit and target.os.tag == .wasi and + if (Io.File.Permissions.has_executable_bit and target.os.tag == .wasi and comp.config.output_mode == .Exe) { - // TODO: what's our strategy for reporting linker errors from this function? - // report a nice error here with the file path if it fails instead of - // just returning the error code. // chmod does not interact with umask, so we use a conservative -rwxr--r-- here. - std.posix.fchmodat(fs.cwd().fd, full_out_path, 0o744, 0) catch |err| switch (err) { - error.OperationNotSupported => unreachable, // Not a symlink. - else => |e| return e, - }; + Io.Dir.cwd().setFilePermissions(io, full_out_path, .fromMode(0o744), .{}) catch |err| + return diags.fail("{s}: failed to enable executable permissions: {t}", .{ full_out_path, err }); } } } fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !void { const io = comp.io; + const gpa = comp.gpa; if (comp.verbose_link) { // Skip over our own name so that the LLD linker name is the first argv item. - Compilation.dump_argv(argv[1..]); + try Compilation.dumpArgv(io, argv[1..]); } // If possible, we run LLD as a child process because it does not always @@ -1599,7 +1602,7 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi } var stderr: []u8 = &.{}; - defer comp.gpa.free(stderr); + defer gpa.free(stderr); var child = std.process.Child.init(argv, arena); const term = (if (comp.clang_passthrough_mode) term: { @@ -1607,16 +1610,16 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi child.stdout_behavior = .Inherit; child.stderr_behavior = .Inherit; - break :term child.spawnAndWait(); + break :term child.spawnAndWait(io); } else term: { child.stdin_behavior = .Ignore; child.stdout_behavior = .Ignore; child.stderr_behavior = .Pipe; - child.spawn() catch |err| break :term err; + child.spawn(io) catch |err| break :term err; var stderr_reader = child.stderr.?.readerStreaming(io, &.{}); - stderr = try stderr_reader.interface.allocRemaining(comp.gpa, .unlimited); - break :term child.wait(); + stderr = try stderr_reader.interface.allocRemaining(gpa, .unlimited); + break :term child.wait(io); }) catch |first_err| term: { const err = switch (first_err) { error.NameTooLong => err: { @@ -1624,13 +1627,13 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi const rand_int = std.crypto.random.int(u64); const rsp_path = "tmp" ++ s ++ std.fmt.hex(rand_int) ++ ".rsp"; - const rsp_file = try comp.dirs.local_cache.handle.createFile(rsp_path, .{}); - defer comp.dirs.local_cache.handle.deleteFileZ(rsp_path) catch |err| - log.warn("failed to delete response file {s}: {s}", .{ rsp_path, @errorName(err) }); + const rsp_file = try comp.dirs.local_cache.handle.createFile(io, rsp_path, .{}); + defer comp.dirs.local_cache.handle.deleteFile(io, rsp_path) catch |err| + log.warn("failed to delete response file {s}: {t}", .{ rsp_path, err }); { - defer rsp_file.close(); + defer rsp_file.close(io); var rsp_file_buffer: [1024]u8 = undefined; - var rsp_file_writer = rsp_file.writer(&rsp_file_buffer); + var rsp_file_writer = rsp_file.writer(io, &rsp_file_buffer); const rsp_writer = &rsp_file_writer.interface; for (argv[2..]) |arg| { try rsp_writer.writeByte('"'); @@ -1657,16 +1660,16 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi rsp_child.stdout_behavior = .Inherit; rsp_child.stderr_behavior = .Inherit; - break :term rsp_child.spawnAndWait() catch |err| break :err err; + 
break :term rsp_child.spawnAndWait(io) catch |err| break :err err; } else { rsp_child.stdin_behavior = .Ignore; rsp_child.stdout_behavior = .Ignore; rsp_child.stderr_behavior = .Pipe; - rsp_child.spawn() catch |err| break :err err; + rsp_child.spawn(io) catch |err| break :err err; var stderr_reader = rsp_child.stderr.?.readerStreaming(io, &.{}); - stderr = try stderr_reader.interface.allocRemaining(comp.gpa, .unlimited); - break :term rsp_child.wait() catch |err| break :err err; + stderr = try stderr_reader.interface.allocRemaining(gpa, .unlimited); + break :term rsp_child.wait(io) catch |err| break :err err; } }, else => first_err, @@ -1692,6 +1695,7 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi } const std = @import("std"); +const Io = std.Io; const Allocator = std.mem.Allocator; const Cache = std.Build.Cache; const allocPrint = std.fmt.allocPrint; diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 2c4ffd6632..b747b3de56 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -219,10 +219,12 @@ pub fn createEmpty( }; errdefer self.base.destroy(); - self.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{ + const io = comp.io; + + self.base.file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{ .truncate = true, .read = true, - .mode = link.File.determineMode(output_mode, link_mode), + .permissions = link.File.determinePermissions(output_mode, link_mode), }); // Append null file @@ -267,14 +269,16 @@ pub fn open( } pub fn deinit(self: *MachO) void { - const gpa = self.base.comp.gpa; + const comp = self.base.comp; + const gpa = comp.gpa; + const io = comp.io; if (self.d_sym) |*d_sym| { d_sym.deinit(); } for (self.file_handles.items) |handle| { - handle.close(); + handle.close(io); } self.file_handles.deinit(gpa); @@ -343,7 +347,8 @@ pub fn flush( const comp = self.base.comp; const gpa = comp.gpa; - const diags = &self.base.comp.link_diags; + const io = comp.io; + const diags = &comp.link_diags; const sub_prog_node = prog_node.start("MachO Flush", 0); defer sub_prog_node.end(); @@ -376,26 +381,26 @@ pub fn flush( // in this set. 
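Every `link.open*Input` helper used in this flush path (`openObjectInput`, `openArchiveInput`, `openDsoInput`) now takes the `Io` handle as its first parameter, ahead of the diagnostics sink; the remaining parameters are unchanged. A sketch with the argument roles inferred from the call sites in this hunk (`must_link` and `hidden` for archives):

    // Assumed bindings, as in `flush`: `io: Io`, `diags`, and Cache.Path values.
    const obj_input = try link.openObjectInput(io, diags, object_path);
    const ar_input = try link.openArchiveInput(io, diags, archive_path, false, false); // must_link, hidden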
try positionals.ensureUnusedCapacity(comp.c_object_table.keys().len); for (comp.c_object_table.keys()) |key| { - positionals.appendAssumeCapacity(try link.openObjectInput(diags, key.status.success.object_path)); + positionals.appendAssumeCapacity(try link.openObjectInput(io, diags, key.status.success.object_path)); } - if (zcu_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path)); + if (zcu_obj_path) |path| try positionals.append(try link.openObjectInput(io, diags, path)); if (comp.config.any_sanitize_thread) { - try positionals.append(try link.openObjectInput(diags, comp.tsan_lib.?.full_object_path)); + try positionals.append(try link.openObjectInput(io, diags, comp.tsan_lib.?.full_object_path)); } if (comp.config.any_fuzz) { - try positionals.append(try link.openArchiveInput(diags, comp.fuzzer_lib.?.full_object_path, false, false)); + try positionals.append(try link.openArchiveInput(io, diags, comp.fuzzer_lib.?.full_object_path, false, false)); } if (comp.ubsan_rt_lib) |crt_file| { const path = crt_file.full_object_path; - self.classifyInputFile(try link.openArchiveInput(diags, path, false, false)) catch |err| + self.classifyInputFile(try link.openArchiveInput(io, diags, path, false, false)) catch |err| diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)}); } else if (comp.ubsan_rt_obj) |crt_file| { const path = crt_file.full_object_path; - self.classifyInputFile(try link.openObjectInput(diags, path)) catch |err| + self.classifyInputFile(try link.openObjectInput(io, diags, path)) catch |err| diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)}); } @@ -430,7 +435,7 @@ pub fn flush( if (comp.config.link_libc and is_exe_or_dyn_lib) { if (comp.zigc_static_lib) |zigc| { const path = zigc.full_object_path; - self.classifyInputFile(try link.openArchiveInput(diags, path, false, false)) catch |err| + self.classifyInputFile(try link.openArchiveInput(io, diags, path, false, false)) catch |err| diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)}); } } @@ -453,12 +458,12 @@ pub fn flush( for (system_libs.items) |lib| { switch (Compilation.classifyFileExt(lib.path.sub_path)) { .shared_library => { - const dso_input = try link.openDsoInput(diags, lib.path, lib.needed, lib.weak, lib.reexport); + const dso_input = try link.openDsoInput(io, diags, lib.path, lib.needed, lib.weak, lib.reexport); self.classifyInputFile(dso_input) catch |err| diags.addParseError(lib.path, "failed to parse input file: {s}", .{@errorName(err)}); }, .static_library => { - const archive_input = try link.openArchiveInput(diags, lib.path, lib.must_link, lib.hidden); + const archive_input = try link.openArchiveInput(io, diags, lib.path, lib.must_link, lib.hidden); self.classifyInputFile(archive_input) catch |err| diags.addParseError(lib.path, "failed to parse input file: {s}", .{@errorName(err)}); }, @@ -469,11 +474,11 @@ pub fn flush( // Finally, link against compiler_rt. 
if (comp.compiler_rt_lib) |crt_file| { const path = crt_file.full_object_path; - self.classifyInputFile(try link.openArchiveInput(diags, path, false, false)) catch |err| + self.classifyInputFile(try link.openArchiveInput(io, diags, path, false, false)) catch |err| diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)}); } else if (comp.compiler_rt_obj) |crt_file| { const path = crt_file.full_object_path; - self.classifyInputFile(try link.openObjectInput(diags, path)) catch |err| + self.classifyInputFile(try link.openObjectInput(io, diags, path)) catch |err| diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)}); } @@ -564,7 +569,7 @@ pub fn flush( self.writeLinkeditSectionsToFile() catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.LinkFailure => return error.LinkFailure, - else => |e| return diags.fail("failed to write linkedit sections to file: {s}", .{@errorName(e)}), + else => |e| return diags.fail("failed to write linkedit sections to file: {t}", .{e}), }; var codesig: ?CodeSignature = if (self.requiresCodeSig()) blk: { @@ -575,8 +580,8 @@ pub fn flush( // where the code signature goes into. var codesig = CodeSignature.init(self.getPageSize()); codesig.code_directory.ident = fs.path.basename(self.base.emit.sub_path); - if (self.entitlements) |path| codesig.addEntitlements(gpa, path) catch |err| - return diags.fail("failed to add entitlements from {s}: {s}", .{ path, @errorName(err) }); + if (self.entitlements) |path| codesig.addEntitlements(gpa, io, path) catch |err| + return diags.fail("failed to add entitlements from {s}: {t}", .{ path, err }); try self.writeCodeSignaturePadding(&codesig); break :blk codesig; } else null; @@ -612,15 +617,17 @@ pub fn flush( else => |e| return diags.fail("failed to write code signature: {s}", .{@errorName(e)}), }; const emit = self.base.emit; - invalidateKernelCache(emit.root_dir.handle, emit.sub_path) catch |err| switch (err) { - else => |e| return diags.fail("failed to invalidate kernel cache: {s}", .{@errorName(e)}), + invalidateKernelCache(io, emit.root_dir.handle, emit.sub_path) catch |err| switch (err) { + else => |e| return diags.fail("failed to invalidate kernel cache: {t}", .{e}), }; } } /// --verbose-link output fn dumpArgv(self: *MachO, comp: *Compilation) !void { - const gpa = self.base.comp.gpa; + const gpa = comp.gpa; + const io = comp.io; + var arena_allocator = std.heap.ArenaAllocator.init(gpa); defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); @@ -815,7 +822,7 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void { if (comp.ubsan_rt_obj) |obj| try argv.append(try obj.full_object_path.toString(arena)); } - Compilation.dump_argv(argv.items); + try Compilation.dumpArgv(io, argv.items); } /// TODO delete this, libsystem must be resolved when setting up the compilation pipeline @@ -825,7 +832,8 @@ pub fn resolveLibSystem( comp: *Compilation, out_libs: anytype, ) !void { - const diags = &self.base.comp.link_diags; + const io = comp.io; + const diags = &comp.link_diags; var test_path = std.array_list.Managed(u8).init(arena); var checked_paths = std.array_list.Managed([]const u8).init(arena); @@ -834,16 +842,16 @@ pub fn resolveLibSystem( if (self.sdk_layout) |sdk_layout| switch (sdk_layout) { .sdk => { const dir = try fs.path.join(arena, &.{ comp.sysroot.?, "usr", "lib" }); - if (try accessLibPath(arena, &test_path, &checked_paths, dir, "System")) break :success; + if (try accessLibPath(arena, io, &test_path, &checked_paths, dir, "System")) 
break :success; }, .vendored => { const dir = try comp.dirs.zig_lib.join(arena, &.{ "libc", "darwin" }); - if (try accessLibPath(arena, &test_path, &checked_paths, dir, "System")) break :success; + if (try accessLibPath(arena, io, &test_path, &checked_paths, dir, "System")) break :success; }, }; for (self.lib_directories) |directory| { - if (try accessLibPath(arena, &test_path, &checked_paths, directory.path orelse ".", "System")) break :success; + if (try accessLibPath(arena, io, &test_path, &checked_paths, directory.path orelse ".", "System")) break :success; } diags.addMissingLibraryError(checked_paths.items, "unable to find libSystem system library", .{}); @@ -861,6 +869,9 @@ pub fn classifyInputFile(self: *MachO, input: link.Input) !void { const tracy = trace(@src()); defer tracy.end(); + const comp = self.base.comp; + const io = comp.io; + const path, const file = input.pathAndFile().?; // TODO don't classify now, it's too late. The input file has already been classified log.debug("classifying input file {f}", .{path}); @@ -871,7 +882,7 @@ pub fn classifyInputFile(self: *MachO, input: link.Input) !void { const fat_arch: ?fat.Arch = try self.parseFatFile(file, path); const offset = if (fat_arch) |fa| fa.offset else 0; - if (readMachHeader(file, offset) catch null) |h| blk: { + if (readMachHeader(io, file, offset) catch null) |h| blk: { if (h.magic != macho.MH_MAGIC_64) break :blk; switch (h.filetype) { macho.MH_OBJECT => try self.addObject(path, fh, offset), @@ -880,7 +891,7 @@ pub fn classifyInputFile(self: *MachO, input: link.Input) !void { } return; } - if (readArMagic(file, offset, &buffer) catch null) |ar_magic| blk: { + if (readArMagic(io, file, offset, &buffer) catch null) |ar_magic| blk: { if (!mem.eql(u8, ar_magic, Archive.ARMAG)) break :blk; try self.addArchive(input.archive, fh, fat_arch); return; @@ -888,12 +899,14 @@ pub fn classifyInputFile(self: *MachO, input: link.Input) !void { _ = try self.addTbd(.fromLinkInput(input), true, fh); } -fn parseFatFile(self: *MachO, file: std.fs.File, path: Path) !?fat.Arch { - const diags = &self.base.comp.link_diags; - const fat_h = fat.readFatHeader(file) catch return null; +fn parseFatFile(self: *MachO, file: Io.File, path: Path) !?fat.Arch { + const comp = self.base.comp; + const io = comp.io; + const diags = &comp.link_diags; + const fat_h = fat.readFatHeader(io, file) catch return null; if (fat_h.magic != macho.FAT_MAGIC and fat_h.magic != macho.FAT_MAGIC_64) return null; var fat_archs_buffer: [2]fat.Arch = undefined; - const fat_archs = try fat.parseArchs(file, fat_h, &fat_archs_buffer); + const fat_archs = try fat.parseArchs(io, file, fat_h, &fat_archs_buffer); const cpu_arch = self.getTarget().cpu.arch; for (fat_archs) |arch| { if (arch.tag == cpu_arch) return arch; @@ -901,16 +914,16 @@ fn parseFatFile(self: *MachO, file: std.fs.File, path: Path) !?fat.Arch { return diags.failParse(path, "missing arch in universal file: expected {s}", .{@tagName(cpu_arch)}); } -pub fn readMachHeader(file: std.fs.File, offset: usize) !macho.mach_header_64 { +pub fn readMachHeader(io: Io, file: Io.File, offset: usize) !macho.mach_header_64 { var buffer: [@sizeOf(macho.mach_header_64)]u8 = undefined; - const nread = try file.preadAll(&buffer, offset); + const nread = try file.readPositionalAll(io, &buffer, offset); if (nread != buffer.len) return error.InputOutput; const hdr = @as(*align(1) const macho.mach_header_64, @ptrCast(&buffer)).*; return hdr; } -pub fn readArMagic(file: std.fs.File, offset: usize, buffer: *[Archive.SARMAG]u8) ![]const u8 
{ - const nread = try file.preadAll(buffer, offset); +pub fn readArMagic(io: Io, file: Io.File, offset: usize, buffer: *[Archive.SARMAG]u8) ![]const u8 { + const nread = try file.readPositionalAll(io, buffer, offset); if (nread != buffer.len) return error.InputOutput; return buffer[0..Archive.SARMAG]; } @@ -921,6 +934,7 @@ fn addObject(self: *MachO, path: Path, handle_index: File.HandleIndex, offset: u const comp = self.base.comp; const gpa = comp.gpa; + const io = comp.io; const abs_path = try std.fs.path.resolvePosix(gpa, &.{ comp.dirs.cwd, @@ -930,7 +944,7 @@ fn addObject(self: *MachO, path: Path, handle_index: File.HandleIndex, offset: u errdefer gpa.free(abs_path); const file = self.getFileHandle(handle_index); - const stat = try file.stat(); + const stat = try file.stat(io); const mtime = stat.mtime.toSeconds(); const index: File.Index = @intCast(try self.files.addOne(gpa)); self.files.set(index, .{ .object = .{ @@ -1069,6 +1083,7 @@ fn isHoisted(self: *MachO, install_name: []const u8) bool { /// TODO delete this, libraries must be instead resolved when instantiating the compilation pipeline fn accessLibPath( arena: Allocator, + io: Io, test_path: *std.array_list.Managed(u8), checked_paths: *std.array_list.Managed([]const u8), search_dir: []const u8, @@ -1080,7 +1095,7 @@ fn accessLibPath( test_path.clearRetainingCapacity(); try test_path.print("{s}" ++ sep ++ "lib{s}{s}", .{ search_dir, name, ext }); try checked_paths.append(try arena.dupe(u8, test_path.items)); - fs.cwd().access(test_path.items, .{}) catch |err| switch (err) { + Io.Dir.cwd().access(io, test_path.items, .{}) catch |err| switch (err) { error.FileNotFound => continue, else => |e| return e, }; @@ -1092,6 +1107,7 @@ fn accessLibPath( fn accessFrameworkPath( arena: Allocator, + io: Io, test_path: *std.array_list.Managed(u8), checked_paths: *std.array_list.Managed([]const u8), search_dir: []const u8, @@ -1108,7 +1124,7 @@ fn accessFrameworkPath( ext, }); try checked_paths.append(try arena.dupe(u8, test_path.items)); - fs.cwd().access(test_path.items, .{}) catch |err| switch (err) { + Io.Dir.cwd().access(io, test_path.items, .{}) catch |err| switch (err) { error.FileNotFound => continue, else => |e| return e, }; @@ -1124,7 +1140,9 @@ fn parseDependentDylibs(self: *MachO) !void { if (self.dylibs.items.len == 0) return; - const gpa = self.base.comp.gpa; + const comp = self.base.comp; + const gpa = comp.gpa; + const io = comp.io; const framework_dirs = self.framework_dirs; // TODO delete this, directories must instead be resolved by the frontend @@ -1165,14 +1183,14 @@ fn parseDependentDylibs(self: *MachO) !void { // Framework for (framework_dirs) |dir| { test_path.clearRetainingCapacity(); - if (try accessFrameworkPath(arena, &test_path, &checked_paths, dir, stem)) break :full_path test_path.items; + if (try accessFrameworkPath(arena, io, &test_path, &checked_paths, dir, stem)) break :full_path test_path.items; } // Library const lib_name = eatPrefix(stem, "lib") orelse stem; for (lib_directories) |lib_directory| { test_path.clearRetainingCapacity(); - if (try accessLibPath(arena, &test_path, &checked_paths, lib_directory.path orelse ".", lib_name)) break :full_path test_path.items; + if (try accessLibPath(arena, io, &test_path, &checked_paths, lib_directory.path orelse ".", lib_name)) break :full_path test_path.items; } } @@ -1181,13 +1199,13 @@ fn parseDependentDylibs(self: *MachO) !void { const path = if (existing_ext.len > 0) id.name[0 .. 
id.name.len - existing_ext.len] else id.name; for (&[_][]const u8{ ".tbd", ".dylib", "" }) |ext| { test_path.clearRetainingCapacity(); - if (self.base.comp.sysroot) |root| { + if (comp.sysroot) |root| { try test_path.print("{s}" ++ fs.path.sep_str ++ "{s}{s}", .{ root, path, ext }); } else { try test_path.print("{s}{s}", .{ path, ext }); } try checked_paths.append(try arena.dupe(u8, test_path.items)); - fs.cwd().access(test_path.items, .{}) catch |err| switch (err) { + Io.Dir.cwd().access(io, test_path.items, .{}) catch |err| switch (err) { error.FileNotFound => continue, else => |e| return e, }; @@ -1202,7 +1220,8 @@ fn parseDependentDylibs(self: *MachO) !void { const rel_path = try fs.path.join(arena, &.{ prefix, path }); try checked_paths.append(rel_path); var buffer: [fs.max_path_bytes]u8 = undefined; - const full_path = fs.realpath(rel_path, &buffer) catch continue; + // TODO don't use realpath + const full_path = buffer[0 .. Io.Dir.realPathFileAbsolute(io, rel_path, &buffer) catch continue]; break :full_path try arena.dupe(u8, full_path); } } else if (eatPrefix(id.name, "@loader_path/")) |_| { @@ -1215,8 +1234,9 @@ fn parseDependentDylibs(self: *MachO) !void { try checked_paths.append(try arena.dupe(u8, id.name)); var buffer: [fs.max_path_bytes]u8 = undefined; - if (fs.realpath(id.name, &buffer)) |full_path| { - break :full_path try arena.dupe(u8, full_path); + // TODO don't use realpath + if (Io.Dir.realPathFileAbsolute(io, id.name, &buffer)) |full_path_n| { + break :full_path try arena.dupe(u8, buffer[0..full_path_n]); } else |_| { try self.reportMissingDependencyError( self.getFile(dylib_index).?.dylib.getUmbrella(self).index, @@ -1233,12 +1253,12 @@ fn parseDependentDylibs(self: *MachO) !void { .path = Path.initCwd(full_path), .weak = is_weak, }; - const file = try lib.path.root_dir.handle.openFile(lib.path.sub_path, .{}); + const file = try lib.path.root_dir.handle.openFile(io, lib.path.sub_path, .{}); const fh = try self.addFileHandle(file); const fat_arch = try self.parseFatFile(file, lib.path); const offset = if (fat_arch) |fa| fa.offset else 0; const file_index = file_index: { - if (readMachHeader(file, offset) catch null) |h| blk: { + if (readMachHeader(io, file, offset) catch null) |h| blk: { if (h.magic != macho.MH_MAGIC_64) break :blk; switch (h.filetype) { macho.MH_DYLIB => break :file_index try self.addDylib(lib, false, fh, offset), @@ -3147,7 +3167,9 @@ fn detectAllocCollision(self: *MachO, start: u64, size: u64) !?u64 { } } - if (at_end) try self.base.file.?.setEndPos(end); + const comp = self.base.comp; + const io = comp.io; + if (at_end) try self.base.file.?.setLength(io, end); return null; } @@ -3232,21 +3254,36 @@ pub fn findFreeSpaceVirtual(self: *MachO, object_size: u64, min_alignment: u32) } pub fn copyRangeAll(self: *MachO, old_offset: u64, new_offset: u64, size: u64) !void { - const file = self.base.file.?; - const amt = try file.copyRangeAll(old_offset, file, new_offset, size); - if (amt != size) return error.InputOutput; + return self.base.copyRangeAll(old_offset, new_offset, size); } -/// Like File.copyRangeAll but also ensures the source region is zeroed out after copy. +/// Like copyRangeAll but also ensures the source region is zeroed out after copy. /// This is so that we guarantee zeroed out regions for mapping of zerofill sections by the loader. 
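The rewrite below also retires the interim `copyRangeAll` plus heap-allocated zero buffer (note the old "TODO no need to allocate here"): a positional `Io.File.Reader`/`Io.File.Writer` pair streams the range with `sendFileAll`, then the writer seeks back and clears the source region with `splatByteAll`, so no `size`-byte allocation is needed. A condensed sketch of just the zeroing tail, using the same calls as the function that follows and assuming a positional `file_writer: Io.File.Writer` plus `old_offset`/`size_u` bindings:

    try file_writer.seekTo(old_offset);
    try file_writer.interface.splatByteAll(0, size_u); // overwrite the source range with zeroes
    try file_writer.interface.flush(); // splatted bytes are buffered until flushed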
fn copyRangeAllZeroOut(self: *MachO, old_offset: u64, new_offset: u64, size: u64) !void { - const gpa = self.base.comp.gpa; - try self.copyRangeAll(old_offset, new_offset, size); + const comp = self.base.comp; + const io = comp.io; + const file = self.base.file.?; + var write_buffer: [2048]u8 = undefined; + var file_reader = file.reader(io, &.{}); + file_reader.pos = old_offset; + var file_writer = file.writer(io, &write_buffer); + file_writer.pos = new_offset; const size_u = math.cast(usize, size) orelse return error.Overflow; - const zeroes = try gpa.alloc(u8, size_u); // TODO no need to allocate here. - defer gpa.free(zeroes); - @memset(zeroes, 0); - try self.base.file.?.pwriteAll(zeroes, old_offset); + const n = file_writer.interface.sendFileAll(&file_reader, .limited(size_u)) catch |err| switch (err) { + error.ReadFailed => return file_reader.err.?, + error.WriteFailed => return file_writer.err.?, + }; + assert(n == size_u); + file_writer.seekTo(old_offset) catch |err| switch (err) { + error.WriteFailed => return file_writer.err.?, + else => |e| return e, + }; + file_writer.interface.splatByteAll(0, size_u) catch |err| switch (err) { + error.WriteFailed => return file_writer.err.?, + }; + file_writer.interface.flush() catch |err| switch (err) { + error.WriteFailed => return file_writer.err.?, + }; } const InitMetadataOptions = struct { @@ -3257,8 +3294,10 @@ const InitMetadataOptions = struct { }; pub fn closeDebugInfo(self: *MachO) bool { + const comp = self.base.comp; + const io = comp.io; const d_sym = &(self.d_sym orelse return false); - d_sym.file.?.close(); + d_sym.file.?.close(io); d_sym.file = null; return true; } @@ -3269,7 +3308,9 @@ pub fn reopenDebugInfo(self: *MachO) !void { assert(!self.base.comp.config.use_llvm); assert(self.base.comp.config.debug_format == .dwarf); - const gpa = self.base.comp.gpa; + const comp = self.base.comp; + const io = comp.io; + const gpa = comp.gpa; const sep = fs.path.sep_str; const d_sym_path = try std.fmt.allocPrint( gpa, @@ -3278,10 +3319,10 @@ pub fn reopenDebugInfo(self: *MachO) !void { ); defer gpa.free(d_sym_path); - var d_sym_bundle = try self.base.emit.root_dir.handle.makeOpenPath(d_sym_path, .{}); - defer d_sym_bundle.close(); + var d_sym_bundle = try self.base.emit.root_dir.handle.createDirPathOpen(io, d_sym_path, .{}); + defer d_sym_bundle.close(io); - self.d_sym.?.file = try d_sym_bundle.createFile(fs.path.basename(self.base.emit.sub_path), .{ + self.d_sym.?.file = try d_sym_bundle.createFile(io, fs.path.basename(self.base.emit.sub_path), .{ .truncate = false, .read = true, }); @@ -3289,6 +3330,10 @@ pub fn reopenDebugInfo(self: *MachO) !void { // TODO: move to ZigObject fn initMetadata(self: *MachO, options: InitMetadataOptions) !void { + const comp = self.base.comp; + const gpa = comp.gpa; + const io = comp.io; + if (!self.base.isRelocatable()) { const base_vmaddr = blk: { const pagezero_size = self.pagezero_size orelse default_pagezero_size; @@ -3343,7 +3388,11 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void { if (options.zo.dwarf) |*dwarf| { // Create dSYM bundle. 
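`reopenDebugInfo` above also shows the renamed directory helper: `makeOpenPath` is now `createDirPathOpen`, which creates the `.dSYM` bundle path and returns an open `Io.Dir` in one step. Sketch under the same assumptions (`io: Io`, an open emit directory handle `emit_dir`, and the bundle sub-path):

    var bundle = try emit_dir.createDirPathOpen(io, d_sym_path, .{});
    defer bundle.close(io);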
log.debug("creating {s}.dSYM bundle", .{options.emit.sub_path}); - self.d_sym = .{ .allocator = self.base.comp.gpa, .file = null }; + self.d_sym = .{ + .io = io, + .allocator = gpa, + .file = null, + }; try self.reopenDebugInfo(); try self.d_sym.?.initMetadata(self); try dwarf.initMetadata(); @@ -3463,6 +3512,9 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo const seg_id = self.sections.items(.segment_id)[sect_index]; const seg = &self.segments.items[seg_id]; + const comp = self.base.comp; + const io = comp.io; + if (!sect.isZerofill()) { const allocated_size = self.allocatedSize(sect.offset); if (needed_size > allocated_size) { @@ -3484,7 +3536,7 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo sect.offset = @intCast(new_offset); } else if (sect.offset + allocated_size == std.math.maxInt(u64)) { - try self.base.file.?.setEndPos(sect.offset + needed_size); + try self.base.file.?.setLength(io, sect.offset + needed_size); } seg.filesize = needed_size; } @@ -3506,6 +3558,8 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo } fn growSectionRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void { + const comp = self.base.comp; + const io = comp.io; const sect = &self.sections.items(.header)[sect_index]; if (!sect.isZerofill()) { @@ -3533,7 +3587,7 @@ fn growSectionRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void sect.offset = @intCast(new_offset); sect.addr = new_addr; } else if (sect.offset + allocated_size == std.math.maxInt(u64)) { - try self.base.file.?.setEndPos(sect.offset + needed_size); + try self.base.file.?.setLength(io, sect.offset + needed_size); } } sect.size = needed_size; @@ -3567,11 +3621,11 @@ pub fn getTarget(self: *const MachO) *const std.Target { /// into a new inode, remove the original file, and rename the copy to match /// the original file. This is super messy, but there doesn't seem any other /// way to please the XNU. -pub fn invalidateKernelCache(dir: fs.Dir, sub_path: []const u8) !void { +pub fn invalidateKernelCache(io: Io, dir: Io.Dir, sub_path: []const u8) !void { const tracy = trace(@src()); defer tracy.end(); if (builtin.target.os.tag.isDarwin() and builtin.target.cpu.arch == .aarch64) { - try dir.copyFile(sub_path, dir, sub_path, .{}); + try dir.copyFile(sub_path, dir, sub_path, io, .{}); } } @@ -3762,7 +3816,7 @@ pub fn getInternalObject(self: *MachO) ?*InternalObject { return self.getFile(index).?.internal; } -pub fn addFileHandle(self: *MachO, file: fs.File) !File.HandleIndex { +pub fn addFileHandle(self: *MachO, file: Io.File) !File.HandleIndex { const gpa = self.base.comp.gpa; const index: File.HandleIndex = @intCast(self.file_handles.items.len); const fh = try self.file_handles.addOne(gpa); @@ -4333,11 +4387,13 @@ fn inferSdkVersion(comp: *Compilation, sdk_layout: SdkLayout) ?std.SemanticVersi defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); + const io = comp.io; + const sdk_dir = switch (sdk_layout) { .sdk => comp.sysroot.?, .vendored => fs.path.join(arena, &.{ comp.dirs.zig_lib.path.?, "libc", "darwin" }) catch return null, }; - if (readSdkVersionFromSettings(arena, sdk_dir)) |ver| { + if (readSdkVersionFromSettings(arena, io, sdk_dir)) |ver| { return parseSdkVersion(ver); } else |_| { // Read from settings should always succeed when vendored. 
@@ -4360,9 +4416,9 @@ fn inferSdkVersion(comp: *Compilation, sdk_layout: SdkLayout) ?std.SemanticVersi // Official Apple SDKs ship with a `SDKSettings.json` located at the top of SDK fs layout. // Use property `MinimalDisplayName` to determine version. // The file/property is also available with vendored libc. -fn readSdkVersionFromSettings(arena: Allocator, dir: []const u8) ![]const u8 { +fn readSdkVersionFromSettings(arena: Allocator, io: Io, dir: []const u8) ![]const u8 { const sdk_path = try fs.path.join(arena, &.{ dir, "SDKSettings.json" }); - const contents = try fs.cwd().readFileAlloc(sdk_path, arena, .limited(std.math.maxInt(u16))); + const contents = try Io.Dir.cwd().readFileAlloc(io, sdk_path, arena, .limited(std.math.maxInt(u16))); const parsed = try std.json.parseFromSlice(std.json.Value, arena, contents, .{}); if (parsed.value.object.get("MinimalDisplayName")) |ver| return ver.string; return error.SdkVersionFailure; @@ -5324,18 +5380,18 @@ fn isReachable(atom: *const Atom, rel: Relocation, macho_file: *MachO) bool { pub fn pwriteAll(macho_file: *MachO, bytes: []const u8, offset: u64) error{LinkFailure}!void { const comp = macho_file.base.comp; + const io = comp.io; const diags = &comp.link_diags; - macho_file.base.file.?.pwriteAll(bytes, offset) catch |err| { - return diags.fail("failed to write: {s}", .{@errorName(err)}); - }; + macho_file.base.file.?.writePositionalAll(io, bytes, offset) catch |err| + return diags.fail("failed to write: {t}", .{err}); } -pub fn setEndPos(macho_file: *MachO, length: u64) error{LinkFailure}!void { +pub fn setLength(macho_file: *MachO, length: u64) error{LinkFailure}!void { const comp = macho_file.base.comp; + const io = comp.io; const diags = &comp.link_diags; - macho_file.base.file.?.setEndPos(length) catch |err| { - return diags.fail("failed to set file end pos: {s}", .{@errorName(err)}); - }; + macho_file.base.file.?.setLength(io, length) catch |err| + return diags.fail("failed to set file end pos: {t}", .{err}); } pub fn cast(macho_file: *MachO, comptime T: type, x: anytype) error{LinkFailure}!T { @@ -5367,10 +5423,11 @@ const max_distance = (1 << (jump_bits - 1)); const max_allowed_distance = max_distance - 0x500_000; const MachO = @This(); - -const std = @import("std"); const build_options = @import("build_options"); const builtin = @import("builtin"); + +const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; const fs = std.fs; const log = std.log.scoped(.link); @@ -5380,6 +5437,11 @@ const math = std.math; const mem = std.mem; const meta = std.meta; const Writer = std.Io.Writer; +const AtomicBool = std.atomic.Value(bool); +const Cache = std.Build.Cache; +const Hash = std.hash.Wyhash; +const Md5 = std.crypto.hash.Md5; +const Allocator = std.mem.Allocator; const aarch64 = codegen.aarch64.encoding; const bind = @import("MachO/dyld_info/bind.zig"); @@ -5397,11 +5459,8 @@ const trace = @import("../tracy.zig").trace; const synthetic = @import("MachO/synthetic.zig"); const Alignment = Atom.Alignment; -const Allocator = mem.Allocator; const Archive = @import("MachO/Archive.zig"); -const AtomicBool = std.atomic.Value(bool); const Bind = bind.Bind; -const Cache = std.Build.Cache; const CodeSignature = @import("MachO/CodeSignature.zig"); const Compilation = @import("../Compilation.zig"); const DataInCode = synthetic.DataInCode; @@ -5411,14 +5470,12 @@ const ExportTrie = @import("MachO/dyld_info/Trie.zig"); const Path = Cache.Path; const File = @import("MachO/file.zig").File; const GotSection = synthetic.GotSection; -const 
Hash = std.hash.Wyhash; const Indsymtab = synthetic.Indsymtab; const InternalObject = @import("MachO/InternalObject.zig"); const ObjcStubsSection = synthetic.ObjcStubsSection; const Object = @import("MachO/Object.zig"); const LazyBind = bind.LazyBind; const LaSymbolPtrSection = synthetic.LaSymbolPtrSection; -const Md5 = std.crypto.hash.Md5; const Zcu = @import("../Zcu.zig"); const InternPool = @import("../InternPool.zig"); const Rebase = @import("MachO/dyld_info/Rebase.zig"); diff --git a/src/link/MachO/Archive.zig b/src/link/MachO/Archive.zig index d1962412c4..54c00e33ee 100644 --- a/src/link/MachO/Archive.zig +++ b/src/link/MachO/Archive.zig @@ -6,6 +6,7 @@ pub fn deinit(self: *Archive, allocator: Allocator) void { pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File.HandleIndex, fat_arch: ?fat.Arch) !void { const comp = macho_file.base.comp; + const io = comp.io; const gpa = comp.gpa; const diags = &comp.link_diags; @@ -14,7 +15,7 @@ pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File const handle = macho_file.getFileHandle(handle_index); const offset = if (fat_arch) |ar| ar.offset else 0; - const end_pos = if (fat_arch) |ar| offset + ar.size else (try handle.stat()).size; + const end_pos = if (fat_arch) |ar| offset + ar.size else (try handle.stat(io)).size; var pos: usize = offset + SARMAG; while (true) { @@ -23,7 +24,7 @@ pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File var hdr_buffer: [@sizeOf(ar_hdr)]u8 = undefined; { - const amt = try handle.preadAll(&hdr_buffer, pos); + const amt = try handle.readPositionalAll(io, &hdr_buffer, pos); if (amt != @sizeOf(ar_hdr)) return error.InputOutput; } const hdr = @as(*align(1) const ar_hdr, @ptrCast(&hdr_buffer)).*; @@ -41,7 +42,7 @@ pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File if (try hdr.nameLength()) |len| { hdr_size -= len; const buf = try arena.allocator().alloc(u8, len); - const amt = try handle.preadAll(buf, pos); + const amt = try handle.readPositionalAll(io, buf, pos); if (amt != len) return error.InputOutput; pos += len; const actual_len = mem.indexOfScalar(u8, buf, @as(u8, 0)) orelse len; diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig index 5bded3b9e3..0955c823b8 100644 --- a/src/link/MachO/CodeSignature.zig +++ b/src/link/MachO/CodeSignature.zig @@ -1,20 +1,28 @@ const CodeSignature = @This(); const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; const fs = std.fs; const log = std.log.scoped(.link); const macho = std.macho; const mem = std.mem; const testing = std.testing; +const Sha256 = std.crypto.hash.sha2.Sha256; +const Allocator = std.mem.Allocator; + const trace = @import("../../tracy.zig").trace; -const Allocator = mem.Allocator; -const Hasher = @import("hasher.zig").ParallelHasher; +const ParallelHasher = @import("hasher.zig").ParallelHasher; const MachO = @import("../MachO.zig"); -const Sha256 = std.crypto.hash.sha2.Sha256; const hash_size = Sha256.digest_length; +page_size: u16, +code_directory: CodeDirectory, +requirements: ?Requirements = null, +entitlements: ?Entitlements = null, +signature: ?Signature = null, + const Blob = union(enum) { code_directory: *CodeDirectory, requirements: *Requirements, @@ -218,12 +226,6 @@ const Signature = struct { } }; -page_size: u16, -code_directory: CodeDirectory, -requirements: ?Requirements = null, -entitlements: ?Entitlements = null, -signature: ?Signature = null, - pub fn init(page_size: u16) 
CodeSignature { return .{ .page_size = page_size, @@ -244,13 +246,13 @@ pub fn deinit(self: *CodeSignature, allocator: Allocator) void { } } -pub fn addEntitlements(self: *CodeSignature, allocator: Allocator, path: []const u8) !void { - const inner = try fs.cwd().readFileAlloc(path, allocator, .limited(std.math.maxInt(u32))); +pub fn addEntitlements(self: *CodeSignature, allocator: Allocator, io: Io, path: []const u8) !void { + const inner = try Io.Dir.cwd().readFileAlloc(io, path, allocator, .limited(std.math.maxInt(u32))); self.entitlements = .{ .inner = inner }; } pub const WriteOpts = struct { - file: fs.File, + file: Io.File, exec_seg_base: u64, exec_seg_limit: u64, file_size: u32, @@ -266,7 +268,9 @@ pub fn writeAdhocSignature( const tracy = trace(@src()); defer tracy.end(); - const allocator = macho_file.base.comp.gpa; + const comp = macho_file.base.comp; + const gpa = comp.gpa; + const io = comp.io; var header: macho.SuperBlob = .{ .magic = macho.CSMAGIC_EMBEDDED_SIGNATURE, @@ -274,7 +278,7 @@ pub fn writeAdhocSignature( .count = 0, }; - var blobs = std.array_list.Managed(Blob).init(allocator); + var blobs = std.array_list.Managed(Blob).init(gpa); defer blobs.deinit(); self.code_directory.inner.execSegBase = opts.exec_seg_base; @@ -284,13 +288,12 @@ pub fn writeAdhocSignature( const total_pages = @as(u32, @intCast(mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size)); - try self.code_directory.code_slots.ensureTotalCapacityPrecise(allocator, total_pages); + try self.code_directory.code_slots.ensureTotalCapacityPrecise(gpa, total_pages); self.code_directory.code_slots.items.len = total_pages; self.code_directory.inner.nCodeSlots = total_pages; // Calculate hash for each page (in file) and write it to the buffer - var hasher = Hasher(Sha256){ .allocator = allocator, .io = macho_file.base.comp.io }; - try hasher.hash(opts.file, self.code_directory.code_slots.items, .{ + try ParallelHasher(Sha256).hash(gpa, io, opts.file, self.code_directory.code_slots.items, .{ .chunk_size = self.page_size, .max_file_size = opts.file_size, }); @@ -302,7 +305,7 @@ pub fn writeAdhocSignature( var hash: [hash_size]u8 = undefined; if (self.requirements) |*req| { - var a: std.Io.Writer.Allocating = .init(allocator); + var a: std.Io.Writer.Allocating = .init(gpa); defer a.deinit(); try req.write(&a.writer); Sha256.hash(a.written(), &hash, .{}); @@ -314,7 +317,7 @@ pub fn writeAdhocSignature( } if (self.entitlements) |*ents| { - var a: std.Io.Writer.Allocating = .init(allocator); + var a: std.Io.Writer.Allocating = .init(gpa); defer a.deinit(); try ents.write(&a.writer); Sha256.hash(a.written(), &hash, .{}); diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig index 5d7b9b88c3..3e723bd9d7 100644 --- a/src/link/MachO/DebugSymbols.zig +++ b/src/link/MachO/DebugSymbols.zig @@ -1,5 +1,28 @@ +const DebugSymbols = @This(); + +const std = @import("std"); +const Io = std.Io; +const assert = std.debug.assert; +const fs = std.fs; +const log = std.log.scoped(.link_dsym); +const macho = std.macho; +const makeStaticString = MachO.makeStaticString; +const math = std.math; +const mem = std.mem; +const Writer = std.Io.Writer; +const Allocator = std.mem.Allocator; + +const link = @import("../../link.zig"); +const MachO = @import("../MachO.zig"); +const StringTable = @import("../StringTable.zig"); +const Type = @import("../../Type.zig"); +const trace = @import("../../tracy.zig").trace; +const load_commands = @import("load_commands.zig"); +const padToIdeal = MachO.padToIdeal; + 
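A design note on the struct fields that follow: unlike the ELF and COFF drivers, which pull `io` out of `base.comp` inside each function, `DebugSymbols` caches the handle as its first field, so every method can open with `const io = self.io;`. A reduced stand-in (not the real type) showing the shape:

    const DsymLike = struct {
        io: Io,
        file: ?Io.File,

        fn deinit(self: *DsymLike) void {
            if (self.file) |f| f.close(self.io); // the Io handle comes from the field
            self.file = null;
        }
    };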
+io: Io, allocator: Allocator, -file: ?fs.File, +file: ?Io.File, symtab_cmd: macho.symtab_command = .{}, uuid_cmd: macho.uuid_command = .{ .uuid = [_]u8{0} ** 16 }, @@ -102,6 +125,7 @@ pub fn growSection( requires_file_copy: bool, macho_file: *MachO, ) !void { + const io = self.io; const sect = self.getSectionPtr(sect_index); const allocated_size = self.allocatedSize(sect.offset); @@ -111,25 +135,17 @@ pub fn growSection( const new_offset = try self.findFreeSpace(needed_size, 1); log.debug("moving {s} section: {} bytes from 0x{x} to 0x{x}", .{ - sect.sectName(), - existing_size, - sect.offset, - new_offset, + sect.sectName(), existing_size, sect.offset, new_offset, }); if (requires_file_copy) { - const amt = try self.file.?.copyRangeAll( - sect.offset, - self.file.?, - new_offset, - existing_size, - ); - if (amt != existing_size) return error.InputOutput; + const file = self.file.?; + try link.File.copyRangeAll2(io, file, file, sect.offset, new_offset, existing_size); } sect.offset = @intCast(new_offset); } else if (sect.offset + allocated_size == std.math.maxInt(u64)) { - try self.file.?.setEndPos(sect.offset + needed_size); + try self.file.?.setLength(io, sect.offset + needed_size); } sect.size = needed_size; @@ -153,6 +169,7 @@ pub fn markDirty(self: *DebugSymbols, sect_index: u8, macho_file: *MachO) void { } fn detectAllocCollision(self: *DebugSymbols, start: u64, size: u64) !?u64 { + const io = self.io; var at_end = true; const end = start + padToIdeal(size); @@ -165,7 +182,7 @@ fn detectAllocCollision(self: *DebugSymbols, start: u64, size: u64) !?u64 { } } - if (at_end) try self.file.?.setEndPos(end); + if (at_end) try self.file.?.setLength(io, end); return null; } @@ -179,6 +196,7 @@ fn findFreeSpace(self: *DebugSymbols, object_size: u64, min_alignment: u64) !u64 } pub fn flush(self: *DebugSymbols, macho_file: *MachO) !void { + const io = self.io; const zo = macho_file.getZigObject().?; for (self.relocs.items) |*reloc| { const sym = zo.symbols.items[reloc.target]; @@ -190,12 +208,9 @@ pub fn flush(self: *DebugSymbols, macho_file: *MachO) !void { const sect = &self.sections.items[self.debug_info_section_index.?]; const file_offset = sect.offset + reloc.offset; log.debug("resolving relocation: {d}@{x} ('{s}') at offset {x}", .{ - reloc.target, - addr, - sym_name, - file_offset, + reloc.target, addr, sym_name, file_offset, }); - try self.file.?.pwriteAll(mem.asBytes(&addr), file_offset); + try self.file.?.writePositionalAll(io, mem.asBytes(&addr), file_offset); } self.finalizeDwarfSegment(macho_file); @@ -208,7 +223,8 @@ pub fn flush(self: *DebugSymbols, macho_file: *MachO) !void { pub fn deinit(self: *DebugSymbols) void { const gpa = self.allocator; - if (self.file) |file| file.close(); + const io = self.io; + if (self.file) |file| file.close(io); self.segments.deinit(gpa); self.sections.deinit(gpa); self.relocs.deinit(gpa); @@ -268,6 +284,7 @@ fn finalizeDwarfSegment(self: *DebugSymbols, macho_file: *MachO) void { } fn writeLoadCommands(self: *DebugSymbols, macho_file: *MachO) !struct { usize, usize } { + const io = self.io; const gpa = self.allocator; const needed_size = load_commands.calcLoadCommandsSizeDsym(macho_file, self); const buffer = try gpa.alloc(u8, needed_size); @@ -319,12 +336,13 @@ fn writeLoadCommands(self: *DebugSymbols, macho_file: *MachO) !struct { usize, u assert(writer.end == needed_size); - try self.file.?.pwriteAll(buffer, @sizeOf(macho.mach_header_64)); + try self.file.?.writePositionalAll(io, buffer, @sizeOf(macho.mach_header_64)); return .{ ncmds, buffer.len 
}; } fn writeHeader(self: *DebugSymbols, macho_file: *MachO, ncmds: usize, sizeofcmds: usize) !void { + const io = self.io; var header: macho.mach_header_64 = .{}; header.filetype = macho.MH_DSYM; @@ -345,7 +363,7 @@ fn writeHeader(self: *DebugSymbols, macho_file: *MachO, ncmds: usize, sizeofcmds log.debug("writing Mach-O header {}", .{header}); - try self.file.?.pwriteAll(mem.asBytes(&header), 0); + try self.file.?.writePositionalAll(io, mem.asBytes(&header), 0); } fn allocatedSize(self: *DebugSymbols, start: u64) u64 { @@ -380,6 +398,8 @@ fn writeLinkeditSegmentData(self: *DebugSymbols, macho_file: *MachO) !void { pub fn writeSymtab(self: *DebugSymbols, off: u32, macho_file: *MachO) !u32 { const tracy = trace(@src()); defer tracy.end(); + + const io = self.io; const gpa = self.allocator; const cmd = &self.symtab_cmd; cmd.nsyms = macho_file.symtab_cmd.nsyms; @@ -403,15 +423,16 @@ pub fn writeSymtab(self: *DebugSymbols, off: u32, macho_file: *MachO) !u32 { internal.writeSymtab(macho_file, self); } - try self.file.?.pwriteAll(@ptrCast(self.symtab.items), cmd.symoff); + try self.file.?.writePositionalAll(io, @ptrCast(self.symtab.items), cmd.symoff); return off + cmd.nsyms * @sizeOf(macho.nlist_64); } pub fn writeStrtab(self: *DebugSymbols, off: u32) !u32 { + const io = self.io; const cmd = &self.symtab_cmd; cmd.stroff = off; - try self.file.?.pwriteAll(self.strtab.items, cmd.stroff); + try self.file.?.writePositionalAll(io, self.strtab.items, cmd.stroff); return off + cmd.strsize; } @@ -443,25 +464,3 @@ pub fn getSection(self: DebugSymbols, sect: u8) macho.section_64 { assert(sect < self.sections.items.len); return self.sections.items[sect]; } - -const DebugSymbols = @This(); - -const std = @import("std"); -const build_options = @import("build_options"); -const assert = std.debug.assert; -const fs = std.fs; -const link = @import("../../link.zig"); -const load_commands = @import("load_commands.zig"); -const log = std.log.scoped(.link_dsym); -const macho = std.macho; -const makeStaticString = MachO.makeStaticString; -const math = std.math; -const mem = std.mem; -const padToIdeal = MachO.padToIdeal; -const trace = @import("../../tracy.zig").trace; -const Writer = std.Io.Writer; - -const Allocator = mem.Allocator; -const MachO = @import("../MachO.zig"); -const StringTable = @import("../StringTable.zig"); -const Type = @import("../../Type.zig"); diff --git a/src/link/MachO/Dylib.zig b/src/link/MachO/Dylib.zig index 69c64b6717..638630b608 100644 --- a/src/link/MachO/Dylib.zig +++ b/src/link/MachO/Dylib.zig @@ -57,7 +57,9 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void { const tracy = trace(@src()); defer tracy.end(); - const gpa = macho_file.base.comp.gpa; + const comp = macho_file.base.comp; + const io = comp.io; + const gpa = comp.gpa; const file = macho_file.getFileHandle(self.file_handle); const offset = self.offset; @@ -65,7 +67,7 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void { var header_buffer: [@sizeOf(macho.mach_header_64)]u8 = undefined; { - const amt = try file.preadAll(&header_buffer, offset); + const amt = try file.readPositionalAll(io, &header_buffer, offset); if (amt != @sizeOf(macho.mach_header_64)) return error.InputOutput; } const header = @as(*align(1) const macho.mach_header_64, @ptrCast(&header_buffer)).*; @@ -86,7 +88,7 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void { const lc_buffer = try gpa.alloc(u8, header.sizeofcmds); defer gpa.free(lc_buffer); { - const amt = try file.preadAll(lc_buffer, offset + @sizeOf(macho.mach_header_64)); + const 
amt = try file.readPositionalAll(io, lc_buffer, offset + @sizeOf(macho.mach_header_64)); if (amt != lc_buffer.len) return error.InputOutput; } @@ -103,7 +105,7 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void { const dyld_cmd = cmd.cast(macho.dyld_info_command).?; const data = try gpa.alloc(u8, dyld_cmd.export_size); defer gpa.free(data); - const amt = try file.preadAll(data, dyld_cmd.export_off + offset); + const amt = try file.readPositionalAll(io, data, dyld_cmd.export_off + offset); if (amt != data.len) return error.InputOutput; try self.parseTrie(data, macho_file); }, @@ -111,7 +113,7 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void { const ld_cmd = cmd.cast(macho.linkedit_data_command).?; const data = try gpa.alloc(u8, ld_cmd.datasize); defer gpa.free(data); - const amt = try file.preadAll(data, ld_cmd.dataoff + offset); + const amt = try file.readPositionalAll(io, data, ld_cmd.dataoff + offset); if (amt != data.len) return error.InputOutput; try self.parseTrie(data, macho_file); }, @@ -238,13 +240,15 @@ fn parseTbd(self: *Dylib, macho_file: *MachO) !void { const tracy = trace(@src()); defer tracy.end(); - const gpa = macho_file.base.comp.gpa; + const comp = macho_file.base.comp; + const gpa = comp.gpa; + const io = comp.io; log.debug("parsing dylib from stub: {f}", .{self.path}); const file = macho_file.getFileHandle(self.file_handle); - var lib_stub = LibStub.loadFromFile(gpa, file) catch |err| { - try macho_file.reportParseError2(self.index, "failed to parse TBD file: {s}", .{@errorName(err)}); + var lib_stub = LibStub.loadFromFile(gpa, io, file) catch |err| { + try macho_file.reportParseError2(self.index, "failed to parse TBD file: {t}", .{err}); return error.MalformedTbd; }; defer lib_stub.deinit(); diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig index 5fc77fe763..b9def4568d 100644 --- a/src/link/MachO/Object.zig +++ b/src/link/MachO/Object.zig @@ -1,3 +1,30 @@ +const Object = @This(); + +const trace = @import("../../tracy.zig").trace; +const Archive = @import("Archive.zig"); +const Atom = @import("Atom.zig"); +const Dwarf = @import("Dwarf.zig"); +const File = @import("file.zig").File; +const MachO = @import("../MachO.zig"); +const Relocation = @import("Relocation.zig"); +const Symbol = @import("Symbol.zig"); +const UnwindInfo = @import("UnwindInfo.zig"); + +const std = @import("std"); +const Io = std.Io; +const Writer = std.Io.Writer; +const assert = std.debug.assert; +const log = std.log.scoped(.link); +const macho = std.macho; +const LoadCommandIterator = macho.LoadCommandIterator; +const math = std.math; +const mem = std.mem; +const Allocator = std.mem.Allocator; + +const eh_frame = @import("eh_frame.zig"); +const Cie = eh_frame.Cie; +const Fde = eh_frame.Fde; + /// Non-zero for fat object files or archives offset: u64, /// If `in_archive` is not `null`, this is the basename of the object in the archive. 
Otherwise, @@ -75,7 +102,9 @@ pub fn parse(self: *Object, macho_file: *MachO) !void { log.debug("parsing {f}", .{self.fmtPath()}); - const gpa = macho_file.base.comp.gpa; + const comp = macho_file.base.comp; + const io = comp.io; + const gpa = comp.gpa; const handle = macho_file.getFileHandle(self.file_handle); const cpu_arch = macho_file.getTarget().cpu.arch; @@ -84,7 +113,7 @@ pub fn parse(self: *Object, macho_file: *MachO) !void { var header_buffer: [@sizeOf(macho.mach_header_64)]u8 = undefined; { - const amt = try handle.preadAll(&header_buffer, self.offset); + const amt = try handle.readPositionalAll(io, &header_buffer, self.offset); if (amt != @sizeOf(macho.mach_header_64)) return error.InputOutput; } self.header = @as(*align(1) const macho.mach_header_64, @ptrCast(&header_buffer)).*; @@ -105,7 +134,7 @@ pub fn parse(self: *Object, macho_file: *MachO) !void { const lc_buffer = try gpa.alloc(u8, self.header.?.sizeofcmds); defer gpa.free(lc_buffer); { - const amt = try handle.preadAll(lc_buffer, self.offset + @sizeOf(macho.mach_header_64)); + const amt = try handle.readPositionalAll(io, lc_buffer, self.offset + @sizeOf(macho.mach_header_64)); if (amt != self.header.?.sizeofcmds) return error.InputOutput; } @@ -129,14 +158,14 @@ pub fn parse(self: *Object, macho_file: *MachO) !void { const cmd = lc.cast(macho.symtab_command).?; try self.strtab.resize(gpa, cmd.strsize); { - const amt = try handle.preadAll(self.strtab.items, cmd.stroff + self.offset); + const amt = try handle.readPositionalAll(io, self.strtab.items, cmd.stroff + self.offset); if (amt != self.strtab.items.len) return error.InputOutput; } const symtab_buffer = try gpa.alloc(u8, cmd.nsyms * @sizeOf(macho.nlist_64)); defer gpa.free(symtab_buffer); { - const amt = try handle.preadAll(symtab_buffer, cmd.symoff + self.offset); + const amt = try handle.readPositionalAll(io, symtab_buffer, cmd.symoff + self.offset); if (amt != symtab_buffer.len) return error.InputOutput; } const symtab = @as([*]align(1) const macho.nlist_64, @ptrCast(symtab_buffer.ptr))[0..cmd.nsyms]; @@ -154,7 +183,7 @@ pub fn parse(self: *Object, macho_file: *MachO) !void { const buffer = try gpa.alloc(u8, cmd.datasize); defer gpa.free(buffer); { - const amt = try handle.preadAll(buffer, self.offset + cmd.dataoff); + const amt = try handle.readPositionalAll(io, buffer, self.offset + cmd.dataoff); if (amt != buffer.len) return error.InputOutput; } const ndice = @divExact(cmd.datasize, @sizeOf(macho.data_in_code_entry)); @@ -440,12 +469,14 @@ fn initCstringLiterals(self: *Object, allocator: Allocator, file: File.Handle, m const tracy = trace(@src()); defer tracy.end(); + const comp = macho_file.base.comp; + const io = comp.io; const slice = self.sections.slice(); for (slice.items(.header), 0..) 
|sect, n_sect| { if (!isCstringLiteral(sect)) continue; - const data = try self.readSectionData(allocator, file, @intCast(n_sect)); + const data = try self.readSectionData(allocator, io, file, @intCast(n_sect)); defer allocator.free(data); var count: u32 = 0; @@ -628,7 +659,9 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO const tracy = trace(@src()); defer tracy.end(); - const gpa = macho_file.base.comp.gpa; + const comp = macho_file.base.comp; + const io = comp.io; + const gpa = comp.gpa; const file = macho_file.getFileHandle(self.file_handle); var buffer = std.array_list.Managed(u8).init(gpa); @@ -647,7 +680,7 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO const slice = self.sections.slice(); for (slice.items(.header), slice.items(.subsections), 0..) |header, subs, n_sect| { if (isCstringLiteral(header) or isFixedSizeLiteral(header)) { - const data = try self.readSectionData(gpa, file, @intCast(n_sect)); + const data = try self.readSectionData(gpa, io, file, @intCast(n_sect)); defer gpa.free(data); for (subs.items) |sub| { @@ -682,7 +715,7 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO buffer.resize(target_size) catch unreachable; const gop = try sections_data.getOrPut(target.n_sect); if (!gop.found_existing) { - gop.value_ptr.* = try self.readSectionData(gpa, file, @intCast(target.n_sect)); + gop.value_ptr.* = try self.readSectionData(gpa, io, file, @intCast(target.n_sect)); } const data = gop.value_ptr.*; const target_off = try macho_file.cast(usize, target.off); @@ -1037,9 +1070,11 @@ fn initEhFrameRecords(self: *Object, allocator: Allocator, sect_id: u8, file: Fi const sect = slice.items(.header)[sect_id]; const relocs = slice.items(.relocs)[sect_id]; + const comp = macho_file.base.comp; + const io = comp.io; const size = try macho_file.cast(usize, sect.size); try self.eh_frame_data.resize(allocator, size); - const amt = try file.preadAll(self.eh_frame_data.items, sect.offset + self.offset); + const amt = try file.readPositionalAll(io, self.eh_frame_data.items, sect.offset + self.offset); if (amt != self.eh_frame_data.items.len) return error.InputOutput; // Check for non-personality relocs in FDEs and apply them @@ -1138,8 +1173,10 @@ fn initUnwindRecords(self: *Object, allocator: Allocator, sect_id: u8, file: Fil } }; + const comp = macho_file.base.comp; + const io = comp.io; const header = self.sections.items(.header)[sect_id]; - const data = try self.readSectionData(allocator, file, sect_id); + const data = try self.readSectionData(allocator, io, file, sect_id); defer allocator.free(data); const nrecs = @divExact(data.len, @sizeOf(macho.compact_unwind_entry)); @@ -1348,7 +1385,9 @@ fn parseDebugInfo(self: *Object, macho_file: *MachO) !void { const tracy = trace(@src()); defer tracy.end(); - const gpa = macho_file.base.comp.gpa; + const comp = macho_file.base.comp; + const io = comp.io; + const gpa = comp.gpa; const file = macho_file.getFileHandle(self.file_handle); var dwarf: Dwarf = .{}; @@ -1358,18 +1397,18 @@ fn parseDebugInfo(self: *Object, macho_file: *MachO) !void { const n_sect: u8 = @intCast(index); if (sect.attrs() & macho.S_ATTR_DEBUG == 0) continue; if (mem.eql(u8, sect.sectName(), "__debug_info")) { - dwarf.debug_info = try self.readSectionData(gpa, file, n_sect); + dwarf.debug_info = try self.readSectionData(gpa, io, file, n_sect); } if (mem.eql(u8, sect.sectName(), "__debug_abbrev")) { - dwarf.debug_abbrev = try self.readSectionData(gpa, file, n_sect); + 
dwarf.debug_abbrev = try self.readSectionData(gpa, io, file, n_sect); } if (mem.eql(u8, sect.sectName(), "__debug_str")) { - dwarf.debug_str = try self.readSectionData(gpa, file, n_sect); + dwarf.debug_str = try self.readSectionData(gpa, io, file, n_sect); } // __debug_str_offs[ets] section is a new addition in DWARFv5 and is generally // required in order to correctly parse strings. if (mem.eql(u8, sect.sectName(), "__debug_str_offs")) { - dwarf.debug_str_offsets = try self.readSectionData(gpa, file, n_sect); + dwarf.debug_str_offsets = try self.readSectionData(gpa, io, file, n_sect); } } @@ -1611,12 +1650,14 @@ pub fn parseAr(self: *Object, macho_file: *MachO) !void { const tracy = trace(@src()); defer tracy.end(); - const gpa = macho_file.base.comp.gpa; + const comp = macho_file.base.comp; + const io = comp.io; + const gpa = comp.gpa; const handle = macho_file.getFileHandle(self.file_handle); var header_buffer: [@sizeOf(macho.mach_header_64)]u8 = undefined; { - const amt = try handle.preadAll(&header_buffer, self.offset); + const amt = try handle.readPositionalAll(io, &header_buffer, self.offset); if (amt != @sizeOf(macho.mach_header_64)) return error.InputOutput; } self.header = @as(*align(1) const macho.mach_header_64, @ptrCast(&header_buffer)).*; @@ -1637,7 +1678,7 @@ pub fn parseAr(self: *Object, macho_file: *MachO) !void { const lc_buffer = try gpa.alloc(u8, self.header.?.sizeofcmds); defer gpa.free(lc_buffer); { - const amt = try handle.preadAll(lc_buffer, self.offset + @sizeOf(macho.mach_header_64)); + const amt = try handle.readPositionalAll(io, lc_buffer, self.offset + @sizeOf(macho.mach_header_64)); if (amt != self.header.?.sizeofcmds) return error.InputOutput; } @@ -1647,14 +1688,14 @@ pub fn parseAr(self: *Object, macho_file: *MachO) !void { const cmd = lc.cast(macho.symtab_command).?; try self.strtab.resize(gpa, cmd.strsize); { - const amt = try handle.preadAll(self.strtab.items, cmd.stroff + self.offset); + const amt = try handle.readPositionalAll(io, self.strtab.items, cmd.stroff + self.offset); if (amt != self.strtab.items.len) return error.InputOutput; } const symtab_buffer = try gpa.alloc(u8, cmd.nsyms * @sizeOf(macho.nlist_64)); defer gpa.free(symtab_buffer); { - const amt = try handle.preadAll(symtab_buffer, cmd.symoff + self.offset); + const amt = try handle.readPositionalAll(io, symtab_buffer, cmd.symoff + self.offset); if (amt != symtab_buffer.len) return error.InputOutput; } const symtab = @as([*]align(1) const macho.nlist_64, @ptrCast(symtab_buffer.ptr))[0..cmd.nsyms]; @@ -1689,13 +1730,15 @@ pub fn updateArSymtab(self: Object, ar_symtab: *Archive.ArSymtab, macho_file: *M } pub fn updateArSize(self: *Object, macho_file: *MachO) !void { + const comp = macho_file.base.comp; + const io = comp.io; self.output_ar_state.size = if (self.in_archive) |ar| ar.size else size: { const file = macho_file.getFileHandle(self.file_handle); - break :size (try file.stat()).size; + break :size (try file.stat(io)).size; }; } -pub fn writeAr(self: Object, ar_format: Archive.Format, macho_file: *MachO, writer: anytype) !void { +pub fn writeAr(self: Object, ar_format: Archive.Format, macho_file: *MachO, writer: *Writer) !void { // Header const size = try macho_file.cast(usize, self.output_ar_state.size); const basename = std.fs.path.basename(self.path); @@ -1703,10 +1746,12 @@ pub fn writeAr(self: Object, ar_format: Archive.Format, macho_file: *MachO, writ // Data const file = macho_file.getFileHandle(self.file_handle); // TODO try using copyRangeAll - const gpa = 
macho_file.base.comp.gpa; + const comp = macho_file.base.comp; + const io = comp.io; + const gpa = comp.gpa; const data = try gpa.alloc(u8, size); defer gpa.free(data); - const amt = try file.preadAll(data, self.offset); + const amt = try file.readPositionalAll(io, data, self.offset); if (amt != size) return error.InputOutput; try writer.writeAll(data); } @@ -1811,7 +1856,9 @@ pub fn writeAtoms(self: *Object, macho_file: *MachO) !void { const tracy = trace(@src()); defer tracy.end(); - const gpa = macho_file.base.comp.gpa; + const comp = macho_file.base.comp; + const io = comp.io; + const gpa = comp.gpa; const headers = self.sections.items(.header); const sections_data = try gpa.alloc([]const u8, headers.len); defer { @@ -1827,7 +1874,7 @@ pub fn writeAtoms(self: *Object, macho_file: *MachO) !void { if (header.isZerofill()) continue; const size = try macho_file.cast(usize, header.size); const data = try gpa.alloc(u8, size); - const amt = try file.preadAll(data, header.offset + self.offset); + const amt = try file.readPositionalAll(io, data, header.offset + self.offset); if (amt != data.len) return error.InputOutput; sections_data[n_sect] = data; } @@ -1850,7 +1897,9 @@ pub fn writeAtomsRelocatable(self: *Object, macho_file: *MachO) !void { const tracy = trace(@src()); defer tracy.end(); - const gpa = macho_file.base.comp.gpa; + const comp = macho_file.base.comp; + const io = comp.io; + const gpa = comp.gpa; const headers = self.sections.items(.header); const sections_data = try gpa.alloc([]const u8, headers.len); defer { @@ -1866,7 +1915,7 @@ pub fn writeAtomsRelocatable(self: *Object, macho_file: *MachO) !void { if (header.isZerofill()) continue; const size = try macho_file.cast(usize, header.size); const data = try gpa.alloc(u8, size); - const amt = try file.preadAll(data, header.offset + self.offset); + const amt = try file.readPositionalAll(io, data, header.offset + self.offset); if (amt != data.len) return error.InputOutput; sections_data[n_sect] = data; } @@ -2482,11 +2531,11 @@ pub fn getUnwindRecord(self: *Object, index: UnwindInfo.Record.Index) *UnwindInf } /// Caller owns the memory. 
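The hunk below migrates `readSectionData`, the helper behind most of the positional reads in this file. In isolation, the pattern this patch converges on looks roughly like the following sketch (signatures as they appear elsewhere in the patch; note the sketch registers `errdefer` before the read so a failed `readPositionalAll` cannot leak the buffer, whereas the hunk keeps it after the read):

```zig
const std = @import("std");
const Io = std.Io;
const Allocator = std.mem.Allocator;

/// Allocate, read at an absolute offset, and treat a short read as an error.
fn readAt(gpa: Allocator, io: Io, file: Io.File, offset: u64, size: usize) ![]u8 {
    const data = try gpa.alloc(u8, size);
    errdefer gpa.free(data); // registered before the read: a failed read frees the buffer
    const amt = try file.readPositionalAll(io, data, offset);
    if (amt != data.len) return error.InputOutput; // short read: truncated input
    return data;
}
```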
-pub fn readSectionData(self: Object, allocator: Allocator, file: File.Handle, n_sect: u8) ![]u8 { +pub fn readSectionData(self: Object, allocator: Allocator, io: Io, file: File.Handle, n_sect: u8) ![]u8 { const header = self.sections.items(.header)[n_sect]; const size = math.cast(usize, header.size) orelse return error.Overflow; const data = try allocator.alloc(u8, size); - const amt = try file.preadAll(data, header.offset + self.offset); + const amt = try file.readPositionalAll(io, data, header.offset + self.offset); errdefer allocator.free(data); if (amt != data.len) return error.InputOutput; return data; @@ -2710,15 +2759,17 @@ const x86_64 = struct { handle: File.Handle, macho_file: *MachO, ) !void { - const gpa = macho_file.base.comp.gpa; + const comp = macho_file.base.comp; + const io = comp.io; + const gpa = comp.gpa; const relocs_buffer = try gpa.alloc(u8, sect.nreloc * @sizeOf(macho.relocation_info)); defer gpa.free(relocs_buffer); - const amt = try handle.preadAll(relocs_buffer, sect.reloff + self.offset); + const amt = try handle.readPositionalAll(io, relocs_buffer, sect.reloff + self.offset); if (amt != relocs_buffer.len) return error.InputOutput; const relocs = @as([*]align(1) const macho.relocation_info, @ptrCast(relocs_buffer.ptr))[0..sect.nreloc]; - const code = try self.readSectionData(gpa, handle, n_sect); + const code = try self.readSectionData(gpa, io, handle, n_sect); defer gpa.free(code); try out.ensureTotalCapacityPrecise(gpa, relocs.len); @@ -2877,15 +2928,17 @@ const aarch64 = struct { handle: File.Handle, macho_file: *MachO, ) !void { - const gpa = macho_file.base.comp.gpa; + const comp = macho_file.base.comp; + const io = comp.io; + const gpa = comp.gpa; const relocs_buffer = try gpa.alloc(u8, sect.nreloc * @sizeOf(macho.relocation_info)); defer gpa.free(relocs_buffer); - const amt = try handle.preadAll(relocs_buffer, sect.reloff + self.offset); + const amt = try handle.readPositionalAll(io, relocs_buffer, sect.reloff + self.offset); if (amt != relocs_buffer.len) return error.InputOutput; const relocs = @as([*]align(1) const macho.relocation_info, @ptrCast(relocs_buffer.ptr))[0..sect.nreloc]; - const code = try self.readSectionData(gpa, handle, n_sect); + const code = try self.readSectionData(gpa, io, handle, n_sect); defer gpa.free(code); try out.ensureTotalCapacityPrecise(gpa, relocs.len); @@ -3061,27 +3114,3 @@ const aarch64 = struct { } } }; - -const std = @import("std"); -const assert = std.debug.assert; -const log = std.log.scoped(.link); -const macho = std.macho; -const math = std.math; -const mem = std.mem; -const Allocator = std.mem.Allocator; -const Writer = std.Io.Writer; - -const eh_frame = @import("eh_frame.zig"); -const trace = @import("../../tracy.zig").trace; -const Archive = @import("Archive.zig"); -const Atom = @import("Atom.zig"); -const Cie = eh_frame.Cie; -const Dwarf = @import("Dwarf.zig"); -const Fde = eh_frame.Fde; -const File = @import("file.zig").File; -const LoadCommandIterator = macho.LoadCommandIterator; -const MachO = @import("../MachO.zig"); -const Object = @This(); -const Relocation = @import("Relocation.zig"); -const Symbol = @import("Symbol.zig"); -const UnwindInfo = @import("UnwindInfo.zig"); diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig index 5a4ea65790..49555c2746 100644 --- a/src/link/MachO/ZigObject.zig +++ b/src/link/MachO/ZigObject.zig @@ -171,6 +171,9 @@ pub fn getAtomData(self: ZigObject, macho_file: *MachO, atom: Atom, buffer: []u8 const isec = atom.getInputSection(macho_file); 
assert(!isec.isZerofill()); + const comp = macho_file.base.comp; + const io = comp.io; + switch (isec.type()) { macho.S_THREAD_LOCAL_REGULAR => { const tlv = self.tlv_initializers.get(atom.atom_index).?; @@ -182,7 +185,7 @@ pub fn getAtomData(self: ZigObject, macho_file: *MachO, atom: Atom, buffer: []u8 else => { const sect = macho_file.sections.items(.header)[atom.out_n_sect]; const file_offset = sect.offset + atom.value; - const amt = try macho_file.base.file.?.preadAll(buffer, file_offset); + const amt = try macho_file.base.file.?.readPositionalAll(io, buffer, file_offset); if (amt != buffer.len) return error.InputOutput; }, } @@ -290,12 +293,14 @@ pub fn dedupLiterals(self: *ZigObject, lp: MachO.LiteralPool, macho_file: *MachO /// We need this so that we can write to an archive. /// TODO implement writing ZigObject data directly to a buffer instead. pub fn readFileContents(self: *ZigObject, macho_file: *MachO) !void { - const diags = &macho_file.base.comp.link_diags; + const comp = macho_file.base.comp; + const gpa = comp.gpa; + const io = comp.io; + const diags = &comp.link_diags; // Size of the output object file is always the offset + size of the strtab const size = macho_file.symtab_cmd.stroff + macho_file.symtab_cmd.strsize; - const gpa = macho_file.base.comp.gpa; try self.data.resize(gpa, size); - const amt = macho_file.base.file.?.preadAll(self.data.items, 0) catch |err| + const amt = macho_file.base.file.?.readPositionalAll(io, self.data.items, 0) catch |err| return diags.fail("failed to read output file: {s}", .{@errorName(err)}); if (amt != size) return diags.fail("unexpected EOF reading from output file", .{}); @@ -945,6 +950,8 @@ fn updateNavCode( ) link.File.UpdateNavError!void { const zcu = pt.zcu; const gpa = zcu.gpa; + const comp = zcu.comp; + const io = comp.io; const ip = &zcu.intern_pool; const nav = ip.getNav(nav_index); @@ -1012,8 +1019,8 @@ fn updateNavCode( if (!sect.isZerofill()) { const file_offset = sect.offset + atom.value; - macho_file.base.file.?.pwriteAll(code, file_offset) catch |err| - return macho_file.base.cgFail(nav_index, "failed to write output file: {s}", .{@errorName(err)}); + macho_file.base.file.?.writePositionalAll(io, code, file_offset) catch |err| + return macho_file.base.cgFail(nav_index, "failed to write output file: {t}", .{err}); } } @@ -1493,7 +1500,7 @@ fn writeTrampoline(tr_sym: Symbol, target: Symbol, macho_file: *MachO) !void { .x86_64 => try x86_64.writeTrampolineCode(source_addr, target_addr, &buf), else => @panic("TODO implement write trampoline for this CPU arch"), }; - try macho_file.base.file.?.pwriteAll(out, fileoff); + return macho_file.pwriteAll(out, fileoff); } pub fn getOrCreateMetadataForNav( diff --git a/src/link/MachO/fat.zig b/src/link/MachO/fat.zig index 7772f7a4de..73b9c626e8 100644 --- a/src/link/MachO/fat.zig +++ b/src/link/MachO/fat.zig @@ -1,20 +1,22 @@ +const builtin = @import("builtin"); +const native_endian = builtin.target.cpu.arch.endian(); + const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; -const builtin = @import("builtin"); const log = std.log.scoped(.macho); const macho = std.macho; const mem = std.mem; -const native_endian = builtin.target.cpu.arch.endian(); const MachO = @import("../MachO.zig"); -pub fn readFatHeader(file: std.fs.File) !macho.fat_header { - return readFatHeaderGeneric(macho.fat_header, file, 0); +pub fn readFatHeader(io: Io, file: Io.File) !macho.fat_header { + return readFatHeaderGeneric(io, macho.fat_header, file, 0); } -fn 
readFatHeaderGeneric(comptime Hdr: type, file: std.fs.File, offset: usize) !Hdr { +fn readFatHeaderGeneric(io: Io, comptime Hdr: type, file: Io.File, offset: usize) !Hdr { var buffer: [@sizeOf(Hdr)]u8 = undefined; - const nread = try file.preadAll(&buffer, offset); + const nread = try file.readPositionalAll(io, &buffer, offset); if (nread != buffer.len) return error.InputOutput; var hdr = @as(*align(1) const Hdr, @ptrCast(&buffer)).*; mem.byteSwapAllFields(Hdr, &hdr); @@ -27,12 +29,12 @@ pub const Arch = struct { size: u32, }; -pub fn parseArchs(file: std.fs.File, fat_header: macho.fat_header, out: *[2]Arch) ![]const Arch { +pub fn parseArchs(io: Io, file: Io.File, fat_header: macho.fat_header, out: *[2]Arch) ![]const Arch { var count: usize = 0; var fat_arch_index: u32 = 0; while (fat_arch_index < fat_header.nfat_arch and count < out.len) : (fat_arch_index += 1) { const offset = @sizeOf(macho.fat_header) + @sizeOf(macho.fat_arch) * fat_arch_index; - const fat_arch = try readFatHeaderGeneric(macho.fat_arch, file, offset); + const fat_arch = try readFatHeaderGeneric(io, macho.fat_arch, file, offset); // If we come across an architecture that we do not know how to handle, that's // fine because we can keep looking for one that might match. const arch: std.Target.Cpu.Arch = switch (fat_arch.cputype) { diff --git a/src/link/MachO/file.zig b/src/link/MachO/file.zig index 05b43de181..cd687a4941 100644 --- a/src/link/MachO/file.zig +++ b/src/link/MachO/file.zig @@ -355,11 +355,12 @@ pub const File = union(enum) { dylib: Dylib, }; - pub const Handle = std.fs.File; + pub const Handle = Io.File; pub const HandleIndex = Index; }; const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; const log = std.log.scoped(.link); const macho = std.macho; diff --git a/src/link/MachO/hasher.zig b/src/link/MachO/hasher.zig index 78cd847c40..822474e3e1 100644 --- a/src/link/MachO/hasher.zig +++ b/src/link/MachO/hasher.zig @@ -1,34 +1,36 @@ +const std = @import("std"); +const Io = std.Io; +const assert = std.debug.assert; +const Allocator = std.mem.Allocator; + +const trace = @import("../../tracy.zig").trace; + pub fn ParallelHasher(comptime Hasher: type) type { const hash_size = Hasher.digest_length; return struct { - allocator: Allocator, - io: std.Io, - - pub fn hash(self: Self, file: fs.File, out: [][hash_size]u8, opts: struct { + pub fn hash(gpa: Allocator, io: Io, file: Io.File, out: [][hash_size]u8, opts: struct { chunk_size: u64 = 0x4000, max_file_size: ?u64 = null, }) !void { const tracy = trace(@src()); defer tracy.end(); - const io = self.io; - const file_size = blk: { - const file_size = opts.max_file_size orelse try file.getEndPos(); + const file_size = opts.max_file_size orelse try file.length(io); break :blk std.math.cast(usize, file_size) orelse return error.Overflow; }; const chunk_size = std.math.cast(usize, opts.chunk_size) orelse return error.Overflow; - const buffer = try self.allocator.alloc(u8, chunk_size * out.len); - defer self.allocator.free(buffer); + const buffer = try gpa.alloc(u8, chunk_size * out.len); + defer gpa.free(buffer); - const results = try self.allocator.alloc(fs.File.PReadError!usize, out.len); - defer self.allocator.free(results); + const results = try gpa.alloc(Io.File.ReadPositionalError!usize, out.len); + defer gpa.free(results); { - var group: std.Io.Group = .init; - errdefer group.cancel(io); + var group: Io.Group = .init; + defer group.cancel(io); for (out, results, 0..) 
|*out_buf, *result, i| { const fstart = i * chunk_size; @@ -37,6 +39,7 @@ pub fn ParallelHasher(comptime Hasher: type) type { else chunk_size; group.async(io, worker, .{ + io, file, fstart, buffer[fstart..][0..fsize], @@ -51,26 +54,15 @@ pub fn ParallelHasher(comptime Hasher: type) type { } fn worker( - file: fs.File, + io: Io, + file: Io.File, fstart: usize, buffer: []u8, out: *[hash_size]u8, - err: *fs.File.PReadError!usize, + err: *Io.File.ReadPositionalError!usize, ) void { - const tracy = trace(@src()); - defer tracy.end(); - err.* = file.preadAll(buffer, fstart); + err.* = file.readPositionalAll(io, buffer, fstart); Hasher.hash(buffer, out, .{}); } - - const Self = @This(); }; } - -const assert = std.debug.assert; -const fs = std.fs; -const mem = std.mem; -const std = @import("std"); -const trace = @import("../../tracy.zig").trace; - -const Allocator = mem.Allocator; diff --git a/src/link/MachO/relocatable.zig b/src/link/MachO/relocatable.zig index d2a6c2a3ab..13dd35a558 100644 --- a/src/link/MachO/relocatable.zig +++ b/src/link/MachO/relocatable.zig @@ -1,6 +1,7 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Path) link.File.FlushError!void { - const gpa = macho_file.base.comp.gpa; - const diags = &macho_file.base.comp.link_diags; + const gpa = comp.gpa; + const io = comp.io; + const diags = &comp.link_diags; // TODO: "positional arguments" is a CLI concept, not a linker concept. Delete this unnecessary array list. var positionals = std.array_list.Managed(link.Input).init(gpa); @@ -9,24 +10,22 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat positionals.appendSliceAssumeCapacity(comp.link_inputs); for (comp.c_object_table.keys()) |key| { - try positionals.append(try link.openObjectInput(diags, key.status.success.object_path)); + try positionals.append(try link.openObjectInput(io, diags, key.status.success.object_path)); } - if (module_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path)); + if (module_obj_path) |path| try positionals.append(try link.openObjectInput(io, diags, path)); if (macho_file.getZigObject() == null and positionals.items.len == 1) { // Instead of invoking a full-blown `-r` mode on the input which sadly will strip all // debug info segments/sections (this is apparently by design by Apple), we copy // the *only* input file over. 
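`link.File.copyRangeAll2`, called just below, folds the old `copyRangeAll` loop plus short-write check into one call that either copies the full range or errors. Purely as an illustration of that contract — this body is hypothetical, not the actual src/link.zig implementation:

```zig
const std = @import("std");
const Io = std.Io;

/// Copy `len` bytes from `src` at `src_off` to `dst` at `dst_off`;
/// never performs a short copy.
fn copyRangeAll2(io: Io, src: Io.File, dst: Io.File, src_off: u64, dst_off: u64, len: u64) !void {
    var buf: [8192]u8 = undefined;
    var copied: u64 = 0;
    while (copied < len) {
        const n: usize = @intCast(@min(len - copied, buf.len));
        const amt = try src.readPositionalAll(io, buf[0..n], src_off + copied);
        if (amt != n) return error.InputOutput; // source ended early
        try dst.writePositionalAll(io, buf[0..n], dst_off + copied);
        copied += n;
    }
}
```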
const path = positionals.items[0].path().?; - const in_file = path.root_dir.handle.openFile(path.sub_path, .{}) catch |err| + const in_file = path.root_dir.handle.openFile(io, path.sub_path, .{}) catch |err| return diags.fail("failed to open {f}: {s}", .{ path, @errorName(err) }); - const stat = in_file.stat() catch |err| + const stat = in_file.stat(io) catch |err| return diags.fail("failed to stat {f}: {s}", .{ path, @errorName(err) }); - const amt = in_file.copyRangeAll(0, macho_file.base.file.?, 0, stat.size) catch |err| - return diags.fail("failed to copy range of file {f}: {s}", .{ path, @errorName(err) }); - if (amt != stat.size) - return diags.fail("unexpected short write in copy range of file {f}", .{path}); + link.File.copyRangeAll2(io, in_file, macho_file.base.file.?, 0, 0, stat.size) catch |err| + return diags.fail("failed to copy range of file {f}: {t}", .{ path, err }); return; } @@ -79,6 +78,7 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Path) link.File.FlushError!void { const gpa = comp.gpa; + const io = comp.io; const diags = &macho_file.base.comp.link_diags; var positionals = std.array_list.Managed(link.Input).init(gpa); @@ -88,17 +88,17 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ? positionals.appendSliceAssumeCapacity(comp.link_inputs); for (comp.c_object_table.keys()) |key| { - try positionals.append(try link.openObjectInput(diags, key.status.success.object_path)); + try positionals.append(try link.openObjectInput(io, diags, key.status.success.object_path)); } - if (module_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path)); + if (module_obj_path) |path| try positionals.append(try link.openObjectInput(io, diags, path)); if (comp.compiler_rt_strat == .obj) { - try positionals.append(try link.openObjectInput(diags, comp.compiler_rt_obj.?.full_object_path)); + try positionals.append(try link.openObjectInput(io, diags, comp.compiler_rt_obj.?.full_object_path)); } if (comp.ubsan_rt_strat == .obj) { - try positionals.append(try link.openObjectInput(diags, comp.ubsan_rt_obj.?.full_object_path)); + try positionals.append(try link.openObjectInput(io, diags, comp.ubsan_rt_obj.?.full_object_path)); } for (positionals.items) |link_input| { @@ -229,7 +229,7 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ? assert(writer.end == total_size); - try macho_file.setEndPos(total_size); + try macho_file.setLength(total_size); try macho_file.pwriteAll(writer.buffered(), 0); if (diags.hasErrors()) return error.LinkFailure; diff --git a/src/link/MachO/uuid.zig b/src/link/MachO/uuid.zig index d08ac0c5b8..a75799d01e 100644 --- a/src/link/MachO/uuid.zig +++ b/src/link/MachO/uuid.zig @@ -1,28 +1,38 @@ +const std = @import("std"); +const Io = std.Io; +const Md5 = std.crypto.hash.Md5; + +const trace = @import("../../tracy.zig").trace; +const Compilation = @import("../../Compilation.zig"); +const ParallelHasher = @import("hasher.zig").ParallelHasher; + /// Calculates Md5 hash of each chunk in parallel and then hashes all Md5 hashes to produce /// the final digest. /// While this is NOT a correct MD5 hash of the contents, this methodology is used by LLVM/LLD /// and we will use it too as it seems accepted by Apple OSes. /// TODO LLD also hashes the output filename to disambiguate between same builds with different /// output files. Should we also do that? 
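As a serial reference for what `calcUuid` computes (a sketch only; the real code below fans the per-chunk hashing out across tasks via `Io.Group`):

```zig
const std = @import("std");
const Md5 = std.crypto.hash.Md5;

/// MD5 each fixed-size chunk, then MD5 the concatenated per-chunk digests.
fn chunkedMd5(data: []const u8, chunk_size: usize, out: *[Md5.digest_length]u8) void {
    var outer = Md5.init(.{});
    var i: usize = 0;
    while (i < data.len) : (i += chunk_size) {
        const end = @min(i + chunk_size, data.len);
        var digest: [Md5.digest_length]u8 = undefined;
        Md5.hash(data[i..end], &digest, .{});
        outer.update(&digest);
    }
    outer.final(out);
}
```

The `conform` helper further down then stamps the version and variant bits so the digest reads as a valid version-3 UUID.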
-pub fn calcUuid(comp: *const Compilation, file: fs.File, file_size: u64, out: *[Md5.digest_length]u8) !void { +pub fn calcUuid(comp: *const Compilation, file: Io.File, file_size: u64, out: *[Md5.digest_length]u8) !void { const tracy = trace(@src()); defer tracy.end(); + const gpa = comp.gpa; + const io = comp.io; + const chunk_size: usize = 1024 * 1024; const num_chunks: usize = std.math.cast(usize, @divTrunc(file_size, chunk_size)) orelse return error.Overflow; const actual_num_chunks = if (@rem(file_size, chunk_size) > 0) num_chunks + 1 else num_chunks; - const hashes = try comp.gpa.alloc([Md5.digest_length]u8, actual_num_chunks); - defer comp.gpa.free(hashes); + const hashes = try gpa.alloc([Md5.digest_length]u8, actual_num_chunks); + defer gpa.free(hashes); - var hasher = Hasher(Md5){ .allocator = comp.gpa, .io = comp.io }; - try hasher.hash(file, hashes, .{ + try ParallelHasher(Md5).hash(gpa, io, file, hashes, .{ .chunk_size = chunk_size, .max_file_size = file_size, }); - const final_buffer = try comp.gpa.alloc(u8, actual_num_chunks * Md5.digest_length); - defer comp.gpa.free(final_buffer); + const final_buffer = try gpa.alloc(u8, actual_num_chunks * Md5.digest_length); + defer gpa.free(final_buffer); for (hashes, 0..) |hash, i| { @memcpy(final_buffer[i * Md5.digest_length ..][0..Md5.digest_length], &hash); @@ -37,12 +47,3 @@ inline fn conform(out: *[Md5.digest_length]u8) void { out[6] = (out[6] & 0x0F) | (3 << 4); out[8] = (out[8] & 0x3F) | 0x80; } - -const fs = std.fs; -const mem = std.mem; -const std = @import("std"); -const trace = @import("../../tracy.zig").trace; - -const Compilation = @import("../../Compilation.zig"); -const Md5 = std.crypto.hash.Md5; -const Hasher = @import("hasher.zig").ParallelHasher; diff --git a/src/link/MappedFile.zig b/src/link/MappedFile.zig index 975b94578b..2986e27e24 100644 --- a/src/link/MappedFile.zig +++ b/src/link/MappedFile.zig @@ -1,3 +1,17 @@ +/// TODO add a mapped file abstraction to std.Io +const MappedFile = @This(); + +const builtin = @import("builtin"); +const is_linux = builtin.os.tag == .linux; +const is_windows = builtin.os.tag == .windows; + +const std = @import("std"); +const Io = std.Io; +const assert = std.debug.assert; +const linux = std.os.linux; +const windows = std.os.windows; + +io: Io, file: std.Io.File, flags: packed struct { block_size: std.mem.Alignment, @@ -16,16 +30,22 @@ writers: std.SinglyLinkedList, pub const growth_factor = 4; -pub const Error = std.posix.MMapError || std.posix.MRemapError || std.fs.File.SetEndPosError || error{ +pub const Error = std.posix.MMapError || std.posix.MRemapError || Io.File.LengthError || error{ NotFile, SystemResources, IsDir, Unseekable, NoSpaceLeft, + + InputOutput, + FileTooBig, + FileBusy, + NonResizable, }; -pub fn init(file: std.Io.File, gpa: std.mem.Allocator) !MappedFile { +pub fn init(file: std.Io.File, gpa: std.mem.Allocator, io: Io) !MappedFile { var mf: MappedFile = .{ + .io = io, .file = file, .flags = undefined, .section = if (is_windows) windows.INVALID_HANDLE_VALUE else {}, @@ -55,18 +75,41 @@ pub fn init(file: std.Io.File, gpa: std.mem.Allocator) !MappedFile { }; } if (is_linux) { - const statx = try linux.wrapped.statx( - mf.file.handle, - "", - std.posix.AT.EMPTY_PATH, - .{ .TYPE = true, .SIZE = true, .BLOCKS = true }, - ); - assert(statx.mask.TYPE); - assert(statx.mask.SIZE); - assert(statx.mask.BLOCKS); - - if (!std.posix.S.ISREG(statx.mode)) return error.PathAlreadyExists; - break :stat .{ statx.size, @max(std.heap.pageSize(), statx.blksize) }; + const use_c = 
std.c.versionCheck(if (builtin.abi.isAndroid()) + .{ .major = 30, .minor = 0, .patch = 0 } + else + .{ .major = 2, .minor = 28, .patch = 0 }); + const sys = if (use_c) std.c else std.os.linux; + while (true) { + var statx = std.mem.zeroes(linux.Statx); + const rc = sys.statx( + mf.file.handle, + "", + std.posix.AT.EMPTY_PATH, + .{ .TYPE = true, .SIZE = true, .BLOCKS = true }, + &statx, + ); + switch (sys.errno(rc)) { + .SUCCESS => { + assert(statx.mask.TYPE); + assert(statx.mask.SIZE); + assert(statx.mask.BLOCKS); + if (!std.posix.S.ISREG(statx.mode)) return error.PathAlreadyExists; + break :stat .{ statx.size, @max(std.heap.pageSize(), statx.blksize) }; + }, + .INTR => continue, + .ACCES => return error.AccessDenied, + .BADF => if (std.debug.runtime_safety) unreachable else return error.Unexpected, + .FAULT => if (std.debug.runtime_safety) unreachable else return error.Unexpected, + .INVAL => if (std.debug.runtime_safety) unreachable else return error.Unexpected, + .LOOP => return error.SymLinkLoop, + .NAMETOOLONG => return error.NameTooLong, + .NOENT => return error.FileNotFound, + .NOTDIR => return error.FileNotFound, + .NOMEM => return error.SystemResources, + else => |err| return std.posix.unexpectedErrno(err), + } + } } const stat = try std.posix.fstat(mf.file.handle); if (!std.posix.S.ISREG(stat.mode)) return error.PathAlreadyExists; @@ -433,8 +476,8 @@ pub const Node = extern struct { return n; }, .streaming, - .streaming_reading, - .positional_reading, + .streaming_simple, + .positional_simple, .failure, => { const dest = limit.slice(interface.unusedCapacitySlice()); @@ -612,13 +655,14 @@ pub fn addNodeAfter( } fn resizeNode(mf: *MappedFile, gpa: std.mem.Allocator, ni: Node.Index, requested_size: u64) !void { + const io = mf.io; const node = ni.get(mf); const old_offset, const old_size = node.location().resolve(mf); const new_size = node.flags.alignment.forward(@intCast(requested_size)); // Resize the entire file if (ni == Node.Index.root) { try mf.ensureCapacityForSetLocation(gpa); - try std.fs.File.adaptFromNewApi(mf.file).setEndPos(new_size); + try mf.file.setLength(io, new_size); try mf.ensureTotalCapacity(@intCast(new_size)); ni.setLocationAssumeCapacity(mf, old_offset, new_size); return; @@ -1059,12 +1103,3 @@ fn verifyNode(mf: *MappedFile, parent_ni: Node.Index) void { ni = node.next; } } - -const assert = std.debug.assert; -const builtin = @import("builtin"); -const is_linux = builtin.os.tag == .linux; -const is_windows = builtin.os.tag == .windows; -const linux = std.os.linux; -const MappedFile = @This(); -const std = @import("std"); -const windows = std.os.windows; diff --git a/src/link/Queue.zig b/src/link/Queue.zig index e8e7700695..b716800bae 100644 --- a/src/link/Queue.zig +++ b/src/link/Queue.zig @@ -121,7 +121,7 @@ pub fn enqueueZcu( link.doZcuTask(comp, tid, task); } -pub fn finishPrelinkQueue(q: *Queue, comp: *Compilation) void { +pub fn finishPrelinkQueue(q: *Queue, comp: *Compilation) Io.Cancelable!void { if (q.future != null) { q.prelink_queue.close(comp.io); return; @@ -136,6 +136,7 @@ pub fn finishPrelinkQueue(q: *Queue, comp: *Compilation) void { } else |err| switch (err) { error.OutOfMemory => comp.link_diags.setAllocFailure(), error.LinkFailure => {}, + error.Canceled => |e| return e, } } } @@ -175,6 +176,7 @@ fn runLinkTasks(q: *Queue, comp: *Compilation) void { lf.post_prelink = true; } else |err| switch (err) { error.OutOfMemory => comp.link_diags.setAllocFailure(), + error.Canceled => @panic("TODO"), error.LinkFailure => {}, } } diff --git 
a/src/link/SpirV.zig b/src/link/SpirV.zig index 7e28dc0a8b..4dbdd5c089 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -33,6 +33,7 @@ pub fn createEmpty( options: link.File.OpenOptions, ) !*Linker { const gpa = comp.gpa; + const io = comp.io; const target = &comp.root_mod.resolved_target.result; assert(!comp.config.use_lld); // Caught by Compilation.Config.resolve @@ -78,7 +79,7 @@ pub fn createEmpty( }; errdefer linker.deinit(); - linker.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{ + linker.base.file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{ .truncate = true, .read = true, }); @@ -245,6 +246,7 @@ pub fn flush( const comp = linker.base.comp; const diags = &comp.link_diags; const gpa = comp.gpa; + const io = comp.io; // We need to export the list of error names somewhere so that we can pretty-print them in the // executor. This is not really an important thing though, so we can just dump it in any old @@ -286,8 +288,8 @@ pub fn flush( }; // TODO endianness bug. use file writer and call writeSliceEndian instead - linker.base.file.?.writeAll(@ptrCast(linked_module)) catch |err| - return diags.fail("failed to write: {s}", .{@errorName(err)}); + linker.base.file.?.writeStreamingAll(io, @ptrCast(linked_module)) catch |err| + return diags.fail("failed to write: {t}", .{err}); } fn linkModule(arena: Allocator, module: []Word, progress: std.Progress.Node) ![]Word { diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 92307ec40c..af800d77d2 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -20,6 +20,7 @@ const native_endian = builtin.cpu.arch.endian(); const build_options = @import("build_options"); const std = @import("std"); +const Io = std.Io; const Allocator = std.mem.Allocator; const Cache = std.Build.Cache; const Path = Cache.Path; @@ -428,7 +429,11 @@ pub const OutputFunctionIndex = enum(u32) { pub fn fromSymbolName(wasm: *const Wasm, name: String) OutputFunctionIndex { if (wasm.flush_buffer.function_imports.getIndex(name)) |i| return @enumFromInt(i); - return fromFunctionIndex(wasm, FunctionIndex.fromSymbolName(wasm, name).?); + return fromFunctionIndex(wasm, FunctionIndex.fromSymbolName(wasm, name) orelse { + if (std.debug.runtime_safety) { + std.debug.panic("function index for symbol not found: {s}", .{name.slice(wasm)}); + } else unreachable; + }); } }; @@ -2996,16 +3001,18 @@ pub fn createEmpty( .named => |name| (try wasm.internString(name)).toOptional(), }; - wasm.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{ + const io = comp.io; + + wasm.base.file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{ .truncate = true, .read = true, - .mode = if (fs.has_executable_bit) + .permissions = if (Io.File.Permissions.has_executable_bit) if (target.os.tag == .wasi and output_mode == .Exe) - fs.File.default_mode | 0b001_000_000 + .executable_file else - fs.File.default_mode + .default_file else - 0, + .default_file, }); wasm.name = emit.sub_path; @@ -3013,14 +3020,16 @@ pub fn createEmpty( } fn openParseObjectReportingFailure(wasm: *Wasm, path: Path) void { - const diags = &wasm.base.comp.link_diags; - const obj = link.openObject(path, false, false) catch |err| { - switch (diags.failParse(path, "failed to open object: {s}", .{@errorName(err)})) { + const comp = wasm.base.comp; + const io = comp.io; + const diags = &comp.link_diags; + const obj = link.openObject(io, path, false, false) catch |err| { + switch (diags.failParse(path, "failed to open object: {t}", .{err})) { error.LinkFailure => return, } }; 
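// Note on the `{t}` conversions above: `{t}` formats an error (or enum) value
// by its tag name, so `error.FileNotFound` renders as "FileNotFound" without
// the old `{s}` + `@errorName(err)` pairing.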
wasm.parseObject(obj) catch |err| { - switch (diags.failParse(path, "failed to parse object: {s}", .{@errorName(err)})) { + switch (diags.failParse(path, "failed to parse object: {t}", .{err})) { error.LinkFailure => return, } }; @@ -3032,7 +3041,7 @@ fn parseObject(wasm: *Wasm, obj: link.Input.Object) !void { const io = wasm.base.comp.io; const gc_sections = wasm.base.gc_sections; - defer obj.file.close(); + defer obj.file.close(io); var file_reader = obj.file.reader(io, &.{}); @@ -3060,7 +3069,7 @@ fn parseArchive(wasm: *Wasm, obj: link.Input.Object) !void { const io = wasm.base.comp.io; const gc_sections = wasm.base.gc_sections; - defer obj.file.close(); + defer obj.file.close(io); var file_reader = obj.file.reader(io, &.{}); @@ -3529,7 +3538,10 @@ pub fn markFunctionImport( import: *FunctionImport, func_index: FunctionImport.Index, ) link.File.FlushError!void { - if (import.flags.alive) return; + // import.flags.alive might be already true from a previous update. In such + // case, we must still run the logic in this function, in case the item + // being marked was reverted by the `flush` logic that resets the hash + // table watermarks. import.flags.alive = true; const comp = wasm.base.comp; @@ -3549,8 +3561,9 @@ pub fn markFunctionImport( } else { try wasm.function_imports.put(gpa, name, .fromObject(func_index, wasm)); } - } else { - try markFunction(wasm, import.resolution.unpack(wasm).object_function, import.flags.exported); + } else switch (import.resolution.unpack(wasm)) { + .object_function => try markFunction(wasm, import.resolution.unpack(wasm).object_function, import.flags.exported), + else => return, } } @@ -3589,7 +3602,10 @@ fn markGlobalImport( import: *GlobalImport, global_index: GlobalImport.Index, ) link.File.FlushError!void { - if (import.flags.alive) return; + // import.flags.alive might be already true from a previous update. In such + // case, we must still run the logic in this function, in case the item + // being marked was reverted by the `flush` logic that resets the hash + // table watermarks. 
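// Sketch of the hazard, with generic names: update N marks the import and
// appends it to an output table; `flush` then rewinds that table to a saved
// watermark; update N+1 would hit an early `if (import.flags.alive) return;`
// and skip the re-append, silently dropping the symbol. Marking therefore
// re-runs unconditionally and must stay idempotent.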
import.flags.alive = true; const comp = wasm.base.comp; @@ -3619,8 +3635,9 @@ fn markGlobalImport( } else { try wasm.global_imports.put(gpa, name, .fromObject(global_index, wasm)); } - } else { - try markGlobal(wasm, import.resolution.unpack(wasm).object_global, import.flags.exported); + } else switch (import.resolution.unpack(wasm)) { + .object_global => try markGlobal(wasm, import.resolution.unpack(wasm).object_global, import.flags.exported), + else => return, } } @@ -3823,8 +3840,9 @@ pub fn flush( const comp = wasm.base.comp; const diags = &comp.link_diags; const gpa = comp.gpa; + const io = comp.io; - if (comp.verbose_link) Compilation.dump_argv(wasm.dump_argv_list.items); + if (comp.verbose_link) try Compilation.dumpArgv(io, wasm.dump_argv_list.items); if (wasm.base.zcu_object_basename) |raw| { const zcu_obj_path: Path = try comp.resolveEmitPathFlush(arena, .temp, raw); @@ -4037,7 +4055,7 @@ pub fn tagNameSymbolIndex(wasm: *Wasm, ip_index: InternPool.Index) Allocator.Err const comp = wasm.base.comp; assert(comp.config.output_mode == .Obj); const gpa = comp.gpa; - const name = try wasm.internStringFmt("__zig_tag_name_{d}", .{@intFromEnum(ip_index)}); + const name = try wasm.internStringFmt("__zig_tag_name_{d}", .{ip_index}); const gop = try wasm.symbol_table.getOrPut(gpa, name); gop.value_ptr.* = {}; return @enumFromInt(gop.index); diff --git a/src/link/Wasm/Flush.zig b/src/link/Wasm/Flush.zig index 6f7792f473..5bd18a1936 100644 --- a/src/link/Wasm/Flush.zig +++ b/src/link/Wasm/Flush.zig @@ -108,6 +108,7 @@ pub fn deinit(f: *Flush, gpa: Allocator) void { pub fn finish(f: *Flush, wasm: *Wasm) !void { const comp = wasm.base.comp; + const io = comp.io; const shared_memory = comp.config.shared_memory; const diags = &comp.link_diags; const gpa = comp.gpa; @@ -127,17 +128,20 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void { if (comp.zcu) |zcu| { const ip: *const InternPool = &zcu.intern_pool; // No mutations allowed! + log.debug("total MIR instructions: {d}", .{wasm.mir_instructions.len}); + // Detect any intrinsics that were called; they need to have dependencies on the symbols marked. // Likewise detect `@tagName` calls so those functions can be included in the output and synthesized. 
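// Each `call_tag_name` below synthesizes a `__zig_tag_name_<ip index>`
// function, conceptually `fn (IntTagType) [:0]const u8`, whose offsets and
// NUL-terminated name bytes land in `tag_name_offs`/`tag_name_bytes`.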
for (wasm.mir_instructions.items(.tag), wasm.mir_instructions.items(.data)) |tag, *data| switch (tag) { .call_intrinsic => { const symbol_name = try wasm.internString(@tagName(data.intrinsic)); const i: Wasm.FunctionImport.Index = @enumFromInt(wasm.object_function_imports.getIndex(symbol_name) orelse { - return diags.fail("missing compiler runtime intrinsic '{s}' (undefined linker symbol)", .{ - @tagName(data.intrinsic), + return diags.fail("missing compiler runtime intrinsic '{t}' (undefined linker symbol)", .{ + data.intrinsic, }); }); try wasm.markFunctionImport(symbol_name, i.value(wasm), i); + log.debug("markFunctionImport intrinsic {d}={t}", .{ i, data.intrinsic }); }, .call_tag_name => { assert(ip.indexToKey(data.ip_index) == .enum_type); @@ -146,11 +150,10 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void { wasm.tag_name_table_ref_count += 1; const int_tag_ty = Zcu.Type.fromInterned(data.ip_index).intTagType(zcu); gop.value_ptr.* = .{ .tag_name = .{ - .symbol_name = try wasm.internStringFmt("__zig_tag_name_{d}", .{@intFromEnum(data.ip_index)}), + .symbol_name = try wasm.internStringFmt("__zig_tag_name_{d}", .{data.ip_index}), .type_index = try wasm.internFunctionType(.auto, &.{int_tag_ty.ip_index}, .slice_const_u8_sentinel_0, target), .table_index = @intCast(wasm.tag_name_offs.items.len), } }; - try wasm.functions.put(gpa, .fromZcuFunc(wasm, @enumFromInt(gop.index)), {}); const tag_names = ip.loadEnumType(data.ip_index).names; for (tag_names.get(ip)) |tag_name| { const slice = tag_name.toSlice(ip); @@ -158,6 +161,7 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void { try wasm.tag_name_bytes.appendSlice(gpa, slice[0 .. slice.len + 1]); } } + try wasm.functions.put(gpa, .fromZcuFunc(wasm, @enumFromInt(gop.index)), {}); }, else => continue, }; @@ -1067,7 +1071,7 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void { } // Finally, write the entire binary into the file. - var file_writer = wasm.base.file.?.writer(&.{}); + var file_writer = wasm.base.file.?.writer(io, &.{}); file_writer.interface.writeAll(binary_bytes.items) catch |err| switch (err) { error.WriteFailed => return file_writer.err.?, }; diff --git a/src/link/tapi.zig b/src/link/tapi.zig index 4c1471a6b4..33c31a8415 100644 --- a/src/link/tapi.zig +++ b/src/link/tapi.zig @@ -1,10 +1,10 @@ const std = @import("std"); -const fs = std.fs; +const Io = std.Io; const mem = std.mem; const log = std.log.scoped(.tapi); -const yaml = @import("tapi/yaml.zig"); +const Allocator = std.mem.Allocator; -const Allocator = mem.Allocator; +const yaml = @import("tapi/yaml.zig"); const Yaml = yaml.Yaml; const VersionField = union(enum) { @@ -130,7 +130,7 @@ pub const Tbd = union(enum) { pub const TapiError = error{ NotLibStub, InputOutput, -} || yaml.YamlError || std.fs.File.PReadError; +} || yaml.YamlError || Io.File.ReadPositionalError; pub const LibStub = struct { /// Underlying memory for stub's contents. @@ -139,14 +139,14 @@ pub const LibStub = struct { /// Typed contents of the tbd file. 
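/// A .tbd stub is a YAML document (possibly several concatenated documents,
/// hence the slice) describing a dylib's exported symbols with no machine
/// code.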
inner: []Tbd, - pub fn loadFromFile(allocator: Allocator, file: fs.File) TapiError!LibStub { + pub fn loadFromFile(allocator: Allocator, io: Io, file: Io.File) TapiError!LibStub { const filesize = blk: { - const stat = file.stat() catch break :blk std.math.maxInt(u32); + const stat = file.stat(io) catch break :blk std.math.maxInt(u32); break :blk @min(stat.size, std.math.maxInt(u32)); }; const source = try allocator.alloc(u8, filesize); defer allocator.free(source); - const amt = try file.preadAll(source, 0); + const amt = try file.readPositionalAll(io, source, 0); if (amt != filesize) return error.InputOutput; var lib_stub = LibStub{
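Taken together, the call-site changes in this patch follow one mechanical recipe: thread the `std.Io` instance (typically `comp.io`) into every filesystem call, and swap `std.fs.File` method names for their `std.Io.File` equivalents. A condensed before/after, with hypothetical names:

```zig
const std = @import("std");
const Io = std.Io;

fn writeOutput(io: Io, dir: Io.Dir, bytes: []const u8) !void {
    // Before: dir.createFile("out.o", .{ .truncate = false })
    const file = try dir.createFile(io, "out.o", .{ .truncate = false });
    // Before: file.close()
    defer file.close(io);
    // Before: file.setEndPos(bytes.len)
    try file.setLength(io, bytes.len);
    // Before: file.pwriteAll(bytes, 0)
    try file.writePositionalAll(io, bytes, 0);
}
```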
