author     Andrew Kelley <andrew@ziglang.org>   2025-08-29 03:48:45 -0700
committer  GitHub <noreply@github.com>          2025-08-29 03:48:45 -0700
commit     4b948e8556b80cbc874415aa7c4bf9ac0027ffed (patch)
tree       ca48e7208aa23a24db82e8521c37a6c2abcd5dc1 /src
parent     640c11171bf8d13776629941f3305cf11c62c1f3 (diff)
parent     43fbc37a490442ffcecf9817877f542251fee664 (diff)
Merge pull request #25036 from ziglang/GenericWriter
std.Io: delete GenericWriter, AnyWriter, and null_writer
Diffstat (limited to 'src')
-rw-r--r--  src/Compilation.zig | 19
-rw-r--r--  src/IncrementalDebugServer.zig | 8
-rw-r--r--  src/Package/Fetch.zig | 10
-rw-r--r--  src/arch/riscv64/Emit.zig | 2
-rw-r--r--  src/arch/riscv64/encoding.zig | 2
-rw-r--r--  src/arch/wasm/Emit.zig | 80
-rw-r--r--  src/arch/wasm/Mir.zig | 39
-rw-r--r--  src/codegen.zig | 25
-rw-r--r--  src/libs/freebsd.zig | 81
-rw-r--r--  src/libs/glibc.zig | 53
-rw-r--r--  src/libs/mingw.zig | 22
-rw-r--r--  src/libs/musl.zig | 6
-rw-r--r--  src/libs/netbsd.zig | 49
-rw-r--r--  src/link.zig | 8
-rw-r--r--  src/link/Coff.zig | 8
-rw-r--r--  src/link/Elf.zig | 72
-rw-r--r--  src/link/Elf/Archive.zig | 9
-rw-r--r--  src/link/Elf/Atom.zig | 255
-rw-r--r--  src/link/Elf/AtomList.zig | 9
-rw-r--r--  src/link/Elf/Object.zig | 2
-rw-r--r--  src/link/Elf/gc.zig | 16
-rw-r--r--  src/link/Elf/relocatable.zig | 45
-rw-r--r--  src/link/Elf/synthetic_sections.zig | 104
-rw-r--r--  src/link/MachO.zig | 136
-rw-r--r--  src/link/MachO/Archive.zig | 56
-rw-r--r--  src/link/MachO/Atom.zig | 13
-rw-r--r--  src/link/MachO/CodeSignature.zig | 18
-rw-r--r--  src/link/MachO/DebugSymbols.zig | 19
-rw-r--r--  src/link/MachO/InternalObject.zig | 2
-rw-r--r--  src/link/MachO/UnwindInfo.zig | 47
-rw-r--r--  src/link/MachO/dyld_info/Rebase.zig | 19
-rw-r--r--  src/link/MachO/dyld_info/Trie.zig | 28
-rw-r--r--  src/link/MachO/dyld_info/bind.zig | 60
-rw-r--r--  src/link/MachO/load_commands.zig | 38
-rw-r--r--  src/link/MachO/relocatable.zig | 62
-rw-r--r--  src/link/MachO/synthetic.zig | 24
-rw-r--r--  src/link/Wasm/Flush.zig | 325
-rw-r--r--  src/link/riscv.zig | 29
-rw-r--r--  src/main.zig | 4
39 files changed, 901 insertions, 903 deletions
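
The changes below all follow the same migration: code that previously took a GenericWriter, an AnyWriter, or an `anytype` writer now takes the concrete, non-generic `*std.Io.Writer` interface. A minimal sketch of the new calling convention, assuming Zig 0.15-era std; the function and values are illustrative, not from the commit:

    const std = @import("std");

    // New-style sink: one concrete interface instead of `anytype`/`AnyWriter`.
    fn renderGreeting(w: *std.Io.Writer, n: u32) !void {
        try w.writeAll("hello ");
        try w.print("{d}\n", .{n});
    }

    pub fn main() !void {
        var buf: [32]u8 = undefined;
        // A fixed writer over a stack buffer; writes fail only if `buf` fills up.
        var w: std.Io.Writer = .fixed(&buf);
        try renderGreeting(&w, 42);
        std.debug.print("{s}", .{buf[0..w.end]});
    }
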
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 4d5e7d9eef..6c917b79c4 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -5893,15 +5893,16 @@ fn buildGlibcCrtFile(comp: *Compilation, crt_file: glibc.CrtFile, prog_node: std
fn buildGlibcSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) void {
defer comp.link_task_queue.finishPrelinkItem(comp);
- if (glibc.buildSharedObjects(comp, prog_node)) |_| {
- // The job should no longer be queued up since it succeeded.
- comp.queued_jobs.glibc_shared_objects = false;
- } else |err| switch (err) {
- error.AlreadyReported => return,
- else => comp.lockAndSetMiscFailure(.glibc_shared_objects, "unable to build glibc shared objects: {s}", .{
- @errorName(err),
- }),
- }
+ glibc.buildSharedObjects(comp, prog_node) catch unreachable;
+ //if (glibc.buildSharedObjects(comp, prog_node)) |_| {
+ // // The job should no longer be queued up since it succeeded.
+ // comp.queued_jobs.glibc_shared_objects = false;
+ //} else |err| switch (err) {
+ // error.AlreadyReported => return,
+ // else => comp.lockAndSetMiscFailure(.glibc_shared_objects, "unable to build glibc shared objects: {s}", .{
+ // @errorName(err),
+ // }),
+ //}
}
fn buildFreeBSDCrtFile(comp: *Compilation, crt_file: freebsd.CrtFile, prog_node: std.Progress.Node) void {
diff --git a/src/IncrementalDebugServer.zig b/src/IncrementalDebugServer.zig
index e5fcb71424..358b1a4327 100644
--- a/src/IncrementalDebugServer.zig
+++ b/src/IncrementalDebugServer.zig
@@ -76,7 +76,9 @@ fn runThread(ids: *IncrementalDebugServer) void {
ids.mutex.lock();
}
defer ids.mutex.unlock();
- handleCommand(ids.zcu, &text_out, cmd, arg) catch @panic("IncrementalDebugServer: out of memory");
+ var allocating: std.Io.Writer.Allocating = .fromArrayList(gpa, &text_out);
+ defer text_out = allocating.toArrayList();
+ handleCommand(ids.zcu, &allocating.writer, cmd, arg) catch @panic("IncrementalDebugServer: out of memory");
}
text_out.append(gpa, '\n') catch @panic("IncrementalDebugServer: out of memory");
conn.stream.writeAll(text_out.items) catch @panic("IncrementalDebugServer: failed to write");
@@ -119,10 +121,8 @@ const help_str: []const u8 =
\\
;
-fn handleCommand(zcu: *Zcu, output: *std.ArrayListUnmanaged(u8), cmd_str: []const u8, arg_str: []const u8) Allocator.Error!void {
+fn handleCommand(zcu: *Zcu, w: *std.Io.Writer, cmd_str: []const u8, arg_str: []const u8) error{ WriteFailed, OutOfMemory }!void {
const ip = &zcu.intern_pool;
- const gpa = zcu.gpa;
- const w = output.writer(gpa);
if (std.mem.eql(u8, cmd_str, "help")) {
try w.writeAll(help_str);
} else if (std.mem.eql(u8, cmd_str, "summary")) {
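
For call sites that still accumulate output in an ArrayList of bytes, the change above wraps the list in a `std.Io.Writer.Allocating`, passes `&allocating.writer` to the callee, and moves the buffer back afterwards. A small self-contained sketch of that adapter pattern; the allocator choice and function name are illustrative:

    const std = @import("std");

    fn handleHello(w: *std.Io.Writer) !void {
        // The callee only ever sees the concrete writer interface.
        try w.writeAll("hello\n");
    }

    pub fn main() !void {
        const gpa = std.heap.page_allocator;

        var text_out: std.ArrayList(u8) = .empty;
        defer text_out.deinit(gpa);

        // Wrap the existing list, hand out `&allocating.writer`, then move the
        // (possibly reallocated) buffer back into `text_out` when done.
        var allocating: std.Io.Writer.Allocating = .fromArrayList(gpa, &text_out);
        defer text_out = allocating.toArrayList();

        try handleHello(&allocating.writer);
    }
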
diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig
index 5972b0bf96..499d908c66 100644
--- a/src/Package/Fetch.zig
+++ b/src/Package/Fetch.zig
@@ -200,7 +200,7 @@ pub const JobQueue = struct {
const hash_slice = hash.toSlice();
- try buf.writer().print(
+ try buf.print(
\\ pub const {f} = struct {{
\\
, .{std.zig.fmtId(hash_slice)});
@@ -226,13 +226,13 @@ pub const JobQueue = struct {
}
}
- try buf.writer().print(
+ try buf.print(
\\ pub const build_root = "{f}";
\\
, .{std.fmt.alt(fetch.package_root, .formatEscapeString)});
if (fetch.has_build_zig) {
- try buf.writer().print(
+ try buf.print(
\\ pub const build_zig = @import("{f}");
\\
, .{std.zig.fmtString(hash_slice)});
@@ -245,7 +245,7 @@ pub const JobQueue = struct {
);
for (manifest.dependencies.keys(), manifest.dependencies.values()) |name, dep| {
const h = depDigest(fetch.package_root, jq.global_cache, dep) orelse continue;
- try buf.writer().print(
+ try buf.print(
" .{{ \"{f}\", \"{f}\" }},\n",
.{ std.zig.fmtString(name), std.zig.fmtString(h.toSlice()) },
);
@@ -277,7 +277,7 @@ pub const JobQueue = struct {
for (root_manifest.dependencies.keys(), root_manifest.dependencies.values()) |name, dep| {
const h = depDigest(root_fetch.package_root, jq.global_cache, dep) orelse continue;
- try buf.writer().print(
+ try buf.print(
" .{{ \"{f}\", \"{f}\" }},\n",
.{ std.zig.fmtString(name), std.zig.fmtString(h.toSlice()) },
);
diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig
index fd8afa677b..2e72aff941 100644
--- a/src/arch/riscv64/Emit.zig
+++ b/src/arch/riscv64/Emit.zig
@@ -31,7 +31,7 @@ pub fn emitMir(emit: *Emit) Error!void {
var lowered_relocs = lowered.relocs;
for (lowered.insts, 0..) |lowered_inst, lowered_index| {
const start_offset: u32 = @intCast(emit.code.items.len);
- try lowered_inst.encode(emit.code.writer(gpa));
+ std.mem.writeInt(u32, try emit.code.addManyAsArray(gpa, 4), lowered_inst.toU32(), .little);
while (lowered_relocs.len > 0 and
lowered_relocs[0].lowered_inst_index == lowered_index) : ({
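
With the encoder no longer taking a generic writer, the emitter above reserves the four instruction bytes directly in the code buffer and writes the little-endian word in place. A sketch of that idiom; the instruction word is just an illustrative RISC-V `nop` encoding:

    const std = @import("std");

    pub fn main() !void {
        const gpa = std.heap.page_allocator;
        var code: std.ArrayList(u8) = .empty;
        defer code.deinit(gpa);

        const word: u32 = 0x0000_0013; // addi x0, x0, 0 (nop), purely illustrative
        std.mem.writeInt(u32, try code.addManyAsArray(gpa, 4), word, .little);
        std.debug.assert(code.items.len == 4);
    }
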
diff --git a/src/arch/riscv64/encoding.zig b/src/arch/riscv64/encoding.zig
index 3ff497a1ea..109e1d08f8 100644
--- a/src/arch/riscv64/encoding.zig
+++ b/src/arch/riscv64/encoding.zig
@@ -518,7 +518,7 @@ pub const Instruction = union(Lir.Format) {
};
}
- pub fn encode(inst: Instruction, writer: anytype) !void {
+ pub fn encode(inst: Instruction, writer: *std.Io.Writer) !void {
try writer.writeInt(u32, inst.toU32(), .little);
}
diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig
index d97aebe553..272d5519a6 100644
--- a/src/arch/wasm/Emit.zig
+++ b/src/arch/wasm/Emit.zig
@@ -3,7 +3,7 @@ const Emit = @This();
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
-const leb = std.leb;
+const ArrayList = std.ArrayList;
const Wasm = link.File.Wasm;
const Mir = @import("Mir.zig");
@@ -15,7 +15,7 @@ const codegen = @import("../../codegen.zig");
mir: Mir,
wasm: *Wasm,
/// The binary representation that will be emitted by this module.
-code: *std.ArrayListUnmanaged(u8),
+code: *ArrayList(u8),
pub const Error = error{
OutOfMemory,
@@ -85,7 +85,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
if (is_obj) {
@panic("TODO");
} else {
- leb.writeUleb128(code.fixedWriter(), 1 + @intFromEnum(indirect_func_idx)) catch unreachable;
+ writeUleb128(code, 1 + @intFromEnum(indirect_func_idx));
}
inst += 1;
continue :loop tags[inst];
@@ -99,7 +99,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
// MIR is lowered during flush, so there is indeed only one thread at this time.
const errors_len = 1 + comp.zcu.?.intern_pool.global_error_set.getNamesFromMainThread().len;
- leb.writeIleb128(code.fixedWriter(), errors_len) catch unreachable;
+ writeSleb128(code, errors_len);
inst += 1;
continue :loop tags[inst];
@@ -122,7 +122,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
continue :loop tags[inst];
} else {
const addr: u32 = wasm.errorNameTableAddr();
- leb.writeIleb128(code.fixedWriter(), addr) catch unreachable;
+ writeSleb128(code, addr);
inst += 1;
continue :loop tags[inst];
@@ -131,7 +131,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
.br_if, .br, .memory_grow, .memory_size => {
try code.ensureUnusedCapacity(gpa, 11);
code.appendAssumeCapacity(@intFromEnum(tags[inst]));
- leb.writeUleb128(code.fixedWriter(), datas[inst].label) catch unreachable;
+ writeUleb128(code, datas[inst].label);
inst += 1;
continue :loop tags[inst];
@@ -140,7 +140,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
.local_get, .local_set, .local_tee => {
try code.ensureUnusedCapacity(gpa, 11);
code.appendAssumeCapacity(@intFromEnum(tags[inst]));
- leb.writeUleb128(code.fixedWriter(), datas[inst].local) catch unreachable;
+ writeUleb128(code, datas[inst].local);
inst += 1;
continue :loop tags[inst];
@@ -153,8 +153,8 @@ pub fn lowerToCode(emit: *Emit) Error!void {
try code.ensureUnusedCapacity(gpa, 11 + 10 * labels.len);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br_table));
// -1 because default label is not part of length/depth.
- leb.writeUleb128(code.fixedWriter(), extra.data.length - 1) catch unreachable;
- for (labels) |label| leb.writeUleb128(code.fixedWriter(), label) catch unreachable;
+ writeUleb128(code, extra.data.length - 1);
+ for (labels) |label| writeUleb128(code, label);
inst += 1;
continue :loop tags[inst];
@@ -199,9 +199,9 @@ pub fn lowerToCode(emit: *Emit) Error!void {
code.appendNTimesAssumeCapacity(0, 5);
} else {
const index: Wasm.Flush.FuncTypeIndex = .fromTypeIndex(func_ty_index, &wasm.flush_buffer);
- leb.writeUleb128(code.fixedWriter(), @intFromEnum(index)) catch unreachable;
+ writeUleb128(code, @intFromEnum(index));
}
- leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // table index
+ writeUleb128(code, @as(u32, 0)); // table index
inst += 1;
continue :loop tags[inst];
@@ -263,7 +263,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
code.appendNTimesAssumeCapacity(0, 5);
} else {
const sp_global: Wasm.GlobalIndex = .stack_pointer;
- std.leb.writeUleb128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
+ writeUleb128(code, @intFromEnum(sp_global));
}
inst += 1;
@@ -291,7 +291,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
.i32_const => {
try code.ensureUnusedCapacity(gpa, 6);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
- leb.writeIleb128(code.fixedWriter(), datas[inst].imm32) catch unreachable;
+ writeSleb128(code, datas[inst].imm32);
inst += 1;
continue :loop tags[inst];
@@ -300,7 +300,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
try code.ensureUnusedCapacity(gpa, 11);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_const));
const int64: i64 = @bitCast(mir.extraData(Mir.Imm64, datas[inst].payload).data.toInt());
- leb.writeIleb128(code.fixedWriter(), int64) catch unreachable;
+ writeSleb128(code, int64);
inst += 1;
continue :loop tags[inst];
@@ -476,33 +476,33 @@ pub fn lowerToCode(emit: *Emit) Error!void {
const extra_index = datas[inst].payload;
const opcode = mir.extra[extra_index];
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.misc_prefix));
- leb.writeUleb128(code.fixedWriter(), opcode) catch unreachable;
+ writeUleb128(code, opcode);
switch (@as(std.wasm.MiscOpcode, @enumFromInt(opcode))) {
// bulk-memory opcodes
.data_drop => {
const segment = mir.extra[extra_index + 1];
- leb.writeUleb128(code.fixedWriter(), segment) catch unreachable;
+ writeUleb128(code, segment);
inst += 1;
continue :loop tags[inst];
},
.memory_init => {
const segment = mir.extra[extra_index + 1];
- leb.writeUleb128(code.fixedWriter(), segment) catch unreachable;
- leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // memory index
+ writeUleb128(code, segment);
+ writeUleb128(code, @as(u32, 0)); // memory index
inst += 1;
continue :loop tags[inst];
},
.memory_fill => {
- leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // memory index
+ writeUleb128(code, @as(u32, 0)); // memory index
inst += 1;
continue :loop tags[inst];
},
.memory_copy => {
- leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // dst memory index
- leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // src memory index
+ writeUleb128(code, @as(u32, 0)); // dst memory index
+ writeUleb128(code, @as(u32, 0)); // src memory index
inst += 1;
continue :loop tags[inst];
@@ -538,7 +538,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
const extra_index = datas[inst].payload;
const opcode = mir.extra[extra_index];
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.simd_prefix));
- leb.writeUleb128(code.fixedWriter(), opcode) catch unreachable;
+ writeUleb128(code, opcode);
switch (@as(std.wasm.SimdOpcode, @enumFromInt(opcode))) {
.v128_store,
.v128_load,
@@ -824,7 +824,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
const extra_index = datas[inst].payload;
const opcode = mir.extra[extra_index];
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix));
- leb.writeUleb128(code.fixedWriter(), opcode) catch unreachable;
+ writeUleb128(code, opcode);
switch (@as(std.wasm.AtomicsOpcode, @enumFromInt(opcode))) {
.i32_atomic_load,
.i64_atomic_load,
@@ -900,7 +900,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
// Hard-codes memory index 0 since multi-memory proposal is
// not yet accepted nor implemented.
const memory_index: u32 = 0;
- leb.writeUleb128(code.fixedWriter(), memory_index) catch unreachable;
+ writeUleb128(code, memory_index);
inst += 1;
continue :loop tags[inst];
},
@@ -915,15 +915,15 @@ pub fn lowerToCode(emit: *Emit) Error!void {
}
/// Asserts 20 unused capacity.
-fn encodeMemArg(code: *std.ArrayListUnmanaged(u8), mem_arg: Mir.MemArg) void {
+fn encodeMemArg(code: *ArrayList(u8), mem_arg: Mir.MemArg) void {
assert(code.unusedCapacitySlice().len >= 20);
// Wasm encodes alignment as power of 2, rather than natural alignment.
const encoded_alignment = @ctz(mem_arg.alignment);
- leb.writeUleb128(code.fixedWriter(), encoded_alignment) catch unreachable;
- leb.writeUleb128(code.fixedWriter(), mem_arg.offset) catch unreachable;
+ writeUleb128(code, encoded_alignment);
+ writeUleb128(code, mem_arg.offset);
}
-fn uavRefObj(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), value: InternPool.Index, offset: i32, is_wasm32: bool) !void {
+fn uavRefObj(wasm: *Wasm, code: *ArrayList(u8), value: InternPool.Index, offset: i32, is_wasm32: bool) !void {
const comp = wasm.base.comp;
const gpa = comp.gpa;
const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
@@ -940,7 +940,7 @@ fn uavRefObj(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), value: InternPool.I
code.appendNTimesAssumeCapacity(0, if (is_wasm32) 5 else 10);
}
-fn uavRefExe(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), value: InternPool.Index, offset: i32, is_wasm32: bool) !void {
+fn uavRefExe(wasm: *Wasm, code: *ArrayList(u8), value: InternPool.Index, offset: i32, is_wasm32: bool) !void {
const comp = wasm.base.comp;
const gpa = comp.gpa;
const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
@@ -949,10 +949,10 @@ fn uavRefExe(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), value: InternPool.I
code.appendAssumeCapacity(@intFromEnum(opcode));
const addr = wasm.uavAddr(value);
- leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(@as(i64, addr) + offset))) catch unreachable;
+ writeUleb128(code, @as(u32, @intCast(@as(i64, addr) + offset)));
}
-fn navRefOff(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.NavRefOff, is_wasm32: bool) !void {
+fn navRefOff(wasm: *Wasm, code: *ArrayList(u8), data: Mir.NavRefOff, is_wasm32: bool) !void {
const comp = wasm.base.comp;
const zcu = comp.zcu.?;
const ip = &zcu.intern_pool;
@@ -975,10 +975,22 @@ fn navRefOff(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.NavRefOff
code.appendNTimesAssumeCapacity(0, if (is_wasm32) 5 else 10);
} else {
const addr = wasm.navAddr(data.nav_index);
- leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(@as(i64, addr) + data.offset))) catch unreachable;
+ writeUleb128(code, @as(u32, @intCast(@as(i64, addr) + data.offset)));
}
}
-fn appendOutputFunctionIndex(code: *std.ArrayListUnmanaged(u8), i: Wasm.OutputFunctionIndex) void {
- leb.writeUleb128(code.fixedWriter(), @intFromEnum(i)) catch unreachable;
+fn appendOutputFunctionIndex(code: *ArrayList(u8), i: Wasm.OutputFunctionIndex) void {
+ writeUleb128(code, @intFromEnum(i));
+}
+
+fn writeUleb128(code: *ArrayList(u8), arg: anytype) void {
+ var w: std.Io.Writer = .fixed(code.unusedCapacitySlice());
+ w.writeUleb128(arg) catch unreachable;
+ code.items.len += w.end;
+}
+
+fn writeSleb128(code: *ArrayList(u8), arg: anytype) void {
+ var w: std.Io.Writer = .fixed(code.unusedCapacitySlice());
+ w.writeSleb128(arg) catch unreachable;
+ code.items.len += w.end;
}
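
The two helpers added above encode LEB128 into the list's unused capacity through a fixed writer, which is why the `catch unreachable` is sound as long as callers reserve capacity first. A usage sketch under that assumption:

    const std = @import("std");

    // Mirrors the helper added above: encode into unused capacity, then claim it.
    fn writeUleb128(code: *std.ArrayList(u8), arg: anytype) void {
        var w: std.Io.Writer = .fixed(code.unusedCapacitySlice());
        w.writeUleb128(arg) catch unreachable; // caller reserved enough capacity
        code.items.len += w.end;
    }

    pub fn main() !void {
        const gpa = std.heap.page_allocator;
        var code: std.ArrayList(u8) = .empty;
        defer code.deinit(gpa);

        try code.ensureUnusedCapacity(gpa, 10); // a u64 needs at most 10 LEB128 bytes
        writeUleb128(&code, @as(u32, 624485));
        std.debug.assert(std.mem.eql(u8, code.items, &.{ 0xe5, 0x8e, 0x26 }));
    }
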
diff --git a/src/arch/wasm/Mir.zig b/src/arch/wasm/Mir.zig
index 98b263951f..af6e0eb40b 100644
--- a/src/arch/wasm/Mir.zig
+++ b/src/arch/wasm/Mir.zig
@@ -675,10 +675,13 @@ pub fn lower(mir: *const Mir, wasm: *Wasm, code: *std.ArrayListUnmanaged(u8)) st
// Write the locals in the prologue of the function body.
try code.ensureUnusedCapacity(gpa, 5 + mir.locals.len * 6 + 38);
- std.leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(mir.locals.len))) catch unreachable;
+ var w: std.Io.Writer = .fixed(code.unusedCapacitySlice());
+
+ w.writeLeb128(@as(u32, @intCast(mir.locals.len))) catch unreachable;
+
for (mir.locals) |local| {
- std.leb.writeUleb128(code.fixedWriter(), @as(u32, 1)) catch unreachable;
- code.appendAssumeCapacity(@intFromEnum(local));
+ w.writeLeb128(@as(u32, 1)) catch unreachable;
+ w.writeByte(@intFromEnum(local)) catch unreachable;
}
// Stack management section of function prologue.
@@ -686,33 +689,35 @@ pub fn lower(mir: *const Mir, wasm: *Wasm, code: *std.ArrayListUnmanaged(u8)) st
if (stack_alignment.toByteUnits()) |align_bytes| {
const sp_global: Wasm.GlobalIndex = .stack_pointer;
// load stack pointer
- code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_get));
- std.leb.writeUleb128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
+ w.writeByte(@intFromEnum(std.wasm.Opcode.global_get)) catch unreachable;
+ w.writeUleb128(@intFromEnum(sp_global)) catch unreachable;
// store stack pointer so we can restore it when we return from the function
- code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_tee));
- leb.writeUleb128(code.fixedWriter(), mir.prologue.sp_local) catch unreachable;
+ w.writeByte(@intFromEnum(std.wasm.Opcode.local_tee)) catch unreachable;
+ w.writeUleb128(mir.prologue.sp_local) catch unreachable;
// get the total stack size
const aligned_stack: i32 = @intCast(stack_alignment.forward(mir.prologue.stack_size));
- code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
- leb.writeIleb128(code.fixedWriter(), aligned_stack) catch unreachable;
+ w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const)) catch unreachable;
+ w.writeSleb128(aligned_stack) catch unreachable;
// subtract it from the current stack pointer
- code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_sub));
+ w.writeByte(@intFromEnum(std.wasm.Opcode.i32_sub)) catch unreachable;
// Get negative stack alignment
const neg_stack_align = @as(i32, @intCast(align_bytes)) * -1;
- code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
- leb.writeIleb128(code.fixedWriter(), neg_stack_align) catch unreachable;
+ w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const)) catch unreachable;
+ w.writeSleb128(neg_stack_align) catch unreachable;
// Bitwise-and the value to get the new stack pointer to ensure the
// pointers are aligned with the abi alignment.
- code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_and));
+ w.writeByte(@intFromEnum(std.wasm.Opcode.i32_and)) catch unreachable;
// The bottom will be used to calculate all stack pointer offsets.
- code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_tee));
- leb.writeUleb128(code.fixedWriter(), mir.prologue.bottom_stack_local) catch unreachable;
+ w.writeByte(@intFromEnum(std.wasm.Opcode.local_tee)) catch unreachable;
+ w.writeUleb128(mir.prologue.bottom_stack_local) catch unreachable;
// Store the current stack pointer value into the global stack pointer so other function calls will
// start from this value instead and not overwrite the current stack.
- code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_set));
- std.leb.writeUleb128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
+ w.writeByte(@intFromEnum(std.wasm.Opcode.global_set)) catch unreachable;
+ w.writeUleb128(@intFromEnum(sp_global)) catch unreachable;
}
+ code.items.len += w.end;
+
var emit: Emit = .{
.mir = mir.*,
.wasm = wasm,
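
The prologue lowering above batches several opcode/operand writes through one fixed writer and commits them to the list with a single `code.items.len += w.end` at the end. A condensed sketch of that shape; the opcodes are real wasm encodings, the surrounding buffer management is illustrative:

    const std = @import("std");

    pub fn main() !void {
        const gpa = std.heap.page_allocator;
        var code: std.ArrayList(u8) = .empty;
        defer code.deinit(gpa);

        // Reserve space up front, write through one fixed writer, commit once.
        try code.ensureUnusedCapacity(gpa, 16);
        var w: std.Io.Writer = .fixed(code.unusedCapacitySlice());

        w.writeByte(0x41) catch unreachable; // wasm opcode: i32.const
        w.writeSleb128(@as(i32, 5)) catch unreachable; // operand
        w.writeByte(0x1a) catch unreachable; // wasm opcode: drop

        code.items.len += w.end; // claim everything written above
        std.debug.assert(code.items.len == 3);
    }
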
diff --git a/src/codegen.zig b/src/codegen.zig
index 91ce034966..56e6e7c99f 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -6,6 +6,7 @@ const link = @import("link.zig");
const log = std.log.scoped(.codegen);
const mem = std.mem;
const math = std.math;
+const ArrayList = std.ArrayList;
const target_util = @import("target.zig");
const trace = @import("tracy.zig").trace;
@@ -179,7 +180,7 @@ pub fn emitFunction(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
any_mir: *const AnyMir,
- code: *std.ArrayListUnmanaged(u8),
+ code: *ArrayList(u8),
debug_output: link.File.DebugInfoOutput,
) CodeGenError!void {
const zcu = pt.zcu;
@@ -204,7 +205,7 @@ pub fn generateLazyFunction(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
lazy_sym: link.File.LazySymbol,
- code: *std.ArrayListUnmanaged(u8),
+ code: *ArrayList(u8),
debug_output: link.File.DebugInfoOutput,
) CodeGenError!void {
const zcu = pt.zcu;
@@ -236,7 +237,7 @@ pub fn generateLazySymbol(
lazy_sym: link.File.LazySymbol,
// TODO don't use an "out" parameter like this; put it in the result instead
alignment: *Alignment,
- code: *std.ArrayListUnmanaged(u8),
+ code: *ArrayList(u8),
debug_output: link.File.DebugInfoOutput,
reloc_parent: link.File.RelocInfo.Parent,
) CodeGenError!void {
@@ -311,7 +312,7 @@ pub fn generateSymbol(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
val: Value,
- code: *std.ArrayListUnmanaged(u8),
+ code: *ArrayList(u8),
reloc_parent: link.File.RelocInfo.Parent,
) GenerateSymbolError!void {
const tracy = trace(@src());
@@ -379,7 +380,7 @@ pub fn generateSymbol(
},
.err => |err| {
const int = try pt.getErrorValue(err.name);
- try code.writer(gpa).writeInt(u16, @intCast(int), endian);
+ mem.writeInt(u16, try code.addManyAsArray(gpa, 2), @intCast(int), endian);
},
.error_union => |error_union| {
const payload_ty = ty.errorUnionPayload(zcu);
@@ -389,7 +390,7 @@ pub fn generateSymbol(
};
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
- try code.writer(gpa).writeInt(u16, err_val, endian);
+ mem.writeInt(u16, try code.addManyAsArray(gpa, 2), err_val, endian);
return;
}
@@ -399,7 +400,7 @@ pub fn generateSymbol(
// error value first when its type is larger than the error union's payload
if (error_align.order(payload_align) == .gt) {
- try code.writer(gpa).writeInt(u16, err_val, endian);
+ mem.writeInt(u16, try code.addManyAsArray(gpa, 2), err_val, endian);
}
// emit payload part of the error union
@@ -421,7 +422,7 @@ pub fn generateSymbol(
// Payload size is larger than error set, so emit our error set last
if (error_align.compare(.lte, payload_align)) {
const begin = code.items.len;
- try code.writer(gpa).writeInt(u16, err_val, endian);
+ mem.writeInt(u16, try code.addManyAsArray(gpa, 2), err_val, endian);
const unpadded_end = code.items.len - begin;
const padded_end = abi_align.forward(unpadded_end);
const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;
@@ -476,7 +477,7 @@ pub fn generateSymbol(
}));
try generateSymbol(bin_file, pt, src_loc, value, code, reloc_parent);
}
- try code.writer(gpa).writeByte(@intFromBool(payload_val != null));
+ try code.append(gpa, @intFromBool(payload_val != null));
try code.appendNTimes(gpa, 0, padding);
}
},
@@ -721,7 +722,7 @@ fn lowerPtr(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
ptr_val: InternPool.Index,
- code: *std.ArrayListUnmanaged(u8),
+ code: *ArrayList(u8),
reloc_parent: link.File.RelocInfo.Parent,
prev_offset: u64,
) GenerateSymbolError!void {
@@ -774,7 +775,7 @@ fn lowerUavRef(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
uav: InternPool.Key.Ptr.BaseAddr.Uav,
- code: *std.ArrayListUnmanaged(u8),
+ code: *ArrayList(u8),
reloc_parent: link.File.RelocInfo.Parent,
offset: u64,
) GenerateSymbolError!void {
@@ -834,7 +835,7 @@ fn lowerNavRef(
lf: *link.File,
pt: Zcu.PerThread,
nav_index: InternPool.Nav.Index,
- code: *std.ArrayListUnmanaged(u8),
+ code: *ArrayList(u8),
reloc_parent: link.File.RelocInfo.Parent,
offset: u64,
) GenerateSymbolError!void {
diff --git a/src/libs/freebsd.zig b/src/libs/freebsd.zig
index 48d17bd5f6..2315964a50 100644
--- a/src/libs/freebsd.zig
+++ b/src/libs/freebsd.zig
@@ -512,7 +512,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
{
var map_contents = std.array_list.Managed(u8).init(arena);
for (metadata.all_versions[0 .. target_ver_index + 1]) |ver| {
- try map_contents.writer().print("FBSD_{d}.{d} {{ }};\n", .{ ver.major, ver.minor });
+ try map_contents.print("FBSD_{d}.{d} {{ }};\n", .{ ver.major, ver.minor });
}
try o_directory.handle.writeFile(.{ .sub_path = all_map_basename, .data = map_contents.items });
map_contents.deinit();
@@ -524,20 +524,17 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
for (libs, 0..) |lib, lib_i| {
stubs_asm.shrinkRetainingCapacity(0);
- const stubs_writer = stubs_asm.writer();
-
- try stubs_writer.writeAll(".text\n");
+ try stubs_asm.appendSlice(".text\n");
var sym_i: usize = 0;
- var sym_name_buf = std.array_list.Managed(u8).init(arena);
+ var sym_name_buf: std.Io.Writer.Allocating = .init(arena);
var opt_symbol_name: ?[]const u8 = null;
var versions = try std.DynamicBitSetUnmanaged.initEmpty(arena, metadata.all_versions.len);
var weak_linkages = try std.DynamicBitSetUnmanaged.initEmpty(arena, metadata.all_versions.len);
- var inc_fbs = std.io.fixedBufferStream(metadata.inclusions);
- var inc_reader = inc_fbs.reader();
+ var inc_reader: std.Io.Reader = .fixed(metadata.inclusions);
- const fn_inclusions_len = try inc_reader.readInt(u16, .little);
+ const fn_inclusions_len = try inc_reader.takeInt(u16, .little);
// Pick the default symbol version:
// - If there are no versions, don't emit it
@@ -550,19 +547,21 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
while (sym_i < fn_inclusions_len) : (sym_i += 1) {
const sym_name = opt_symbol_name orelse n: {
sym_name_buf.clearRetainingCapacity();
- try inc_reader.streamUntilDelimiter(sym_name_buf.writer(), 0, null);
+ _ = try inc_reader.streamDelimiter(&sym_name_buf.writer, 0);
+ assert(inc_reader.buffered()[0] == 0); // TODO change streamDelimiter API
+ inc_reader.toss(1);
- opt_symbol_name = sym_name_buf.items;
+ opt_symbol_name = sym_name_buf.written();
versions.unsetAll();
weak_linkages.unsetAll();
chosen_def_ver_index = 255;
chosen_unversioned_ver_index = 255;
- break :n sym_name_buf.items;
+ break :n sym_name_buf.written();
};
{
- const targets = try std.leb.readUleb128(u64, inc_reader);
- var lib_index = try inc_reader.readByte();
+ const targets = try inc_reader.takeLeb128(u64);
+ var lib_index = try inc_reader.takeByte();
const is_unversioned = (lib_index & (1 << 5)) != 0;
const is_weak = (lib_index & (1 << 6)) != 0;
@@ -576,7 +575,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
((targets & (@as(u64, 1) << @as(u6, @intCast(target_targ_index)))) != 0);
while (true) {
- const byte = try inc_reader.readByte();
+ const byte = try inc_reader.takeByte();
const last = (byte & 0b1000_0000) != 0;
const ver_i = @as(u7, @truncate(byte));
if (ok_lib_and_target and ver_i <= target_ver_index) {
@@ -608,7 +607,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
// .globl _Exit
// .type _Exit, %function
// _Exit: .long 0
- try stubs_writer.print(
+ try stubs_asm.print(
\\.balign {d}
\\.{s} {s}
\\.type {s}, %function
@@ -640,7 +639,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
.{ sym_name, ver.major, ver.minor },
);
- try stubs_writer.print(
+ try stubs_asm.print(
\\.balign {d}
\\.{s} {s}
\\.type {s}, %function
@@ -665,14 +664,14 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
}
}
- try stubs_writer.writeAll(".data\n");
+ try stubs_asm.appendSlice(".data\n");
// FreeBSD's `libc.so.7` contains strong references to `__progname` and `environ` which are
// defined in the statically-linked startup code. Those references cause the linker to put
// the symbols in the dynamic symbol table. We need to create dummy references to them here
// to get the same effect.
if (std.mem.eql(u8, lib.name, "c")) {
- try stubs_writer.print(
+ try stubs_asm.print(
\\.balign {d}
\\.globl __progname
\\.globl environ
@@ -686,7 +685,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
});
}
- const obj_inclusions_len = try inc_reader.readInt(u16, .little);
+ const obj_inclusions_len = try inc_reader.takeInt(u16, .little);
var sizes = try arena.alloc(u16, metadata.all_versions.len);
@@ -696,21 +695,23 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
while (sym_i < obj_inclusions_len) : (sym_i += 1) {
const sym_name = opt_symbol_name orelse n: {
sym_name_buf.clearRetainingCapacity();
- try inc_reader.streamUntilDelimiter(sym_name_buf.writer(), 0, null);
+ _ = try inc_reader.streamDelimiter(&sym_name_buf.writer, 0);
+ assert(inc_reader.buffered()[0] == 0); // TODO change streamDelimiter API
+ inc_reader.toss(1);
- opt_symbol_name = sym_name_buf.items;
+ opt_symbol_name = sym_name_buf.written();
versions.unsetAll();
weak_linkages.unsetAll();
chosen_def_ver_index = 255;
chosen_unversioned_ver_index = 255;
- break :n sym_name_buf.items;
+ break :n sym_name_buf.written();
};
{
- const targets = try std.leb.readUleb128(u64, inc_reader);
- const size = try std.leb.readUleb128(u16, inc_reader);
- var lib_index = try inc_reader.readByte();
+ const targets = try inc_reader.takeLeb128(u64);
+ const size = try inc_reader.takeLeb128(u16);
+ var lib_index = try inc_reader.takeByte();
const is_unversioned = (lib_index & (1 << 5)) != 0;
const is_weak = (lib_index & (1 << 6)) != 0;
@@ -724,7 +725,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
((targets & (@as(u64, 1) << @as(u6, @intCast(target_targ_index)))) != 0);
while (true) {
- const byte = try inc_reader.readByte();
+ const byte = try inc_reader.takeByte();
const last = (byte & 0b1000_0000) != 0;
const ver_i = @as(u7, @truncate(byte));
if (ok_lib_and_target and ver_i <= target_ver_index) {
@@ -758,7 +759,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
// .type malloc_conf, %object
// .size malloc_conf, 4
// malloc_conf: .fill 4, 1, 0
- try stubs_writer.print(
+ try stubs_asm.print(
\\.balign {d}
\\.{s} {s}
\\.type {s}, %object
@@ -794,7 +795,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
.{ sym_name, ver.major, ver.minor },
);
- try stubs_asm.writer().print(
+ try stubs_asm.print(
\\.balign {d}
\\.{s} {s}
\\.type {s}, %object
@@ -822,9 +823,9 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
}
}
- try stubs_writer.writeAll(".tdata\n");
+ try stubs_asm.appendSlice(".tdata\n");
- const tls_inclusions_len = try inc_reader.readInt(u16, .little);
+ const tls_inclusions_len = try inc_reader.takeInt(u16, .little);
sym_i = 0;
opt_symbol_name = null;
@@ -832,21 +833,23 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
while (sym_i < tls_inclusions_len) : (sym_i += 1) {
const sym_name = opt_symbol_name orelse n: {
sym_name_buf.clearRetainingCapacity();
- try inc_reader.streamUntilDelimiter(sym_name_buf.writer(), 0, null);
+ _ = try inc_reader.streamDelimiter(&sym_name_buf.writer, 0);
+ assert(inc_reader.buffered()[0] == 0); // TODO change streamDelimiter API
+ inc_reader.toss(1);
- opt_symbol_name = sym_name_buf.items;
+ opt_symbol_name = sym_name_buf.written();
versions.unsetAll();
weak_linkages.unsetAll();
chosen_def_ver_index = 255;
chosen_unversioned_ver_index = 255;
- break :n sym_name_buf.items;
+ break :n sym_name_buf.written();
};
{
- const targets = try std.leb.readUleb128(u64, inc_reader);
- const size = try std.leb.readUleb128(u16, inc_reader);
- var lib_index = try inc_reader.readByte();
+ const targets = try inc_reader.takeLeb128(u64);
+ const size = try inc_reader.takeLeb128(u16);
+ var lib_index = try inc_reader.takeByte();
const is_unversioned = (lib_index & (1 << 5)) != 0;
const is_weak = (lib_index & (1 << 6)) != 0;
@@ -860,7 +863,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
((targets & (@as(u64, 1) << @as(u6, @intCast(target_targ_index)))) != 0);
while (true) {
- const byte = try inc_reader.readByte();
+ const byte = try inc_reader.takeByte();
const last = (byte & 0b1000_0000) != 0;
const ver_i = @as(u7, @truncate(byte));
if (ok_lib_and_target and ver_i <= target_ver_index) {
@@ -894,7 +897,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
// .type _ThreadRuneLocale, %object
// .size _ThreadRuneLocale, 4
// _ThreadRuneLocale: .fill 4, 1, 0
- try stubs_writer.print(
+ try stubs_asm.print(
\\.balign {d}
\\.{s} {s}
\\.type {s}, %tls_object
@@ -930,7 +933,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
.{ sym_name, ver.major, ver.minor },
);
- try stubs_writer.print(
+ try stubs_asm.print(
\\.balign {d}
\\.{s} {s}
\\.type {s}, %tls_object
diff --git a/src/libs/glibc.zig b/src/libs/glibc.zig
index b228ad60ad..4d3ee15bec 100644
--- a/src/libs/glibc.zig
+++ b/src/libs/glibc.zig
@@ -752,9 +752,9 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
var map_contents = std.array_list.Managed(u8).init(arena);
for (metadata.all_versions[0 .. target_ver_index + 1]) |ver| {
if (ver.patch == 0) {
- try map_contents.writer().print("GLIBC_{d}.{d} {{ }};\n", .{ ver.major, ver.minor });
+ try map_contents.print("GLIBC_{d}.{d} {{ }};\n", .{ ver.major, ver.minor });
} else {
- try map_contents.writer().print("GLIBC_{d}.{d}.{d} {{ }};\n", .{ ver.major, ver.minor, ver.patch });
+ try map_contents.print("GLIBC_{d}.{d}.{d} {{ }};\n", .{ ver.major, ver.minor, ver.patch });
}
}
try o_directory.handle.writeFile(.{ .sub_path = all_map_basename, .data = map_contents.items });
@@ -773,7 +773,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
try stubs_asm.appendSlice(".text\n");
var sym_i: usize = 0;
- var sym_name_buf = std.array_list.Managed(u8).init(arena);
+ var sym_name_buf: std.Io.Writer.Allocating = .init(arena);
var opt_symbol_name: ?[]const u8 = null;
var versions_buffer: [32]u8 = undefined;
var versions_len: usize = undefined;
@@ -794,24 +794,25 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
// twice, which causes a "duplicate symbol" assembler error.
var versions_written = std.AutoArrayHashMap(Version, void).init(arena);
- var inc_fbs = std.io.fixedBufferStream(metadata.inclusions);
- var inc_reader = inc_fbs.reader();
+ var inc_reader: std.Io.Reader = .fixed(metadata.inclusions);
- const fn_inclusions_len = try inc_reader.readInt(u16, .little);
+ const fn_inclusions_len = try inc_reader.takeInt(u16, .little);
while (sym_i < fn_inclusions_len) : (sym_i += 1) {
const sym_name = opt_symbol_name orelse n: {
sym_name_buf.clearRetainingCapacity();
- try inc_reader.streamUntilDelimiter(sym_name_buf.writer(), 0, null);
+ _ = try inc_reader.streamDelimiter(&sym_name_buf.writer, 0);
+ assert(inc_reader.buffered()[0] == 0); // TODO change streamDelimiter API
+ inc_reader.toss(1);
- opt_symbol_name = sym_name_buf.items;
+ opt_symbol_name = sym_name_buf.written();
versions_buffer = undefined;
versions_len = 0;
- break :n sym_name_buf.items;
+ break :n sym_name_buf.written();
};
- const targets = try std.leb.readUleb128(u64, inc_reader);
- var lib_index = try inc_reader.readByte();
+ const targets = try inc_reader.takeLeb128(u64);
+ var lib_index = try inc_reader.takeByte();
const is_terminal = (lib_index & (1 << 7)) != 0;
if (is_terminal) {
@@ -825,7 +826,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
((targets & (@as(u64, 1) << @as(u6, @intCast(target_targ_index)))) != 0);
while (true) {
- const byte = try inc_reader.readByte();
+ const byte = try inc_reader.takeByte();
const last = (byte & 0b1000_0000) != 0;
const ver_i = @as(u7, @truncate(byte));
if (ok_lib_and_target and ver_i <= target_ver_index) {
@@ -880,7 +881,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
"{s}_{d}_{d}",
.{ sym_name, ver.major, ver.minor },
);
- try stubs_asm.writer().print(
+ try stubs_asm.print(
\\.balign {d}
\\.globl {s}
\\.type {s}, %function
@@ -905,7 +906,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
"{s}_{d}_{d}_{d}",
.{ sym_name, ver.major, ver.minor, ver.patch },
);
- try stubs_asm.writer().print(
+ try stubs_asm.print(
\\.balign {d}
\\.globl {s}
\\.type {s}, %function
@@ -950,7 +951,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
// versions where the symbol didn't exist. We only care about modern glibc versions, so use
// a strong reference.
if (std.mem.eql(u8, lib.name, "c")) {
- try stubs_asm.writer().print(
+ try stubs_asm.print(
\\.balign {d}
\\.globl _IO_stdin_used
\\{s} _IO_stdin_used
@@ -963,7 +964,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
try stubs_asm.appendSlice(".data\n");
- const obj_inclusions_len = try inc_reader.readInt(u16, .little);
+ const obj_inclusions_len = try inc_reader.takeInt(u16, .little);
var sizes = try arena.alloc(u16, metadata.all_versions.len);
@@ -974,17 +975,19 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
while (sym_i < obj_inclusions_len) : (sym_i += 1) {
const sym_name = opt_symbol_name orelse n: {
sym_name_buf.clearRetainingCapacity();
- try inc_reader.streamUntilDelimiter(sym_name_buf.writer(), 0, null);
+ _ = try inc_reader.streamDelimiter(&sym_name_buf.writer, 0);
+ assert(inc_reader.buffered()[0] == 0); // TODO change streamDelimiter API
+ inc_reader.toss(1);
- opt_symbol_name = sym_name_buf.items;
+ opt_symbol_name = sym_name_buf.written();
versions_buffer = undefined;
versions_len = 0;
- break :n sym_name_buf.items;
+ break :n sym_name_buf.written();
};
- const targets = try std.leb.readUleb128(u64, inc_reader);
- const size = try std.leb.readUleb128(u16, inc_reader);
- var lib_index = try inc_reader.readByte();
+ const targets = try inc_reader.takeLeb128(u64);
+ const size = try inc_reader.takeLeb128(u16);
+ var lib_index = try inc_reader.takeByte();
const is_terminal = (lib_index & (1 << 7)) != 0;
if (is_terminal) {
@@ -998,7 +1001,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
((targets & (@as(u64, 1) << @as(u6, @intCast(target_targ_index)))) != 0);
while (true) {
- const byte = try inc_reader.readByte();
+ const byte = try inc_reader.takeByte();
const last = (byte & 0b1000_0000) != 0;
const ver_i = @as(u7, @truncate(byte));
if (ok_lib_and_target and ver_i <= target_ver_index) {
@@ -1055,7 +1058,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
"{s}_{d}_{d}",
.{ sym_name, ver.major, ver.minor },
);
- try stubs_asm.writer().print(
+ try stubs_asm.print(
\\.balign {d}
\\.globl {s}
\\.type {s}, %object
@@ -1083,7 +1086,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
"{s}_{d}_{d}_{d}",
.{ sym_name, ver.major, ver.minor, ver.patch },
);
- try stubs_asm.writer().print(
+ try stubs_asm.print(
\\.balign {d}
\\.globl {s}
\\.type {s}, %object
diff --git a/src/libs/mingw.zig b/src/libs/mingw.zig
index bf234191a9..3f4db34baa 100644
--- a/src/libs/mingw.zig
+++ b/src/libs/mingw.zig
@@ -304,9 +304,8 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
const include_dir = try comp.dirs.zig_lib.join(arena, &.{ "libc", "mingw", "def-include" });
if (comp.verbose_cc) print: {
- std.debug.lockStdErr();
- defer std.debug.unlockStdErr();
- const stderr = std.fs.File.stderr().deprecatedWriter();
+ var stderr = std.debug.lockStderrWriter(&.{});
+ defer std.debug.unlockStderrWriter();
nosuspend stderr.print("def file: {s}\n", .{def_file_path}) catch break :print;
nosuspend stderr.print("include dir: {s}\n", .{include_dir}) catch break :print;
nosuspend stderr.print("output path: {s}\n", .{def_final_path}) catch break :print;
@@ -335,7 +334,10 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
// new scope to ensure definition file is written before passing the path to WriteImportLibrary
const def_final_file = try o_dir.createFile(final_def_basename, .{ .truncate = true });
defer def_final_file.close();
- try pp.prettyPrintTokens(def_final_file.deprecatedWriter(), .result_only);
+ var buffer: [1024]u8 = undefined;
+ var def_final_file_writer = def_final_file.writer(&buffer);
+ try pp.prettyPrintTokens(&def_final_file_writer.interface, .result_only);
+ try def_final_file_writer.interface.flush();
}
const lib_final_path = try std.fs.path.join(gpa, &.{ "o", &digest, final_lib_basename });
@@ -410,9 +412,9 @@ fn findDef(
// Try the archtecture-specific path first.
const fmt_path = "libc" ++ s ++ "mingw" ++ s ++ "{s}" ++ s ++ "{s}.def";
if (zig_lib_directory.path) |p| {
- try override_path.writer().print("{s}" ++ s ++ fmt_path, .{ p, lib_path, lib_name });
+ try override_path.print("{s}" ++ s ++ fmt_path, .{ p, lib_path, lib_name });
} else {
- try override_path.writer().print(fmt_path, .{ lib_path, lib_name });
+ try override_path.print(fmt_path, .{ lib_path, lib_name });
}
if (std.fs.cwd().access(override_path.items, .{})) |_| {
return override_path.toOwnedSlice();
@@ -427,9 +429,9 @@ fn findDef(
override_path.shrinkRetainingCapacity(0);
const fmt_path = "libc" ++ s ++ "mingw" ++ s ++ "lib-common" ++ s ++ "{s}.def";
if (zig_lib_directory.path) |p| {
- try override_path.writer().print("{s}" ++ s ++ fmt_path, .{ p, lib_name });
+ try override_path.print("{s}" ++ s ++ fmt_path, .{ p, lib_name });
} else {
- try override_path.writer().print(fmt_path, .{lib_name});
+ try override_path.print(fmt_path, .{lib_name});
}
if (std.fs.cwd().access(override_path.items, .{})) |_| {
return override_path.toOwnedSlice();
@@ -444,9 +446,9 @@ fn findDef(
override_path.shrinkRetainingCapacity(0);
const fmt_path = "libc" ++ s ++ "mingw" ++ s ++ "lib-common" ++ s ++ "{s}.def.in";
if (zig_lib_directory.path) |p| {
- try override_path.writer().print("{s}" ++ s ++ fmt_path, .{ p, lib_name });
+ try override_path.print("{s}" ++ s ++ fmt_path, .{ p, lib_name });
} else {
- try override_path.writer().print(fmt_path, .{lib_name});
+ try override_path.print(fmt_path, .{lib_name});
}
if (std.fs.cwd().access(override_path.items, .{})) |_| {
return override_path.toOwnedSlice();
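
Two smaller migrations above: verbose stderr output now goes through std.debug.lockStderrWriter/unlockStderrWriter instead of lockStdErr plus a deprecated writer, and file output goes through a buffered std.fs.File.Writer whose `.interface` field is the `std.Io.Writer` and must be flushed. A sketch of the file-writer half; the file name, contents, and buffer size are illustrative:

    const std = @import("std");

    pub fn main() !void {
        const file = try std.fs.cwd().createFile("example.def", .{ .truncate = true });
        defer file.close();

        var buffer: [1024]u8 = undefined;
        var file_writer = file.writer(&buffer);

        // `interface` is the generic-free *std.Io.Writer view of the buffered writer.
        try file_writer.interface.writeAll("LIBRARY example\nEXPORTS\n");
        try file_writer.interface.print("  {s}\n", .{"some_symbol"});
        try file_writer.interface.flush(); // push the buffered bytes out to the file
    }
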
diff --git a/src/libs/musl.zig b/src/libs/musl.zig
index 9c10e71e1e..ba9693a6c3 100644
--- a/src/libs/musl.zig
+++ b/src/libs/musl.zig
@@ -140,21 +140,21 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
if (!is_arch_specific) {
// Look for an arch specific override.
override_path.shrinkRetainingCapacity(0);
- try override_path.writer().print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.s", .{
+ try override_path.print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.s", .{
dirname, arch_name, noextbasename,
});
if (source_table.contains(override_path.items))
continue;
override_path.shrinkRetainingCapacity(0);
- try override_path.writer().print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.S", .{
+ try override_path.print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.S", .{
dirname, arch_name, noextbasename,
});
if (source_table.contains(override_path.items))
continue;
override_path.shrinkRetainingCapacity(0);
- try override_path.writer().print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.c", .{
+ try override_path.print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.c", .{
dirname, arch_name, noextbasename,
});
if (source_table.contains(override_path.items))
diff --git a/src/libs/netbsd.zig b/src/libs/netbsd.zig
index 04aa4ccfeb..e47bdce3af 100644
--- a/src/libs/netbsd.zig
+++ b/src/libs/netbsd.zig
@@ -460,18 +460,15 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
for (libs, 0..) |lib, lib_i| {
stubs_asm.shrinkRetainingCapacity(0);
- const stubs_writer = stubs_asm.writer();
-
- try stubs_writer.writeAll(".text\n");
+ try stubs_asm.appendSlice(".text\n");
var sym_i: usize = 0;
- var sym_name_buf = std.array_list.Managed(u8).init(arena);
+ var sym_name_buf: std.Io.Writer.Allocating = .init(arena);
var opt_symbol_name: ?[]const u8 = null;
- var inc_fbs = std.io.fixedBufferStream(metadata.inclusions);
- var inc_reader = inc_fbs.reader();
+ var inc_reader: std.Io.Reader = .fixed(metadata.inclusions);
- const fn_inclusions_len = try inc_reader.readInt(u16, .little);
+ const fn_inclusions_len = try inc_reader.takeInt(u16, .little);
var chosen_ver_index: usize = 255;
var chosen_is_weak: bool = undefined;
@@ -479,17 +476,19 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
while (sym_i < fn_inclusions_len) : (sym_i += 1) {
const sym_name = opt_symbol_name orelse n: {
sym_name_buf.clearRetainingCapacity();
- try inc_reader.streamUntilDelimiter(sym_name_buf.writer(), 0, null);
+ _ = try inc_reader.streamDelimiter(&sym_name_buf.writer, 0);
+ assert(inc_reader.buffered()[0] == 0); // TODO change streamDelimiter API
+ inc_reader.toss(1);
- opt_symbol_name = sym_name_buf.items;
+ opt_symbol_name = sym_name_buf.written();
chosen_ver_index = 255;
- break :n sym_name_buf.items;
+ break :n sym_name_buf.written();
};
{
- const targets = try std.leb.readUleb128(u64, inc_reader);
- var lib_index = try inc_reader.readByte();
+ const targets = try inc_reader.takeLeb128(u64);
+ var lib_index = try inc_reader.takeByte();
const is_weak = (lib_index & (1 << 6)) != 0;
const is_terminal = (lib_index & (1 << 7)) != 0;
@@ -502,7 +501,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
((targets & (@as(u64, 1) << @as(u6, @intCast(target_targ_index)))) != 0);
while (true) {
- const byte = try inc_reader.readByte();
+ const byte = try inc_reader.takeByte();
const last = (byte & 0b1000_0000) != 0;
const ver_i = @as(u7, @truncate(byte));
if (ok_lib_and_target and ver_i <= target_ver_index and
@@ -525,7 +524,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
// .globl _Exit
// .type _Exit, %function
// _Exit: .long 0
- try stubs_writer.print(
+ try stubs_asm.print(
\\.balign {d}
\\.{s} {s}
\\.type {s}, %function
@@ -542,9 +541,9 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
}
}
- try stubs_writer.writeAll(".data\n");
+ try stubs_asm.appendSlice(".data\n");
- const obj_inclusions_len = try inc_reader.readInt(u16, .little);
+ const obj_inclusions_len = try inc_reader.takeInt(u16, .little);
sym_i = 0;
opt_symbol_name = null;
@@ -554,18 +553,20 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
while (sym_i < obj_inclusions_len) : (sym_i += 1) {
const sym_name = opt_symbol_name orelse n: {
sym_name_buf.clearRetainingCapacity();
- try inc_reader.streamUntilDelimiter(sym_name_buf.writer(), 0, null);
+ _ = try inc_reader.streamDelimiter(&sym_name_buf.writer, 0);
+ assert(inc_reader.buffered()[0] == 0); // TODO change streamDelimiter API
+ inc_reader.toss(1);
- opt_symbol_name = sym_name_buf.items;
+ opt_symbol_name = sym_name_buf.written();
chosen_ver_index = 255;
- break :n sym_name_buf.items;
+ break :n sym_name_buf.written();
};
{
- const targets = try std.leb.readUleb128(u64, inc_reader);
- const size = try std.leb.readUleb128(u16, inc_reader);
- var lib_index = try inc_reader.readByte();
+ const targets = try inc_reader.takeLeb128(u64);
+ const size = try inc_reader.takeLeb128(u16);
+ var lib_index = try inc_reader.takeByte();
const is_weak = (lib_index & (1 << 6)) != 0;
const is_terminal = (lib_index & (1 << 7)) != 0;
@@ -578,7 +579,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
((targets & (@as(u64, 1) << @as(u6, @intCast(target_targ_index)))) != 0);
while (true) {
- const byte = try inc_reader.readByte();
+ const byte = try inc_reader.takeByte();
const last = (byte & 0b1000_0000) != 0;
const ver_i = @as(u7, @truncate(byte));
if (ok_lib_and_target and ver_i <= target_ver_index and
@@ -603,7 +604,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
// .type malloc_conf, %object
// .size malloc_conf, 4
// malloc_conf: .fill 4, 1, 0
- try stubs_writer.print(
+ try stubs_asm.print(
\\.balign {d}
\\.{s} {s}
\\.type {s}, %object
diff --git a/src/link.zig b/src/link.zig
index 09f4d38606..27cd6620e3 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -1976,7 +1976,7 @@ fn resolveLibInput(
.root_dir = lib_directory,
.sub_path = try std.fmt.allocPrint(arena, "lib{s}.tbd", .{lib_name}),
};
- try checked_paths.writer(gpa).print("\n {f}", .{test_path});
+ try checked_paths.print(gpa, "\n {f}", .{test_path});
var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
error.FileNotFound => break :tbd,
else => |e| fatal("unable to search for tbd library '{f}': {s}", .{ test_path, @errorName(e) }),
@@ -1995,7 +1995,7 @@ fn resolveLibInput(
},
}),
};
- try checked_paths.writer(gpa).print("\n {f}", .{test_path});
+ try checked_paths.print(gpa, "\n {f}", .{test_path});
switch (try resolvePathInputLib(gpa, arena, unresolved_inputs, resolved_inputs, ld_script_bytes, target, .{
.path = test_path,
.query = name_query.query,
@@ -2012,7 +2012,7 @@ fn resolveLibInput(
.root_dir = lib_directory,
.sub_path = try std.fmt.allocPrint(arena, "lib{s}.so", .{lib_name}),
};
- try checked_paths.writer(gpa).print("\n {f}", .{test_path});
+ try checked_paths.print(gpa, "\n {f}", .{test_path});
var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
error.FileNotFound => break :so,
else => |e| fatal("unable to search for so library '{f}': {s}", .{
@@ -2030,7 +2030,7 @@ fn resolveLibInput(
.root_dir = lib_directory,
.sub_path = try std.fmt.allocPrint(arena, "lib{s}.a", .{lib_name}),
};
- try checked_paths.writer(gpa).print("\n {f}", .{test_path});
+ try checked_paths.print(gpa, "\n {f}", .{test_path});
var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
error.FileNotFound => break :mingw,
else => |e| fatal("unable to search for static library '{f}': {s}", .{ test_path, @errorName(e) }),
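
Several sites above replace `list.writer(gpa).print(...)` with a print method called directly on the byte list. A sketch of the unmanaged-list form used in link.zig and Elf.zig; the path strings are illustrative:

    const std = @import("std");

    pub fn main() !void {
        const gpa = std.heap.page_allocator;
        var checked_paths: std.ArrayList(u8) = .empty;
        defer checked_paths.deinit(gpa);

        // Formats directly into the byte list, growing it as needed.
        try checked_paths.print(gpa, "\n  {s}", .{"/usr/lib/libfoo.a"});
        try checked_paths.print(gpa, "\n  {s}", .{"/usr/lib/libfoo.so"});

        std.debug.assert(std.mem.startsWith(u8, checked_paths.items, "\n  /usr/lib/libfoo.a"));
    }
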
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 62430f6c08..41c968534a 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -2179,13 +2179,13 @@ fn writeDataDirectoriesHeaders(coff: *Coff) !void {
fn writeHeader(coff: *Coff) !void {
const target = &coff.base.comp.root_mod.resolved_target.result;
const gpa = coff.base.comp.gpa;
- var buffer = std.array_list.Managed(u8).init(gpa);
+ var buffer: std.Io.Writer.Allocating = .init(gpa);
defer buffer.deinit();
- const writer = buffer.writer();
+ const writer = &buffer.writer;
try buffer.ensureTotalCapacity(coff.getSizeOfHeaders());
writer.writeAll(&msdos_stub) catch unreachable;
- mem.writeInt(u32, buffer.items[0x3c..][0..4], msdos_stub.len, .little);
+ mem.writeInt(u32, buffer.writer.buffer[0x3c..][0..4], msdos_stub.len, .little);
writer.writeAll("PE\x00\x00") catch unreachable;
var flags = coff_util.CoffHeaderFlags{
@@ -2313,7 +2313,7 @@ fn writeHeader(coff: *Coff) !void {
},
}
- try coff.pwriteAll(buffer.items, 0);
+ try coff.pwriteAll(buffer.written(), 0);
}
pub fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 0ebbb9f27e..9cf2b0e074 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -811,10 +811,6 @@ fn flushInner(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id) !void {
if (self.base.gc_sections) {
try gc.gcAtoms(self);
-
- if (self.base.print_gc_sections) {
- try gc.dumpPrunedAtoms(self);
- }
}
self.checkDuplicates() catch |err| switch (err) {
@@ -3005,7 +3001,7 @@ fn writeAtoms(self: *Elf) !void {
undefs.deinit();
}
- var buffer = std.array_list.Managed(u8).init(gpa);
+ var buffer: std.Io.Writer.Allocating = .init(gpa);
defer buffer.deinit();
const slice = self.sections.slice();
@@ -3032,9 +3028,9 @@ fn writeAtoms(self: *Elf) !void {
try buffer.ensureUnusedCapacity(thunk_size);
const shdr = slice.items(.shdr)[th.output_section_index];
const offset = @as(u64, @intCast(th.value)) + shdr.sh_offset;
- try th.write(self, buffer.writer());
- assert(buffer.items.len == thunk_size);
- try self.pwriteAll(buffer.items, offset);
+ try th.write(self, &buffer.writer);
+ assert(buffer.written().len == thunk_size);
+ try self.pwriteAll(buffer.written(), offset);
buffer.clearRetainingCapacity();
}
}
@@ -3166,26 +3162,26 @@ fn writeSyntheticSections(self: *Elf) !void {
if (self.section_indexes.verneed) |shndx| {
const shdr = slice.items(.shdr)[shndx];
- var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.verneed.size());
+ var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.verneed.size());
defer buffer.deinit();
- try self.verneed.write(buffer.writer());
- try self.pwriteAll(buffer.items, shdr.sh_offset);
+ try self.verneed.write(&buffer.writer);
+ try self.pwriteAll(buffer.written(), shdr.sh_offset);
}
if (self.section_indexes.dynamic) |shndx| {
const shdr = slice.items(.shdr)[shndx];
- var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.dynamic.size(self));
+ var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.dynamic.size(self));
defer buffer.deinit();
- try self.dynamic.write(self, buffer.writer());
- try self.pwriteAll(buffer.items, shdr.sh_offset);
+ try self.dynamic.write(self, &buffer.writer);
+ try self.pwriteAll(buffer.written(), shdr.sh_offset);
}
if (self.section_indexes.dynsymtab) |shndx| {
const shdr = slice.items(.shdr)[shndx];
- var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.dynsym.size());
+ var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.dynsym.size());
defer buffer.deinit();
- try self.dynsym.write(self, buffer.writer());
- try self.pwriteAll(buffer.items, shdr.sh_offset);
+ try self.dynsym.write(self, &buffer.writer);
+ try self.pwriteAll(buffer.written(), shdr.sh_offset);
}
if (self.section_indexes.dynstrtab) |shndx| {
@@ -3201,28 +3197,28 @@ fn writeSyntheticSections(self: *Elf) !void {
};
const shdr = slice.items(.shdr)[shndx];
const sh_size = try self.cast(usize, shdr.sh_size);
- var buffer = try std.array_list.Managed(u8).initCapacity(gpa, @intCast(sh_size - existing_size));
+ var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, @intCast(sh_size - existing_size));
defer buffer.deinit();
- try eh_frame.writeEhFrame(self, buffer.writer());
- assert(buffer.items.len == sh_size - existing_size);
- try self.pwriteAll(buffer.items, shdr.sh_offset + existing_size);
+ try eh_frame.writeEhFrame(self, &buffer.writer);
+ assert(buffer.written().len == sh_size - existing_size);
+ try self.pwriteAll(buffer.written(), shdr.sh_offset + existing_size);
}
if (self.section_indexes.eh_frame_hdr) |shndx| {
const shdr = slice.items(.shdr)[shndx];
const sh_size = try self.cast(usize, shdr.sh_size);
- var buffer = try std.array_list.Managed(u8).initCapacity(gpa, sh_size);
+ var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, sh_size);
defer buffer.deinit();
- try eh_frame.writeEhFrameHdr(self, buffer.writer());
- try self.pwriteAll(buffer.items, shdr.sh_offset);
+ try eh_frame.writeEhFrameHdr(self, &buffer.writer);
+ try self.pwriteAll(buffer.written(), shdr.sh_offset);
}
if (self.section_indexes.got) |index| {
const shdr = slice.items(.shdr)[index];
- var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.got.size(self));
+ var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.got.size(self));
defer buffer.deinit();
- try self.got.write(self, buffer.writer());
- try self.pwriteAll(buffer.items, shdr.sh_offset);
+ try self.got.write(self, &buffer.writer);
+ try self.pwriteAll(buffer.written(), shdr.sh_offset);
}
if (self.section_indexes.rela_dyn) |shndx| {
@@ -3235,26 +3231,26 @@ fn writeSyntheticSections(self: *Elf) !void {
if (self.section_indexes.plt) |shndx| {
const shdr = slice.items(.shdr)[shndx];
- var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.plt.size(self));
+ var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.plt.size(self));
defer buffer.deinit();
- try self.plt.write(self, buffer.writer());
- try self.pwriteAll(buffer.items, shdr.sh_offset);
+ try self.plt.write(self, &buffer.writer);
+ try self.pwriteAll(buffer.written(), shdr.sh_offset);
}
if (self.section_indexes.got_plt) |shndx| {
const shdr = slice.items(.shdr)[shndx];
- var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.got_plt.size(self));
+ var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.got_plt.size(self));
defer buffer.deinit();
- try self.got_plt.write(self, buffer.writer());
- try self.pwriteAll(buffer.items, shdr.sh_offset);
+ try self.got_plt.write(self, &buffer.writer);
+ try self.pwriteAll(buffer.written(), shdr.sh_offset);
}
if (self.section_indexes.plt_got) |shndx| {
const shdr = slice.items(.shdr)[shndx];
- var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.plt_got.size(self));
+ var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.plt_got.size(self));
defer buffer.deinit();
- try self.plt_got.write(self, buffer.writer());
- try self.pwriteAll(buffer.items, shdr.sh_offset);
+ try self.plt_got.write(self, &buffer.writer);
+ try self.pwriteAll(buffer.written(), shdr.sh_offset);
}
if (self.section_indexes.rela_plt) |shndx| {
@@ -3757,7 +3753,7 @@ pub fn insertShString(self: *Elf, name: [:0]const u8) error{OutOfMemory}!u32 {
const gpa = self.base.comp.gpa;
const off = @as(u32, @intCast(self.shstrtab.items.len));
try self.shstrtab.ensureUnusedCapacity(gpa, name.len + 1);
- self.shstrtab.writer(gpa).print("{s}\x00", .{name}) catch unreachable;
+ self.shstrtab.print(gpa, "{s}\x00", .{name}) catch unreachable;
return off;
}
@@ -3770,7 +3766,7 @@ pub fn insertDynString(self: *Elf, name: []const u8) error{OutOfMemory}!u32 {
const gpa = self.base.comp.gpa;
const off = @as(u32, @intCast(self.dynstrtab.items.len));
try self.dynstrtab.ensureUnusedCapacity(gpa, name.len + 1);
- self.dynstrtab.writer(gpa).print("{s}\x00", .{name}) catch unreachable;
+ self.dynstrtab.print(gpa, "{s}\x00", .{name}) catch unreachable;
return off;
}
diff --git a/src/link/Elf/Archive.zig b/src/link/Elf/Archive.zig
index 2243dc4781..b0f7ca2971 100644
--- a/src/link/Elf/Archive.zig
+++ b/src/link/Elf/Archive.zig
@@ -123,8 +123,7 @@ pub fn setArHdr(opts: struct {
@memcpy(&hdr.ar_fmag, elf.ARFMAG);
{
- var stream = std.io.fixedBufferStream(&hdr.ar_name);
- const writer = stream.writer();
+ var writer: std.Io.Writer = .fixed(&hdr.ar_name);
switch (opts.name) {
.symtab => writer.print("{s}", .{elf.SYM64NAME}) catch unreachable,
.strtab => writer.print("//", .{}) catch unreachable,
@@ -133,8 +132,8 @@ pub fn setArHdr(opts: struct {
}
}
{
- var stream = std.io.fixedBufferStream(&hdr.ar_size);
- stream.writer().print("{d}", .{opts.size}) catch unreachable;
+ var writer: std.Io.Writer = .fixed(&hdr.ar_size);
+ writer.print("{d}", .{opts.size}) catch unreachable;
}
return hdr;
@@ -246,7 +245,7 @@ pub const ArStrtab = struct {
pub fn insert(ar: *ArStrtab, allocator: Allocator, name: []const u8) error{OutOfMemory}!u32 {
const off = @as(u32, @intCast(ar.buffer.items.len));
- try ar.buffer.writer(allocator).print("{s}/{c}", .{ name, strtab_delimiter });
+ try ar.buffer.print(allocator, "{s}/{c}", .{ name, strtab_delimiter });
return off;
}
diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig
index 28899838b1..6bfaddea97 100644
--- a/src/link/Elf/Atom.zig
+++ b/src/link/Elf/Atom.zig
@@ -621,7 +621,6 @@ pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) RelocError!voi
const cpu_arch = elf_file.getTarget().cpu.arch;
const file_ptr = self.file(elf_file).?;
- var stream = std.io.fixedBufferStream(code);
const rels = self.relocs(elf_file);
var it = RelocsIterator{ .relocs = rels };
@@ -661,20 +660,16 @@ pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) RelocError!voi
target.name(elf_file),
});
- try stream.seekTo(r_offset);
-
const args = ResolveArgs{ P, A, S, GOT, G, TP, DTP };
switch (cpu_arch) {
- .x86_64 => x86_64.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
+ .x86_64 => x86_64.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code) catch |err| switch (err) {
error.RelocFailure,
error.RelaxFailure,
- error.InvalidInstruction,
- error.CannotEncode,
=> has_reloc_errors = true,
else => |e| return e,
},
- .aarch64, .aarch64_be => aarch64.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
+ .aarch64, .aarch64_be => aarch64.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code) catch |err| switch (err) {
error.RelocFailure,
error.RelaxFailure,
error.UnexpectedRemainder,
@@ -682,7 +677,7 @@ pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) RelocError!voi
=> has_reloc_errors = true,
else => |e| return e,
},
- .riscv64, .riscv64be => riscv.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
+ .riscv64, .riscv64be => riscv.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code) catch |err| switch (err) {
error.RelocFailure,
error.RelaxFailure,
=> has_reloc_errors = true,
@@ -701,7 +696,8 @@ fn resolveDynAbsReloc(
rel: elf.Elf64_Rela,
action: RelocAction,
elf_file: *Elf,
- writer: anytype,
+ code: []u8,
+ r_offset: usize,
) !void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
@@ -726,7 +722,7 @@ fn resolveDynAbsReloc(
.copyrel,
.cplt,
.none,
- => try writer.writeInt(i64, S + A, .little),
+ => mem.writeInt(i64, code[r_offset..][0..8], S + A, .little),
.dyn_copyrel => {
if (is_writeable or elf_file.z_nocopyreloc) {
@@ -737,9 +733,9 @@ fn resolveDynAbsReloc(
.addend = A,
.target = target,
});
- try applyDynamicReloc(A, elf_file, writer);
+ applyDynamicReloc(A, code, r_offset);
} else {
- try writer.writeInt(i64, S + A, .little);
+ mem.writeInt(i64, code[r_offset..][0..8], S + A, .little);
}
},
@@ -752,9 +748,9 @@ fn resolveDynAbsReloc(
.addend = A,
.target = target,
});
- try applyDynamicReloc(A, elf_file, writer);
+ applyDynamicReloc(A, code, r_offset);
} else {
- try writer.writeInt(i64, S + A, .little);
+ mem.writeInt(i64, code[r_offset..][0..8], S + A, .little);
}
},
@@ -766,7 +762,7 @@ fn resolveDynAbsReloc(
.addend = A,
.target = target,
});
- try applyDynamicReloc(A, elf_file, writer);
+ applyDynamicReloc(A, code, r_offset);
},
.baserel => {
@@ -776,7 +772,7 @@ fn resolveDynAbsReloc(
.addend = S + A,
.target = target,
});
- try applyDynamicReloc(S + A, elf_file, writer);
+ applyDynamicReloc(S + A, code, r_offset);
},
.ifunc => {
@@ -787,16 +783,13 @@ fn resolveDynAbsReloc(
.addend = S_ + A,
.target = target,
});
- try applyDynamicReloc(S_ + A, elf_file, writer);
+ applyDynamicReloc(S_ + A, code, r_offset);
},
}
}
-fn applyDynamicReloc(value: i64, elf_file: *Elf, writer: anytype) !void {
- _ = elf_file;
- // if (elf_file.options.apply_dynamic_relocs) {
- try writer.writeInt(i64, value, .little);
- // }
+fn applyDynamicReloc(value: i64, code: []u8, r_offset: usize) void {
+ mem.writeInt(i64, code[r_offset..][0..8], value, .little);
}
pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: anytype) !void {
@@ -804,7 +797,6 @@ pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: any
const cpu_arch = elf_file.getTarget().cpu.arch;
const file_ptr = self.file(elf_file).?;
- var stream = std.io.fixedBufferStream(code);
const rels = self.relocs(elf_file);
var has_reloc_errors = false;
@@ -863,18 +855,16 @@ pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: any
target.name(elf_file),
});
- try stream.seekTo(r_offset);
-
switch (cpu_arch) {
- .x86_64 => x86_64.resolveRelocNonAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
+ .x86_64 => x86_64.resolveRelocNonAlloc(self, elf_file, rel, target, args, code[r_offset..]) catch |err| switch (err) {
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
},
- .aarch64, .aarch64_be => aarch64.resolveRelocNonAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
+ .aarch64, .aarch64_be => aarch64.resolveRelocNonAlloc(self, elf_file, rel, target, args, code[r_offset..]) catch |err| switch (err) {
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
},
- .riscv64, .riscv64be => riscv.resolveRelocNonAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
+ .riscv64, .riscv64be => riscv.resolveRelocNonAlloc(self, elf_file, rel, target, args, code[r_offset..]) catch |err| switch (err) {
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
},
@@ -915,7 +905,7 @@ const Format = struct {
atom: Atom,
elf_file: *Elf,
- fn default(f: Format, w: *std.io.Writer) std.io.Writer.Error!void {
+ fn default(f: Format, w: *Writer) Writer.Error!void {
const atom = f.atom;
const elf_file = f.elf_file;
try w.print("atom({d}) : {s} : @{x} : shdr({d}) : align({x}) : size({x}) : prev({f}) : next({f})", .{
@@ -1068,16 +1058,13 @@ const x86_64 = struct {
args: ResolveArgs,
it: *RelocsIterator,
code: []u8,
- stream: anytype,
- ) (error{ InvalidInstruction, CannotEncode } || RelocError)!void {
+ ) !void {
dev.check(.x86_64_backend);
const t = &elf_file.base.comp.root_mod.resolved_target.result;
const diags = &elf_file.base.comp.link_diags;
const r_type: elf.R_X86_64 = @enumFromInt(rel.r_type());
const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
- const cwriter = stream.writer();
-
const P, const A, const S, const GOT, const G, const TP, const DTP = args;
switch (r_type) {
@@ -1089,58 +1076,60 @@ const x86_64 = struct {
rel,
dynAbsRelocAction(target, elf_file),
elf_file,
- cwriter,
+ code,
+ r_offset,
);
},
- .PLT32 => try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little),
- .PC32 => try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little),
+ .PLT32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S + A - P)), .little),
+ .PC32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S + A - P)), .little),
- .GOTPCREL => try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little),
- .GOTPC32 => try cwriter.writeInt(i32, @as(i32, @intCast(GOT + A - P)), .little),
- .GOTPC64 => try cwriter.writeInt(i64, GOT + A - P, .little),
+ .GOTPCREL => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(G + GOT + A - P)), .little),
+ .GOTPC32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(GOT + A - P)), .little),
+ .GOTPC64 => mem.writeInt(i64, code[r_offset..][0..8], GOT + A - P, .little),
.GOTPCRELX => {
if (!target.flags.import and !target.isIFunc(elf_file) and !target.isAbs(elf_file)) blk: {
x86_64.relaxGotpcrelx(code[r_offset - 2 ..], t) catch break :blk;
- try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little);
+ mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S + A - P)), .little);
return;
}
- try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little);
+ mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(G + GOT + A - P)), .little);
},
.REX_GOTPCRELX => {
if (!target.flags.import and !target.isIFunc(elf_file) and !target.isAbs(elf_file)) blk: {
x86_64.relaxRexGotpcrelx(code[r_offset - 3 ..], t) catch break :blk;
- try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little);
+ mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S + A - P)), .little);
return;
}
- try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little);
+ mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(G + GOT + A - P)), .little);
},
- .@"32" => try cwriter.writeInt(u32, @as(u32, @truncate(@as(u64, @intCast(S + A)))), .little),
- .@"32S" => try cwriter.writeInt(i32, @as(i32, @truncate(S + A)), .little),
+ .@"32" => mem.writeInt(u32, code[r_offset..][0..4], @as(u32, @truncate(@as(u64, @intCast(S + A)))), .little),
+ .@"32S" => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @truncate(S + A)), .little),
- .TPOFF32 => try cwriter.writeInt(i32, @as(i32, @truncate(S + A - TP)), .little),
- .TPOFF64 => try cwriter.writeInt(i64, S + A - TP, .little),
+ .TPOFF32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @truncate(S + A - TP)), .little),
+ .TPOFF64 => mem.writeInt(i64, code[r_offset..][0..8], S + A - TP, .little),
- .DTPOFF32 => try cwriter.writeInt(i32, @as(i32, @truncate(S + A - DTP)), .little),
- .DTPOFF64 => try cwriter.writeInt(i64, S + A - DTP, .little),
+ .DTPOFF32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @truncate(S + A - DTP)), .little),
+ .DTPOFF64 => mem.writeInt(i64, code[r_offset..][0..8], S + A - DTP, .little),
.TLSGD => {
if (target.flags.has_tlsgd) {
const S_ = target.tlsGdAddress(elf_file);
- try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
+ mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S_ + A - P)), .little);
} else if (target.flags.has_gottp) {
const S_ = target.gotTpAddress(elf_file);
- try x86_64.relaxTlsGdToIe(atom, &.{ rel, it.next().? }, @intCast(S_ - P), elf_file, stream);
+ try x86_64.relaxTlsGdToIe(atom, &.{ rel, it.next().? }, @intCast(S_ - P), elf_file, code, r_offset);
} else {
try x86_64.relaxTlsGdToLe(
atom,
&.{ rel, it.next().? },
@as(i32, @intCast(S - TP)),
elf_file,
- stream,
+ code,
+ r_offset,
);
}
},
@@ -1149,14 +1138,15 @@ const x86_64 = struct {
if (elf_file.got.tlsld_index) |entry_index| {
const tlsld_entry = elf_file.got.entries.items[entry_index];
const S_ = tlsld_entry.address(elf_file);
- try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
+ mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S_ + A - P)), .little);
} else {
try x86_64.relaxTlsLdToLe(
atom,
&.{ rel, it.next().? },
@as(i32, @intCast(TP - elf_file.tlsAddress())),
elf_file,
- stream,
+ code,
+ r_offset,
);
}
},
@@ -1164,7 +1154,7 @@ const x86_64 = struct {
.GOTPC32_TLSDESC => {
if (target.flags.has_tlsdesc) {
const S_ = target.tlsDescAddress(elf_file);
- try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
+ mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S_ + A - P)), .little);
} else {
x86_64.relaxGotPcTlsDesc(code[r_offset - 3 ..], t) catch {
var err = try diags.addErrorWithNotes(1);
@@ -1176,26 +1166,26 @@ const x86_64 = struct {
});
return error.RelaxFailure;
};
- try cwriter.writeInt(i32, @as(i32, @intCast(S - TP)), .little);
+ mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S - TP)), .little);
}
},
.TLSDESC_CALL => if (!target.flags.has_tlsdesc) {
// call -> nop
- try cwriter.writeAll(&.{ 0x66, 0x90 });
+ code[r_offset..][0..2].* = .{ 0x66, 0x90 };
},
.GOTTPOFF => {
if (target.flags.has_gottp) {
const S_ = target.gotTpAddress(elf_file);
- try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
+ mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S_ + A - P)), .little);
} else {
x86_64.relaxGotTpOff(code[r_offset - 3 ..], t);
- try cwriter.writeInt(i32, @as(i32, @intCast(S - TP)), .little);
+ mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S - TP)), .little);
}
},
- .GOT32 => try cwriter.writeInt(i32, @as(i32, @intCast(G + A)), .little),
+ .GOT32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(G + A)), .little),
else => try atom.reportUnhandledRelocError(rel, elf_file),
}
@@ -1207,45 +1197,42 @@ const x86_64 = struct {
rel: elf.Elf64_Rela,
target: *const Symbol,
args: ResolveArgs,
- it: *RelocsIterator,
code: []u8,
- stream: anytype,
) !void {
dev.check(.x86_64_backend);
- _ = code;
- _ = it;
const r_type: elf.R_X86_64 = @enumFromInt(rel.r_type());
- const cwriter = stream.writer();
_, const A, const S, const GOT, _, _, const DTP = args;
+ var writer: Writer = .fixed(code);
+
switch (r_type) {
.NONE => unreachable,
- .@"8" => try cwriter.writeInt(u8, @as(u8, @bitCast(@as(i8, @intCast(S + A)))), .little),
- .@"16" => try cwriter.writeInt(u16, @as(u16, @bitCast(@as(i16, @intCast(S + A)))), .little),
- .@"32" => try cwriter.writeInt(u32, @as(u32, @bitCast(@as(i32, @intCast(S + A)))), .little),
- .@"32S" => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
+ .@"8" => try writer.writeInt(u8, @as(u8, @bitCast(@as(i8, @intCast(S + A)))), .little),
+ .@"16" => try writer.writeInt(u16, @as(u16, @bitCast(@as(i16, @intCast(S + A)))), .little),
+ .@"32" => try writer.writeInt(u32, @as(u32, @bitCast(@as(i32, @intCast(S + A)))), .little),
+ .@"32S" => try writer.writeInt(i32, @as(i32, @intCast(S + A)), .little),
.@"64" => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
- try cwriter.writeInt(u64, value, .little)
+ try writer.writeInt(u64, value, .little)
else
- try cwriter.writeInt(i64, S + A, .little),
+ try writer.writeInt(i64, S + A, .little),
.DTPOFF32 => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
- try cwriter.writeInt(u64, value, .little)
+ try writer.writeInt(u64, value, .little)
else
- try cwriter.writeInt(i32, @as(i32, @intCast(S + A - DTP)), .little),
+ try writer.writeInt(i32, @as(i32, @intCast(S + A - DTP)), .little),
.DTPOFF64 => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
- try cwriter.writeInt(u64, value, .little)
+ try writer.writeInt(u64, value, .little)
else
- try cwriter.writeInt(i64, S + A - DTP, .little),
- .GOTOFF64 => try cwriter.writeInt(i64, S + A - GOT, .little),
- .GOTPC64 => try cwriter.writeInt(i64, GOT + A, .little),
+ try writer.writeInt(i64, S + A - DTP, .little),
+ .GOTOFF64 => try writer.writeInt(i64, S + A - GOT, .little),
+ .GOTPC64 => try writer.writeInt(i64, GOT + A, .little),
.SIZE32 => {
const size = @as(i64, @intCast(target.elfSym(elf_file).st_size));
- try cwriter.writeInt(u32, @bitCast(@as(i32, @intCast(size + A))), .little);
+ try writer.writeInt(u32, @bitCast(@as(i32, @intCast(size + A))), .little);
},
.SIZE64 => {
const size = @as(i64, @intCast(target.elfSym(elf_file).st_size));
- try cwriter.writeInt(i64, @intCast(size + A), .little);
+ try writer.writeInt(i64, @intCast(size + A), .little);
},
else => try atom.reportUnhandledRelocError(rel, elf_file),
}
@@ -1288,12 +1275,12 @@ const x86_64 = struct {
rels: []const elf.Elf64_Rela,
value: i32,
elf_file: *Elf,
- stream: anytype,
+ code: []u8,
+ r_offset: usize,
) !void {
dev.check(.x86_64_backend);
assert(rels.len == 2);
const diags = &elf_file.base.comp.link_diags;
- const writer = stream.writer();
const rel: elf.R_X86_64 = @enumFromInt(rels[1].r_type());
switch (rel) {
.PC32,
@@ -1304,8 +1291,7 @@ const x86_64 = struct {
0x48, 0x03, 0x05, 0, 0, 0, 0, // add foo@gottpoff(%rip), %rax
};
std.mem.writeInt(i32, insts[12..][0..4], value - 12, .little);
- try stream.seekBy(-4);
- try writer.writeAll(&insts);
+ @memcpy(code[r_offset - 4 ..][0..insts.len], &insts);
},
else => {
@@ -1329,12 +1315,12 @@ const x86_64 = struct {
rels: []const elf.Elf64_Rela,
value: i32,
elf_file: *Elf,
- stream: anytype,
+ code: []u8,
+ r_offset: usize,
) !void {
dev.check(.x86_64_backend);
assert(rels.len == 2);
const diags = &elf_file.base.comp.link_diags;
- const writer = stream.writer();
const rel: elf.R_X86_64 = @enumFromInt(rels[1].r_type());
switch (rel) {
.PC32,
@@ -1346,8 +1332,7 @@ const x86_64 = struct {
0x48, 0x2d, 0, 0, 0, 0, // sub $tls_size, %rax
};
std.mem.writeInt(i32, insts[8..][0..4], value, .little);
- try stream.seekBy(-3);
- try writer.writeAll(&insts);
+ @memcpy(code[r_offset - 3 ..][0..insts.len], &insts);
},
.GOTPCREL,
@@ -1360,8 +1345,7 @@ const x86_64 = struct {
0x90, // nop
};
std.mem.writeInt(i32, insts[8..][0..4], value, .little);
- try stream.seekBy(-3);
- try writer.writeAll(&insts);
+ @memcpy(code[r_offset - 3 ..][0..insts.len], &insts);
},
else => {
@@ -1390,7 +1374,7 @@ const x86_64 = struct {
// TODO: hack to force imm32s in the assembler
.{ .imm = .s(-129) },
}, t) catch return false;
- var trash: std.io.Writer.Discarding = .init(&.{});
+ var trash: Writer.Discarding = .init(&.{});
inst.encode(&trash.writer, .{}) catch return false;
return true;
},
@@ -1437,12 +1421,12 @@ const x86_64 = struct {
rels: []const elf.Elf64_Rela,
value: i32,
elf_file: *Elf,
- stream: anytype,
+ code: []u8,
+ r_offset: usize,
) !void {
dev.check(.x86_64_backend);
assert(rels.len == 2);
const diags = &elf_file.base.comp.link_diags;
- const writer = stream.writer();
const rel: elf.R_X86_64 = @enumFromInt(rels[1].r_type());
switch (rel) {
.PC32,
@@ -1455,8 +1439,7 @@ const x86_64 = struct {
0x48, 0x81, 0xc0, 0, 0, 0, 0, // add $tp_offset, %rax
};
std.mem.writeInt(i32, insts[12..][0..4], value, .little);
- try stream.seekBy(-4);
- try writer.writeAll(&insts);
+ @memcpy(code[r_offset - 4 ..][0..insts.len], &insts);
relocs_log.debug(" relaxing {f} and {f}", .{
relocation.fmtRelocType(rels[0].r_type(), .x86_64),
relocation.fmtRelocType(rels[1].r_type(), .x86_64),
@@ -1486,8 +1469,8 @@ const x86_64 = struct {
}
fn encode(insts: []const Instruction, code: []u8) !void {
- var stream: std.io.Writer = .fixed(code);
- for (insts) |inst| try inst.encode(&stream, .{});
+ var writer: Writer = .fixed(code);
+ for (insts) |inst| try inst.encode(&writer, .{});
}
const bits = @import("../../arch/x86_64/bits.zig");
@@ -1592,14 +1575,12 @@ const aarch64 = struct {
args: ResolveArgs,
it: *RelocsIterator,
code_buffer: []u8,
- stream: anytype,
) (error{ UnexpectedRemainder, DivisionByZero } || RelocError)!void {
_ = it;
const diags = &elf_file.base.comp.link_diags;
const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type());
const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
- const cwriter = stream.writer();
const code = code_buffer[r_offset..][0..4];
const file_ptr = atom.file(elf_file).?;
@@ -1614,7 +1595,8 @@ const aarch64 = struct {
rel,
dynAbsRelocAction(target, elf_file),
elf_file,
- cwriter,
+ code_buffer,
+ r_offset,
);
},
@@ -1782,25 +1764,20 @@ const aarch64 = struct {
rel: elf.Elf64_Rela,
target: *const Symbol,
args: ResolveArgs,
- it: *RelocsIterator,
code: []u8,
- stream: anytype,
) !void {
- _ = it;
- _ = code;
-
const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type());
- const cwriter = stream.writer();
_, const A, const S, _, _, _, _ = args;
+ var writer: Writer = .fixed(code);
switch (r_type) {
.NONE => unreachable,
- .ABS32 => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
+ .ABS32 => try writer.writeInt(i32, @as(i32, @intCast(S + A)), .little),
.ABS64 => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
- try cwriter.writeInt(u64, value, .little)
+ try writer.writeInt(u64, value, .little)
else
- try cwriter.writeInt(i64, S + A, .little),
+ try writer.writeInt(i64, S + A, .little),
else => try atom.reportUnhandledRelocError(rel, elf_file),
}
}
@@ -1861,12 +1838,10 @@ const riscv = struct {
args: ResolveArgs,
it: *RelocsIterator,
code: []u8,
- stream: anytype,
) !void {
const diags = &elf_file.base.comp.link_diags;
const r_type: elf.R_RISCV = @enumFromInt(rel.r_type());
const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
- const cwriter = stream.writer();
const P, const A, const S, const GOT, const G, const TP, const DTP = args;
_ = TP;
@@ -1875,7 +1850,7 @@ const riscv = struct {
switch (r_type) {
.NONE => unreachable,
- .@"32" => try cwriter.writeInt(u32, @as(u32, @truncate(@as(u64, @intCast(S + A)))), .little),
+ .@"32" => mem.writeInt(u32, code[r_offset..][0..4], @as(u32, @truncate(@as(u64, @intCast(S + A)))), .little),
.@"64" => {
try atom.resolveDynAbsReloc(
@@ -1883,7 +1858,8 @@ const riscv = struct {
rel,
dynAbsRelocAction(target, elf_file),
elf_file,
- cwriter,
+ code,
+ r_offset,
);
},
@@ -1997,15 +1973,9 @@ const riscv = struct {
rel: elf.Elf64_Rela,
target: *const Symbol,
args: ResolveArgs,
- it: *RelocsIterator,
code: []u8,
- stream: anytype,
) !void {
- _ = it;
-
const r_type: elf.R_RISCV = @enumFromInt(rel.r_type());
- const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
- const cwriter = stream.writer();
_, const A, const S, const GOT, _, _, const DTP = args;
_ = GOT;
@@ -2014,30 +1984,29 @@ const riscv = struct {
switch (r_type) {
.NONE => unreachable,
- .@"32" => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
+ .@"32" => mem.writeInt(i32, code[0..4], @intCast(S + A), .little),
.@"64" => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
- try cwriter.writeInt(u64, value, .little)
+ mem.writeInt(u64, code[0..8], value, .little)
else
- try cwriter.writeInt(i64, S + A, .little),
-
- .ADD8 => riscv_util.writeAddend(i8, .add, code[r_offset..][0..1], S + A),
- .SUB8 => riscv_util.writeAddend(i8, .sub, code[r_offset..][0..1], S + A),
- .ADD16 => riscv_util.writeAddend(i16, .add, code[r_offset..][0..2], S + A),
- .SUB16 => riscv_util.writeAddend(i16, .sub, code[r_offset..][0..2], S + A),
- .ADD32 => riscv_util.writeAddend(i32, .add, code[r_offset..][0..4], S + A),
- .SUB32 => riscv_util.writeAddend(i32, .sub, code[r_offset..][0..4], S + A),
- .ADD64 => riscv_util.writeAddend(i64, .add, code[r_offset..][0..8], S + A),
- .SUB64 => riscv_util.writeAddend(i64, .sub, code[r_offset..][0..8], S + A),
-
- .SET8 => mem.writeInt(i8, code[r_offset..][0..1], @as(i8, @truncate(S + A)), .little),
- .SET16 => mem.writeInt(i16, code[r_offset..][0..2], @as(i16, @truncate(S + A)), .little),
- .SET32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @truncate(S + A)), .little),
-
- .SET6 => riscv_util.writeSetSub6(.set, code[r_offset..][0..1], S + A),
- .SUB6 => riscv_util.writeSetSub6(.sub, code[r_offset..][0..1], S + A),
-
- .SET_ULEB128 => try riscv_util.writeSetSubUleb(.set, stream, S + A),
- .SUB_ULEB128 => try riscv_util.writeSetSubUleb(.sub, stream, S - A),
+ mem.writeInt(i64, code[0..8], S + A, .little),
+ .ADD8 => riscv_util.writeAddend(i8, .add, code[0..1], S + A),
+ .SUB8 => riscv_util.writeAddend(i8, .sub, code[0..1], S + A),
+ .ADD16 => riscv_util.writeAddend(i16, .add, code[0..2], S + A),
+ .SUB16 => riscv_util.writeAddend(i16, .sub, code[0..2], S + A),
+ .ADD32 => riscv_util.writeAddend(i32, .add, code[0..4], S + A),
+ .SUB32 => riscv_util.writeAddend(i32, .sub, code[0..4], S + A),
+ .ADD64 => riscv_util.writeAddend(i64, .add, code[0..8], S + A),
+ .SUB64 => riscv_util.writeAddend(i64, .sub, code[0..8], S + A),
+
+ .SET8 => mem.writeInt(i8, code[0..1], @as(i8, @truncate(S + A)), .little),
+ .SET16 => mem.writeInt(i16, code[0..2], @as(i16, @truncate(S + A)), .little),
+ .SET32 => mem.writeInt(i32, code[0..4], @as(i32, @truncate(S + A)), .little),
+
+ .SET6 => riscv_util.writeSetSub6(.set, code[0..1], S + A),
+ .SUB6 => riscv_util.writeSetSub6(.sub, code[0..1], S + A),
+
+ .SET_ULEB128 => riscv_util.writeSetUleb(code, S + A),
+ .SUB_ULEB128 => riscv_util.writeSubUleb(code, S - A),
else => try atom.reportUnhandledRelocError(rel, elf_file),
}
@@ -2108,14 +2077,16 @@ pub const Extra = struct {
const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;
-const eh_frame = @import("eh_frame.zig");
const log = std.log.scoped(.link);
const math = std.math;
const mem = std.mem;
const relocs_log = std.log.scoped(.link_relocs);
+const Allocator = mem.Allocator;
+const Writer = std.Io.Writer;
+
+const eh_frame = @import("eh_frame.zig");
const relocation = @import("relocation.zig");
-const Allocator = mem.Allocator;
const Atom = @This();
const Elf = @import("../Elf.zig");
const Fde = eh_frame.Fde;
diff --git a/src/link/Elf/AtomList.zig b/src/link/Elf/AtomList.zig
index 0caa69ca90..900a6e32c7 100644
--- a/src/link/Elf/AtomList.zig
+++ b/src/link/Elf/AtomList.zig
@@ -89,7 +89,7 @@ pub fn allocate(list: *AtomList, elf_file: *Elf) !void {
list.dirty = false;
}
-pub fn write(list: AtomList, buffer: *std.array_list.Managed(u8), undefs: anytype, elf_file: *Elf) !void {
+pub fn write(list: AtomList, buffer: *std.Io.Writer.Allocating, undefs: anytype, elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
const osec = elf_file.sections.items(.shdr)[list.output_section_index];
assert(osec.sh_type != elf.SHT_NOBITS);
@@ -98,8 +98,7 @@ pub fn write(list: AtomList, buffer: *std.array_list.Managed(u8), undefs: anytyp
log.debug("writing atoms in section '{s}'", .{elf_file.getShString(osec.sh_name)});
const list_size = math.cast(usize, list.size) orelse return error.Overflow;
- try buffer.ensureUnusedCapacity(list_size);
- buffer.appendNTimesAssumeCapacity(0, list_size);
+ try buffer.writer.splatByteAll(0, list_size);
for (list.atoms.keys()) |ref| {
const atom_ptr = elf_file.atom(ref).?;
@@ -113,7 +112,7 @@ pub fn write(list: AtomList, buffer: *std.array_list.Managed(u8), undefs: anytyp
const object = atom_ptr.file(elf_file).?.object;
const code = try object.codeDecompressAlloc(elf_file, ref.index);
defer gpa.free(code);
- const out_code = buffer.items[off..][0..size];
+ const out_code = buffer.written()[off..][0..size];
@memcpy(out_code, code);
if (osec.sh_flags & elf.SHF_ALLOC == 0)
@@ -122,7 +121,7 @@ pub fn write(list: AtomList, buffer: *std.array_list.Managed(u8), undefs: anytyp
try atom_ptr.resolveRelocsAlloc(elf_file, out_code);
}
- try elf_file.base.file.?.pwriteAll(buffer.items, list.offset(elf_file));
+ try elf_file.base.file.?.pwriteAll(buffer.written(), list.offset(elf_file));
buffer.clearRetainingCapacity();
}
diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig
index 921b5eb733..88e484fcd5 100644
--- a/src/link/Elf/Object.zig
+++ b/src/link/Elf/Object.zig
@@ -952,7 +952,7 @@ pub fn convertCommonSymbols(self: *Object, elf_file: *Elf) !void {
const is_tls = sym.type(elf_file) == elf.STT_TLS;
const name = if (is_tls) ".tls_common" else ".common";
const name_offset = @as(u32, @intCast(self.strtab.items.len));
- try self.strtab.writer(gpa).print("{s}\x00", .{name});
+ try self.strtab.print(gpa, "{s}\x00", .{name});
var sh_flags: u32 = elf.SHF_ALLOC | elf.SHF_WRITE;
if (is_tls) sh_flags |= elf.SHF_TLS;
diff --git a/src/link/Elf/gc.zig b/src/link/Elf/gc.zig
index 92d80580b8..8778a0846c 100644
--- a/src/link/Elf/gc.zig
+++ b/src/link/Elf/gc.zig
@@ -162,22 +162,6 @@ fn prune(elf_file: *Elf) void {
}
}
-pub fn dumpPrunedAtoms(elf_file: *Elf) !void {
- const stderr = std.fs.File.stderr().deprecatedWriter();
- for (elf_file.objects.items) |index| {
- const file = elf_file.file(index).?;
- for (file.atoms()) |atom_index| {
- const atom = file.atom(atom_index) orelse continue;
- if (!atom.alive)
- // TODO should we simply print to stderr?
- try stderr.print("link: removing unused section '{s}' in file '{f}'\n", .{
- atom.name(elf_file),
- atom.file(elf_file).?.fmtPath(),
- });
- }
- }
-}
-
const Level = struct {
value: usize = 0,
diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig
index 58f18bd08b..4dd6e0370d 100644
--- a/src/link/Elf/relocatable.zig
+++ b/src/link/Elf/relocatable.zig
@@ -100,32 +100,33 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) !void {
state_log.debug("ar_strtab\n{f}\n", .{ar_strtab});
}
- var buffer = std.array_list.Managed(u8).init(gpa);
- defer buffer.deinit();
- try buffer.ensureTotalCapacityPrecise(total_size);
+ const buffer = try gpa.alloc(u8, total_size);
+ defer gpa.free(buffer);
+
+ var writer: std.Io.Writer = .fixed(buffer);
// Write magic
- try buffer.writer().writeAll(elf.ARMAG);
+ try writer.writeAll(elf.ARMAG);
// Write symtab
- try ar_symtab.write(.p64, elf_file, buffer.writer());
+ try ar_symtab.write(.p64, elf_file, &writer);
// Write strtab
if (ar_strtab.size() > 0) {
- if (!mem.isAligned(buffer.items.len, 2)) try buffer.writer().writeByte(0);
- try ar_strtab.write(buffer.writer());
+ if (!mem.isAligned(writer.end, 2)) try writer.writeByte(0);
+ try ar_strtab.write(&writer);
}
// Write object files
for (files.items) |index| {
- if (!mem.isAligned(buffer.items.len, 2)) try buffer.writer().writeByte(0);
- try elf_file.file(index).?.writeAr(elf_file, buffer.writer());
+ if (!mem.isAligned(writer.end, 2)) try writer.writeByte(0);
+ try elf_file.file(index).?.writeAr(elf_file, &writer);
}
- assert(buffer.items.len == total_size);
+ assert(writer.buffered().len == total_size);
try elf_file.base.file.?.setEndPos(total_size);
- try elf_file.base.file.?.pwriteAll(buffer.items, 0);
+ try elf_file.base.file.?.pwriteAll(writer.buffered(), 0);
if (diags.hasErrors()) return error.LinkFailure;
}
@@ -407,15 +408,16 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
};
const shdr = slice.items(.shdr)[shndx];
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
- var buffer = try std.array_list.Managed(u8).initCapacity(gpa, @intCast(sh_size - existing_size));
- defer buffer.deinit();
- try eh_frame.writeEhFrameRelocatable(elf_file, buffer.writer());
+ const buffer = try gpa.alloc(u8, @intCast(sh_size - existing_size));
+ defer gpa.free(buffer);
+ var writer: std.Io.Writer = .fixed(buffer);
+ try eh_frame.writeEhFrameRelocatable(elf_file, &writer);
log.debug("writing .eh_frame from 0x{x} to 0x{x}", .{
shdr.sh_offset + existing_size,
shdr.sh_offset + sh_size,
});
- assert(buffer.items.len == sh_size - existing_size);
- try elf_file.base.file.?.pwriteAll(buffer.items, shdr.sh_offset + existing_size);
+ assert(writer.buffered().len == sh_size - existing_size);
+ try elf_file.base.file.?.pwriteAll(writer.buffered(), shdr.sh_offset + existing_size);
}
if (elf_file.section_indexes.eh_frame_rela) |shndx| {
const shdr = slice.items(.shdr)[shndx];
@@ -446,15 +448,16 @@ fn writeGroups(elf_file: *Elf) !void {
for (elf_file.group_sections.items) |cgs| {
const shdr = elf_file.sections.items(.shdr)[cgs.shndx];
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
- var buffer = try std.array_list.Managed(u8).initCapacity(gpa, sh_size);
- defer buffer.deinit();
- try cgs.write(elf_file, buffer.writer());
- assert(buffer.items.len == sh_size);
+ const buffer = try gpa.alloc(u8, sh_size);
+ defer gpa.free(buffer);
+ var writer: std.Io.Writer = .fixed(buffer);
+ try cgs.write(elf_file, &writer);
+ assert(writer.buffered().len == sh_size);
log.debug("writing group from 0x{x} to 0x{x}", .{
shdr.sh_offset,
shdr.sh_offset + shdr.sh_size,
});
- try elf_file.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
+ try elf_file.base.file.?.pwriteAll(writer.buffered(), shdr.sh_offset);
}
}
diff --git a/src/link/Elf/synthetic_sections.zig b/src/link/Elf/synthetic_sections.zig
index be45937ffe..f0b3798d29 100644
--- a/src/link/Elf/synthetic_sections.zig
+++ b/src/link/Elf/synthetic_sections.zig
@@ -94,134 +94,134 @@ pub const DynamicSection = struct {
return nentries * @sizeOf(elf.Elf64_Dyn);
}
- pub fn write(dt: DynamicSection, elf_file: *Elf, writer: anytype) !void {
+ pub fn write(dt: DynamicSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
const shdrs = elf_file.sections.items(.shdr);
// NEEDED
for (dt.needed.items) |off| {
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_NEEDED, .d_val = off });
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_NEEDED, .d_val = off }), .little);
}
if (dt.soname) |off| {
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_SONAME, .d_val = off });
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_SONAME, .d_val = off }), .little);
}
// RUNPATH
// TODO add option in Options to revert to old RPATH tag
if (dt.rpath > 0) {
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_RUNPATH, .d_val = dt.rpath });
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_RUNPATH, .d_val = dt.rpath }), .little);
}
// INIT
if (elf_file.sectionByName(".init")) |shndx| {
const addr = shdrs[shndx].sh_addr;
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_INIT, .d_val = addr });
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_INIT, .d_val = addr }), .little);
}
// FINI
if (elf_file.sectionByName(".fini")) |shndx| {
const addr = shdrs[shndx].sh_addr;
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FINI, .d_val = addr });
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_FINI, .d_val = addr }), .little);
}
// INIT_ARRAY
if (elf_file.sectionByName(".init_array")) |shndx| {
const shdr = shdrs[shndx];
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_INIT_ARRAY, .d_val = shdr.sh_addr });
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_INIT_ARRAYSZ, .d_val = shdr.sh_size });
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_INIT_ARRAY, .d_val = shdr.sh_addr }), .little);
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_INIT_ARRAYSZ, .d_val = shdr.sh_size }), .little);
}
// FINI_ARRAY
if (elf_file.sectionByName(".fini_array")) |shndx| {
const shdr = shdrs[shndx];
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FINI_ARRAY, .d_val = shdr.sh_addr });
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FINI_ARRAYSZ, .d_val = shdr.sh_size });
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_FINI_ARRAY, .d_val = shdr.sh_addr }), .little);
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_FINI_ARRAYSZ, .d_val = shdr.sh_size }), .little);
}
// RELA
if (elf_file.section_indexes.rela_dyn) |shndx| {
const shdr = shdrs[shndx];
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_RELA, .d_val = shdr.sh_addr });
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_RELASZ, .d_val = shdr.sh_size });
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_RELAENT, .d_val = shdr.sh_entsize });
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_RELA, .d_val = shdr.sh_addr }), .little);
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_RELASZ, .d_val = shdr.sh_size }), .little);
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_RELAENT, .d_val = shdr.sh_entsize }), .little);
}
// JMPREL
if (elf_file.section_indexes.rela_plt) |shndx| {
const shdr = shdrs[shndx];
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_JMPREL, .d_val = shdr.sh_addr });
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_PLTRELSZ, .d_val = shdr.sh_size });
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_PLTREL, .d_val = elf.DT_RELA });
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_JMPREL, .d_val = shdr.sh_addr }), .little);
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_PLTRELSZ, .d_val = shdr.sh_size }), .little);
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_PLTREL, .d_val = elf.DT_RELA }), .little);
}
// PLTGOT
if (elf_file.section_indexes.got_plt) |shndx| {
const addr = shdrs[shndx].sh_addr;
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_PLTGOT, .d_val = addr });
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_PLTGOT, .d_val = addr }), .little);
}
{
assert(elf_file.section_indexes.hash != null);
const addr = shdrs[elf_file.section_indexes.hash.?].sh_addr;
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_HASH, .d_val = addr });
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_HASH, .d_val = addr }), .little);
}
if (elf_file.section_indexes.gnu_hash) |shndx| {
const addr = shdrs[shndx].sh_addr;
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_GNU_HASH, .d_val = addr });
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_GNU_HASH, .d_val = addr }), .little);
}
// TEXTREL
if (elf_file.has_text_reloc) {
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_TEXTREL, .d_val = 0 });
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_TEXTREL, .d_val = 0 }), .little);
}
// SYMTAB + SYMENT
{
assert(elf_file.section_indexes.dynsymtab != null);
const shdr = shdrs[elf_file.section_indexes.dynsymtab.?];
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_SYMTAB, .d_val = shdr.sh_addr });
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_SYMENT, .d_val = shdr.sh_entsize });
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_SYMTAB, .d_val = shdr.sh_addr }), .little);
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_SYMENT, .d_val = shdr.sh_entsize }), .little);
}
// STRTAB + STRSZ
{
assert(elf_file.section_indexes.dynstrtab != null);
const shdr = shdrs[elf_file.section_indexes.dynstrtab.?];
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_STRTAB, .d_val = shdr.sh_addr });
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_STRSZ, .d_val = shdr.sh_size });
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_STRTAB, .d_val = shdr.sh_addr }), .little);
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_STRSZ, .d_val = shdr.sh_size }), .little);
}
// VERSYM
if (elf_file.section_indexes.versym) |shndx| {
const addr = shdrs[shndx].sh_addr;
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_VERSYM, .d_val = addr });
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_VERSYM, .d_val = addr }), .little);
}
// VERNEED + VERNEEDNUM
if (elf_file.section_indexes.verneed) |shndx| {
const addr = shdrs[shndx].sh_addr;
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_VERNEED, .d_val = addr });
- try writer.writeStruct(elf.Elf64_Dyn{
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_VERNEED, .d_val = addr }), .little);
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{
.d_tag = elf.DT_VERNEEDNUM,
.d_val = elf_file.verneed.verneed.items.len,
- });
+ }), .little);
}
// FLAGS
if (dt.getFlags(elf_file)) |flags| {
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FLAGS, .d_val = flags });
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_FLAGS, .d_val = flags }), .little);
}
// FLAGS_1
if (dt.getFlags1(elf_file)) |flags_1| {
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FLAGS_1, .d_val = flags_1 });
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_FLAGS_1, .d_val = flags_1 }), .little);
}
// DEBUG
- if (!elf_file.isEffectivelyDynLib()) try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_DEBUG, .d_val = 0 });
+ if (!elf_file.isEffectivelyDynLib()) try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_DEBUG, .d_val = 0 }), .little);
// NULL
- try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_NULL, .d_val = 0 });
+ try writer.writeStruct(@as(elf.Elf64_Dyn, .{ .d_tag = elf.DT_NULL, .d_val = 0 }), .little);
}
};
@@ -360,7 +360,7 @@ pub const GotSection = struct {
return s;
}
- pub fn write(got: GotSection, elf_file: *Elf, writer: anytype) !void {
+ pub fn write(got: GotSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
const comp = elf_file.base.comp;
const is_dyn_lib = elf_file.isEffectivelyDynLib();
const apply_relocs = true; // TODO add user option for this
@@ -666,7 +666,7 @@ pub const PltSection = struct {
};
}
- pub fn write(plt: PltSection, elf_file: *Elf, writer: anytype) !void {
+ pub fn write(plt: PltSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
const cpu_arch = elf_file.getTarget().cpu.arch;
switch (cpu_arch) {
.x86_64 => try x86_64.write(plt, elf_file, writer),
@@ -763,7 +763,7 @@ pub const PltSection = struct {
}
const x86_64 = struct {
- fn write(plt: PltSection, elf_file: *Elf, writer: anytype) !void {
+ fn write(plt: PltSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
const shdrs = elf_file.sections.items(.shdr);
const plt_addr = shdrs[elf_file.section_indexes.plt.?].sh_addr;
const got_plt_addr = shdrs[elf_file.section_indexes.got_plt.?].sh_addr;
@@ -778,7 +778,7 @@ pub const PltSection = struct {
disp = @as(i64, @intCast(got_plt_addr + 16)) - @as(i64, @intCast(plt_addr + 14)) - 4;
mem.writeInt(i32, preamble[14..][0..4], @as(i32, @intCast(disp)), .little);
try writer.writeAll(&preamble);
- try writer.writeByteNTimes(0xcc, preambleSize(.x86_64) - preamble.len);
+ try writer.splatByteAll(0xcc, preambleSize(.x86_64) - preamble.len);
for (plt.symbols.items, 0..) |ref, i| {
const sym = elf_file.symbol(ref).?;
@@ -798,7 +798,7 @@ pub const PltSection = struct {
};
const aarch64 = struct {
- fn write(plt: PltSection, elf_file: *Elf, writer: anytype) !void {
+ fn write(plt: PltSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
{
const shdrs = elf_file.sections.items(.shdr);
const plt_addr: i64 = @intCast(shdrs[elf_file.section_indexes.plt.?].sh_addr);
@@ -853,7 +853,7 @@ pub const GotPltSection = struct {
return preamble_size + elf_file.plt.symbols.items.len * 8;
}
- pub fn write(got_plt: GotPltSection, elf_file: *Elf, writer: anytype) !void {
+ pub fn write(got_plt: GotPltSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
_ = got_plt;
{
// [0]: _DYNAMIC
@@ -904,7 +904,7 @@ pub const PltGotSection = struct {
};
}
- pub fn write(plt_got: PltGotSection, elf_file: *Elf, writer: anytype) !void {
+ pub fn write(plt_got: PltGotSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
const cpu_arch = elf_file.getTarget().cpu.arch;
switch (cpu_arch) {
.x86_64 => try x86_64.write(plt_got, elf_file, writer),
@@ -940,7 +940,7 @@ pub const PltGotSection = struct {
}
const x86_64 = struct {
- pub fn write(plt_got: PltGotSection, elf_file: *Elf, writer: anytype) !void {
+ pub fn write(plt_got: PltGotSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
for (plt_got.symbols.items) |ref| {
const sym = elf_file.symbol(ref).?;
const target_addr = sym.gotAddress(elf_file);
@@ -958,7 +958,7 @@ pub const PltGotSection = struct {
};
const aarch64 = struct {
- fn write(plt_got: PltGotSection, elf_file: *Elf, writer: anytype) !void {
+ fn write(plt_got: PltGotSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
for (plt_got.symbols.items) |ref| {
const sym = elf_file.symbol(ref).?;
const target_addr = sym.gotAddress(elf_file);
@@ -1133,14 +1133,14 @@ pub const DynsymSection = struct {
return @as(u32, @intCast(dynsym.entries.items.len + 1));
}
- pub fn write(dynsym: DynsymSection, elf_file: *Elf, writer: anytype) !void {
- try writer.writeStruct(Elf.null_sym);
+ pub fn write(dynsym: DynsymSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
+ try writer.writeStruct(Elf.null_sym, .little);
for (dynsym.entries.items) |entry| {
const sym = elf_file.symbol(entry.ref).?;
var out_sym: elf.Elf64_Sym = Elf.null_sym;
sym.setOutputSym(elf_file, &out_sym);
out_sym.st_name = entry.off;
- try writer.writeStruct(out_sym);
+ try writer.writeStruct(out_sym, .little);
}
}
};
@@ -1175,10 +1175,12 @@ pub const HashSection = struct {
}
try hs.buffer.ensureTotalCapacityPrecise(gpa, (2 + nsyms * 2) * 4);
- hs.buffer.writer(gpa).writeInt(u32, @as(u32, @intCast(nsyms)), .little) catch unreachable;
- hs.buffer.writer(gpa).writeInt(u32, @as(u32, @intCast(nsyms)), .little) catch unreachable;
- hs.buffer.writer(gpa).writeAll(mem.sliceAsBytes(buckets)) catch unreachable;
- hs.buffer.writer(gpa).writeAll(mem.sliceAsBytes(chains)) catch unreachable;
+ var w: std.Io.Writer = .fixed(hs.buffer.unusedCapacitySlice());
+ w.writeInt(u32, @as(u32, @intCast(nsyms)), .little) catch unreachable;
+ w.writeInt(u32, @as(u32, @intCast(nsyms)), .little) catch unreachable;
+ w.writeAll(@ptrCast(buckets)) catch unreachable;
+ w.writeAll(@ptrCast(chains)) catch unreachable;
+ hs.buffer.items.len += w.end;
}
pub inline fn size(hs: HashSection) usize {
@@ -1439,7 +1441,7 @@ pub const VerneedSection = struct {
return vern.verneed.items.len * @sizeOf(elf.Elf64_Verneed) + vern.vernaux.items.len * @sizeOf(elf.Vernaux);
}
- pub fn write(vern: VerneedSection, writer: anytype) !void {
+ pub fn write(vern: VerneedSection, writer: *std.Io.Writer) !void {
try writer.writeAll(mem.sliceAsBytes(vern.verneed.items));
try writer.writeAll(mem.sliceAsBytes(vern.vernaux.items));
}
@@ -1467,7 +1469,7 @@ pub const GroupSection = struct {
return (members.len + 1) * @sizeOf(u32);
}
- pub fn write(cgs: GroupSection, elf_file: *Elf, writer: anytype) !void {
+ pub fn write(cgs: GroupSection, elf_file: *Elf, writer: *std.Io.Writer) !void {
const cg = cgs.group(elf_file);
const object = cg.file(elf_file).object;
const members = cg.members(elf_file);
@@ -1495,7 +1497,7 @@ pub const GroupSection = struct {
}
};
-fn writeInt(value: anytype, elf_file: *Elf, writer: anytype) !void {
+fn writeInt(value: anytype, elf_file: *Elf, writer: *std.Io.Writer) !void {
const entry_size = elf_file.archPtrWidthBytes();
const target = elf_file.getTarget();
const endian = target.cpu.arch.endian();
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 434c27bfa3..43be35994f 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -589,7 +589,7 @@ pub fn flush(
);
const ncmds, const sizeofcmds, const uuid_cmd_offset = self.writeLoadCommands() catch |err| switch (err) {
- error.NoSpaceLeft => unreachable,
+ error.WriteFailed => unreachable,
error.OutOfMemory => return error.OutOfMemory,
error.LinkFailure => return error.LinkFailure,
};
@@ -1074,7 +1074,7 @@ fn accessLibPath(
for (&[_][]const u8{ ".tbd", ".dylib", "" }) |ext| {
test_path.clearRetainingCapacity();
- try test_path.writer().print("{s}" ++ sep ++ "lib{s}{s}", .{ search_dir, name, ext });
+ try test_path.print("{s}" ++ sep ++ "lib{s}{s}", .{ search_dir, name, ext });
try checked_paths.append(try arena.dupe(u8, test_path.items));
fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
error.FileNotFound => continue,
@@ -1097,7 +1097,7 @@ fn accessFrameworkPath(
for (&[_][]const u8{ ".tbd", ".dylib", "" }) |ext| {
test_path.clearRetainingCapacity();
- try test_path.writer().print("{s}" ++ sep ++ "{s}.framework" ++ sep ++ "{s}{s}", .{
+ try test_path.print("{s}" ++ sep ++ "{s}.framework" ++ sep ++ "{s}{s}", .{
search_dir,
name,
name,
@@ -1178,9 +1178,9 @@ fn parseDependentDylibs(self: *MachO) !void {
for (&[_][]const u8{ ".tbd", ".dylib", "" }) |ext| {
test_path.clearRetainingCapacity();
if (self.base.comp.sysroot) |root| {
- try test_path.writer().print("{s}" ++ fs.path.sep_str ++ "{s}{s}", .{ root, path, ext });
+ try test_path.print("{s}" ++ fs.path.sep_str ++ "{s}{s}", .{ root, path, ext });
} else {
- try test_path.writer().print("{s}{s}", .{ path, ext });
+ try test_path.print("{s}{s}", .{ path, ext });
}
try checked_paths.append(try arena.dupe(u8, test_path.items));
fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
@@ -2528,8 +2528,8 @@ fn writeThunkWorker(self: *MachO, thunk: Thunk) void {
fn doWork(th: Thunk, buffer: []u8, macho_file: *MachO) !void {
const off = try macho_file.cast(usize, th.value);
const size = th.size();
- var stream = std.io.fixedBufferStream(buffer[off..][0..size]);
- try th.write(macho_file, stream.writer());
+ var stream: Writer = .fixed(buffer[off..][0..size]);
+ try th.write(macho_file, &stream);
}
}.doWork;
const out = self.sections.items(.out)[thunk.out_n_sect].items;
@@ -2556,15 +2556,15 @@ fn writeSyntheticSectionWorker(self: *MachO, sect_id: u8, out: []u8) void {
const doWork = struct {
fn doWork(macho_file: *MachO, tag: Tag, buffer: []u8) !void {
- var stream = std.io.fixedBufferStream(buffer);
+ var stream: Writer = .fixed(buffer);
switch (tag) {
.eh_frame => eh_frame.write(macho_file, buffer),
.unwind_info => try macho_file.unwind_info.write(macho_file, buffer),
- .got => try macho_file.got.write(macho_file, stream.writer()),
- .stubs => try macho_file.stubs.write(macho_file, stream.writer()),
- .la_symbol_ptr => try macho_file.la_symbol_ptr.write(macho_file, stream.writer()),
- .tlv_ptr => try macho_file.tlv_ptr.write(macho_file, stream.writer()),
- .objc_stubs => try macho_file.objc_stubs.write(macho_file, stream.writer()),
+ .got => try macho_file.got.write(macho_file, &stream),
+ .stubs => try macho_file.stubs.write(macho_file, &stream),
+ .la_symbol_ptr => try macho_file.la_symbol_ptr.write(macho_file, &stream),
+ .tlv_ptr => try macho_file.tlv_ptr.write(macho_file, &stream),
+ .objc_stubs => try macho_file.objc_stubs.write(macho_file, &stream),
}
}
}.doWork;
@@ -2605,8 +2605,8 @@ fn updateLazyBindSizeWorker(self: *MachO) void {
try macho_file.lazy_bind_section.updateSize(macho_file);
const sect_id = macho_file.stubs_helper_sect_index.?;
const out = &macho_file.sections.items(.out)[sect_id];
- var stream = std.io.fixedBufferStream(out.items);
- try macho_file.stubs_helper.write(macho_file, stream.writer());
+ var stream: Writer = .fixed(out.items);
+ try macho_file.stubs_helper.write(macho_file, &stream);
}
}.doWork;
doWork(self) catch |err|
@@ -2669,18 +2669,17 @@ fn writeDyldInfo(self: *MachO) !void {
defer gpa.free(buffer);
@memset(buffer, 0);
- var stream = std.io.fixedBufferStream(buffer);
- const writer = stream.writer();
-
- try self.rebase_section.write(writer);
- try stream.seekTo(cmd.bind_off - base_off);
- try self.bind_section.write(writer);
- try stream.seekTo(cmd.weak_bind_off - base_off);
- try self.weak_bind_section.write(writer);
- try stream.seekTo(cmd.lazy_bind_off - base_off);
- try self.lazy_bind_section.write(writer);
- try stream.seekTo(cmd.export_off - base_off);
- try self.export_trie.write(writer);
+ var writer: Writer = .fixed(buffer);
+
+ try self.rebase_section.write(&writer);
+ writer.end = @intCast(cmd.bind_off - base_off);
+ try self.bind_section.write(&writer);
+ writer.end = @intCast(cmd.weak_bind_off - base_off);
+ try self.weak_bind_section.write(&writer);
+ writer.end = @intCast(cmd.lazy_bind_off - base_off);
+ try self.lazy_bind_section.write(&writer);
+ writer.end = @intCast(cmd.export_off - base_off);
+ try self.export_trie.write(&writer);
try self.pwriteAll(buffer, cmd.rebase_off);
}
@@ -2689,10 +2688,10 @@ pub fn writeDataInCode(self: *MachO) !void {
defer tracy.end();
const gpa = self.base.comp.gpa;
const cmd = self.data_in_code_cmd;
- var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.data_in_code.size());
+ var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.data_in_code.size());
defer buffer.deinit();
- try self.data_in_code.write(self, buffer.writer());
- try self.pwriteAll(buffer.items, cmd.dataoff);
+ self.data_in_code.write(self, &buffer.writer) catch return error.OutOfMemory;
+ try self.pwriteAll(buffer.written(), cmd.dataoff);
}
fn writeIndsymtab(self: *MachO) !void {
@@ -2701,10 +2700,11 @@ fn writeIndsymtab(self: *MachO) !void {
const gpa = self.base.comp.gpa;
const cmd = self.dysymtab_cmd;
const needed_size = cmd.nindirectsyms * @sizeOf(u32);
- var buffer = try std.array_list.Managed(u8).initCapacity(gpa, needed_size);
- defer buffer.deinit();
- try self.indsymtab.write(self, buffer.writer());
- try self.pwriteAll(buffer.items, cmd.indirectsymoff);
+ const buffer = try gpa.alloc(u8, needed_size);
+ defer gpa.free(buffer);
+ var writer: Writer = .fixed(buffer);
+ try self.indsymtab.write(self, &writer);
+ try self.pwriteAll(buffer, cmd.indirectsymoff);
}
pub fn writeSymtabToFile(self: *MachO) !void {
@@ -2821,8 +2821,7 @@ fn writeLoadCommands(self: *MachO) !struct { usize, usize, u64 } {
const buffer = try gpa.alloc(u8, needed_size);
defer gpa.free(buffer);
- var stream = std.io.fixedBufferStream(buffer);
- const writer = stream.writer();
+ var writer: Writer = .fixed(buffer);
var ncmds: usize = 0;
@@ -2831,26 +2830,26 @@ fn writeLoadCommands(self: *MachO) !struct { usize, usize, u64 } {
const slice = self.sections.slice();
var sect_id: usize = 0;
for (self.segments.items) |seg| {
- try writer.writeStruct(seg);
+ try writer.writeStruct(seg, .little);
for (slice.items(.header)[sect_id..][0..seg.nsects]) |header| {
- try writer.writeStruct(header);
+ try writer.writeStruct(header, .little);
}
sect_id += seg.nsects;
}
ncmds += self.segments.items.len;
}
- try writer.writeStruct(self.dyld_info_cmd);
+ try writer.writeStruct(self.dyld_info_cmd, .little);
ncmds += 1;
- try writer.writeStruct(self.function_starts_cmd);
+ try writer.writeStruct(self.function_starts_cmd, .little);
ncmds += 1;
- try writer.writeStruct(self.data_in_code_cmd);
+ try writer.writeStruct(self.data_in_code_cmd, .little);
ncmds += 1;
- try writer.writeStruct(self.symtab_cmd);
+ try writer.writeStruct(self.symtab_cmd, .little);
ncmds += 1;
- try writer.writeStruct(self.dysymtab_cmd);
+ try writer.writeStruct(self.dysymtab_cmd, .little);
ncmds += 1;
- try load_commands.writeDylinkerLC(writer);
+ try load_commands.writeDylinkerLC(&writer);
ncmds += 1;
if (self.getInternalObject()) |obj| {
@@ -2861,44 +2860,44 @@ fn writeLoadCommands(self: *MachO) !struct { usize, usize, u64 } {
0
else
@as(u32, @intCast(sym.getAddress(.{ .stubs = true }, self) - seg.vmaddr));
- try writer.writeStruct(macho.entry_point_command{
+ try writer.writeStruct(@as(macho.entry_point_command, .{
.entryoff = entryoff,
.stacksize = self.base.stack_size,
- });
+ }), .little);
ncmds += 1;
}
}
if (self.base.isDynLib()) {
- try load_commands.writeDylibIdLC(self, writer);
+ try load_commands.writeDylibIdLC(self, &writer);
ncmds += 1;
}
for (self.rpath_list) |rpath| {
- try load_commands.writeRpathLC(rpath, writer);
+ try load_commands.writeRpathLC(rpath, &writer);
ncmds += 1;
}
if (comp.config.any_sanitize_thread) {
const path = try comp.tsan_lib.?.full_object_path.toString(gpa);
defer gpa.free(path);
const rpath = std.fs.path.dirname(path) orelse ".";
- try load_commands.writeRpathLC(rpath, writer);
+ try load_commands.writeRpathLC(rpath, &writer);
ncmds += 1;
}
- try writer.writeStruct(macho.source_version_command{ .version = 0 });
+ try writer.writeStruct(@as(macho.source_version_command, .{ .version = 0 }), .little);
ncmds += 1;
if (self.platform.isBuildVersionCompatible()) {
- try load_commands.writeBuildVersionLC(self.platform, self.sdk_version, writer);
+ try load_commands.writeBuildVersionLC(self.platform, self.sdk_version, &writer);
ncmds += 1;
} else {
- try load_commands.writeVersionMinLC(self.platform, self.sdk_version, writer);
+ try load_commands.writeVersionMinLC(self.platform, self.sdk_version, &writer);
ncmds += 1;
}
- const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + stream.pos;
- try writer.writeStruct(self.uuid_cmd);
+ const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + writer.end;
+ try writer.writeStruct(self.uuid_cmd, .little);
ncmds += 1;
for (self.dylibs.items) |index| {
@@ -2916,16 +2915,16 @@ fn writeLoadCommands(self: *MachO) !struct { usize, usize, u64 } {
.timestamp = dylib_id.timestamp,
.current_version = dylib_id.current_version,
.compatibility_version = dylib_id.compatibility_version,
- }, writer);
+ }, &writer);
ncmds += 1;
}
if (self.requiresCodeSig()) {
- try writer.writeStruct(self.codesig_cmd);
+ try writer.writeStruct(self.codesig_cmd, .little);
ncmds += 1;
}
- assert(stream.pos == needed_size);
+ assert(writer.end == needed_size);
try self.pwriteAll(buffer, @sizeOf(macho.mach_header_64));
@@ -3014,25 +3013,32 @@ pub fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void {
pub fn writeCodeSignature(self: *MachO, code_sig: *CodeSignature) !void {
const seg = self.getTextSegment();
const offset = self.codesig_cmd.dataoff;
+ const gpa = self.base.comp.gpa;
- var buffer = std.array_list.Managed(u8).init(self.base.comp.gpa);
+ var buffer: std.Io.Writer.Allocating = .init(gpa);
defer buffer.deinit();
- try buffer.ensureTotalCapacityPrecise(code_sig.size());
- try code_sig.writeAdhocSignature(self, .{
+ // writeAdhocSignature changes code_sig.size() while it runs, so reserve
+ // the current estimate as unused capacity rather than a precise total.
+ try buffer.ensureUnusedCapacity(code_sig.size());
+
+ code_sig.writeAdhocSignature(self, .{
.file = self.base.file.?,
.exec_seg_base = seg.fileoff,
.exec_seg_limit = seg.filesize,
.file_size = offset,
.dylib = self.base.isDynLib(),
- }, buffer.writer());
- assert(buffer.items.len == code_sig.size());
+ }, &buffer.writer) catch |err| switch (err) {
+ error.WriteFailed => return error.OutOfMemory,
+ else => |e| return e,
+ };
+ assert(buffer.written().len == code_sig.size());
log.debug("writing code signature from 0x{x} to 0x{x}", .{
offset,
- offset + buffer.items.len,
+ offset + buffer.written().len,
});
- try self.pwriteAll(buffer.items, offset);
+ try self.pwriteAll(buffer.written(), offset);
}
pub fn updateFunc(
@@ -5372,7 +5378,7 @@ const macho = std.macho;
const math = std.math;
const mem = std.mem;
const meta = std.meta;
-const Writer = std.io.Writer;
+const Writer = std.Io.Writer;
const aarch64 = codegen.aarch64.encoding;
const bind = @import("MachO/dyld_info/bind.zig");
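
The load-command hunks above all replace std.io.fixedBufferStream with a std.Io.Writer backed directly by a preallocated slice. The following is a minimal sketch of that pattern, using only calls that appear in this patch (.fixed, writeStruct with an explicit endianness, writer.end, buffered()); the Header type, test name, and values are invented for illustration and are not part of the patch:

    const std = @import("std");

    test "fixed-buffer Writer sketch" {
        const Header = extern struct { magic: u32, len: u32 };
        var buf: [64]u8 = undefined;

        // A Writer over a fixed slice replaces std.io.fixedBufferStream.
        var writer: std.Io.Writer = .fixed(&buf);
        // Structs are now written with an explicit byte order.
        try writer.writeStruct(Header{ .magic = 0xfeedface, .len = 8 }, .little);

        // `writer.end` plays the role of the old `stream.pos`.
        try std.testing.expect(writer.end == @sizeOf(Header));
        try std.testing.expect(writer.buffered().len == @sizeOf(Header));
    }
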
diff --git a/src/link/MachO/Archive.zig b/src/link/MachO/Archive.zig
index ae71dcde8d..1c7e285eb7 100644
--- a/src/link/MachO/Archive.zig
+++ b/src/link/MachO/Archive.zig
@@ -81,34 +81,20 @@ pub fn writeHeader(
object_name: []const u8,
object_size: usize,
format: Format,
- writer: anytype,
+ writer: *Writer,
) !void {
- var hdr: ar_hdr = .{
- .ar_name = undefined,
- .ar_date = undefined,
- .ar_uid = undefined,
- .ar_gid = undefined,
- .ar_mode = undefined,
- .ar_size = undefined,
- .ar_fmag = undefined,
- };
- @memset(mem.asBytes(&hdr), 0x20);
- inline for (@typeInfo(ar_hdr).@"struct".fields) |field| {
- var stream = std.io.fixedBufferStream(&@field(hdr, field.name));
- stream.writer().print("0", .{}) catch unreachable;
- }
- @memcpy(&hdr.ar_fmag, ARFMAG);
+ var hdr: ar_hdr = .{};
const object_name_len = mem.alignForward(usize, object_name.len + 1, ptrWidth(format));
const total_object_size = object_size + object_name_len;
{
- var stream = std.io.fixedBufferStream(&hdr.ar_name);
- stream.writer().print("#1/{d}", .{object_name_len}) catch unreachable;
+ var stream: Writer = .fixed(&hdr.ar_name);
+ stream.print("#1/{d}", .{object_name_len}) catch unreachable;
}
{
- var stream = std.io.fixedBufferStream(&hdr.ar_size);
- stream.writer().print("{d}", .{total_object_size}) catch unreachable;
+ var stream: Writer = .fixed(&hdr.ar_size);
+ stream.print("{d}", .{total_object_size}) catch unreachable;
}
try writer.writeAll(mem.asBytes(&hdr));
@@ -116,7 +102,7 @@ pub fn writeHeader(
const padding = object_name_len - object_name.len - 1;
if (padding > 0) {
- try writer.writeByteNTimes(0, padding);
+ try writer.splatByteAll(0, padding);
}
}
@@ -138,25 +124,19 @@ pub const SYMDEF64_SORTED = "__.SYMDEF_64 SORTED";
pub const ar_hdr = extern struct {
/// Member file name, sometimes / terminated.
- ar_name: [16]u8,
-
+ ar_name: [16]u8 = "0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20".*,
/// File date, decimal seconds since Epoch.
- ar_date: [12]u8,
-
+ ar_date: [12]u8 = "0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20".*,
/// User ID, in ASCII format.
- ar_uid: [6]u8,
-
+ ar_uid: [6]u8 = "0\x20\x20\x20\x20\x20".*,
/// Group ID, in ASCII format.
- ar_gid: [6]u8,
-
+ ar_gid: [6]u8 = "0\x20\x20\x20\x20\x20".*,
/// File mode, in ASCII octal.
- ar_mode: [8]u8,
-
+ ar_mode: [8]u8 = "0\x20\x20\x20\x20\x20\x20\x20".*,
/// File size, in ASCII decimal.
- ar_size: [10]u8,
-
+ ar_size: [10]u8 = "0\x20\x20\x20\x20\x20\x20\x20\x20\x20".*,
/// Always contains ARFMAG.
- ar_fmag: [2]u8,
+ ar_fmag: [2]u8 = ARFMAG.*,
fn date(self: ar_hdr) !u64 {
const value = mem.trimEnd(u8, &self.ar_date, &[_]u8{@as(u8, 0x20)});
@@ -201,7 +181,7 @@ pub const ArSymtab = struct {
return ptr_width + ar.entries.items.len * 2 * ptr_width + ptr_width + mem.alignForward(usize, ar.strtab.buffer.items.len, ptr_width);
}
- pub fn write(ar: ArSymtab, format: Format, macho_file: *MachO, writer: anytype) !void {
+ pub fn write(ar: ArSymtab, format: Format, macho_file: *MachO, writer: *Writer) !void {
const ptr_width = ptrWidth(format);
// Header
try writeHeader(SYMDEF, ar.size(format), format, writer);
@@ -226,7 +206,7 @@ pub const ArSymtab = struct {
// Strtab
try writer.writeAll(ar.strtab.buffer.items);
if (padding > 0) {
- try writer.writeByteNTimes(0, padding);
+ try writer.splatByteAll(0, padding);
}
}
@@ -275,7 +255,7 @@ pub fn ptrWidth(format: Format) usize {
};
}
-pub fn writeInt(format: Format, value: u64, writer: anytype) !void {
+pub fn writeInt(format: Format, value: u64, writer: *Writer) !void {
switch (format) {
.p32 => try writer.writeInt(u32, std.math.cast(u32, value) orelse return error.Overflow, .little),
.p64 => try writer.writeInt(u64, value, .little),
@@ -299,7 +279,7 @@ const mem = std.mem;
const std = @import("std");
const Allocator = std.mem.Allocator;
const Path = std.Build.Cache.Path;
-const Writer = std.io.Writer;
+const Writer = std.Io.Writer;
const Archive = @This();
const File = @import("file.zig").File;
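
The ar_hdr change above pre-fills each fixed-width ASCII field and then formats into it through a fixed Writer. Below is a small, self-contained sketch of that formatting step under the same API assumptions; the field width, test name, and the length value 20 are placeholders, not values from the patch:

    const std = @import("std");

    test "fixed-width archive field sketch" {
        // Space-padded 16-byte name field, as in ar_hdr.ar_name.
        var name_field = [_]u8{0x20} ** 16;

        var field: std.Io.Writer = .fixed(&name_field);
        // "#1/<len>" is the BSD extended-name convention used by writeHeader.
        field.print("#1/{d}", .{20}) catch unreachable; // always fits in 16 bytes here

        try std.testing.expectEqualStrings("#1/20", name_field[0..field.end]);
        try std.testing.expect(name_field[field.end] == 0x20); // rest stays space-padded
    }
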
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index 93b3bdde37..c5d29c7ff5 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -581,19 +581,19 @@ pub fn resolveRelocs(self: Atom, macho_file: *MachO, buffer: []u8) !void {
relocs_log.debug("{x}: {s}", .{ self.value, name });
var has_error = false;
- var stream = std.io.fixedBufferStream(buffer);
+ var stream: Writer = .fixed(buffer);
var i: usize = 0;
while (i < relocs.len) : (i += 1) {
const rel = relocs[i];
- const rel_offset = rel.offset - self.off;
+ const rel_offset: usize = @intCast(rel.offset - self.off);
const subtractor = if (rel.meta.has_subtractor) relocs[i - 1] else null;
if (rel.tag == .@"extern") {
if (rel.getTargetSymbol(self, macho_file).getFile(macho_file) == null) continue;
}
- try stream.seekTo(rel_offset);
- self.resolveRelocInner(rel, subtractor, buffer, macho_file, stream.writer()) catch |err| {
+ stream.end = rel_offset;
+ self.resolveRelocInner(rel, subtractor, buffer, macho_file, &stream) catch |err| {
switch (err) {
error.RelaxFail => {
const target = switch (rel.tag) {
@@ -630,6 +630,7 @@ const ResolveError = error{
UnexpectedRemainder,
Overflow,
OutOfMemory,
+ WriteFailed,
};
fn resolveRelocInner(
@@ -638,7 +639,7 @@ fn resolveRelocInner(
subtractor: ?Relocation,
code: []u8,
macho_file: *MachO,
- writer: anytype,
+ writer: *Writer,
) ResolveError!void {
const t = &macho_file.base.comp.root_mod.resolved_target.result;
const cpu_arch = t.cpu.arch;
@@ -1147,7 +1148,7 @@ const math = std.math;
const mem = std.mem;
const log = std.log.scoped(.link);
const relocs_log = std.log.scoped(.link_relocs);
-const Writer = std.io.Writer;
+const Writer = std.Io.Writer;
const Allocator = mem.Allocator;
const AtomicBool = std.atomic.Value(bool);
diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig
index 6736505037..25addd8cdc 100644
--- a/src/link/MachO/CodeSignature.zig
+++ b/src/link/MachO/CodeSignature.zig
@@ -263,7 +263,7 @@ pub fn writeAdhocSignature(
self: *CodeSignature,
macho_file: *MachO,
opts: WriteOpts,
- writer: anytype,
+ writer: *std.Io.Writer,
) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -304,10 +304,10 @@ pub fn writeAdhocSignature(
var hash: [hash_size]u8 = undefined;
if (self.requirements) |*req| {
- var buf = std.array_list.Managed(u8).init(allocator);
- defer buf.deinit();
- try req.write(buf.writer());
- Sha256.hash(buf.items, &hash, .{});
+ var a: std.Io.Writer.Allocating = .init(allocator);
+ defer a.deinit();
+ try req.write(&a.writer);
+ Sha256.hash(a.written(), &hash, .{});
self.code_directory.addSpecialHash(req.slotType(), hash);
try blobs.append(.{ .requirements = req });
@@ -316,10 +316,10 @@ pub fn writeAdhocSignature(
}
if (self.entitlements) |*ents| {
- var buf = std.array_list.Managed(u8).init(allocator);
- defer buf.deinit();
- try ents.write(buf.writer());
- Sha256.hash(buf.items, &hash, .{});
+ var a: std.Io.Writer.Allocating = .init(allocator);
+ defer a.deinit();
+ try ents.write(&a.writer);
+ Sha256.hash(a.written(), &hash, .{});
self.code_directory.addSpecialHash(ents.slotType(), hash);
try blobs.append(.{ .entitlements = ents });
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index 4212827b2c..550dd3d63d 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -273,14 +273,13 @@ fn writeLoadCommands(self: *DebugSymbols, macho_file: *MachO) !struct { usize, u
const buffer = try gpa.alloc(u8, needed_size);
defer gpa.free(buffer);
- var stream = std.io.fixedBufferStream(buffer);
- const writer = stream.writer();
+ var writer: Writer = .fixed(buffer);
var ncmds: usize = 0;
// UUID comes first presumably to speed up lookup by the consumer like lldb.
@memcpy(&self.uuid_cmd.uuid, &macho_file.uuid_cmd.uuid);
- try writer.writeStruct(self.uuid_cmd);
+ try writer.writeStruct(self.uuid_cmd, .little);
ncmds += 1;
// Segment and section load commands
@@ -293,11 +292,11 @@ fn writeLoadCommands(self: *DebugSymbols, macho_file: *MachO) !struct { usize, u
var out_seg = seg;
out_seg.fileoff = 0;
out_seg.filesize = 0;
- try writer.writeStruct(out_seg);
+ try writer.writeStruct(out_seg, .little);
for (slice.items(.header)[sect_id..][0..seg.nsects]) |header| {
var out_header = header;
out_header.offset = 0;
- try writer.writeStruct(out_header);
+ try writer.writeStruct(out_header, .little);
}
sect_id += seg.nsects;
}
@@ -306,19 +305,19 @@ fn writeLoadCommands(self: *DebugSymbols, macho_file: *MachO) !struct { usize, u
// Next, commit DSYM's __LINKEDIT and __DWARF segments headers.
sect_id = 0;
for (self.segments.items) |seg| {
- try writer.writeStruct(seg);
+ try writer.writeStruct(seg, .little);
for (self.sections.items[sect_id..][0..seg.nsects]) |header| {
- try writer.writeStruct(header);
+ try writer.writeStruct(header, .little);
}
sect_id += seg.nsects;
}
ncmds += self.segments.items.len;
}
- try writer.writeStruct(self.symtab_cmd);
+ try writer.writeStruct(self.symtab_cmd, .little);
ncmds += 1;
- assert(stream.pos == needed_size);
+ assert(writer.end == needed_size);
try self.file.?.pwriteAll(buffer, @sizeOf(macho.mach_header_64));
@@ -460,7 +459,7 @@ const math = std.math;
const mem = std.mem;
const padToIdeal = MachO.padToIdeal;
const trace = @import("../../tracy.zig").trace;
-const Writer = std.io.Writer;
+const Writer = std.Io.Writer;
const Allocator = mem.Allocator;
const MachO = @import("../MachO.zig");
diff --git a/src/link/MachO/InternalObject.zig b/src/link/MachO/InternalObject.zig
index d284df299a..9e21d49178 100644
--- a/src/link/MachO/InternalObject.zig
+++ b/src/link/MachO/InternalObject.zig
@@ -261,7 +261,7 @@ fn addObjcMethnameSection(self: *InternalObject, methname: []const u8, macho_fil
sect.offset = @intCast(self.objc_methnames.items.len);
try self.objc_methnames.ensureUnusedCapacity(gpa, methname.len + 1);
- self.objc_methnames.writer(gpa).print("{s}\x00", .{methname}) catch unreachable;
+ self.objc_methnames.print(gpa, "{s}\x00", .{methname}) catch unreachable;
const name_str = try self.addString(gpa, "ltmp");
const sym_index = try self.addSymbol(gpa);
diff --git a/src/link/MachO/UnwindInfo.zig b/src/link/MachO/UnwindInfo.zig
index a0ffdfe06a..9b7185743b 100644
--- a/src/link/MachO/UnwindInfo.zig
+++ b/src/link/MachO/UnwindInfo.zig
@@ -293,8 +293,7 @@ pub fn write(info: UnwindInfo, macho_file: *MachO, buffer: []u8) !void {
const seg = macho_file.getTextSegment();
const header = macho_file.sections.items(.header)[macho_file.unwind_info_sect_index.?];
- var stream = std.io.fixedBufferStream(buffer);
- const writer = stream.writer();
+ var writer: Writer = .fixed(buffer);
const common_encodings_offset: u32 = @sizeOf(macho.unwind_info_section_header);
const common_encodings_count: u32 = info.common_encodings_count;
@@ -303,14 +302,14 @@ pub fn write(info: UnwindInfo, macho_file: *MachO, buffer: []u8) !void {
const indexes_offset: u32 = personalities_offset + personalities_count * @sizeOf(u32);
const indexes_count: u32 = @as(u32, @intCast(info.pages.items.len + 1));
- try writer.writeStruct(macho.unwind_info_section_header{
+ try writer.writeStruct(@as(macho.unwind_info_section_header, .{
.commonEncodingsArraySectionOffset = common_encodings_offset,
.commonEncodingsArrayCount = common_encodings_count,
.personalityArraySectionOffset = personalities_offset,
.personalityArrayCount = personalities_count,
.indexSectionOffset = indexes_offset,
.indexCount = indexes_count,
- });
+ }), .little);
try writer.writeAll(mem.sliceAsBytes(info.common_encodings[0..info.common_encodings_count]));
@@ -325,42 +324,42 @@ pub fn write(info: UnwindInfo, macho_file: *MachO, buffer: []u8) !void {
for (info.pages.items, 0..) |page, i| {
assert(page.count > 0);
const rec = info.records.items[page.start].getUnwindRecord(macho_file);
- try writer.writeStruct(macho.unwind_info_section_header_index_entry{
+ try writer.writeStruct(@as(macho.unwind_info_section_header_index_entry, .{
.functionOffset = @as(u32, @intCast(rec.getAtomAddress(macho_file) - seg.vmaddr)),
.secondLevelPagesSectionOffset = @as(u32, @intCast(pages_base_offset + i * second_level_page_bytes)),
.lsdaIndexArraySectionOffset = lsda_base_offset +
info.lsdas_lookup.items[page.start] * @sizeOf(macho.unwind_info_section_header_lsda_index_entry),
- });
+ }), .little);
}
const last_rec = info.records.items[info.records.items.len - 1].getUnwindRecord(macho_file);
const sentinel_address = @as(u32, @intCast(last_rec.getAtomAddress(macho_file) + last_rec.length - seg.vmaddr));
- try writer.writeStruct(macho.unwind_info_section_header_index_entry{
+ try writer.writeStruct(@as(macho.unwind_info_section_header_index_entry, .{
.functionOffset = sentinel_address,
.secondLevelPagesSectionOffset = 0,
.lsdaIndexArraySectionOffset = lsda_base_offset +
@as(u32, @intCast(info.lsdas.items.len)) * @sizeOf(macho.unwind_info_section_header_lsda_index_entry),
- });
+ }), .little);
for (info.lsdas.items) |index| {
const rec = info.records.items[index].getUnwindRecord(macho_file);
- try writer.writeStruct(macho.unwind_info_section_header_lsda_index_entry{
+ try writer.writeStruct(@as(macho.unwind_info_section_header_lsda_index_entry, .{
.functionOffset = @as(u32, @intCast(rec.getAtomAddress(macho_file) - seg.vmaddr)),
.lsdaOffset = @as(u32, @intCast(rec.getLsdaAddress(macho_file) - seg.vmaddr)),
- });
+ }), .little);
}
for (info.pages.items) |page| {
- const start = stream.pos;
- try page.write(info, macho_file, writer);
- const nwritten = stream.pos - start;
+ const start = writer.end;
+ try page.write(info, macho_file, &writer);
+ const nwritten = writer.end - start;
if (nwritten < second_level_page_bytes) {
const padding = math.cast(usize, second_level_page_bytes - nwritten) orelse return error.Overflow;
- try writer.writeByteNTimes(0, padding);
+ try writer.splatByteAll(0, padding);
}
}
- @memset(buffer[stream.pos..], 0);
+ @memset(buffer[writer.end..], 0);
}
fn getOrPutPersonalityFunction(info: *UnwindInfo, ref: MachO.Ref) error{TooManyPersonalities}!u2 {
@@ -611,33 +610,33 @@ const Page = struct {
} };
}
- fn write(page: Page, info: UnwindInfo, macho_file: *MachO, writer: anytype) !void {
+ fn write(page: Page, info: UnwindInfo, macho_file: *MachO, writer: *Writer) !void {
const seg = macho_file.getTextSegment();
switch (page.kind) {
.regular => {
- try writer.writeStruct(macho.unwind_info_regular_second_level_page_header{
+ try writer.writeStruct(@as(macho.unwind_info_regular_second_level_page_header, .{
.entryPageOffset = @sizeOf(macho.unwind_info_regular_second_level_page_header),
.entryCount = page.count,
- });
+ }), .little);
for (info.records.items[page.start..][0..page.count]) |ref| {
const rec = ref.getUnwindRecord(macho_file);
- try writer.writeStruct(macho.unwind_info_regular_second_level_entry{
+ try writer.writeStruct(@as(macho.unwind_info_regular_second_level_entry, .{
.functionOffset = @as(u32, @intCast(rec.getAtomAddress(macho_file) - seg.vmaddr)),
.encoding = rec.enc.enc,
- });
+ }), .little);
}
},
.compressed => {
const entry_offset = @sizeOf(macho.unwind_info_compressed_second_level_page_header) +
@as(u16, @intCast(page.page_encodings_count)) * @sizeOf(u32);
- try writer.writeStruct(macho.unwind_info_compressed_second_level_page_header{
+ try writer.writeStruct(@as(macho.unwind_info_compressed_second_level_page_header, .{
.entryPageOffset = entry_offset,
.entryCount = page.count,
.encodingsPageOffset = @sizeOf(macho.unwind_info_compressed_second_level_page_header),
.encodingsCount = page.page_encodings_count,
- });
+ }), .little);
for (page.page_encodings[0..page.page_encodings_count]) |enc| {
try writer.writeInt(u32, enc.enc, .little);
@@ -656,7 +655,7 @@ const Page = struct {
.funcOffset = @as(u24, @intCast(rec.getAtomAddress(macho_file) - first_rec.getAtomAddress(macho_file))),
.encodingIndex = @as(u8, @intCast(enc_index)),
};
- try writer.writeStruct(compressed);
+ try writer.writeStruct(compressed, .little);
}
},
}
@@ -673,7 +672,7 @@ const macho = std.macho;
const math = std.math;
const mem = std.mem;
const trace = @import("../../tracy.zig").trace;
-const Writer = std.io.Writer;
+const Writer = std.Io.Writer;
const Allocator = mem.Allocator;
const Atom = @import("Atom.zig");
diff --git a/src/link/MachO/dyld_info/Rebase.zig b/src/link/MachO/dyld_info/Rebase.zig
index dae0d3ffeb..2b04a79d41 100644
--- a/src/link/MachO/dyld_info/Rebase.zig
+++ b/src/link/MachO/dyld_info/Rebase.zig
@@ -110,12 +110,14 @@ pub fn updateSize(rebase: *Rebase, macho_file: *MachO) !void {
fn finalize(rebase: *Rebase, gpa: Allocator) !void {
if (rebase.entries.items.len == 0) return;
- const writer = rebase.buffer.writer(gpa);
-
log.debug("rebase opcodes", .{});
std.mem.sort(Entry, rebase.entries.items, {}, Entry.lessThan);
+ var allocating: std.Io.Writer.Allocating = .fromArrayList(gpa, &rebase.buffer);
+ defer rebase.buffer = allocating.toArrayList();
+ const writer = &allocating.writer;
+
try setTypePointer(writer);
var start: usize = 0;
@@ -226,13 +228,13 @@ fn setTypePointer(writer: anytype) !void {
fn setSegmentOffset(segment_id: u8, offset: u64, writer: anytype) !void {
log.debug(">>> set segment: {d} and offset: {x}", .{ segment_id, offset });
try writer.writeByte(macho.REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @as(u4, @truncate(segment_id)));
- try std.leb.writeUleb128(writer, offset);
+ try writer.writeUleb128(offset);
}
fn rebaseAddAddr(addr: u64, writer: anytype) !void {
log.debug(">>> rebase with add: {x}", .{addr});
try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB);
- try std.leb.writeUleb128(writer, addr);
+ try writer.writeUleb128(addr);
}
fn rebaseTimes(count: usize, writer: anytype) !void {
@@ -241,15 +243,15 @@ fn rebaseTimes(count: usize, writer: anytype) !void {
try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_IMM_TIMES | @as(u4, @truncate(count)));
} else {
try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_ULEB_TIMES);
- try std.leb.writeUleb128(writer, count);
+ try writer.writeUleb128(count);
}
}
fn rebaseTimesSkip(count: usize, skip: u64, writer: anytype) !void {
log.debug(">>> rebase with count: {d} and skip: {x}", .{ count, skip });
try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB);
- try std.leb.writeUleb128(writer, count);
- try std.leb.writeUleb128(writer, skip);
+ try writer.writeUleb128(count);
+ try writer.writeUleb128(skip);
}
fn addAddr(addr: u64, writer: anytype) !void {
@@ -262,7 +264,7 @@ fn addAddr(addr: u64, writer: anytype) !void {
}
}
try writer.writeByte(macho.REBASE_OPCODE_ADD_ADDR_ULEB);
- try std.leb.writeUleb128(writer, addr);
+ try writer.writeUleb128(addr);
}
fn done(writer: anytype) !void {
@@ -649,7 +651,6 @@ test "rebase - composite" {
const std = @import("std");
const assert = std.debug.assert;
-const leb = std.leb;
const log = std.log.scoped(.link_dyld_info);
const macho = std.macho;
const mem = std.mem;
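
Rebase.finalize above now borrows its ArrayList buffer as a std.Io.Writer.Allocating and hands it back when the scope ends. A sketch of that borrow/return pattern, assuming the same API shown in the patch; the opcode byte and ULEB128 operand are arbitrary example values:

    const std = @import("std");

    test "Allocating.fromArrayList sketch" {
        const gpa = std.testing.allocator;
        var buffer: std.ArrayList(u8) = .empty;
        defer buffer.deinit(gpa);

        var allocating: std.Io.Writer.Allocating = .fromArrayList(gpa, &buffer);
        // Give the (possibly grown) list back to `buffer` when the scope ends.
        defer buffer = allocating.toArrayList();
        const writer = &allocating.writer;

        try writer.writeByte(0x11); // e.g. an opcode byte
        try writer.writeUleb128(@as(u64, 624485)); // a multi-byte ULEB128 operand
    }
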
diff --git a/src/link/MachO/dyld_info/Trie.zig b/src/link/MachO/dyld_info/Trie.zig
index 18ea348e7a..ea6f1a0666 100644
--- a/src/link/MachO/dyld_info/Trie.zig
+++ b/src/link/MachO/dyld_info/Trie.zig
@@ -170,8 +170,13 @@ fn finalize(self: *Trie, allocator: Allocator) !void {
}
try self.buffer.ensureTotalCapacityPrecise(allocator, size);
+
+ var allocating: std.Io.Writer.Allocating = .fromArrayList(allocator, &self.buffer);
+ defer self.buffer = allocating.toArrayList();
+ const writer = &allocating.writer;
+
for (ordered_nodes.items) |node_index| {
- try self.writeNode(node_index, self.buffer.writer(allocator));
+ try self.writeNode(node_index, writer);
}
}
@@ -232,7 +237,7 @@ pub fn deinit(self: *Trie, allocator: Allocator) void {
self.buffer.deinit(allocator);
}
-pub fn write(self: Trie, writer: anytype) !void {
+pub fn write(self: Trie, writer: *std.Io.Writer) !void {
if (self.buffer.items.len == 0) return;
try writer.writeAll(self.buffer.items);
}
@@ -243,7 +248,7 @@ pub fn write(self: Trie, writer: anytype) !void {
/// iterate over `Trie.ordered_nodes` and call this method on each node.
/// This is one of the requirements of the MachO.
/// Panics if `finalize` was not called before calling this method.
-fn writeNode(self: *Trie, node_index: Node.Index, writer: anytype) !void {
+fn writeNode(self: *Trie, node_index: Node.Index, writer: *std.Io.Writer) !void {
const slice = self.nodes.slice();
const edges = slice.items(.edges)[node_index];
const is_terminal = slice.items(.is_terminal)[node_index];
@@ -253,21 +258,21 @@ fn writeNode(self: *Trie, node_index: Node.Index, writer: anytype) !void {
if (is_terminal) {
// Terminal node info: encode export flags and vmaddr offset of this symbol.
var info_buf: [@sizeOf(u64) * 2]u8 = undefined;
- var info_stream = std.io.fixedBufferStream(&info_buf);
+ var info_stream: std.Io.Writer = .fixed(&info_buf);
// TODO Implement for special flags.
assert(export_flags & macho.EXPORT_SYMBOL_FLAGS_REEXPORT == 0 and
export_flags & macho.EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER == 0);
- try leb.writeUleb128(info_stream.writer(), export_flags);
- try leb.writeUleb128(info_stream.writer(), vmaddr_offset);
+ try info_stream.writeUleb128(export_flags);
+ try info_stream.writeUleb128(vmaddr_offset);
// Encode the size of the terminal node info.
var size_buf: [@sizeOf(u64)]u8 = undefined;
- var size_stream = std.io.fixedBufferStream(&size_buf);
- try leb.writeUleb128(size_stream.writer(), info_stream.pos);
+ var size_stream: std.Io.Writer = .fixed(&size_buf);
+ try size_stream.writeUleb128(info_stream.end);
// Now, write them to the output stream.
- try writer.writeAll(size_buf[0..size_stream.pos]);
- try writer.writeAll(info_buf[0..info_stream.pos]);
+ try writer.writeAll(size_buf[0..size_stream.end]);
+ try writer.writeAll(info_buf[0..info_stream.end]);
} else {
// Non-terminal node is delimited by 0 byte.
try writer.writeByte(0);
@@ -280,7 +285,7 @@ fn writeNode(self: *Trie, node_index: Node.Index, writer: anytype) !void {
// Write edge label and offset to next node in trie.
try writer.writeAll(edge.label);
try writer.writeByte(0);
- try leb.writeUleb128(writer, slice.items(.trie_offset)[edge.node]);
+ try writer.writeUleb128(slice.items(.trie_offset)[edge.node]);
}
}
@@ -414,7 +419,6 @@ test "ordering bug" {
}
const assert = std.debug.assert;
-const leb = std.leb;
const log = std.log.scoped(.macho);
const macho = std.macho;
const mem = std.mem;
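
writeNode above now encodes terminal-node info with two small fixed writers: one for the ULEB128 payload and one for its ULEB128 length prefix. A standalone sketch of that length-prefix step under the same assumptions; the flag and offset values, buffer sizes, and test name are placeholders:

    const std = @import("std");

    test "uleb128 length-prefix sketch" {
        var info_buf: [2 * @sizeOf(u64)]u8 = undefined;
        var info_stream: std.Io.Writer = .fixed(&info_buf);
        try info_stream.writeUleb128(@as(u64, 0)); // export flags (placeholder)
        try info_stream.writeUleb128(@as(u64, 0x4000)); // vmaddr offset (placeholder)

        var size_buf: [@sizeOf(u64)]u8 = undefined;
        var size_stream: std.Io.Writer = .fixed(&size_buf);
        try size_stream.writeUleb128(info_stream.end); // length prefix

        var out: std.Io.Writer.Allocating = .init(std.testing.allocator);
        defer out.deinit();
        try out.writer.writeAll(size_buf[0..size_stream.end]);
        try out.writer.writeAll(info_buf[0..info_stream.end]);

        try std.testing.expect(out.written().len == size_stream.end + info_stream.end);
    }
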
diff --git a/src/link/MachO/dyld_info/bind.zig b/src/link/MachO/dyld_info/bind.zig
index bc5484a846..dc3c17ebee 100644
--- a/src/link/MachO/dyld_info/bind.zig
+++ b/src/link/MachO/dyld_info/bind.zig
@@ -132,12 +132,14 @@ pub const Bind = struct {
fn finalize(self: *Self, gpa: Allocator, ctx: *MachO) !void {
if (self.entries.items.len == 0) return;
- const writer = self.buffer.writer(gpa);
-
log.debug("bind opcodes", .{});
std.mem.sort(Entry, self.entries.items, ctx, Entry.lessThan);
+ var allocating: std.Io.Writer.Allocating = .fromArrayList(gpa, &self.buffer);
+ defer self.buffer = allocating.toArrayList();
+ const writer = &allocating.writer;
+
var start: usize = 0;
var seg_id: ?u8 = null;
for (self.entries.items, 0..) |entry, i| {
@@ -151,7 +153,7 @@ pub const Bind = struct {
try done(writer);
}
- fn finalizeSegment(entries: []const Entry, ctx: *MachO, writer: anytype) !void {
+ fn finalizeSegment(entries: []const Entry, ctx: *MachO, writer: *std.Io.Writer) !void {
if (entries.len == 0) return;
const seg_id = entries[0].segment_id;
@@ -263,7 +265,7 @@ pub const Bind = struct {
}
}
- pub fn write(self: Self, writer: anytype) !void {
+ pub fn write(self: Self, writer: *std.Io.Writer) !void {
try writer.writeAll(self.buffer.items);
}
};
@@ -385,12 +387,14 @@ pub const WeakBind = struct {
fn finalize(self: *Self, gpa: Allocator, ctx: *MachO) !void {
if (self.entries.items.len == 0) return;
- const writer = self.buffer.writer(gpa);
-
log.debug("weak bind opcodes", .{});
std.mem.sort(Entry, self.entries.items, ctx, Entry.lessThan);
+ var allocating: std.Io.Writer.Allocating = .fromArrayList(gpa, &self.buffer);
+ defer self.buffer = allocating.toArrayList();
+ const writer = &allocating.writer;
+
var start: usize = 0;
var seg_id: ?u8 = null;
for (self.entries.items, 0..) |entry, i| {
@@ -404,7 +408,7 @@ pub const WeakBind = struct {
try done(writer);
}
- fn finalizeSegment(entries: []const Entry, ctx: *MachO, writer: anytype) !void {
+ fn finalizeSegment(entries: []const Entry, ctx: *MachO, writer: *std.Io.Writer) !void {
if (entries.len == 0) return;
const seg_id = entries[0].segment_id;
@@ -505,7 +509,7 @@ pub const WeakBind = struct {
}
}
- pub fn write(self: Self, writer: anytype) !void {
+ pub fn write(self: Self, writer: *std.Io.Writer) !void {
try writer.writeAll(self.buffer.items);
}
};
@@ -555,8 +559,6 @@ pub const LazyBind = struct {
fn finalize(self: *Self, gpa: Allocator, ctx: *MachO) !void {
try self.offsets.ensureTotalCapacityPrecise(gpa, self.entries.items.len);
- const writer = self.buffer.writer(gpa);
-
log.debug("lazy bind opcodes", .{});
var addend: i64 = 0;
@@ -578,6 +580,9 @@ pub const LazyBind = struct {
break :ord macho.BIND_SPECIAL_DYLIB_SELF;
};
+ var allocating: std.Io.Writer.Allocating = .fromArrayList(gpa, &self.buffer);
+ defer self.buffer = allocating.toArrayList();
+ const writer = &allocating.writer;
try setSegmentOffset(entry.segment_id, entry.offset, writer);
try setSymbol(name, flags, writer);
try setDylibOrdinal(ordinal, writer);
@@ -592,30 +597,30 @@ pub const LazyBind = struct {
}
}
- pub fn write(self: Self, writer: anytype) !void {
+ pub fn write(self: Self, writer: *std.Io.Writer) !void {
try writer.writeAll(self.buffer.items);
}
};
-fn setSegmentOffset(segment_id: u8, offset: u64, writer: anytype) !void {
+fn setSegmentOffset(segment_id: u8, offset: u64, writer: *std.Io.Writer) !void {
log.debug(">>> set segment: {d} and offset: {x}", .{ segment_id, offset });
try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @as(u4, @truncate(segment_id)));
- try std.leb.writeUleb128(writer, offset);
+ try writer.writeUleb128(offset);
}
-fn setSymbol(name: []const u8, flags: u8, writer: anytype) !void {
+fn setSymbol(name: []const u8, flags: u8, writer: *std.Io.Writer) !void {
log.debug(">>> set symbol: {s} with flags: {x}", .{ name, flags });
try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | @as(u4, @truncate(flags)));
try writer.writeAll(name);
try writer.writeByte(0);
}
-fn setTypePointer(writer: anytype) !void {
+fn setTypePointer(writer: *std.Io.Writer) !void {
log.debug(">>> set type: {d}", .{macho.BIND_TYPE_POINTER});
try writer.writeByte(macho.BIND_OPCODE_SET_TYPE_IMM | @as(u4, @truncate(macho.BIND_TYPE_POINTER)));
}
-fn setDylibOrdinal(ordinal: i16, writer: anytype) !void {
+fn setDylibOrdinal(ordinal: i16, writer: *std.Io.Writer) !void {
if (ordinal <= 0) {
switch (ordinal) {
macho.BIND_SPECIAL_DYLIB_SELF,
@@ -634,23 +639,23 @@ fn setDylibOrdinal(ordinal: i16, writer: anytype) !void {
try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | @as(u4, @truncate(cast)));
} else {
try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
- try std.leb.writeUleb128(writer, cast);
+ try writer.writeUleb128(cast);
}
}
}
-fn setAddend(addend: i64, writer: anytype) !void {
+fn setAddend(addend: i64, writer: *std.Io.Writer) !void {
log.debug(">>> set addend: {x}", .{addend});
try writer.writeByte(macho.BIND_OPCODE_SET_ADDEND_SLEB);
try std.leb.writeIleb128(writer, addend);
}
-fn doBind(writer: anytype) !void {
+fn doBind(writer: *std.Io.Writer) !void {
log.debug(">>> bind", .{});
try writer.writeByte(macho.BIND_OPCODE_DO_BIND);
}
-fn doBindAddAddr(addr: u64, writer: anytype) !void {
+fn doBindAddAddr(addr: u64, writer: *std.Io.Writer) !void {
log.debug(">>> bind with add: {x}", .{addr});
if (std.mem.isAlignedGeneric(u64, addr, @sizeOf(u64))) {
const imm = @divExact(addr, @sizeOf(u64));
@@ -662,29 +667,28 @@ fn doBindAddAddr(addr: u64, writer: anytype) !void {
}
}
try writer.writeByte(macho.BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB);
- try std.leb.writeUleb128(writer, addr);
+ try writer.writeUleb128(addr);
}
-fn doBindTimesSkip(count: usize, skip: u64, writer: anytype) !void {
+fn doBindTimesSkip(count: usize, skip: u64, writer: *std.Io.Writer) !void {
log.debug(">>> bind with count: {d} and skip: {x}", .{ count, skip });
try writer.writeByte(macho.BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB);
- try std.leb.writeUleb128(writer, count);
- try std.leb.writeUleb128(writer, skip);
+ try writer.writeUleb128(count);
+ try writer.writeUleb128(skip);
}
-fn addAddr(addr: u64, writer: anytype) !void {
+fn addAddr(addr: u64, writer: *std.Io.Writer) !void {
log.debug(">>> add: {x}", .{addr});
try writer.writeByte(macho.BIND_OPCODE_ADD_ADDR_ULEB);
- try std.leb.writeUleb128(writer, addr);
+ try writer.writeUleb128(addr);
}
-fn done(writer: anytype) !void {
+fn done(writer: *std.Io.Writer) !void {
log.debug(">>> done", .{});
try writer.writeByte(macho.BIND_OPCODE_DONE);
}
const assert = std.debug.assert;
-const leb = std.leb;
const log = std.log.scoped(.link_dyld_info);
const macho = std.macho;
const mem = std.mem;
diff --git a/src/link/MachO/load_commands.zig b/src/link/MachO/load_commands.zig
index 7c7b4416f4..59717dc9c9 100644
--- a/src/link/MachO/load_commands.zig
+++ b/src/link/MachO/load_commands.zig
@@ -3,9 +3,9 @@ const assert = std.debug.assert;
const log = std.log.scoped(.link);
const macho = std.macho;
const mem = std.mem;
-const Writer = std.io.Writer;
+const Writer = std.Io.Writer;
+const Allocator = std.mem.Allocator;
-const Allocator = mem.Allocator;
const DebugSymbols = @import("DebugSymbols.zig");
const Dylib = @import("Dylib.zig");
const MachO = @import("../MachO.zig");
@@ -181,22 +181,22 @@ pub fn calcMinHeaderPadSize(macho_file: *MachO) !u32 {
return offset;
}
-pub fn writeDylinkerLC(writer: anytype) !void {
+pub fn writeDylinkerLC(writer: *Writer) !void {
const name_len = mem.sliceTo(default_dyld_path, 0).len;
const cmdsize = @as(u32, @intCast(mem.alignForward(
u64,
@sizeOf(macho.dylinker_command) + name_len,
@sizeOf(u64),
)));
- try writer.writeStruct(macho.dylinker_command{
+ try writer.writeStruct(@as(macho.dylinker_command, .{
.cmd = .LOAD_DYLINKER,
.cmdsize = cmdsize,
.name = @sizeOf(macho.dylinker_command),
- });
+ }), .little);
try writer.writeAll(mem.sliceTo(default_dyld_path, 0));
const padding = cmdsize - @sizeOf(macho.dylinker_command) - name_len;
if (padding > 0) {
- try writer.writeByteNTimes(0, padding);
+ try writer.splatByteAll(0, padding);
}
}
@@ -208,14 +208,14 @@ const WriteDylibLCCtx = struct {
compatibility_version: u32 = 0x10000,
};
-pub fn writeDylibLC(ctx: WriteDylibLCCtx, writer: anytype) !void {
+pub fn writeDylibLC(ctx: WriteDylibLCCtx, writer: *Writer) !void {
const name_len = ctx.name.len + 1;
const cmdsize = @as(u32, @intCast(mem.alignForward(
u64,
@sizeOf(macho.dylib_command) + name_len,
@sizeOf(u64),
)));
- try writer.writeStruct(macho.dylib_command{
+ try writer.writeStruct(@as(macho.dylib_command, .{
.cmd = ctx.cmd,
.cmdsize = cmdsize,
.dylib = .{
@@ -224,16 +224,16 @@ pub fn writeDylibLC(ctx: WriteDylibLCCtx, writer: anytype) !void {
.current_version = ctx.current_version,
.compatibility_version = ctx.compatibility_version,
},
- });
+ }), .little);
try writer.writeAll(ctx.name);
try writer.writeByte(0);
const padding = cmdsize - @sizeOf(macho.dylib_command) - name_len;
if (padding > 0) {
- try writer.writeByteNTimes(0, padding);
+ try writer.splatByteAll(0, padding);
}
}
-pub fn writeDylibIdLC(macho_file: *MachO, writer: anytype) !void {
+pub fn writeDylibIdLC(macho_file: *MachO, writer: *Writer) !void {
const comp = macho_file.base.comp;
const gpa = comp.gpa;
assert(comp.config.output_mode == .Lib and comp.config.link_mode == .dynamic);
@@ -259,26 +259,26 @@ pub fn writeDylibIdLC(macho_file: *MachO, writer: anytype) !void {
}, writer);
}
-pub fn writeRpathLC(rpath: []const u8, writer: anytype) !void {
+pub fn writeRpathLC(rpath: []const u8, writer: *Writer) !void {
const rpath_len = rpath.len + 1;
const cmdsize = @as(u32, @intCast(mem.alignForward(
u64,
@sizeOf(macho.rpath_command) + rpath_len,
@sizeOf(u64),
)));
- try writer.writeStruct(macho.rpath_command{
+ try writer.writeStruct(@as(macho.rpath_command, .{
.cmdsize = cmdsize,
.path = @sizeOf(macho.rpath_command),
- });
+ }), .little);
try writer.writeAll(rpath);
try writer.writeByte(0);
const padding = cmdsize - @sizeOf(macho.rpath_command) - rpath_len;
if (padding > 0) {
- try writer.writeByteNTimes(0, padding);
+ try writer.splatByteAll(0, padding);
}
}
-pub fn writeVersionMinLC(platform: MachO.Platform, sdk_version: ?std.SemanticVersion, writer: anytype) !void {
+pub fn writeVersionMinLC(platform: MachO.Platform, sdk_version: ?std.SemanticVersion, writer: *Writer) !void {
const cmd: macho.LC = switch (platform.os_tag) {
.macos => .VERSION_MIN_MACOSX,
.ios => .VERSION_MIN_IPHONEOS,
@@ -296,9 +296,9 @@ pub fn writeVersionMinLC(platform: MachO.Platform, sdk_version: ?std.SemanticVer
}));
}
-pub fn writeBuildVersionLC(platform: MachO.Platform, sdk_version: ?std.SemanticVersion, writer: anytype) !void {
+pub fn writeBuildVersionLC(platform: MachO.Platform, sdk_version: ?std.SemanticVersion, writer: *Writer) !void {
const cmdsize = @sizeOf(macho.build_version_command) + @sizeOf(macho.build_tool_version);
- try writer.writeStruct(macho.build_version_command{
+ try writer.writeStruct(@as(macho.build_version_command, .{
.cmdsize = cmdsize,
.platform = platform.toApplePlatform(),
.minos = platform.toAppleVersion(),
@@ -307,7 +307,7 @@ pub fn writeBuildVersionLC(platform: MachO.Platform, sdk_version: ?std.SemanticV
else
platform.toAppleVersion(),
.ntools = 1,
- });
+ }), .little);
try writer.writeAll(mem.asBytes(&macho.build_tool_version{
.tool = .ZIG,
.version = 0x0,
diff --git a/src/link/MachO/relocatable.zig b/src/link/MachO/relocatable.zig
index e962ce3fd2..a9f4d51ea6 100644
--- a/src/link/MachO/relocatable.zig
+++ b/src/link/MachO/relocatable.zig
@@ -205,35 +205,32 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
state_log.debug("ar_symtab\n{f}\n", .{ar_symtab.fmt(macho_file)});
}
- var buffer = std.array_list.Managed(u8).init(gpa);
- defer buffer.deinit();
- try buffer.ensureTotalCapacityPrecise(total_size);
- const writer = buffer.writer();
+ const buffer = try gpa.alloc(u8, total_size);
+ defer gpa.free(buffer);
+ var writer: Writer = .fixed(buffer);
// Write magic
- try writer.writeAll(Archive.ARMAG);
+ writer.writeAll(Archive.ARMAG) catch unreachable;
// Write symtab
- ar_symtab.write(format, macho_file, writer) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- else => |e| return diags.fail("failed to write archive symbol table: {s}", .{@errorName(e)}),
- };
+ ar_symtab.write(format, macho_file, &writer) catch |err|
+ return diags.fail("failed to write archive symbol table: {t}", .{err});
// Write object files
for (files.items) |index| {
- const aligned = mem.alignForward(usize, buffer.items.len, 2);
- const padding = aligned - buffer.items.len;
+ const aligned = mem.alignForward(usize, writer.end, 2);
+ const padding = aligned - writer.end;
if (padding > 0) {
- try writer.writeByteNTimes(0, padding);
+ writer.splatByteAll(0, padding) catch unreachable;
}
- macho_file.getFile(index).?.writeAr(format, macho_file, writer) catch |err|
- return diags.fail("failed to write archive: {s}", .{@errorName(err)});
+ macho_file.getFile(index).?.writeAr(format, macho_file, &writer) catch |err|
+ return diags.fail("failed to write archive: {t}", .{err});
}
- assert(buffer.items.len == total_size);
+ assert(writer.end == total_size);
try macho_file.setEndPos(total_size);
- try macho_file.pwriteAll(buffer.items, 0);
+ try macho_file.pwriteAll(writer.buffered(), 0);
if (diags.hasErrors()) return error.LinkFailure;
}
@@ -693,8 +690,7 @@ fn writeLoadCommands(macho_file: *MachO) error{ LinkFailure, OutOfMemory }!struc
const buffer = try gpa.alloc(u8, needed_size);
defer gpa.free(buffer);
- var stream = std.io.fixedBufferStream(buffer);
- const writer = stream.writer();
+ var writer: Writer = .fixed(buffer);
var ncmds: usize = 0;
@@ -702,43 +698,43 @@ fn writeLoadCommands(macho_file: *MachO) error{ LinkFailure, OutOfMemory }!struc
{
assert(macho_file.segments.items.len == 1);
const seg = macho_file.segments.items[0];
- writer.writeStruct(seg) catch |err| switch (err) {
- error.NoSpaceLeft => unreachable,
+ writer.writeStruct(seg, .little) catch |err| switch (err) {
+ error.WriteFailed => unreachable,
};
for (macho_file.sections.items(.header)) |header| {
- writer.writeStruct(header) catch |err| switch (err) {
- error.NoSpaceLeft => unreachable,
+ writer.writeStruct(header, .little) catch |err| switch (err) {
+ error.WriteFailed => unreachable,
};
}
ncmds += 1;
}
- writer.writeStruct(macho_file.data_in_code_cmd) catch |err| switch (err) {
- error.NoSpaceLeft => unreachable,
+ writer.writeStruct(macho_file.data_in_code_cmd, .little) catch |err| switch (err) {
+ error.WriteFailed => unreachable,
};
ncmds += 1;
- writer.writeStruct(macho_file.symtab_cmd) catch |err| switch (err) {
- error.NoSpaceLeft => unreachable,
+ writer.writeStruct(macho_file.symtab_cmd, .little) catch |err| switch (err) {
+ error.WriteFailed => unreachable,
};
ncmds += 1;
- writer.writeStruct(macho_file.dysymtab_cmd) catch |err| switch (err) {
- error.NoSpaceLeft => unreachable,
+ writer.writeStruct(macho_file.dysymtab_cmd, .little) catch |err| switch (err) {
+ error.WriteFailed => unreachable,
};
ncmds += 1;
if (macho_file.platform.isBuildVersionCompatible()) {
- load_commands.writeBuildVersionLC(macho_file.platform, macho_file.sdk_version, writer) catch |err| switch (err) {
- error.NoSpaceLeft => unreachable,
+ load_commands.writeBuildVersionLC(macho_file.platform, macho_file.sdk_version, &writer) catch |err| switch (err) {
+ error.WriteFailed => unreachable,
};
ncmds += 1;
} else {
- load_commands.writeVersionMinLC(macho_file.platform, macho_file.sdk_version, writer) catch |err| switch (err) {
- error.NoSpaceLeft => unreachable,
+ load_commands.writeVersionMinLC(macho_file.platform, macho_file.sdk_version, &writer) catch |err| switch (err) {
+ error.WriteFailed => unreachable,
};
ncmds += 1;
}
- assert(stream.pos == needed_size);
+ assert(writer.end == needed_size);
try macho_file.pwriteAll(buffer, @sizeOf(macho.mach_header_64));
diff --git a/src/link/MachO/synthetic.zig b/src/link/MachO/synthetic.zig
index 22c44d2f7a..6761a4a340 100644
--- a/src/link/MachO/synthetic.zig
+++ b/src/link/MachO/synthetic.zig
@@ -27,7 +27,7 @@ pub const GotSection = struct {
return got.symbols.items.len * @sizeOf(u64);
}
- pub fn write(got: GotSection, macho_file: *MachO, writer: anytype) !void {
+ pub fn write(got: GotSection, macho_file: *MachO, writer: *Writer) !void {
const tracy = trace(@src());
defer tracy.end();
for (got.symbols.items) |ref| {
@@ -89,7 +89,7 @@ pub const StubsSection = struct {
return stubs.symbols.items.len * header.reserved2;
}
- pub fn write(stubs: StubsSection, macho_file: *MachO, writer: anytype) !void {
+ pub fn write(stubs: StubsSection, macho_file: *MachO, writer: *Writer) !void {
const tracy = trace(@src());
defer tracy.end();
const cpu_arch = macho_file.getTarget().cpu.arch;
@@ -174,7 +174,7 @@ pub const StubsHelperSection = struct {
return s;
}
- pub fn write(stubs_helper: StubsHelperSection, macho_file: *MachO, writer: anytype) !void {
+ pub fn write(stubs_helper: StubsHelperSection, macho_file: *MachO, writer: *Writer) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -217,7 +217,7 @@ pub const StubsHelperSection = struct {
}
}
- fn writePreamble(stubs_helper: StubsHelperSection, macho_file: *MachO, writer: anytype) !void {
+ fn writePreamble(stubs_helper: StubsHelperSection, macho_file: *MachO, writer: *Writer) !void {
_ = stubs_helper;
const obj = macho_file.getInternalObject().?;
const cpu_arch = macho_file.getTarget().cpu.arch;
@@ -273,7 +273,7 @@ pub const LaSymbolPtrSection = struct {
return macho_file.stubs.symbols.items.len * @sizeOf(u64);
}
- pub fn write(laptr: LaSymbolPtrSection, macho_file: *MachO, writer: anytype) !void {
+ pub fn write(laptr: LaSymbolPtrSection, macho_file: *MachO, writer: *Writer) !void {
const tracy = trace(@src());
defer tracy.end();
_ = laptr;
@@ -323,7 +323,7 @@ pub const TlvPtrSection = struct {
return tlv.symbols.items.len * @sizeOf(u64);
}
- pub fn write(tlv: TlvPtrSection, macho_file: *MachO, writer: anytype) !void {
+ pub fn write(tlv: TlvPtrSection, macho_file: *MachO, writer: *Writer) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -394,7 +394,7 @@ pub const ObjcStubsSection = struct {
return objc.symbols.items.len * entrySize(macho_file.getTarget().cpu.arch);
}
- pub fn write(objc: ObjcStubsSection, macho_file: *MachO, writer: anytype) !void {
+ pub fn write(objc: ObjcStubsSection, macho_file: *MachO, writer: *Writer) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -487,7 +487,7 @@ pub const Indsymtab = struct {
macho_file.dysymtab_cmd.nindirectsyms = ind.nsyms(macho_file);
}
- pub fn write(ind: Indsymtab, macho_file: *MachO, writer: anytype) !void {
+ pub fn write(ind: Indsymtab, macho_file: *MachO, writer: *Writer) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -564,7 +564,7 @@ pub const DataInCode = struct {
macho_file.data_in_code_cmd.datasize = math.cast(u32, dice.size()) orelse return error.Overflow;
}
- pub fn write(dice: DataInCode, macho_file: *MachO, writer: anytype) !void {
+ pub fn write(dice: DataInCode, macho_file: *MachO, writer: *Writer) !void {
const base_address = if (!macho_file.base.isRelocatable())
macho_file.getTextSegment().vmaddr
else
@@ -572,11 +572,11 @@ pub const DataInCode = struct {
for (dice.entries.items) |entry| {
const atom_address = entry.atom_ref.getAtom(macho_file).?.getAddress(macho_file);
const offset = atom_address + entry.offset - base_address;
- try writer.writeStruct(macho.data_in_code_entry{
+ try writer.writeStruct(@as(macho.data_in_code_entry, .{
.offset = @intCast(offset),
.length = entry.length,
.kind = entry.kind,
- });
+ }), .little);
}
}
@@ -594,7 +594,7 @@ const assert = std.debug.assert;
const macho = std.macho;
const math = std.math;
const Allocator = std.mem.Allocator;
-const Writer = std.io.Writer;
+const Writer = std.Io.Writer;
const trace = @import("../../tracy.zig").trace;
const MachO = @import("../MachO.zig");
diff --git a/src/link/Wasm/Flush.zig b/src/link/Wasm/Flush.zig
index cc8df06743..f728a85786 100644
--- a/src/link/Wasm/Flush.zig
+++ b/src/link/Wasm/Flush.zig
@@ -19,6 +19,7 @@ const mem = std.mem;
const leb = std.leb;
const log = std.log.scoped(.link);
const assert = std.debug.assert;
+const ArrayList = std.ArrayList;
/// Ordered list of data segments that will appear in the final binary.
/// When sorted, to-be-merged segments will be made adjacent.
@@ -27,9 +28,9 @@ data_segments: std.AutoArrayHashMapUnmanaged(Wasm.DataSegmentId, u32) = .empty,
/// Each time a `data_segment` offset equals zero it indicates a new group, and
/// the next element in this array will contain the total merged segment size.
/// Value is the virtual memory address of the end of the segment.
-data_segment_groups: std.ArrayListUnmanaged(DataSegmentGroup) = .empty,
+data_segment_groups: ArrayList(DataSegmentGroup) = .empty,
-binary_bytes: std.ArrayListUnmanaged(u8) = .empty,
+binary_bytes: ArrayList(u8) = .empty,
missing_exports: std.AutoArrayHashMapUnmanaged(String, void) = .empty,
function_imports: std.AutoArrayHashMapUnmanaged(String, Wasm.FunctionImportId) = .empty,
global_imports: std.AutoArrayHashMapUnmanaged(String, Wasm.GlobalImportId) = .empty,
@@ -563,8 +564,6 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
try binary_bytes.appendSlice(gpa, &std.wasm.magic ++ &std.wasm.version);
assert(binary_bytes.items.len == 8);
- const binary_writer = binary_bytes.writer(gpa);
-
// Type section.
for (f.function_imports.values()) |id| {
try f.func_types.put(gpa, id.functionType(wasm), {});
@@ -576,16 +575,16 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
for (f.func_types.keys()) |func_type_index| {
const func_type = func_type_index.ptr(wasm);
- try leb.writeUleb128(binary_writer, std.wasm.function_type);
+ try appendLeb128(gpa, binary_bytes, std.wasm.function_type);
const params = func_type.params.slice(wasm);
- try leb.writeUleb128(binary_writer, @as(u32, @intCast(params.len)));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(params.len)));
for (params) |param_ty| {
- try leb.writeUleb128(binary_writer, @intFromEnum(param_ty));
+ try appendLeb128(gpa, binary_bytes, @intFromEnum(param_ty));
}
const returns = func_type.returns.slice(wasm);
- try leb.writeUleb128(binary_writer, @as(u32, @intCast(returns.len)));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(returns.len)));
for (returns) |ret_ty| {
- try leb.writeUleb128(binary_writer, @intFromEnum(ret_ty));
+ try appendLeb128(gpa, binary_bytes, @intFromEnum(ret_ty));
}
}
replaceVecSectionHeader(binary_bytes, header_offset, .type, @intCast(f.func_types.entries.len));
@@ -605,31 +604,31 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
for (f.function_imports.values()) |id| {
const module_name = id.moduleName(wasm).slice(wasm).?;
- try leb.writeUleb128(binary_writer, @as(u32, @intCast(module_name.len)));
- try binary_writer.writeAll(module_name);
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(module_name.len)));
+ try binary_bytes.appendSlice(gpa, module_name);
const name = id.importName(wasm).slice(wasm);
- try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len)));
- try binary_writer.writeAll(name);
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
+ try binary_bytes.appendSlice(gpa, name);
- try binary_writer.writeByte(@intFromEnum(std.wasm.ExternalKind.function));
+ try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.function));
const type_index: FuncTypeIndex = .fromTypeIndex(id.functionType(wasm), f);
- try leb.writeUleb128(binary_writer, @intFromEnum(type_index));
+ try appendLeb128(gpa, binary_bytes, @intFromEnum(type_index));
}
total_imports += f.function_imports.entries.len;
for (wasm.table_imports.values()) |id| {
const table_import = id.value(wasm);
const module_name = table_import.module_name.slice(wasm);
- try leb.writeUleb128(binary_writer, @as(u32, @intCast(module_name.len)));
- try binary_writer.writeAll(module_name);
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(module_name.len)));
+ try binary_bytes.appendSlice(gpa, module_name);
const name = table_import.name.slice(wasm);
- try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len)));
- try binary_writer.writeAll(name);
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
+ try binary_bytes.appendSlice(gpa, name);
- try binary_writer.writeByte(@intFromEnum(std.wasm.ExternalKind.table));
- try leb.writeUleb128(binary_writer, @intFromEnum(@as(std.wasm.RefType, table_import.flags.ref_type.to())));
+ try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.table));
+ try appendLeb128(gpa, binary_bytes, @intFromEnum(@as(std.wasm.RefType, table_import.flags.ref_type.to())));
try emitLimits(gpa, binary_bytes, table_import.limits());
}
total_imports += wasm.table_imports.entries.len;
@@ -650,17 +649,17 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
for (f.global_imports.values()) |id| {
const module_name = id.moduleName(wasm).slice(wasm).?;
- try leb.writeUleb128(binary_writer, @as(u32, @intCast(module_name.len)));
- try binary_writer.writeAll(module_name);
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(module_name.len)));
+ try binary_bytes.appendSlice(gpa, module_name);
const name = id.importName(wasm).slice(wasm);
- try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len)));
- try binary_writer.writeAll(name);
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
+ try binary_bytes.appendSlice(gpa, name);
- try binary_writer.writeByte(@intFromEnum(std.wasm.ExternalKind.global));
+ try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.global));
const global_type = id.globalType(wasm);
- try leb.writeUleb128(binary_writer, @intFromEnum(@as(std.wasm.Valtype, global_type.valtype)));
- try binary_writer.writeByte(@intFromBool(global_type.mutable));
+ try appendLeb128(gpa, binary_bytes, @intFromEnum(@as(std.wasm.Valtype, global_type.valtype)));
+ try binary_bytes.append(gpa, @intFromBool(global_type.mutable));
}
total_imports += f.global_imports.entries.len;
@@ -677,7 +676,7 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
for (wasm.functions.keys()) |function| {
const index: FuncTypeIndex = .fromTypeIndex(function.typeIndex(wasm), f);
- try leb.writeUleb128(binary_writer, @intFromEnum(index));
+ try appendLeb128(gpa, binary_bytes, @intFromEnum(index));
}
replaceVecSectionHeader(binary_bytes, header_offset, .function, @intCast(wasm.functions.count()));
@@ -689,7 +688,7 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
for (wasm.tables.keys()) |table| {
- try leb.writeUleb128(binary_writer, @intFromEnum(@as(std.wasm.RefType, table.refType(wasm))));
+ try appendLeb128(gpa, binary_bytes, @intFromEnum(@as(std.wasm.RefType, table.refType(wasm))));
try emitLimits(gpa, binary_bytes, table.limits(wasm));
}
@@ -743,39 +742,39 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
for (wasm.function_exports.keys(), wasm.function_exports.values()) |exp_name, function_index| {
const name = exp_name.slice(wasm);
- try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len)));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.function));
const func_index = Wasm.OutputFunctionIndex.fromFunctionIndex(wasm, function_index);
- try leb.writeUleb128(binary_writer, @intFromEnum(func_index));
+ try appendLeb128(gpa, binary_bytes, @intFromEnum(func_index));
}
exports_len += wasm.function_exports.entries.len;
if (wasm.export_table and f.indirect_function_table.entries.len > 0) {
const name = "__indirect_function_table";
const index: u32 = @intCast(wasm.tables.getIndex(.__indirect_function_table).?);
- try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len)));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.table));
- try leb.writeUleb128(binary_writer, index);
+ try appendLeb128(gpa, binary_bytes, index);
exports_len += 1;
}
if (export_memory) {
const name = "memory";
- try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len)));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.memory));
- try leb.writeUleb128(binary_writer, @as(u32, 0));
+ try appendLeb128(gpa, binary_bytes, @as(u32, 0));
exports_len += 1;
}
for (wasm.global_exports.items) |exp| {
const name = exp.name.slice(wasm);
- try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len)));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.global));
- try leb.writeUleb128(binary_writer, @intFromEnum(exp.global_index));
+ try appendLeb128(gpa, binary_bytes, @intFromEnum(exp.global_index));
}
exports_len += wasm.global_exports.items.len;
@@ -802,18 +801,22 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
const table_index: u32 = @intCast(wasm.tables.getIndex(.__indirect_function_table).?);
// passive with implicit 0-index table or set table index manually
const flags: u32 = if (table_index == 0) 0x0 else 0x02;
- try leb.writeUleb128(binary_writer, flags);
+ try appendLeb128(gpa, binary_bytes, flags);
if (flags == 0x02) {
- try leb.writeUleb128(binary_writer, table_index);
+ try appendLeb128(gpa, binary_bytes, table_index);
}
// We start at index 1, so unresolved function pointers are invalid
- try emitInit(binary_writer, .{ .i32_const = 1 });
+ {
+ var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, binary_bytes);
+ defer binary_bytes.* = aw.toArrayList();
+ try emitInit(&aw.writer, .{ .i32_const = 1 });
+ }
if (flags == 0x02) {
- try leb.writeUleb128(binary_writer, @as(u8, 0)); // represents funcref
+ try appendLeb128(gpa, binary_bytes, @as(u8, 0)); // represents funcref
}
- try leb.writeUleb128(binary_writer, @as(u32, @intCast(f.indirect_function_table.entries.len)));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(f.indirect_function_table.entries.len)));
for (f.indirect_function_table.keys()) |func_index| {
- try leb.writeUleb128(binary_writer, @intFromEnum(func_index));
+ try appendLeb128(gpa, binary_bytes, @intFromEnum(func_index));
}
replaceVecSectionHeader(binary_bytes, header_offset, .element, 1);
@@ -851,7 +854,7 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
.object_function => |i| {
const ptr = i.ptr(wasm);
const code = ptr.code.slice(wasm);
- try leb.writeUleb128(binary_writer, code.len);
+ try appendLeb128(gpa, binary_bytes, code.len);
const code_start = binary_bytes.items.len;
try binary_bytes.appendSlice(gpa, code);
if (!is_obj) applyRelocs(binary_bytes.items[code_start..], ptr.offset, ptr.relocations(wasm), wasm);
@@ -946,12 +949,14 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
const group_size = group_end_addr - group_start_addr;
log.debug("emit data section group, {d} bytes", .{group_size});
const flags: Object.DataSegmentFlags = if (segment_id.isPassive(wasm)) .passive else .active;
- try leb.writeUleb128(binary_writer, @intFromEnum(flags));
+ try appendLeb128(gpa, binary_bytes, @intFromEnum(flags));
// Passive segments are initialized at runtime.
if (flags != .passive) {
- try emitInit(binary_writer, .{ .i32_const = @as(i32, @bitCast(group_start_addr)) });
+ var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, binary_bytes);
+ defer binary_bytes.* = aw.toArrayList();
+ try emitInit(&aw.writer, .{ .i32_const = @as(i32, @bitCast(group_start_addr)) });
}
- try leb.writeUleb128(binary_writer, group_size);
+ try appendLeb128(gpa, binary_bytes, group_size);
}
if (segment_id.isEmpty(wasm)) {
// It counted for virtual memory but it does not go into the binary.
@@ -1077,7 +1082,7 @@ const VirtualAddrs = struct {
fn emitNameSection(
wasm: *Wasm,
data_segment_groups: []const DataSegmentGroup,
- binary_bytes: *std.ArrayListUnmanaged(u8),
+ binary_bytes: *ArrayList(u8),
) !void {
const f = &wasm.flush_buffer;
const comp = wasm.base.comp;
@@ -1087,7 +1092,7 @@ fn emitNameSection(
defer writeCustomSectionHeader(binary_bytes, header_offset);
const name_name = "name";
- try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, name_name.len));
+ try appendLeb128(gpa, binary_bytes, @as(u32, name_name.len));
try binary_bytes.appendSlice(gpa, name_name);
{
@@ -1095,18 +1100,18 @@ fn emitNameSection(
defer replaceHeader(binary_bytes, sub_offset, @intFromEnum(std.wasm.NameSubsection.function));
const total_functions: u32 = @intCast(f.function_imports.entries.len + wasm.functions.entries.len);
- try leb.writeUleb128(binary_bytes.writer(gpa), total_functions);
+ try appendLeb128(gpa, binary_bytes, total_functions);
for (f.function_imports.keys(), 0..) |name_index, function_index| {
const name = name_index.slice(wasm);
- try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(function_index)));
- try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(name.len)));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(function_index)));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
}
for (wasm.functions.keys(), f.function_imports.entries.len..) |resolution, function_index| {
const name = resolution.name(wasm).?;
- try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(function_index)));
- try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(name.len)));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(function_index)));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
}
}
@@ -1116,18 +1121,18 @@ fn emitNameSection(
defer replaceHeader(binary_bytes, sub_offset, @intFromEnum(std.wasm.NameSubsection.global));
const total_globals: u32 = @intCast(f.global_imports.entries.len + wasm.globals.entries.len);
- try leb.writeUleb128(binary_bytes.writer(gpa), total_globals);
+ try appendLeb128(gpa, binary_bytes, total_globals);
for (f.global_imports.keys(), 0..) |name_index, global_index| {
const name = name_index.slice(wasm);
- try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(global_index)));
- try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(name.len)));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(global_index)));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
}
for (wasm.globals.keys(), f.global_imports.entries.len..) |resolution, global_index| {
const name = resolution.name(wasm).?;
- try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(global_index)));
- try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(name.len)));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(global_index)));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
}
}
@@ -1137,12 +1142,12 @@ fn emitNameSection(
defer replaceHeader(binary_bytes, sub_offset, @intFromEnum(std.wasm.NameSubsection.data_segment));
const total_data_segments: u32 = @intCast(data_segment_groups.len);
- try leb.writeUleb128(binary_bytes.writer(gpa), total_data_segments);
+ try appendLeb128(gpa, binary_bytes, total_data_segments);
for (data_segment_groups, 0..) |group, i| {
const name, _ = splitSegmentName(group.first_segment.name(wasm));
- try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(i)));
- try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(name.len)));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(i)));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
}
}
@@ -1150,7 +1155,7 @@ fn emitNameSection(
fn emitFeaturesSection(
gpa: Allocator,
- binary_bytes: *std.ArrayListUnmanaged(u8),
+ binary_bytes: *ArrayList(u8),
target: *const std.Target,
) Allocator.Error!void {
const feature_count = target.cpu.features.count();
@@ -1159,87 +1164,84 @@ fn emitFeaturesSection(
const header_offset = try reserveCustomSectionHeader(gpa, binary_bytes);
defer writeCustomSectionHeader(binary_bytes, header_offset);
- const writer = binary_bytes.writer(gpa);
const target_features = "target_features";
- try leb.writeUleb128(writer, @as(u32, @intCast(target_features.len)));
- try writer.writeAll(target_features);
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(target_features.len)));
+ try binary_bytes.appendSlice(gpa, target_features);
- try leb.writeUleb128(writer, @as(u32, @intCast(feature_count)));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(feature_count)));
var safety_count = feature_count;
for (target.cpu.arch.allFeaturesList(), 0..) |*feature, i| {
if (!target.cpu.has(.wasm, @as(std.Target.wasm.Feature, @enumFromInt(i)))) continue;
safety_count -= 1;
- try leb.writeUleb128(writer, @as(u32, '+'));
+ try appendLeb128(gpa, binary_bytes, @as(u32, '+'));
// Depends on llvm_name for the hyphenated version that matches wasm tooling conventions.
const name = feature.llvm_name.?;
- try leb.writeUleb128(writer, @as(u32, @intCast(name.len)));
- try writer.writeAll(name);
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
+ try binary_bytes.appendSlice(gpa, name);
}
assert(safety_count == 0);
}
-fn emitBuildIdSection(gpa: Allocator, binary_bytes: *std.ArrayListUnmanaged(u8), build_id: []const u8) !void {
+fn emitBuildIdSection(gpa: Allocator, binary_bytes: *ArrayList(u8), build_id: []const u8) !void {
const header_offset = try reserveCustomSectionHeader(gpa, binary_bytes);
defer writeCustomSectionHeader(binary_bytes, header_offset);
- const writer = binary_bytes.writer(gpa);
const hdr_build_id = "build_id";
- try leb.writeUleb128(writer, @as(u32, @intCast(hdr_build_id.len)));
- try writer.writeAll(hdr_build_id);
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(hdr_build_id.len)));
+ try binary_bytes.appendSlice(gpa, hdr_build_id);
- try leb.writeUleb128(writer, @as(u32, 1));
- try leb.writeUleb128(writer, @as(u32, @intCast(build_id.len)));
- try writer.writeAll(build_id);
+ try appendLeb128(gpa, binary_bytes, @as(u32, 1));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(build_id.len)));
+ try binary_bytes.appendSlice(gpa, build_id);
}
-fn emitProducerSection(gpa: Allocator, binary_bytes: *std.ArrayListUnmanaged(u8)) !void {
+fn emitProducerSection(gpa: Allocator, binary_bytes: *ArrayList(u8)) !void {
const header_offset = try reserveCustomSectionHeader(gpa, binary_bytes);
defer writeCustomSectionHeader(binary_bytes, header_offset);
- const writer = binary_bytes.writer(gpa);
const producers = "producers";
- try leb.writeUleb128(writer, @as(u32, @intCast(producers.len)));
- try writer.writeAll(producers);
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(producers.len)));
+ try binary_bytes.appendSlice(gpa, producers);
- try leb.writeUleb128(writer, @as(u32, 2)); // 2 fields: Language + processed-by
+ try appendLeb128(gpa, binary_bytes, @as(u32, 2)); // 2 fields: Language + processed-by
// language field
{
const language = "language";
- try leb.writeUleb128(writer, @as(u32, @intCast(language.len)));
- try writer.writeAll(language);
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(language.len)));
+ try binary_bytes.appendSlice(gpa, language);
// field_value_count (TODO: Parse object files for producer sections to detect their language)
- try leb.writeUleb128(writer, @as(u32, 1));
+ try appendLeb128(gpa, binary_bytes, @as(u32, 1));
// versioned name
{
- try leb.writeUleb128(writer, @as(u32, 3)); // len of "Zig"
- try writer.writeAll("Zig");
+ try appendLeb128(gpa, binary_bytes, @as(u32, 3)); // len of "Zig"
+ try binary_bytes.appendSlice(gpa, "Zig");
- try leb.writeUleb128(writer, @as(u32, @intCast(build_options.version.len)));
- try writer.writeAll(build_options.version);
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(build_options.version.len)));
+ try binary_bytes.appendSlice(gpa, build_options.version);
}
}
// processed-by field
{
const processed_by = "processed-by";
- try leb.writeUleb128(writer, @as(u32, @intCast(processed_by.len)));
- try writer.writeAll(processed_by);
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(processed_by.len)));
+ try binary_bytes.appendSlice(gpa, processed_by);
// field_value_count (TODO: Parse object files for producer sections to detect other used tools)
- try leb.writeUleb128(writer, @as(u32, 1));
+ try appendLeb128(gpa, binary_bytes, @as(u32, 1));
// versioned name
{
- try leb.writeUleb128(writer, @as(u32, 3)); // len of "Zig"
- try writer.writeAll("Zig");
+ try appendLeb128(gpa, binary_bytes, @as(u32, 3)); // len of "Zig"
+ try binary_bytes.appendSlice(gpa, "Zig");
- try leb.writeUleb128(writer, @as(u32, @intCast(build_options.version.len)));
- try writer.writeAll(build_options.version);
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(build_options.version.len)));
+ try binary_bytes.appendSlice(gpa, build_options.version);
}
}
}
@@ -1280,99 +1282,97 @@ fn wantSegmentMerge(
const section_header_reserve_size = 1 + 5 + 5;
const section_header_size = 5 + 1;
-fn reserveVecSectionHeader(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8)) Allocator.Error!u32 {
+fn reserveVecSectionHeader(gpa: Allocator, bytes: *ArrayList(u8)) Allocator.Error!u32 {
try bytes.appendNTimes(gpa, 0, section_header_reserve_size);
return @intCast(bytes.items.len - section_header_reserve_size);
}
fn replaceVecSectionHeader(
- bytes: *std.ArrayListUnmanaged(u8),
+ bytes: *ArrayList(u8),
offset: u32,
section: std.wasm.Section,
n_items: u32,
) void {
const size: u32 = @intCast(bytes.items.len - offset - section_header_reserve_size + uleb128size(n_items));
var buf: [section_header_reserve_size]u8 = undefined;
- var fbw = std.io.fixedBufferStream(&buf);
- const w = fbw.writer();
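+    // Encode the replacement header into a fixed Writer over the stack buffer, then splice it over the reserved bytes.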
+ var w: std.Io.Writer = .fixed(&buf);
w.writeByte(@intFromEnum(section)) catch unreachable;
- leb.writeUleb128(w, size) catch unreachable;
- leb.writeUleb128(w, n_items) catch unreachable;
- bytes.replaceRangeAssumeCapacity(offset, section_header_reserve_size, fbw.getWritten());
+ w.writeUleb128(size) catch unreachable;
+ w.writeUleb128(n_items) catch unreachable;
+ bytes.replaceRangeAssumeCapacity(offset, section_header_reserve_size, w.buffered());
}
-fn reserveCustomSectionHeader(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8)) Allocator.Error!u32 {
+fn reserveCustomSectionHeader(gpa: Allocator, bytes: *ArrayList(u8)) Allocator.Error!u32 {
try bytes.appendNTimes(gpa, 0, section_header_size);
return @intCast(bytes.items.len - section_header_size);
}
-fn writeCustomSectionHeader(bytes: *std.ArrayListUnmanaged(u8), offset: u32) void {
+fn writeCustomSectionHeader(bytes: *ArrayList(u8), offset: u32) void {
return replaceHeader(bytes, offset, 0); // 0 = 'custom' section
}
-fn replaceHeader(bytes: *std.ArrayListUnmanaged(u8), offset: u32, tag: u8) void {
+fn replaceHeader(bytes: *ArrayList(u8), offset: u32, tag: u8) void {
const size: u32 = @intCast(bytes.items.len - offset - section_header_size);
var buf: [section_header_size]u8 = undefined;
- var fbw = std.io.fixedBufferStream(&buf);
- const w = fbw.writer();
+ var w: std.Io.Writer = .fixed(&buf);
w.writeByte(tag) catch unreachable;
- leb.writeUleb128(w, size) catch unreachable;
- bytes.replaceRangeAssumeCapacity(offset, section_header_size, fbw.getWritten());
+ w.writeUleb128(size) catch unreachable;
+ bytes.replaceRangeAssumeCapacity(offset, section_header_size, w.buffered());
}
const max_size_encoding = 5;
-fn reserveSize(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8)) Allocator.Error!u32 {
+fn reserveSize(gpa: Allocator, bytes: *ArrayList(u8)) Allocator.Error!u32 {
try bytes.appendNTimes(gpa, 0, max_size_encoding);
return @intCast(bytes.items.len - max_size_encoding);
}
-fn replaceSize(bytes: *std.ArrayListUnmanaged(u8), offset: u32) void {
+fn replaceSize(bytes: *ArrayList(u8), offset: u32) void {
const size: u32 = @intCast(bytes.items.len - offset - max_size_encoding);
var buf: [max_size_encoding]u8 = undefined;
- var fbw = std.io.fixedBufferStream(&buf);
- leb.writeUleb128(fbw.writer(), size) catch unreachable;
- bytes.replaceRangeAssumeCapacity(offset, max_size_encoding, fbw.getWritten());
+ var w: std.Io.Writer = .fixed(&buf);
+ w.writeUleb128(size) catch unreachable;
+ bytes.replaceRangeAssumeCapacity(offset, max_size_encoding, w.buffered());
}
fn emitLimits(
gpa: Allocator,
- binary_bytes: *std.ArrayListUnmanaged(u8),
+ binary_bytes: *ArrayList(u8),
limits: std.wasm.Limits,
) Allocator.Error!void {
try binary_bytes.append(gpa, @bitCast(limits.flags));
- try leb.writeUleb128(binary_bytes.writer(gpa), limits.min);
- if (limits.flags.has_max) try leb.writeUleb128(binary_bytes.writer(gpa), limits.max);
+ try appendLeb128(gpa, binary_bytes, limits.min);
+ if (limits.flags.has_max) try appendLeb128(gpa, binary_bytes, limits.max);
}
fn emitMemoryImport(
wasm: *Wasm,
- binary_bytes: *std.ArrayListUnmanaged(u8),
+ binary_bytes: *ArrayList(u8),
name_index: String,
memory_import: *const Wasm.MemoryImport,
) Allocator.Error!void {
const gpa = wasm.base.comp.gpa;
const module_name = memory_import.module_name.slice(wasm);
- try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(module_name.len)));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(module_name.len)));
try binary_bytes.appendSlice(gpa, module_name);
const name = name_index.slice(wasm);
- try leb.writeUleb128(binary_bytes.writer(gpa), @as(u32, @intCast(name.len)));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.memory));
try emitLimits(gpa, binary_bytes, memory_import.limits());
}
-pub fn emitInit(writer: anytype, init_expr: std.wasm.InitExpression) !void {
+fn emitInit(writer: *std.Io.Writer, init_expr: std.wasm.InitExpression) !void {
switch (init_expr) {
.i32_const => |val| {
try writer.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
- try leb.writeIleb128(writer, val);
+ try writer.writeSleb128(val);
},
.i64_const => |val| {
try writer.writeByte(@intFromEnum(std.wasm.Opcode.i64_const));
- try leb.writeIleb128(writer, val);
+ try writer.writeSleb128(val);
},
.f32_const => |val| {
try writer.writeByte(@intFromEnum(std.wasm.Opcode.f32_const));
@@ -1384,13 +1384,13 @@ pub fn emitInit(writer: anytype, init_expr: std.wasm.InitExpression) !void {
},
.global_get => |val| {
try writer.writeByte(@intFromEnum(std.wasm.Opcode.global_get));
- try leb.writeUleb128(writer, val);
+ try writer.writeUleb128(val);
},
}
try writer.writeByte(@intFromEnum(std.wasm.Opcode.end));
}
-pub fn emitExpr(wasm: *const Wasm, binary_bytes: *std.ArrayListUnmanaged(u8), expr: Wasm.Expr) Allocator.Error!void {
+pub fn emitExpr(wasm: *const Wasm, binary_bytes: *ArrayList(u8), expr: Wasm.Expr) Allocator.Error!void {
const gpa = wasm.base.comp.gpa;
const slice = expr.slice(wasm);
try binary_bytes.appendSlice(gpa, slice[0 .. slice.len + 1]); // +1 to include end opcode
@@ -1398,21 +1398,20 @@ pub fn emitExpr(wasm: *const Wasm, binary_bytes: *std.ArrayListUnmanaged(u8), ex
fn emitSegmentInfo(wasm: *Wasm, binary_bytes: *std.array_list.Managed(u8)) !void {
const gpa = wasm.base.comp.gpa;
- const writer = binary_bytes.writer(gpa);
- try leb.writeUleb128(writer, @intFromEnum(Wasm.SubsectionType.segment_info));
+ try appendLeb128(gpa, binary_bytes, @intFromEnum(Wasm.SubsectionType.segment_info));
const segment_offset = binary_bytes.items.len;
- try leb.writeUleb128(writer, @as(u32, @intCast(wasm.segment_info.count())));
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(wasm.segment_info.count())));
for (wasm.segment_info.values()) |segment_info| {
log.debug("Emit segment: {s} align({d}) flags({b})", .{
segment_info.name,
segment_info.alignment,
segment_info.flags,
});
- try leb.writeUleb128(writer, @as(u32, @intCast(segment_info.name.len)));
- try writer.writeAll(segment_info.name);
- try leb.writeUleb128(writer, segment_info.alignment.toLog2Units());
- try leb.writeUleb128(writer, segment_info.flags);
+ try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(segment_info.name.len)));
+ try binary_bytes.appendSlice(gpa, segment_info.name);
+ try appendLeb128(gpa, binary_bytes, segment_info.alignment.toLog2Units());
+ try appendLeb128(gpa, binary_bytes, segment_info.flags);
}
var buf: [5]u8 = undefined;
@@ -1429,7 +1428,7 @@ fn uleb128size(x: u32) u32 {
fn emitTagNameTable(
gpa: Allocator,
- code: *std.ArrayListUnmanaged(u8),
+ code: *ArrayList(u8),
tag_name_offs: []const u32,
tag_name_bytes: []const u8,
base: u32,
@@ -1604,7 +1603,7 @@ fn reloc_leb_type(code: []u8, index: FuncTypeIndex) void {
leb.writeUnsignedFixed(5, code[0..5], @intFromEnum(index));
}
-fn emitCallCtorsFunction(wasm: *const Wasm, binary_bytes: *std.ArrayListUnmanaged(u8)) Allocator.Error!void {
+fn emitCallCtorsFunction(wasm: *const Wasm, binary_bytes: *ArrayList(u8)) Allocator.Error!void {
const gpa = wasm.base.comp.gpa;
try binary_bytes.ensureUnusedCapacity(gpa, 5 + 1);
@@ -1631,7 +1630,7 @@ fn emitCallCtorsFunction(wasm: *const Wasm, binary_bytes: *std.ArrayListUnmanage
fn emitInitMemoryFunction(
wasm: *const Wasm,
- binary_bytes: *std.ArrayListUnmanaged(u8),
+ binary_bytes: *ArrayList(u8),
virtual_addrs: *const VirtualAddrs,
) Allocator.Error!void {
const comp = wasm.base.comp;
@@ -1734,7 +1733,7 @@ fn emitInitMemoryFunction(
// notify any waiters for segment initialization completion
appendReservedI32Const(binary_bytes, flag_address);
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
- leb.writeIleb128(binary_bytes.fixedWriter(), @as(i32, -1)) catch unreachable; // number of waiters
+ appendReservedLeb128(binary_bytes, @as(i32, -1)); // number of waiters
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix));
appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.AtomicsOpcode.memory_atomic_notify));
appendReservedUleb32(binary_bytes, @as(u32, 2)); // alignment
@@ -1750,7 +1749,7 @@ fn emitInitMemoryFunction(
appendReservedI32Const(binary_bytes, flag_address);
appendReservedI32Const(binary_bytes, 1); // expected flag value
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_const));
- leb.writeIleb128(binary_bytes.fixedWriter(), @as(i64, -1)) catch unreachable; // timeout
+ appendReservedLeb128(binary_bytes, @as(i64, -1)); // timeout
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix));
appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.AtomicsOpcode.memory_atomic_wait32));
appendReservedUleb32(binary_bytes, @as(u32, 2)); // alignment
@@ -1779,7 +1778,7 @@ fn emitInitMemoryFunction(
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end));
}
-fn emitInitTlsFunction(wasm: *const Wasm, bytes: *std.ArrayListUnmanaged(u8)) Allocator.Error!void {
+fn emitInitTlsFunction(wasm: *const Wasm, bytes: *ArrayList(u8)) Allocator.Error!void {
const comp = wasm.base.comp;
const gpa = comp.gpa;
@@ -1840,14 +1839,14 @@ fn emitInitTlsFunction(wasm: *const Wasm, bytes: *std.ArrayListUnmanaged(u8)) Al
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end));
}
-fn emitStartSection(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8), i: Wasm.OutputFunctionIndex) !void {
+fn emitStartSection(gpa: Allocator, bytes: *ArrayList(u8), i: Wasm.OutputFunctionIndex) !void {
const header_offset = try reserveVecSectionHeader(gpa, bytes);
replaceVecSectionHeader(bytes, header_offset, .start, @intFromEnum(i));
}
fn emitTagNameFunction(
wasm: *Wasm,
- code: *std.ArrayListUnmanaged(u8),
+ code: *ArrayList(u8),
table_base_addr: u32,
table_index: u32,
enum_type_ip: InternPool.Index,
@@ -1959,22 +1958,34 @@ fn emitTagNameFunction(
}
/// Writes an unsigned 32-bit integer as a LEB128-encoded 'i32.const' value.
-fn appendReservedI32Const(bytes: *std.ArrayListUnmanaged(u8), val: u32) void {
+fn appendReservedI32Const(bytes: *ArrayList(u8), val: u32) void {
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
- leb.writeIleb128(bytes.fixedWriter(), @as(i32, @bitCast(val))) catch unreachable;
+ var w: std.Io.Writer = .fromArrayList(bytes);
+ defer bytes.* = w.toArrayList();
+ return w.writeSleb128(val) catch |err| switch (err) {
+ error.WriteFailed => unreachable,
+ };
}
/// Writes an unsigned 64-bit integer as a LEB128-encoded 'i64.const' value.
-fn appendReservedI64Const(bytes: *std.ArrayListUnmanaged(u8), val: u64) void {
+fn appendReservedI64Const(bytes: *ArrayList(u8), val: u64) void {
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_const));
- leb.writeIleb128(bytes.fixedWriter(), @as(i64, @bitCast(val))) catch unreachable;
+ var w: std.Io.Writer = .fromArrayList(bytes);
+ defer bytes.* = w.toArrayList();
+ return w.writeSleb128(val) catch |err| switch (err) {
+ error.WriteFailed => unreachable,
+ };
}
-fn appendReservedUleb32(bytes: *std.ArrayListUnmanaged(u8), val: u32) void {
- leb.writeUleb128(bytes.fixedWriter(), val) catch unreachable;
+fn appendReservedUleb32(bytes: *ArrayList(u8), val: u32) void {
+ var w: std.Io.Writer = .fromArrayList(bytes);
+ defer bytes.* = w.toArrayList();
+ return w.writeUleb128(val) catch |err| switch (err) {
+ error.WriteFailed => unreachable,
+ };
}
-fn appendGlobal(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8), mutable: u8, val: u32) Allocator.Error!void {
+fn appendGlobal(gpa: Allocator, bytes: *ArrayList(u8), mutable: u8, val: u32) Allocator.Error!void {
try bytes.ensureUnusedCapacity(gpa, 9);
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Valtype.i32));
bytes.appendAssumeCapacity(mutable);
@@ -1982,3 +1993,19 @@ fn appendGlobal(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8), mutable: u8,
appendReservedUleb32(bytes, val);
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end));
}
+
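+/// Appends `value` to `bytes` as LEB128 (signedness follows the type of `value`), growing the list with `gpa`.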
+fn appendLeb128(gpa: Allocator, bytes: *ArrayList(u8), value: anytype) Allocator.Error!void {
+ var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, bytes);
+ defer bytes.* = aw.toArrayList();
+ return aw.writer.writeLeb128(value) catch |err| switch (err) {
+ error.WriteFailed => return error.OutOfMemory,
+ };
+}
+
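+/// Like `appendLeb128`, but writes into capacity the caller has already reserved; running out of reserved space is unreachable.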
+fn appendReservedLeb128(bytes: *ArrayList(u8), value: anytype) void {
+ var w: std.Io.Writer = .fromArrayList(bytes);
+ defer bytes.* = w.toArrayList();
+ return w.writeLeb128(value) catch |err| switch (err) {
+ error.WriteFailed => unreachable,
+ };
+}
diff --git a/src/link/riscv.zig b/src/link/riscv.zig
index 31f26b7287..85cc491603 100644
--- a/src/link/riscv.zig
+++ b/src/link/riscv.zig
@@ -9,29 +9,28 @@ pub fn writeSetSub6(comptime op: enum { set, sub }, code: *[1]u8, addend: anytyp
mem.writeInt(u8, code, value, .little);
}
-pub fn writeSetSubUleb(comptime op: enum { set, sub }, stream: *std.io.FixedBufferStream([]u8), addend: i64) !void {
- switch (op) {
- .set => try overwriteUleb(stream, @intCast(addend)),
- .sub => {
- const position = try stream.getPos();
- const value: u64 = try std.leb.readUleb128(u64, stream.reader());
- try stream.seekTo(position);
- try overwriteUleb(stream, value -% @as(u64, @intCast(addend)));
- },
- }
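+/// Subtracts `addend` from the ULEB128 value already encoded at the start of `code`, rewriting it in place.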
+pub fn writeSubUleb(code: []u8, addend: i64) void {
+ var reader: std.Io.Reader = .fixed(code);
+ const value = reader.takeLeb128(u64) catch unreachable;
+ overwriteUleb(code, value -% @as(u64, @intCast(addend)));
+}
+
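+/// Replaces the ULEB128 value encoded at the start of `code` with `addend`.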
+pub fn writeSetUleb(code: []u8, addend: i64) void {
+ overwriteUleb(code, @intCast(addend));
}
-fn overwriteUleb(stream: *std.io.FixedBufferStream([]u8), addend: u64) !void {
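+/// Overwrites the existing ULEB128 value in `code` with `addend`, preserving its original encoded length.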
+fn overwriteUleb(code: []u8, addend: u64) void {
var value: u64 = addend;
- const writer = stream.writer();
+ var i: usize = 0;
while (true) {
- const byte = stream.buffer[stream.pos];
+ const byte = code[i];
if (byte & 0x80 == 0) break;
- try writer.writeByte(0x80 | @as(u8, @truncate(value & 0x7f)));
+ code[i] = 0x80 | @as(u8, @truncate(value & 0x7f));
+ i += 1;
value >>= 7;
}
- stream.buffer[stream.pos] = @truncate(value & 0x7f);
+ code[i] = @truncate(value & 0x7f);
}
pub fn writeAddend(
diff --git a/src/main.zig b/src/main.zig
index 0e739d4ca1..daf2f09077 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -4230,7 +4230,7 @@ fn serveUpdateResults(s: *Server, comp: *Compilation) !void {
const decl_name = zir.nullTerminatedString(zir.getDeclaration(resolved.inst).name);
const gop = try files.getOrPut(gpa, resolved.file);
- if (!gop.found_existing) try file_name_bytes.writer(gpa).print("{f}\x00", .{file.path.fmt(comp)});
+ if (!gop.found_existing) try file_name_bytes.print(gpa, "{f}\x00", .{file.path.fmt(comp)});
const codegen_ns = tr.decl_codegen_ns.get(tracked_inst) orelse 0;
const link_ns = tr.decl_link_ns.get(tracked_inst) orelse 0;
@@ -7451,7 +7451,7 @@ const Templates = struct {
i += "_NAME".len;
continue;
} else if (std.mem.startsWith(u8, contents[i + 1 ..], "FINGERPRINT")) {
- try templates.buffer.writer().print("0x{x}", .{fingerprint.int()});
+ try templates.buffer.print("0x{x}", .{fingerprint.int()});
i += "_FINGERPRINT".len;
continue;
} else if (std.mem.startsWith(u8, contents[i + 1 ..], "ZIGVER")) {