Date: Fri, 27 Jan 2023 20:25:48 +0200
Subject: implement `@qualCast`
---
doc/langref.html.in | 15 ++++--
lib/std/child_process.zig | 4 +-
lib/std/fs.zig | 2 +-
lib/std/os.zig | 2 +-
lib/std/os/windows.zig | 14 ++---
lib/std/zig/c_translation.zig | 2 +-
src/AstGen.zig | 2 +
src/BuiltinFn.zig | 8 +++
src/Sema.zig | 62 +++++++++++++++++++++-
src/Zir.zig | 6 +++
src/print_zir.zig | 1 +
test/cases/compile_errors/invalid_qualcast.zig | 12 +++++
.../ptrCast_discards_const_qualifier.zig | 1 +
13 files changed, 113 insertions(+), 18 deletions(-)
create mode 100644 test/cases/compile_errors/invalid_qualcast.zig
(limited to 'src')
diff --git a/doc/langref.html.in b/doc/langref.html.in
index e1521795ca..8c7781ee42 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -8803,10 +8803,10 @@ pub const PrefetchOptions = struct {
{#syntax#}@ptrCast{#endsyntax#} cannot be used for:
- - Removing {#syntax#}const{#endsyntax#} or {#syntax#}volatile{#endsyntax#} qualifier. TODO add a {#syntax#}@qualCast{#endsyntax#} builtin.
- - Changing pointer address space, use {#link|@addrSpaceCast#}
- - Increasing pointer alignment, use {#link|@alignCast#}
- - Casting a non-slice pointer to a slice, use slicing syntax {#syntax#}ptr[start..end]{#endsyntax#}
+ - Removing {#syntax#}const{#endsyntax#} or {#syntax#}volatile{#endsyntax#} qualifier, use {#link|@qualCast#}.
+ - Changing pointer address space, use {#link|@addrSpaceCast#}.
+ - Increasing pointer alignment, use {#link|@alignCast#}.
+ - Casting a non-slice pointer to a slice, use slicing syntax {#syntax#}ptr[start..end]{#endsyntax#}.
{#header_close#}
@@ -8820,6 +8820,13 @@ pub const PrefetchOptions = struct {
{#header_close#}
+ {#header_open|@qualCast#}
+ {#syntax#}@qualCast(comptime DestType: type, value: anytype) DestType{#endsyntax#}
+
+ Removes the {#syntax#}const{#endsyntax#} or {#syntax#}volatile{#endsyntax#} qualifier from a pointer.
+
+ {#header_close#}
+
{#header_open|@rem#}
{#syntax#}@rem(numerator: T, denominator: T) T{#endsyntax#}
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index 4a816c8318..21d7b4fe3e 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -1164,7 +1164,7 @@ fn windowsCreateProcessPathExt(
var app_name_unicode_string = windows.UNICODE_STRING{
.Length = app_name_len_bytes,
.MaximumLength = app_name_len_bytes,
- .Buffer = @intToPtr([*]u16, @ptrToInt(app_name_wildcard.ptr)),
+ .Buffer = @qualCast([*:0]u16, app_name_wildcard.ptr),
};
const rc = windows.ntdll.NtQueryDirectoryFile(
dir.fd,
@@ -1261,7 +1261,7 @@ fn windowsCreateProcessPathExt(
var app_name_unicode_string = windows.UNICODE_STRING{
.Length = app_name_len_bytes,
.MaximumLength = app_name_len_bytes,
- .Buffer = @intToPtr([*]u16, @ptrToInt(app_name_appended.ptr)),
+ .Buffer = @qualCast([*:0]u16, app_name_appended.ptr),
};
// Re-use the directory handle but this time we call with the appended app name
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index 244f3a38ce..2300ad044a 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -1763,7 +1763,7 @@ pub const Dir = struct {
var nt_name = w.UNICODE_STRING{
.Length = path_len_bytes,
.MaximumLength = path_len_bytes,
- .Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w)),
+ .Buffer = @qualCast([*:0]u16, sub_path_w),
};
var attr = w.OBJECT_ATTRIBUTES{
.Length = @sizeOf(w.OBJECT_ATTRIBUTES),
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 32463aa30e..3cee30c32d 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -4513,7 +4513,7 @@ pub fn faccessatW(dirfd: fd_t, sub_path_w: [*:0]const u16, mode: u32, flags: u32
var nt_name = windows.UNICODE_STRING{
.Length = path_len_bytes,
.MaximumLength = path_len_bytes,
- .Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w)),
+ .Buffer = @qualCast([*:0]u16, sub_path_w),
};
var attr = windows.OBJECT_ATTRIBUTES{
.Length = @sizeOf(windows.OBJECT_ATTRIBUTES),
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index e53387b27c..93e762827b 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -85,7 +85,7 @@ pub fn OpenFile(sub_path_w: []const u16, options: OpenFileOptions) OpenError!HAN
var nt_name = UNICODE_STRING{
.Length = path_len_bytes,
.MaximumLength = path_len_bytes,
- .Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w.ptr)),
+ .Buffer = @qualCast([*]u16, sub_path_w.ptr),
};
var attr = OBJECT_ATTRIBUTES{
.Length = @sizeOf(OBJECT_ATTRIBUTES),
@@ -634,7 +634,7 @@ pub fn SetCurrentDirectory(path_name: []const u16) SetCurrentDirectoryError!void
var nt_name = UNICODE_STRING{
.Length = path_len_bytes,
.MaximumLength = path_len_bytes,
- .Buffer = @intToPtr([*]u16, @ptrToInt(path_name.ptr)),
+ .Buffer = @qualCast([*]u16, path_name.ptr),
};
const rc = ntdll.RtlSetCurrentDirectory_U(&nt_name);
@@ -766,7 +766,7 @@ pub fn ReadLink(dir: ?HANDLE, sub_path_w: []const u16, out_buffer: []u8) ReadLin
var nt_name = UNICODE_STRING{
.Length = path_len_bytes,
.MaximumLength = path_len_bytes,
- .Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w.ptr)),
+ .Buffer = @qualCast([*]u16, sub_path_w.ptr),
};
var attr = OBJECT_ATTRIBUTES{
.Length = @sizeOf(OBJECT_ATTRIBUTES),
@@ -876,7 +876,7 @@ pub fn DeleteFile(sub_path_w: []const u16, options: DeleteFileOptions) DeleteFil
.Length = path_len_bytes,
.MaximumLength = path_len_bytes,
// The Windows API makes this mutable, but it will not mutate here.
- .Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w.ptr)),
+ .Buffer = @qualCast([*]u16, sub_path_w.ptr),
};
if (sub_path_w[0] == '.' and sub_path_w[1] == 0) {
@@ -1414,7 +1414,7 @@ pub fn sendmsg(
}
pub fn sendto(s: ws2_32.SOCKET, buf: [*]const u8, len: usize, flags: u32, to: ?*const ws2_32.sockaddr, to_len: ws2_32.socklen_t) i32 {
- var buffer = ws2_32.WSABUF{ .len = @truncate(u31, len), .buf = @intToPtr([*]u8, @ptrToInt(buf)) };
+ var buffer = ws2_32.WSABUF{ .len = @truncate(u31, len), .buf = @qualCast([*]u8, buf) };
var bytes_send: DWORD = undefined;
if (ws2_32.WSASendTo(s, @ptrCast([*]ws2_32.WSABUF, &buffer), 1, &bytes_send, flags, to, @intCast(i32, to_len), null, null) == ws2_32.SOCKET_ERROR) {
return ws2_32.SOCKET_ERROR;
@@ -1876,13 +1876,13 @@ pub fn eqlIgnoreCaseWTF16(a: []const u16, b: []const u16) bool {
const a_string = UNICODE_STRING{
.Length = a_bytes,
.MaximumLength = a_bytes,
- .Buffer = @intToPtr([*]u16, @ptrToInt(a.ptr)),
+ .Buffer = @qualCast([*]u16, a.ptr),
};
const b_bytes = @intCast(u16, b.len * 2);
const b_string = UNICODE_STRING{
.Length = b_bytes,
.MaximumLength = b_bytes,
- .Buffer = @intToPtr([*]u16, @ptrToInt(b.ptr)),
+ .Buffer = @qualCast([*]u16, b.ptr),
};
return ntdll.RtlEqualUnicodeString(&a_string, &b_string, TRUE) == TRUE;
}
diff --git a/lib/std/zig/c_translation.zig b/lib/std/zig/c_translation.zig
index a050e592a2..d33c74d777 100644
--- a/lib/std/zig/c_translation.zig
+++ b/lib/std/zig/c_translation.zig
@@ -75,7 +75,7 @@ fn castPtr(comptime DestType: type, target: anytype) DestType {
const source = ptrInfo(@TypeOf(target));
if (source.is_const and !dest.is_const or source.is_volatile and !dest.is_volatile)
- return @intToPtr(DestType, @ptrToInt(target))
+ return @qualCast(DestType, target)
else if (@typeInfo(dest.child) == .Opaque)
// dest.alignment would error out
return @ptrCast(DestType, target)
diff --git a/src/AstGen.zig b/src/AstGen.zig
index df111906e6..a5667ce9e8 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -2530,6 +2530,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.bit_size_of,
.typeof_log2_int_type,
.ptr_to_int,
+ .qual_cast,
.align_of,
.bool_to_int,
.embed_file,
@@ -8037,6 +8038,7 @@ fn builtinCall(
.float_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .float_cast),
.int_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .int_cast),
.ptr_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .ptr_cast),
+ .qual_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .qual_cast),
.truncate => return typeCast(gz, scope, ri, node, params[0], params[1], .truncate),
// zig fmt: on
diff --git a/src/BuiltinFn.zig b/src/BuiltinFn.zig
index b71d96c3dd..80eb739185 100644
--- a/src/BuiltinFn.zig
+++ b/src/BuiltinFn.zig
@@ -75,6 +75,7 @@ pub const Tag = enum {
prefetch,
ptr_cast,
ptr_to_int,
+ qual_cast,
rem,
return_address,
select,
@@ -674,6 +675,13 @@ pub const list = list: {
.param_count = 1,
},
},
+ .{
+ "@qualCast",
+ .{
+ .tag = .qual_cast,
+ .param_count = 2,
+ },
+ },
.{
"@rem",
.{
diff --git a/src/Sema.zig b/src/Sema.zig
index b4731d9509..d306c68e08 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1015,6 +1015,7 @@ fn analyzeBodyInner(
.float_cast => try sema.zirFloatCast(block, inst),
.int_cast => try sema.zirIntCast(block, inst),
.ptr_cast => try sema.zirPtrCast(block, inst),
+ .qual_cast => try sema.zirQualCast(block, inst),
.truncate => try sema.zirTruncate(block, inst),
.align_cast => try sema.zirAlignCast(block, inst),
.has_decl => try sema.zirHasDecl(block, inst),
@@ -19529,10 +19530,24 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const operand_info = operand_ty.ptrInfo().data;
const dest_info = dest_ty.ptrInfo().data;
if (!operand_info.mutable and dest_info.mutable) {
- return sema.fail(block, src, "cast discards const qualifier", .{});
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "cast discards const qualifier", .{});
+ errdefer msg.destroy(sema.gpa);
+
+ try sema.errNote(block, src, msg, "consider using '@qualCast'", .{});
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
}
if (operand_info.@"volatile" and !dest_info.@"volatile") {
- return sema.fail(block, src, "cast discards volatile qualifier", .{});
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "cast discards volatile qualifier", .{});
+ errdefer msg.destroy(sema.gpa);
+
+ try sema.errNote(block, src, msg, "consider using '@qualCast'", .{});
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
}
if (operand_info.@"addrspace" != dest_info.@"addrspace") {
const msg = msg: {
@@ -19634,6 +19649,49 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
return block.addBitCast(aligned_dest_ty, ptr);
}
+fn zirQualCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const src = inst_data.src();
+ const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
+ const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
+ const operand = try sema.resolveInst(extra.rhs);
+ const operand_ty = sema.typeOf(operand);
+
+ try sema.checkPtrType(block, dest_ty_src, dest_ty);
+ try sema.checkPtrOperand(block, operand_src, operand_ty);
+
+ var operand_payload = operand_ty.ptrInfo();
+ var dest_info = dest_ty.ptrInfo();
+
+ operand_payload.data.mutable = dest_info.data.mutable;
+ operand_payload.data.@"volatile" = dest_info.data.@"volatile";
+
+ const altered_operand_ty = Type.initPayload(&operand_payload.base);
+ if (!altered_operand_ty.eql(dest_ty, sema.mod)) {
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "'@qualCast' can only modify 'const' and 'volatile' qualifiers", .{});
+ errdefer msg.destroy(sema.gpa);
+
+ dest_info.data.mutable = !operand_ty.isConstPtr();
+ dest_info.data.@"volatile" = operand_ty.isVolatilePtr();
+ const altered_dest_ty = Type.initPayload(&dest_info.base);
+ try sema.errNote(block, src, msg, "expected type '{}'", .{altered_dest_ty.fmt(sema.mod)});
+ try sema.errNote(block, src, msg, "got type '{}'", .{operand_ty.fmt(sema.mod)});
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ }
+
+ if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
+ return sema.addConstant(dest_ty, operand_val);
+ }
+
+ try sema.requireRuntimeBlock(block, src, null);
+ return block.addBitCast(dest_ty, operand);
+}
+
fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
diff --git a/src/Zir.zig b/src/Zir.zig
index 94e6a9a11a..b93422177e 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -857,6 +857,9 @@ pub const Inst = struct {
/// Implements the `@ptrCast` builtin.
/// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
ptr_cast,
+ /// Implements the `@qualCast` builtin.
+ /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
+ qual_cast,
/// Implements the `@truncate` builtin.
/// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
truncate,
@@ -1195,6 +1198,7 @@ pub const Inst = struct {
.float_cast,
.int_cast,
.ptr_cast,
+ .qual_cast,
.truncate,
.align_cast,
.has_field,
@@ -1484,6 +1488,7 @@ pub const Inst = struct {
.float_cast,
.int_cast,
.ptr_cast,
+ .qual_cast,
.truncate,
.align_cast,
.has_field,
@@ -1755,6 +1760,7 @@ pub const Inst = struct {
.float_cast = .pl_node,
.int_cast = .pl_node,
.ptr_cast = .pl_node,
+ .qual_cast = .pl_node,
.truncate = .pl_node,
.align_cast = .pl_node,
.typeof_builtin = .pl_node,
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 6e8923bed9..e5fc8815ed 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -332,6 +332,7 @@ const Writer = struct {
.float_cast,
.int_cast,
.ptr_cast,
+ .qual_cast,
.truncate,
.align_cast,
.div_exact,
diff --git a/test/cases/compile_errors/invalid_qualcast.zig b/test/cases/compile_errors/invalid_qualcast.zig
new file mode 100644
index 0000000000..20b223b727
--- /dev/null
+++ b/test/cases/compile_errors/invalid_qualcast.zig
@@ -0,0 +1,12 @@
+pub export fn entry() void {
+ var a: [*:0]const volatile u16 = undefined;
+ _ = @qualCast([*]u16, a);
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :3:9: error: '@qualCast' can only modify 'const' and 'volatile' qualifiers
+// :3:9: note: expected type '[*]const volatile u16'
+// :3:9: note: got type '[*:0]const volatile u16'
diff --git a/test/cases/compile_errors/ptrCast_discards_const_qualifier.zig b/test/cases/compile_errors/ptrCast_discards_const_qualifier.zig
index a2fea4ff11..eedef01234 100644
--- a/test/cases/compile_errors/ptrCast_discards_const_qualifier.zig
+++ b/test/cases/compile_errors/ptrCast_discards_const_qualifier.zig
@@ -9,3 +9,4 @@ export fn entry() void {
// target=native
//
// :3:15: error: cast discards const qualifier
+// :3:15: note: consider using '@qualCast'
--
cgit v1.2.3
From d42a93105142e3e8f1d02efeecc0c0e52457a5d9 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Mon, 30 Jan 2023 18:22:50 +0100
Subject: link: make MachO atoms fully owned by the linker
---
src/Module.zig | 12 +-
src/Sema.zig | 2 +-
src/arch/aarch64/CodeGen.zig | 38 ++-
src/arch/aarch64/Emit.zig | 10 +-
src/arch/riscv64/CodeGen.zig | 4 +-
src/arch/x86_64/CodeGen.zig | 31 +-
src/arch/x86_64/Emit.zig | 8 +-
src/link.zig | 4 +-
src/link/Dwarf.zig | 2 +-
src/link/MachO.zig | 719 ++++++++++++++++++++++--------------------
src/link/MachO/Atom.zig | 81 ++---
src/link/MachO/Relocation.zig | 16 +-
12 files changed, 495 insertions(+), 432 deletions(-)
(limited to 'src')
diff --git a/src/Module.zig b/src/Module.zig
index dcdbeec322..8301505492 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -4098,7 +4098,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
// The exports this Decl performs will be re-discovered, so we remove them here
// prior to re-analysis.
- mod.deleteDeclExports(decl_index);
+ try mod.deleteDeclExports(decl_index);
// Similarly, `@setAlignStack` invocations will be re-discovered.
if (decl.getFunction()) |func| {
@@ -5265,7 +5265,7 @@ pub fn clearDecl(
assert(emit_h.decl_table.swapRemove(decl_index));
}
_ = mod.compile_log_decls.swapRemove(decl_index);
- mod.deleteDeclExports(decl_index);
+ try mod.deleteDeclExports(decl_index);
if (decl.has_tv) {
if (decl.ty.isFnOrHasRuntimeBits()) {
@@ -5276,7 +5276,7 @@ pub fn clearDecl(
decl.link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = link.File.Coff.Atom.empty },
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
- .macho => .{ .macho = link.File.MachO.Atom.empty },
+ .macho => .{ .macho = {} },
.plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
@@ -5358,7 +5358,7 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void {
/// Delete all the Export objects that are caused by this Decl. Re-analysis of
/// this Decl will cause them to be re-created (or not).
-fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) void {
+fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void {
var export_owners = (mod.export_owners.fetchSwapRemove(decl_index) orelse return).value;
for (export_owners.items) |exp| {
@@ -5384,7 +5384,7 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) void {
elf.deleteExport(exp.link.elf);
}
if (mod.comp.bin_file.cast(link.File.MachO)) |macho| {
- macho.deleteExport(exp.link.macho);
+ try macho.deleteDeclExport(decl_index, exp.options.name);
}
if (mod.comp.bin_file.cast(link.File.Wasm)) |wasm| {
wasm.deleteExport(exp.link.wasm);
@@ -5696,7 +5696,7 @@ pub fn allocateNewDecl(
.link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = link.File.Coff.Atom.empty },
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
- .macho => .{ .macho = link.File.MachO.Atom.empty },
+ .macho => .{ .macho = {} },
.plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
diff --git a/src/Sema.zig b/src/Sema.zig
index 9c553a0092..9083cc92ab 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -5567,7 +5567,7 @@ pub fn analyzeExport(
.link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = .{} },
.elf => .{ .elf = .{} },
- .macho => .{ .macho = .{} },
+ .macho => .{ .macho = {} },
.plan9 => .{ .plan9 = null },
.c => .{ .c = {} },
.wasm => .{ .wasm = .{} },
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 0efd34937a..edbe7905a2 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -4022,7 +4022,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
const mod = self.bin_file.options.module.?;
const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
- .macho => owner_decl.link.macho.getSymbolIndex().?,
+ .macho => blk: {
+ const macho_file = self.bin_file.cast(link.File.MachO).?;
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk macho_file.getAtom(atom).getSymbolIndex().?;
+ },
.coff => owner_decl.link.coff.getSymbolIndex().?,
else => unreachable, // unsupported target format
};
@@ -4308,11 +4312,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr });
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- try fn_owner_decl.link.macho.ensureInitialized(macho_file);
+ const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
+ const sym_index = macho_file.getAtom(atom).getSymbolIndex().?;
try self.genSetReg(Type.initTag(.u64), .x30, .{
.linker_load = .{
.type = .got,
- .sym_index = fn_owner_decl.link.macho.getSymbolIndex().?,
+ .sym_index = sym_index,
},
});
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
@@ -4349,11 +4354,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ const atom_index = macho_file.getAtom(atom).getSymbolIndex().?;
_ = try self.addInst(.{
.tag = .call_extern,
.data = .{
.relocation = .{
- .atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.getSymbolIndex().?,
+ .atom_index = atom_index,
.sym_index = sym_index,
},
},
@@ -5491,7 +5498,11 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
const mod = self.bin_file.options.module.?;
const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
- .macho => owner_decl.link.macho.getSymbolIndex().?,
+ .macho => blk: {
+ const macho_file = self.bin_file.cast(link.File.MachO).?;
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk macho_file.getAtom(atom).getSymbolIndex().?;
+ },
.coff => owner_decl.link.coff.getSymbolIndex().?,
else => unreachable, // unsupported target format
};
@@ -5605,7 +5616,11 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
const mod = self.bin_file.options.module.?;
const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
- .macho => owner_decl.link.macho.getSymbolIndex().?,
+ .macho => blk: {
+ const macho_file = self.bin_file.cast(link.File.MachO).?;
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk macho_file.getAtom(atom).getSymbolIndex().?;
+ },
.coff => owner_decl.link.coff.getSymbolIndex().?,
else => unreachable, // unsupported target format
};
@@ -5799,7 +5814,11 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
const mod = self.bin_file.options.module.?;
const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
- .macho => owner_decl.link.macho.getSymbolIndex().?,
+ .macho => blk: {
+ const macho_file = self.bin_file.cast(link.File.MachO).?;
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk macho_file.getAtom(atom).getSymbolIndex().?;
+ },
.coff => owner_decl.link.coff.getSymbolIndex().?,
else => unreachable, // unsupported target format
};
@@ -6122,10 +6141,11 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
try decl.link.elf.ensureInitialized(elf_file);
return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- try decl.link.macho.ensureInitialized(macho_file);
+ const atom = try macho_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = macho_file.getAtom(atom).getSymbolIndex().?;
return MCValue{ .linker_load = .{
.type = .got,
- .sym_index = decl.link.macho.getSymbolIndex().?,
+ .sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
try decl.link.coff.ensureInitialized(coff_file);
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index 3812597789..f348fb70e3 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -670,9 +670,9 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl.
- const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+ const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = macho_file.getGlobalByIndex(relocation.sym_index);
- try atom.addRelocation(macho_file, .{
+ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = @enumToInt(std.macho.reloc_type_arm64.ARM64_RELOC_BRANCH26),
.target = target,
.offset = offset,
@@ -883,10 +883,10 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
}
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
- const atom = macho_file.getAtomForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
+ const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
// TODO this causes segfault in stage1
// try atom.addRelocations(macho_file, 2, .{
- try atom.addRelocation(macho_file, .{
+ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.target = .{ .sym_index = data.sym_index, .file = null },
.offset = offset,
.addend = 0,
@@ -902,7 +902,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
else => unreachable,
},
});
- try atom.addRelocation(macho_file, .{
+ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.target = .{ .sym_index = data.sym_index, .file = null },
.offset = offset + 4,
.addend = 0,
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index d50a614206..07a8dcd858 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -2556,9 +2556,7 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
try decl.link.elf.ensureInitialized(elf_file);
return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
- // TODO I'm hacking my way through here by repurposing .memory for storing
- // index to the GOT target symbol index.
- return MCValue{ .memory = decl.link.macho.sym_index };
+ unreachable;
} else if (self.bin_file.cast(link.File.Coff)) |_| {
return self.fail("TODO codegen COFF const Decl pointer", .{});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index df24fe5e7d..fc244e3130 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -2670,10 +2670,12 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue
const abi_size = @intCast(u32, ptr_ty.abiSize(self.target.*));
const mod = self.bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(self.mod_fn.owner_decl);
- const atom_index = if (self.bin_file.tag == link.File.MachO.base_tag)
- fn_owner_decl.link.macho.getSymbolIndex().?
- else
- fn_owner_decl.link.coff.getSymbolIndex().?;
+ const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: {
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk macho_file.getAtom(atom).getSymbolIndex().?;
+ } else if (self.bin_file.cast(link.File.Coff)) |_| blk: {
+ break :blk fn_owner_decl.link.coff.getSymbolIndex().?;
+ } else unreachable;
const flags: u2 = switch (load_struct.type) {
.got => 0b00,
.direct => 0b01,
@@ -4023,8 +4025,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.data = undefined,
});
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- try fn_owner_decl.link.macho.ensureInitialized(macho_file);
- const sym_index = fn_owner_decl.link.macho.getSymbolIndex().?;
+ const atom_index = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
+ const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
try self.genSetReg(Type.initTag(.usize), .rax, .{
.linker_load = .{
.type = .got,
@@ -4080,15 +4082,15 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
});
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ const atom_index = macho_file.getAtom(atom).getSymbolIndex().?;
_ = try self.addInst(.{
.tag = .call_extern,
.ops = undefined,
- .data = .{
- .relocation = .{
- .atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.getSymbolIndex().?,
- .sym_index = sym_index,
- },
- },
+ .data = .{ .relocation = .{
+ .atom_index = atom_index,
+ .sym_index = sym_index,
+ } },
});
} else {
return self.fail("TODO implement calling extern functions", .{});
@@ -6722,10 +6724,11 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
try decl.link.elf.ensureInitialized(elf_file);
return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- try decl.link.macho.ensureInitialized(macho_file);
+ const atom_index = try macho_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
return MCValue{ .linker_load = .{
.type = .got,
- .sym_index = decl.link.macho.getSymbolIndex().?,
+ .sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
try decl.link.coff.ensureInitialized(coff_file);
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index af3ed5e053..980dbfd41e 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -1001,8 +1001,8 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
0b01 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
else => unreachable,
};
- const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
- try atom.addRelocation(macho_file, .{
+ const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = reloc_type,
.target = .{ .sym_index = relocation.sym_index, .file = null },
.offset = @intCast(u32, end_offset - 4),
@@ -1140,9 +1140,9 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl.
- const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+ const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = macho_file.getGlobalByIndex(relocation.sym_index);
- try atom.addRelocation(macho_file, .{
+ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
.target = target,
.offset = offset,
diff --git a/src/link.zig b/src/link.zig
index 668c5b72e3..2a96efe89d 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -264,7 +264,7 @@ pub const File = struct {
pub const LinkBlock = union {
elf: Elf.TextBlock,
coff: Coff.Atom,
- macho: MachO.Atom,
+ macho: void,
plan9: Plan9.DeclBlock,
c: void,
wasm: Wasm.DeclBlock,
@@ -286,7 +286,7 @@ pub const File = struct {
pub const Export = union {
elf: Elf.Export,
coff: Coff.Export,
- macho: MachO.Export,
+ macho: void,
plan9: Plan9.Export,
c: void,
wasm: Wasm.Export,
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 1b65bbb04b..2595cd8ba5 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -2639,7 +2639,7 @@ fn getDbgInfoAtom(tag: File.Tag, mod: *Module, decl_index: Module.Decl.Index) *A
const decl = mod.declPtr(decl_index);
return switch (tag) {
.elf => &decl.link.elf.dbg_info_atom,
- .macho => &decl.link.macho.dbg_info_atom,
+ .macho => unreachable,
.wasm => &decl.link.wasm.dbg_info_atom,
else => unreachable,
};
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 543cb473d7..29aed25b31 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -66,7 +66,7 @@ const Section = struct {
// TODO is null here necessary, or can we do away with tracking via section
// size in incremental context?
- last_atom: ?*Atom = null,
+ last_atom_index: ?Atom.Index = null,
/// A list of atoms that have surplus capacity. This list can have false
/// positives, as functions grow and shrink over time, only sometimes being added
@@ -83,7 +83,7 @@ const Section = struct {
/// overcapacity can be negative. A simple way to have negative overcapacity is to
/// allocate a fresh atom, which will have ideal capacity, and then grow it
/// by 1 byte. It will then have -1 overcapacity.
- free_list: std.ArrayListUnmanaged(*Atom) = .{},
+ free_list: std.ArrayListUnmanaged(Atom.Index) = .{},
};
base: File,
@@ -140,8 +140,8 @@ locals_free_list: std.ArrayListUnmanaged(u32) = .{},
globals_free_list: std.ArrayListUnmanaged(u32) = .{},
dyld_stub_binder_index: ?u32 = null,
-dyld_private_atom: ?*Atom = null,
-stub_helper_preamble_atom: ?*Atom = null,
+dyld_private_atom_index: ?Atom.Index = null,
+stub_helper_preamble_atom_index: ?Atom.Index = null,
strtab: StringTable(.strtab) = .{},
@@ -164,10 +164,10 @@ segment_table_dirty: bool = false,
cold_start: bool = true,
/// List of atoms that are either synthetic or map directly to the Zig source program.
-managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
+atoms: std.ArrayListUnmanaged(Atom) = .{},
/// Table of atoms indexed by the symbol index.
-atom_by_index_table: std.AutoHashMapUnmanaged(u32, *Atom) = .{},
+atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},
/// Table of unnamed constants associated with a parent `Decl`.
/// We store them here so that we can free the constants whenever the `Decl`
@@ -210,11 +210,36 @@ bindings: BindingTable = .{},
/// this will be a table indexed by index into the list of Atoms.
lazy_bindings: BindingTable = .{},
-/// Table of Decls that are currently alive.
-/// We store them here so that we can properly dispose of any allocated
-/// memory within the atom in the incremental linker.
-/// TODO consolidate this.
-decls: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, ?u8) = .{},
+/// Table of tracked Decls.
+decls: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
+
+const DeclMetadata = struct {
+ atom: Atom.Index,
+ section: u8,
+ /// A list of all exports aliases of this Decl.
+ /// TODO do we actually need this at all?
+ exports: std.ArrayListUnmanaged(u32) = .{},
+
+ fn getExport(m: DeclMetadata, macho_file: *const MachO, name: []const u8) ?u32 {
+ for (m.exports.items) |exp| {
+ if (mem.eql(u8, name, macho_file.getSymbolName(.{
+ .sym_index = exp,
+ .file = null,
+ }))) return exp;
+ }
+ return null;
+ }
+
+ fn getExportPtr(m: *DeclMetadata, macho_file: *MachO, name: []const u8) ?*u32 {
+ for (m.exports.items) |*exp| {
+ if (mem.eql(u8, name, macho_file.getSymbolName(.{
+ .sym_index = exp.*,
+ .file = null,
+ }))) return exp;
+ }
+ return null;
+ }
+};
const Entry = struct {
target: SymbolWithLoc,
@@ -229,8 +254,8 @@ const Entry = struct {
return macho_file.getSymbolPtr(.{ .sym_index = entry.sym_index, .file = null });
}
- pub fn getAtom(entry: Entry, macho_file: *MachO) ?*Atom {
- return macho_file.getAtomForSymbol(.{ .sym_index = entry.sym_index, .file = null });
+ pub fn getAtomIndex(entry: Entry, macho_file: *MachO) ?Atom.Index {
+ return macho_file.getAtomIndexForSymbol(.{ .sym_index = entry.sym_index, .file = null });
}
pub fn getName(entry: Entry, macho_file: *MachO) []const u8 {
@@ -238,10 +263,10 @@ const Entry = struct {
}
};
-const BindingTable = std.AutoArrayHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(Atom.Binding));
-const UnnamedConstTable = std.AutoArrayHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(*Atom));
-const RebaseTable = std.AutoArrayHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(u32));
-const RelocationTable = std.AutoArrayHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(Relocation));
+const BindingTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Atom.Binding));
+const UnnamedConstTable = std.AutoArrayHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Atom.Index));
+const RebaseTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(u32));
+const RelocationTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Relocation));
const PendingUpdate = union(enum) {
resolve_undef: u32,
@@ -286,10 +311,6 @@ pub const default_pagezero_vmsize: u64 = 0x100000000;
/// potential future extensions.
pub const default_headerpad_size: u32 = 0x1000;
-pub const Export = struct {
- sym_index: ?u32 = null,
-};
-
pub fn openPath(allocator: Allocator, options: link.Options) !*MachO {
assert(options.target.ofmt == .macho);
@@ -451,9 +472,9 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
const module = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented;
- if (self.d_sym) |*d_sym| {
- try d_sym.dwarf.flushModule(module);
- }
+ // if (self.d_sym) |*d_sym| {
+ // try d_sym.dwarf.flushModule(module);
+ // }
var libs = std.StringArrayHashMap(link.SystemLib).init(arena);
try resolveLibSystem(
@@ -547,8 +568,8 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
try self.allocateSpecialSymbols();
- for (self.relocs.keys()) |atom| {
- try atom.resolveRelocations(self);
+ for (self.relocs.keys()) |atom_index| {
+ try Atom.resolveRelocations(self, atom_index);
}
if (build_options.enable_logging) {
@@ -643,10 +664,10 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
try self.writeCodeSignature(comp, csig); // code signing always comes last
}
- if (self.d_sym) |*d_sym| {
- // Flush debug symbols bundle.
- try d_sym.flushModule(self);
- }
+ // if (self.d_sym) |*d_sym| {
+ // // Flush debug symbols bundle.
+ // try d_sym.flushModule(self);
+ // }
// if (build_options.enable_link_snapshots) {
// if (self.base.options.enable_link_snapshots)
@@ -999,18 +1020,19 @@ pub fn parseDependentLibs(self: *MachO, syslibroot: ?[]const u8, dependent_libs:
}
}
-pub fn writeAtom(self: *MachO, atom: *Atom, code: []const u8) !void {
+pub fn writeAtom(self: *MachO, atom_index: Atom.Index, code: []const u8) !void {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const section = self.sections.get(sym.n_sect - 1);
const file_offset = section.header.offset + sym.n_value - section.header.addr;
log.debug("writing atom for symbol {s} at file offset 0x{x}", .{ atom.getName(self), file_offset });
try self.base.file.?.pwriteAll(code, file_offset);
- try atom.resolveRelocations(self);
+ try Atom.resolveRelocations(self, atom_index);
}
-fn writePtrWidthAtom(self: *MachO, atom: *Atom) !void {
+fn writePtrWidthAtom(self: *MachO, atom_index: Atom.Index) !void {
var buffer: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64);
- try self.writeAtom(atom, &buffer);
+ try self.writeAtom(atom_index, &buffer);
}
fn markRelocsDirtyByTarget(self: *MachO, target: SymbolWithLoc) void {
@@ -1026,7 +1048,8 @@ fn markRelocsDirtyByTarget(self: *MachO, target: SymbolWithLoc) void {
fn markRelocsDirtyByAddress(self: *MachO, addr: u64) void {
for (self.relocs.values()) |*relocs| {
for (relocs.items) |*reloc| {
- const target_atom = reloc.getTargetAtom(self) orelse continue;
+ const target_atom_index = reloc.getTargetAtomIndex(self) orelse continue;
+ const target_atom = self.getAtom(target_atom_index);
const target_sym = target_atom.getSymbol(self);
if (target_sym.n_value < addr) continue;
reloc.dirty = true;
@@ -1053,26 +1076,39 @@ pub fn allocateSpecialSymbols(self: *MachO) !void {
}
}
-pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !*Atom {
+pub fn createAtom(self: *MachO) !Atom.Index {
const gpa = self.base.allocator;
+ const atom_index = @intCast(Atom.Index, self.atoms.items.len);
+ const atom = try self.atoms.addOne(gpa);
+ const sym_index = try self.allocateSymbol();
+ try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
+ atom.* = .{
+ .sym_index = sym_index,
+ .file = null,
+ .size = 0,
+ .alignment = 0,
+ .prev_index = null,
+ .next_index = null,
+ .dbg_info_atom = undefined,
+ };
+ log.debug("creating ATOM(%{d}) at index {d}", .{ sym_index, atom_index });
+ return atom_index;
+}
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !Atom.Index {
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
- errdefer gpa.destroy(atom);
-
- try self.managed_atoms.append(gpa, atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
sym.n_sect = self.got_section_index.? + 1;
- sym.n_value = try self.allocateAtom(atom, atom.size, @alignOf(u64));
+ sym.n_value = try self.allocateAtom(atom_index, atom.size, @alignOf(u64));
log.debug("allocated GOT atom at 0x{x}", .{sym.n_value});
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = switch (self.base.options.target.cpu.arch) {
.aarch64 => @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED),
.x86_64 => @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED),
@@ -1087,45 +1123,39 @@ pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !*Atom {
const target_sym = self.getSymbol(target);
if (target_sym.undf()) {
- try atom.addBinding(self, .{
+ try Atom.addBinding(self, atom_index, .{
.target = self.getGlobal(self.getSymbolName(target)).?,
.offset = 0,
});
} else {
- try atom.addRebase(self, 0);
+ try Atom.addRebase(self, atom_index, 0);
}
- return atom;
+ return atom_index;
}
pub fn createDyldPrivateAtom(self: *MachO) !void {
if (self.dyld_stub_binder_index == null) return;
- if (self.dyld_private_atom != null) return;
+ if (self.dyld_private_atom_index != null) return;
- const gpa = self.base.allocator;
-
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
sym.n_sect = self.data_section_index.? + 1;
- self.dyld_private_atom = atom;
-
- try self.managed_atoms.append(gpa, atom);
+ self.dyld_private_atom_index = atom_index;
- sym.n_value = try self.allocateAtom(atom, atom.size, @alignOf(u64));
+ sym.n_value = try self.allocateAtom(atom_index, atom.size, @alignOf(u64));
log.debug("allocated dyld_private atom at 0x{x}", .{sym.n_value});
- try self.writePtrWidthAtom(atom);
+ try self.writePtrWidthAtom(atom_index);
}
pub fn createStubHelperPreambleAtom(self: *MachO) !void {
if (self.dyld_stub_binder_index == null) return;
- if (self.stub_helper_preamble_atom != null) return;
+ if (self.stub_helper_preamble_atom_index != null) return;
const gpa = self.base.allocator;
const arch = self.base.options.target.cpu.arch;
@@ -1134,22 +1164,23 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
.aarch64 => 6 * @sizeOf(u32),
else => unreachable,
};
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = size;
atom.alignment = switch (arch) {
.x86_64 => 1,
.aarch64 => @alignOf(u32),
else => unreachable,
};
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
sym.n_sect = self.stub_helper_section_index.? + 1;
- const dyld_private_sym_index = self.dyld_private_atom.?.getSymbolIndex().?;
+ const dyld_private_sym_index = if (self.dyld_private_atom_index) |dyld_index|
+ self.getAtom(dyld_index).getSymbolIndex().?
+ else
+ unreachable;
const code = try gpa.alloc(u8, size);
defer gpa.free(code);
@@ -1168,7 +1199,7 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
code[9] = 0xff;
code[10] = 0x25;
- try atom.addRelocations(self, 2, .{ .{
+ try Atom.addRelocations(self, atom_index, 2, .{ .{
.type = @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
.target = .{ .sym_index = dyld_private_sym_index, .file = null },
.offset = 3,
@@ -1208,7 +1239,7 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
// br x16
mem.writeIntLittle(u32, code[20..][0..4], aarch64.Instruction.br(.x16).toU32());
- try atom.addRelocations(self, 4, .{ .{
+ try Atom.addRelocations(self, atom_index, 4, .{ .{
.type = @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_PAGE21),
.target = .{ .sym_index = dyld_private_sym_index, .file = null },
.offset = 0,
@@ -1241,16 +1272,14 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
else => unreachable,
}
- self.stub_helper_preamble_atom = atom;
+ self.stub_helper_preamble_atom_index = atom_index;
- try self.managed_atoms.append(gpa, atom);
-
- sym.n_value = try self.allocateAtom(atom, size, atom.alignment);
+ sym.n_value = try self.allocateAtom(atom_index, size, atom.alignment);
log.debug("allocated stub preamble atom at 0x{x}", .{sym.n_value});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
}
-pub fn createStubHelperAtom(self: *MachO) !*Atom {
+pub fn createStubHelperAtom(self: *MachO) !Atom.Index {
const gpa = self.base.allocator;
const arch = self.base.options.target.cpu.arch;
const size: u4 = switch (arch) {
@@ -1258,16 +1287,14 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
.aarch64 => 3 * @sizeOf(u32),
else => unreachable,
};
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = size;
atom.alignment = switch (arch) {
.x86_64 => 1,
.aarch64 => @alignOf(u32),
else => unreachable,
};
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
@@ -1277,6 +1304,11 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
defer gpa.free(code);
mem.set(u8, code, 0);
+ const stub_helper_preamble_atom_sym_index = if (self.stub_helper_preamble_atom_index) |stub_index|
+ self.getAtom(stub_index).getSymbolIndex().?
+ else
+ unreachable;
+
switch (arch) {
.x86_64 => {
// pushq
@@ -1285,9 +1317,9 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
// jmpq
code[5] = 0xe9;
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
- .target = .{ .sym_index = self.stub_helper_preamble_atom.?.getSymbolIndex().?, .file = null },
+ .target = .{ .sym_index = stub_helper_preamble_atom_sym_index, .file = null },
.offset = 6,
.addend = 0,
.pcrel = true,
@@ -1308,9 +1340,9 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.b(0).toU32());
// Next 4 bytes 8..12 are just a placeholder populated in `populateLazyBindOffsetsInStubHelper`.
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_BRANCH26),
- .target = .{ .sym_index = self.stub_helper_preamble_atom.?.getSymbolIndex().?, .file = null },
+ .target = .{ .sym_index = stub_helper_preamble_atom_sym_index, .file = null },
.offset = 4,
.addend = 0,
.pcrel = true,
@@ -1320,29 +1352,24 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
else => unreachable,
}
- try self.managed_atoms.append(gpa, atom);
-
- sym.n_value = try self.allocateAtom(atom, size, atom.alignment);
+ sym.n_value = try self.allocateAtom(atom_index, size, atom.alignment);
log.debug("allocated stub helper atom at 0x{x}", .{sym.n_value});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
- return atom;
+ return atom_index;
}
-pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWithLoc) !*Atom {
- const gpa = self.base.allocator;
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWithLoc) !Atom.Index {
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
sym.n_sect = self.la_symbol_ptr_section_index.? + 1;
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = switch (self.base.options.target.cpu.arch) {
.aarch64 => @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED),
.x86_64 => @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED),
@@ -1354,22 +1381,20 @@ pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWi
.pcrel = false,
.length = 3,
});
- try atom.addRebase(self, 0);
- try atom.addLazyBinding(self, .{
+ try Atom.addRebase(self, atom_index, 0);
+ try Atom.addLazyBinding(self, atom_index, .{
.target = self.getGlobal(self.getSymbolName(target)).?,
.offset = 0,
});
- try self.managed_atoms.append(gpa, atom);
-
- sym.n_value = try self.allocateAtom(atom, atom.size, @alignOf(u64));
+ sym.n_value = try self.allocateAtom(atom_index, atom.size, @alignOf(u64));
log.debug("allocated lazy pointer atom at 0x{x} ({s})", .{ sym.n_value, self.getSymbolName(target) });
- try self.writePtrWidthAtom(atom);
+ try self.writePtrWidthAtom(atom_index);
- return atom;
+ return atom_index;
}
-pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
+pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !Atom.Index {
const gpa = self.base.allocator;
const arch = self.base.options.target.cpu.arch;
const size: u4 = switch (arch) {
@@ -1377,9 +1402,8 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
.aarch64 => 3 * @sizeOf(u32),
else => unreachable, // unhandled architecture type
};
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = size;
atom.alignment = switch (arch) {
.x86_64 => 1,
@@ -1387,7 +1411,6 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
else => unreachable, // unhandled architecture type
};
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
@@ -1403,7 +1426,7 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
code[0] = 0xff;
code[1] = 0x25;
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
.target = .{ .sym_index = laptr_sym_index, .file = null },
.offset = 2,
@@ -1424,7 +1447,7 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
// br x16
mem.writeIntLittle(u32, code[8..12], aarch64.Instruction.br(.x16).toU32());
- try atom.addRelocations(self, 2, .{
+ try Atom.addRelocations(self, atom_index, 2, .{
.{
.type = @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_PAGE21),
.target = .{ .sym_index = laptr_sym_index, .file = null },
@@ -1446,13 +1469,11 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
else => unreachable,
}
- try self.managed_atoms.append(gpa, atom);
-
- sym.n_value = try self.allocateAtom(atom, size, atom.alignment);
+ sym.n_value = try self.allocateAtom(atom_index, size, atom.alignment);
log.debug("allocated stub atom at 0x{x}", .{sym.n_value});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
- return atom;
+ return atom_index;
}
pub fn createMhExecuteHeaderSymbol(self: *MachO) !void {
@@ -1586,9 +1607,12 @@ pub fn resolveSymbolsInDylibs(self: *MachO) !void {
if (self.stubs_table.contains(global)) break :blk;
const stub_index = try self.allocateStubEntry(global);
- const stub_helper_atom = try self.createStubHelperAtom();
- const laptr_atom = try self.createLazyPointerAtom(stub_helper_atom.getSymbolIndex().?, global);
- const stub_atom = try self.createStubAtom(laptr_atom.getSymbolIndex().?);
+ const stub_helper_atom_index = try self.createStubHelperAtom();
+ const stub_helper_atom = self.getAtom(stub_helper_atom_index);
+ const laptr_atom_index = try self.createLazyPointerAtom(stub_helper_atom.getSymbolIndex().?, global);
+ const laptr_atom = self.getAtom(laptr_atom_index);
+ const stub_atom_index = try self.createStubAtom(laptr_atom.getSymbolIndex().?);
+ const stub_atom = self.getAtom(stub_atom_index);
self.stubs.items[stub_index].sym_index = stub_atom.getSymbolIndex().?;
self.markRelocsDirtyByTarget(global);
}
@@ -1686,10 +1710,11 @@ pub fn resolveDyldStubBinder(self: *MachO) !void {
// Add dyld_stub_binder as the final GOT entry.
const got_index = try self.allocateGotEntry(global);
- const got_atom = try self.createGotAtom(global);
+ const got_atom_index = try self.createGotAtom(global);
+ const got_atom = self.getAtom(got_atom_index);
self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
- try self.writePtrWidthAtom(got_atom);
+ try self.writePtrWidthAtom(got_atom_index);
}
pub fn deinit(self: *MachO) void {
@@ -1699,9 +1724,9 @@ pub fn deinit(self: *MachO) void {
if (self.llvm_object) |llvm_object| llvm_object.destroy(gpa);
}
- if (self.d_sym) |*d_sym| {
- d_sym.deinit();
- }
+ // if (self.d_sym) |*d_sym| {
+ // d_sym.deinit();
+ // }
self.got_entries.deinit(gpa);
self.got_entries_free_list.deinit(gpa);
@@ -1739,12 +1764,12 @@ pub fn deinit(self: *MachO) void {
}
self.sections.deinit(gpa);
- for (self.managed_atoms.items) |atom| {
- gpa.destroy(atom);
- }
- self.managed_atoms.deinit(gpa);
+ self.atoms.deinit(gpa);
if (self.base.options.module) |_| {
+ for (self.decls.values()) |*m| {
+ m.exports.deinit(gpa);
+ }
self.decls.deinit(gpa);
} else {
assert(self.decls.count() == 0);
@@ -1778,14 +1803,15 @@ pub fn deinit(self: *MachO) void {
self.lazy_bindings.deinit(gpa);
}
-fn freeAtom(self: *MachO, atom: *Atom) void {
- log.debug("freeAtom {*}", .{atom});
+fn freeAtom(self: *MachO, atom_index: Atom.Index) void {
+ log.debug("freeAtom {d}", .{atom_index});
const gpa = self.base.allocator;
// Remove any relocs and base relocs associated with this Atom
- self.freeRelocationsForAtom(atom);
+ Atom.freeRelocations(self, atom_index);
+ const atom = self.getAtom(atom_index);
const sect_id = atom.getSymbol(self).n_sect - 1;
const free_list = &self.sections.items(.free_list)[sect_id];
var already_have_free_list_node = false;
@@ -1793,45 +1819,46 @@ fn freeAtom(self: *MachO, atom: *Atom) void {
var i: usize = 0;
// TODO turn free_list into a hash map
while (i < free_list.items.len) {
- if (free_list.items[i] == atom) {
+ if (free_list.items[i] == atom_index) {
_ = free_list.swapRemove(i);
continue;
}
- if (free_list.items[i] == atom.prev) {
+ if (free_list.items[i] == atom.prev_index) {
already_have_free_list_node = true;
}
i += 1;
}
}
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
- if (maybe_last_atom.*) |last_atom| {
- if (last_atom == atom) {
- if (atom.prev) |prev| {
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
+ if (maybe_last_atom_index.*) |last_atom_index| {
+ if (last_atom_index == atom_index) {
+ if (atom.prev_index) |prev_index| {
// TODO shrink the section size here
- maybe_last_atom.* = prev;
+ maybe_last_atom_index.* = prev_index;
} else {
- maybe_last_atom.* = null;
+ maybe_last_atom_index.* = null;
}
}
}
- if (atom.prev) |prev| {
- prev.next = atom.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
- if (!already_have_free_list_node and prev.freeListEligible(self)) {
+ if (!already_have_free_list_node and prev.*.freeListEligible(self)) {
// The free list is heuristics, it doesn't have to be perfect, so we can ignore
// the OOM here.
- free_list.append(gpa, prev) catch {};
+ free_list.append(gpa, prev_index) catch {};
}
} else {
- atom.prev = null;
+ self.getAtomPtr(atom_index).prev_index = null;
}
- if (atom.next) |next| {
- next.prev = atom.prev;
+ if (atom.next_index) |next_index| {
+ self.getAtomPtr(next_index).prev_index = atom.prev_index;
} else {
- atom.next = null;
+ self.getAtomPtr(atom_index).next_index = null;
}
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
@@ -1849,9 +1876,9 @@ fn freeAtom(self: *MachO, atom: *Atom) void {
};
_ = self.got_entries_table.remove(got_target);
- if (self.d_sym) |*d_sym| {
- d_sym.swapRemoveRelocs(sym_index);
- }
+ // if (self.d_sym) |*d_sym| {
+ // d_sym.swapRemoveRelocs(sym_index);
+ // }
log.debug(" adding GOT index {d} to free list (target local@{d})", .{ got_index, sym_index });
}
@@ -1859,27 +1886,28 @@ fn freeAtom(self: *MachO, atom: *Atom) void {
self.locals.items[sym_index].n_type = 0;
_ = self.atom_by_index_table.remove(sym_index);
log.debug(" adding local symbol index {d} to free list", .{sym_index});
- atom.sym_index = 0;
+ self.getAtomPtr(atom_index).sym_index = 0;
- if (self.d_sym) |*d_sym| {
- d_sym.dwarf.freeAtom(&atom.dbg_info_atom);
- }
+ // if (self.d_sym) |*d_sym| {
+ // d_sym.dwarf.freeAtom(&atom.dbg_info_atom);
+ // }
}
-fn shrinkAtom(self: *MachO, atom: *Atom, new_block_size: u64) void {
+fn shrinkAtom(self: *MachO, atom_index: Atom.Index, new_block_size: u64) void {
_ = self;
- _ = atom;
+ _ = atom_index;
_ = new_block_size;
// TODO check the new capacity, and if it crosses the size threshold into a big enough
// capacity, insert a free list node for it.
}
-fn growAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !u64 {
+fn growAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const align_ok = mem.alignBackwardGeneric(u64, sym.n_value, alignment) == sym.n_value;
const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
if (!need_realloc) return sym.n_value;
- return self.allocateAtom(atom, new_atom_size, alignment);
+ return self.allocateAtom(atom_index, new_atom_size, alignment);
}
pub fn allocateSymbol(self: *MachO) !u32 {
@@ -1986,31 +2014,29 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
- const atom = &decl.link.macho;
- try atom.ensureInitialized(self);
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (gop.found_existing) {
- self.freeUnnamedConsts(decl_index);
- self.freeRelocationsForAtom(atom);
- } else {
- gop.value_ptr.* = null;
- }
+
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ self.freeUnnamedConsts(decl_index);
+ Atom.freeRelocations(self, atom_index);
+
+ const atom = self.getAtom(atom_index);
+ _ = atom;
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
- var decl_state = if (self.d_sym) |*d_sym|
- try d_sym.dwarf.initDeclState(module, decl_index)
- else
- null;
- defer if (decl_state) |*ds| ds.deinit();
+ // var decl_state = if (self.d_sym) |*d_sym|
+ // try d_sym.dwarf.initDeclState(module, decl_index)
+ // else
+ // null;
+ // defer if (decl_state) |*ds| ds.deinit();
- const res = if (decl_state) |*ds|
- try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{
- .dwarf = ds,
- })
- else
- try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none);
+ // const res = if (decl_state) |*ds|
+ // try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{
+ // .dwarf = ds,
+ // })
+ // else
+ const res = try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none);
const code = switch (res) {
.ok => code_buffer.items,
@@ -2022,16 +2048,11 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
};
const addr = try self.updateDeclCode(decl_index, code);
+ _ = addr;
- if (decl_state) |*ds| {
- try self.d_sym.?.dwarf.commitDeclState(
- module,
- decl_index,
- addr,
- decl.link.macho.size,
- ds,
- );
- }
+ // if (decl_state) |*ds| {
+ // try self.d_sym.?.dwarf.commitDeclState(module, decl_index, addr, atom.size, ds);
+ // }
// Since we updated the vaddr and the size, each corresponding export symbol also
// needs to be updated.
@@ -2065,11 +2086,8 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu
log.debug("allocating symbol indexes for {?s}", .{name});
- const atom = try gpa.create(Atom);
- errdefer gpa.destroy(atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
- try self.managed_atoms.append(gpa, atom);
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .none, .{
.parent_atom_index = atom.getSymbolIndex().?,
@@ -2088,21 +2106,21 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu
atom.size = code.len;
atom.alignment = required_alignment;
// TODO: work out logic for disambiguating functions from function pointers
- // const sect_id = self.getDeclOutputSection(decl);
+ // const sect_id = self.getDeclOutputSection(decl_index);
const sect_id = self.data_const_section_index.?;
const symbol = atom.getSymbolPtr(self);
symbol.n_strx = name_str_index;
symbol.n_type = macho.N_SECT;
symbol.n_sect = sect_id + 1;
- symbol.n_value = try self.allocateAtom(atom, code.len, required_alignment);
- errdefer self.freeAtom(atom);
+ symbol.n_value = try self.allocateAtom(atom_index, code.len, required_alignment);
+ errdefer self.freeAtom(atom_index);
- try unnamed_consts.append(gpa, atom);
+ try unnamed_consts.append(gpa, atom_index);
log.debug("allocated atom for {?s} at 0x{x}", .{ name, symbol.n_value });
log.debug(" (required alignment 0x{x})", .{required_alignment});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
return atom.getSymbolIndex().?;
}
@@ -2129,41 +2147,36 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
}
}
- const atom = &decl.link.macho;
- try atom.ensureInitialized(self);
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (gop.found_existing) {
- self.freeRelocationsForAtom(atom);
- } else {
- gop.value_ptr.* = null;
- }
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ Atom.freeRelocations(self, atom_index);
+ const atom = self.getAtom(atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
- var decl_state: ?Dwarf.DeclState = if (self.d_sym) |*d_sym|
- try d_sym.dwarf.initDeclState(module, decl_index)
- else
- null;
- defer if (decl_state) |*ds| ds.deinit();
+ // var decl_state: ?Dwarf.DeclState = if (self.d_sym) |*d_sym|
+ // try d_sym.dwarf.initDeclState(module, decl_index)
+ // else
+ // null;
+ // defer if (decl_state) |*ds| ds.deinit();
const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
- const res = if (decl_state) |*ds|
- try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
- .ty = decl.ty,
- .val = decl_val,
- }, &code_buffer, .{
- .dwarf = ds,
- }, .{
- .parent_atom_index = decl.link.macho.getSymbolIndex().?,
- })
- else
- try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
- .ty = decl.ty,
- .val = decl_val,
- }, &code_buffer, .none, .{
- .parent_atom_index = decl.link.macho.getSymbolIndex().?,
- });
+ // const res = if (decl_state) |*ds|
+ // try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+ // .ty = decl.ty,
+ // .val = decl_val,
+ // }, &code_buffer, .{
+ // .dwarf = ds,
+ // }, .{
+ // .parent_atom_index = atom.getSymbolIndex().?,
+ // })
+ // else
+ const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+ .ty = decl.ty,
+ .val = decl_val,
+ }, &code_buffer, .none, .{
+ .parent_atom_index = atom.getSymbolIndex().?,
+ });
const code = switch (res) {
.ok => code_buffer.items,
@@ -2174,23 +2187,31 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
},
};
const addr = try self.updateDeclCode(decl_index, code);
+ _ = addr;
- if (decl_state) |*ds| {
- try self.d_sym.?.dwarf.commitDeclState(
- module,
- decl_index,
- addr,
- decl.link.macho.size,
- ds,
- );
- }
+ // if (decl_state) |*ds| {
+ // try self.d_sym.?.dwarf.commitDeclState(module, decl_index, addr, atom.size, ds);
+ // }
// Since we updated the vaddr and the size, each corresponding export symbol also
// needs to be updated.
try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}
-fn getDeclOutputSection(self: *MachO, decl: *Module.Decl) u8 {
+pub fn getOrCreateAtomForDecl(self: *MachO, decl_index: Module.Decl.Index) !Atom.Index {
+ const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{
+ .atom = try self.createAtom(),
+ .section = self.getDeclOutputSection(decl_index),
+ .exports = .{},
+ };
+ }
+ return gop.value_ptr.atom;
+}
+
+fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 {
+ const decl = self.base.options.module.?.declPtr(decl_index);
const ty = decl.ty;
const val = decl.val;
const zig_ty = ty.zigTypeTag();
@@ -2341,13 +2362,11 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
const sym_name = try decl.getFullyQualifiedName(mod);
defer self.base.allocator.free(sym_name);
- const atom = &decl.link.macho;
- const sym_index = atom.getSymbolIndex().?; // Atom was not initialized
- const decl_ptr = self.decls.getPtr(decl_index).?;
- if (decl_ptr.* == null) {
- decl_ptr.* = self.getDeclOutputSection(decl);
- }
- const sect_id = decl_ptr.*.?;
+ const decl_metadata = self.decls.get(decl_index).?;
+ const atom_index = decl_metadata.atom;
+ const atom = self.getAtom(atom_index);
+ const sym_index = atom.getSymbolIndex().?;
+ const sect_id = decl_metadata.section;
const code_len = code.len;
if (atom.size != 0) {
@@ -2357,11 +2376,11 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
sym.n_sect = sect_id + 1;
sym.n_desc = 0;
- const capacity = decl.link.macho.capacity(self);
+ const capacity = atom.capacity(self);
const need_realloc = code_len > capacity or !mem.isAlignedGeneric(u64, sym.n_value, required_alignment);
if (need_realloc) {
- const vaddr = try self.growAtom(atom, code_len, required_alignment);
+ const vaddr = try self.growAtom(atom_index, code_len, required_alignment);
log.debug("growing {s} and moving from 0x{x} to 0x{x}", .{ sym_name, sym.n_value, vaddr });
log.debug(" (required alignment 0x{x})", .{required_alignment});
@@ -2369,19 +2388,19 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
sym.n_value = vaddr;
log.debug(" (updating GOT entry)", .{});
const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
- const got_atom = self.getGotAtomForSymbol(got_target).?;
+ const got_atom_index = self.getGotAtomIndexForSymbol(got_target).?;
self.markRelocsDirtyByTarget(got_target);
- try self.writePtrWidthAtom(got_atom);
+ try self.writePtrWidthAtom(got_atom_index);
}
} else if (code_len < atom.size) {
- self.shrinkAtom(atom, code_len);
- } else if (atom.next == null) {
+ self.shrinkAtom(atom_index, code_len);
+ } else if (atom.next_index == null) {
const header = &self.sections.items(.header)[sect_id];
const segment = self.getSegment(sect_id);
const needed_size = (sym.n_value + code_len) - segment.vmaddr;
header.size = needed_size;
}
- atom.size = code_len;
+ self.getAtomPtr(atom_index).size = code_len;
} else {
const name_str_index = try self.strtab.insert(gpa, sym_name);
const sym = atom.getSymbolPtr(self);
@@ -2390,33 +2409,36 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
sym.n_sect = sect_id + 1;
sym.n_desc = 0;
- const vaddr = try self.allocateAtom(atom, code_len, required_alignment);
- errdefer self.freeAtom(atom);
+ const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
+ errdefer self.freeAtom(atom_index);
log.debug("allocated atom for {s} at 0x{x}", .{ sym_name, vaddr });
log.debug(" (required alignment 0x{x})", .{required_alignment});
- atom.size = code_len;
+ self.getAtomPtr(atom_index).size = code_len;
sym.n_value = vaddr;
const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
const got_index = try self.allocateGotEntry(got_target);
- const got_atom = try self.createGotAtom(got_target);
+ const got_atom_index = try self.createGotAtom(got_target);
+ const got_atom = self.getAtom(got_atom_index);
self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
- try self.writePtrWidthAtom(got_atom);
+ try self.writePtrWidthAtom(got_atom_index);
}
self.markRelocsDirtyByTarget(atom.getSymbolWithLoc());
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
return atom.getSymbol(self).n_value;
}
pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void {
+ _ = decl;
+ _ = self;
_ = module;
- if (self.d_sym) |*d_sym| {
- try d_sym.dwarf.updateDeclLineNumber(decl);
- }
+ // if (self.d_sym) |*d_sym| {
+ // try d_sym.dwarf.updateDeclLineNumber(decl);
+ // }
}
pub fn updateDeclExports(
@@ -2432,22 +2454,17 @@ pub fn updateDeclExports(
if (self.llvm_object) |llvm_object|
return llvm_object.updateDeclExports(module, decl_index, exports);
}
+
const tracy = trace(@src());
defer tracy.end();
const gpa = self.base.allocator;
const decl = module.declPtr(decl_index);
- const atom = &decl.link.macho;
-
- if (atom.getSymbolIndex() == null) return;
-
- const gop = try self.decls.getOrPut(gpa, decl_index);
- if (!gop.found_existing) {
- gop.value_ptr.* = self.getDeclOutputSection(decl);
- }
-
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const atom = self.getAtom(atom_index);
const decl_sym = atom.getSymbol(self);
+ const decl_metadata = self.decls.getPtr(decl_index).?;
for (exports) |exp| {
const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{exp.options.name});
@@ -2485,9 +2502,9 @@ pub fn updateDeclExports(
continue;
}
- const sym_index = exp.link.macho.sym_index orelse blk: {
+ const sym_index = decl_metadata.getExport(self, exp_name) orelse blk: {
const sym_index = try self.allocateSymbol();
- exp.link.macho.sym_index = sym_index;
+ try decl_metadata.exports.append(gpa, sym_index);
break :blk sym_index;
};
const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
@@ -2535,16 +2552,18 @@ pub fn updateDeclExports(
}
}
-pub fn deleteExport(self: *MachO, exp: Export) void {
+pub fn deleteDeclExport(self: *MachO, decl_index: Module.Decl.Index, name: []const u8) Allocator.Error!void {
if (self.llvm_object) |_| return;
- const sym_index = exp.sym_index orelse return;
+ const metadata = self.decls.getPtr(decl_index) orelse return;
const gpa = self.base.allocator;
+ const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{name});
+ defer gpa.free(exp_name);
+ const sym_index = metadata.getExportPtr(self, exp_name) orelse return;
- const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
+ const sym_loc = SymbolWithLoc{ .sym_index = sym_index.*, .file = null };
const sym = self.getSymbolPtr(sym_loc);
- const sym_name = self.getSymbolName(sym_loc);
- log.debug("deleting export '{s}'", .{sym_name});
+ log.debug("deleting export '{s}'", .{exp_name});
assert(sym.sect() and sym.ext());
sym.* = .{
.n_strx = 0,
@@ -2553,9 +2572,9 @@ pub fn deleteExport(self: *MachO, exp: Export) void {
.n_desc = 0,
.n_value = 0,
};
- self.locals_free_list.append(gpa, sym_index) catch {};
+ self.locals_free_list.append(gpa, sym_index.*) catch {};
- if (self.resolver.fetchRemove(sym_name)) |entry| {
+ if (self.resolver.fetchRemove(exp_name)) |entry| {
defer gpa.free(entry.key);
self.globals_free_list.append(gpa, entry.value) catch {};
self.globals.items[entry.value] = .{
@@ -2563,17 +2582,8 @@ pub fn deleteExport(self: *MachO, exp: Export) void {
.file = null,
};
}
-}
-fn freeRelocationsForAtom(self: *MachO, atom: *Atom) void {
- var removed_relocs = self.relocs.fetchOrderedRemove(atom);
- if (removed_relocs) |*relocs| relocs.value.deinit(self.base.allocator);
- var removed_rebases = self.rebases.fetchOrderedRemove(atom);
- if (removed_rebases) |*rebases| rebases.value.deinit(self.base.allocator);
- var removed_bindings = self.bindings.fetchOrderedRemove(atom);
- if (removed_bindings) |*bindings| bindings.value.deinit(self.base.allocator);
- var removed_lazy_bindings = self.lazy_bindings.fetchOrderedRemove(atom);
- if (removed_lazy_bindings) |*lazy_bindings| lazy_bindings.value.deinit(self.base.allocator);
+ sym_index.* = 0;
}
fn freeUnnamedConsts(self: *MachO, decl_index: Module.Decl.Index) void {
@@ -2595,28 +2605,22 @@ pub fn freeDecl(self: *MachO, decl_index: Module.Decl.Index) void {
log.debug("freeDecl {*}", .{decl});
if (self.decls.fetchSwapRemove(decl_index)) |kv| {
- if (kv.value) |_| {
- self.freeAtom(&decl.link.macho);
- self.freeUnnamedConsts(decl_index);
- }
+ self.freeAtom(kv.value.atom);
+ self.freeUnnamedConsts(decl_index);
}
- if (self.d_sym) |*d_sym| {
- d_sym.dwarf.freeDecl(decl);
- }
+ // if (self.d_sym) |*d_sym| {
+ // d_sym.dwarf.freeDecl(decl);
+ // }
}
pub fn getDeclVAddr(self: *MachO, decl_index: Module.Decl.Index, reloc_info: File.RelocInfo) !u64 {
- const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
-
assert(self.llvm_object == null);
- try decl.link.macho.ensureInitialized(self);
- const sym_index = decl.link.macho.getSymbolIndex().?;
-
- const atom = self.getAtomForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
- try atom.addRelocation(self, .{
+ const this_atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const sym_index = self.getAtom(this_atom_index).getSymbolIndex().?;
+ const atom_index = self.getAtomIndexForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
+ try Atom.addRelocation(self, atom_index, .{
.type = switch (self.base.options.target.cpu.arch) {
.aarch64 => @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED),
.x86_64 => @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED),
@@ -2628,7 +2632,7 @@ pub fn getDeclVAddr(self: *MachO, decl_index: Module.Decl.Index, reloc_info: Fil
.pcrel = false,
.length = 3,
});
- try atom.addRebase(self, @intCast(u32, reloc_info.offset));
+ try Atom.addRebase(self, atom_index, @intCast(u32, reloc_info.offset));
return 0;
}
@@ -2860,34 +2864,36 @@ fn moveSectionInVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void
// TODO: enforce order by increasing VM addresses in self.sections container.
for (self.sections.items(.header)[sect_id + 1 ..]) |*next_header, next_sect_id| {
const index = @intCast(u8, sect_id + 1 + next_sect_id);
- const maybe_last_atom = &self.sections.items(.last_atom)[index];
const next_segment = self.getSegmentPtr(index);
next_header.addr += diff;
next_segment.vmaddr += diff;
- if (maybe_last_atom.*) |last_atom| {
- var atom = last_atom;
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[index];
+ if (maybe_last_atom_index.*) |last_atom_index| {
+ var atom_index = last_atom_index;
while (true) {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbolPtr(self);
sym.n_value += diff;
- if (atom.prev) |prev| {
- atom = prev;
+ if (atom.prev_index) |prev_index| {
+ atom_index = prev_index;
} else break;
}
}
}
}
-fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !u64 {
+fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 {
const tracy = trace(@src());
defer tracy.end();
+ const atom = self.getAtom(atom_index);
const sect_id = atom.getSymbol(self).n_sect - 1;
const segment = self.getSegmentPtr(sect_id);
const header = &self.sections.items(.header)[sect_id];
const free_list = &self.sections.items(.free_list)[sect_id];
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
const requires_padding = blk: {
if (!header.isCode()) break :blk false;
if (header.isSymbolStubs()) break :blk false;
@@ -2901,7 +2907,7 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
- var atom_placement: ?*Atom = null;
+ var atom_placement: ?Atom.Index = null;
var free_list_removal: ?usize = null;
// First we look for an appropriately sized free list node.
@@ -2909,7 +2915,8 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
var vaddr = blk: {
var i: usize = 0;
while (i < free_list.items.len) {
- const big_atom = free_list.items[i];
+ const big_atom_index = free_list.items[i];
+ const big_atom = self.getAtom(big_atom_index);
// We now have a pointer to a live atom that has too much capacity.
// Is it enough that we could fit this new atom?
const sym = big_atom.getSymbol(self);
@@ -2937,30 +2944,35 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
const keep_free_list_node = remaining_capacity >= min_text_capacity;
// Set up the metadata to be updated, after errors are no longer possible.
- atom_placement = big_atom;
+ atom_placement = big_atom_index;
if (!keep_free_list_node) {
free_list_removal = i;
}
break :blk new_start_vaddr;
- } else if (maybe_last_atom.*) |last| {
+ } else if (maybe_last_atom_index.*) |last_index| {
+ const last = self.getAtom(last_index);
const last_symbol = last.getSymbol(self);
const ideal_capacity = if (requires_padding) padToIdeal(last.size) else last.size;
const ideal_capacity_end_vaddr = last_symbol.n_value + ideal_capacity;
const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment);
- atom_placement = last;
+ atom_placement = last_index;
break :blk new_start_vaddr;
} else {
break :blk mem.alignForwardGeneric(u64, segment.vmaddr, alignment);
}
};
- const expand_section = atom_placement == null or atom_placement.?.next == null;
+ const expand_section = if (atom_placement) |placement_index|
+ self.getAtom(placement_index).next_index == null
+ else
+ true;
if (expand_section) {
const sect_capacity = self.allocatedSize(header.offset);
const needed_size = (vaddr + new_atom_size) - segment.vmaddr;
if (needed_size > sect_capacity) {
const new_offset = self.findFreeSpace(needed_size, self.page_size);
- const current_size = if (maybe_last_atom.*) |last_atom| blk: {
+ const current_size = if (maybe_last_atom_index.*) |last_atom_index| blk: {
+ const last_atom = self.getAtom(last_atom_index);
const sym = last_atom.getSymbol(self);
break :blk (sym.n_value + last_atom.size) - segment.vmaddr;
} else 0;
@@ -2992,7 +3004,7 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
header.size = needed_size;
segment.filesize = mem.alignForwardGeneric(u64, needed_size, self.page_size);
segment.vmsize = mem.alignForwardGeneric(u64, needed_size, self.page_size);
- maybe_last_atom.* = atom;
+ maybe_last_atom_index.* = atom_index;
self.segment_table_dirty = true;
}
@@ -3002,20 +3014,25 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
header.@"align" = align_pow;
}
- if (atom.prev) |prev| {
- prev.next = atom.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
}
- if (atom.next) |next| {
- next.prev = atom.prev;
+ if (atom.next_index) |next_index| {
+ const next = self.getAtomPtr(next_index);
+ next.prev_index = atom.prev_index;
}
- if (atom_placement) |big_atom| {
- atom.prev = big_atom;
- atom.next = big_atom.next;
- big_atom.next = atom;
+ if (atom_placement) |big_atom_index| {
+ const big_atom = self.getAtomPtr(big_atom_index);
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = big_atom_index;
+ atom_ptr.next_index = big_atom.next_index;
+ big_atom.next_index = atom_index;
} else {
- atom.prev = null;
- atom.next = null;
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = null;
+ atom_ptr.next_index = null;
}
if (free_list_removal) |i| {
_ = free_list.swapRemove(i);
@@ -3155,7 +3172,8 @@ fn collectRebaseData(self: *MachO, rebase: *Rebase) !void {
const gpa = self.base.allocator;
const slice = self.sections.slice();
- for (self.rebases.keys()) |atom, i| {
+ for (self.rebases.keys()) |atom_index, i| {
+ const atom = self.getAtom(atom_index);
log.debug(" ATOM(%{?d}, '{s}')", .{ atom.getSymbolIndex(), atom.getName(self) });
const sym = atom.getSymbol(self);
@@ -3184,7 +3202,8 @@ fn collectBindData(self: *MachO, bind: anytype, raw_bindings: anytype) !void {
const gpa = self.base.allocator;
const slice = self.sections.slice();
- for (raw_bindings.keys()) |atom, i| {
+ for (raw_bindings.keys()) |atom_index, i| {
+ const atom = self.getAtom(atom_index);
log.debug(" ATOM(%{?d}, '{s}')", .{ atom.getSymbolIndex(), atom.getName(self) });
const sym = atom.getSymbol(self);
@@ -3359,7 +3378,7 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, lazy_bind: LazyBind) !void
if (lazy_bind.size() == 0) return;
const stub_helper_section_index = self.stub_helper_section_index.?;
- assert(self.stub_helper_preamble_atom != null);
+ assert(self.stub_helper_preamble_atom_index != null);
const section = self.sections.get(stub_helper_section_index);
@@ -3369,10 +3388,11 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, lazy_bind: LazyBind) !void
else => unreachable,
};
const header = section.header;
- var atom = section.last_atom.?;
+ var atom_index = section.last_atom_index.?;
var index: usize = lazy_bind.offsets.items.len;
while (index > 0) : (index -= 1) {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const file_offset = header.offset + sym.n_value - header.addr + stub_offset;
const bind_offset = lazy_bind.offsets.items[index - 1];
@@ -3385,7 +3405,7 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, lazy_bind: LazyBind) !void
try self.base.file.?.pwriteAll(mem.asBytes(&bind_offset), file_offset);
- atom = atom.prev.?;
+ atom_index = atom.prev_index.?;
}
}
@@ -3828,25 +3848,35 @@ pub fn getOrPutGlobalPtr(self: *MachO, name: []const u8) !GetOrPutGlobalPtrResul
return GetOrPutGlobalPtrResult{ .found_existing = false, .value_ptr = ptr };
}
+pub fn getAtom(self: *MachO, atom_index: Atom.Index) Atom {
+ assert(atom_index < self.atoms.items.len);
+ return self.atoms.items[atom_index];
+}
+
+pub fn getAtomPtr(self: *MachO, atom_index: Atom.Index) *Atom {
+ assert(atom_index < self.atoms.items.len);
+ return &self.atoms.items[atom_index];
+}
+
/// Returns atom if there is an atom referenced by the symbol described by `sym_with_loc` descriptor.
/// Returns null on failure.
-pub fn getAtomForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?*Atom {
+pub fn getAtomIndexForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?Atom.Index {
assert(sym_with_loc.file == null);
return self.atom_by_index_table.get(sym_with_loc.sym_index);
}
/// Returns GOT atom that references `sym_with_loc` if one exists.
/// Returns null otherwise.
-pub fn getGotAtomForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?*Atom {
+pub fn getGotAtomIndexForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?Atom.Index {
const got_index = self.got_entries_table.get(sym_with_loc) orelse return null;
- return self.got_entries.items[got_index].getAtom(self);
+ return self.got_entries.items[got_index].getAtomIndex(self);
}
/// Returns stubs atom that references `sym_with_loc` if one exists.
/// Returns null otherwise.
-pub fn getStubsAtomForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?*Atom {
+pub fn getStubsAtomIndexForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?Atom.Index {
const stubs_index = self.stubs_table.get(sym_with_loc) orelse return null;
- return self.stubs.items[stubs_index].getAtom(self);
+ return self.stubs.items[stubs_index].getAtomIndex(self);
}
/// Returns symbol location corresponding to the set entrypoint.
@@ -4232,26 +4262,31 @@ pub fn logAtoms(self: *MachO) void {
log.debug("atoms:", .{});
const slice = self.sections.slice();
- for (slice.items(.last_atom)) |last, i| {
- var atom = last orelse continue;
+ for (slice.items(.last_atom_index)) |last_atom_index, i| {
+ var atom_index = last_atom_index orelse continue;
const header = slice.items(.header)[i];
- while (atom.prev) |prev| {
- atom = prev;
+ while (true) {
+ const atom = self.getAtom(atom_index);
+ if (atom.prev_index) |prev_index| {
+ atom_index = prev_index;
+ } else break;
}
log.debug("{s},{s}", .{ header.segName(), header.sectName() });
while (true) {
- self.logAtom(atom);
- if (atom.next) |next| {
- atom = next;
+ self.logAtom(atom_index);
+ const atom = self.getAtom(atom_index);
+ if (atom.next_index) |next_index| {
+ atom_index = next_index;
} else break;
}
}
}
-pub fn logAtom(self: *MachO, atom: *const Atom) void {
+pub fn logAtom(self: *MachO, atom_index: Atom.Index) void {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const sym_name = atom.getName(self);
log.debug(" ATOM(%{?d}, '{s}') @ {x} (sizeof({x}), alignof({x})) in object({?d}) in sect({d})", .{
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index f15958b3df..da0115d069 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -39,11 +39,14 @@ size: u64,
alignment: u32,
/// Points to the previous and next neighbours
-next: ?*Atom,
-prev: ?*Atom,
+/// TODO use the same trick as with symbols: reserve index 0 as null atom
+next_index: ?Atom.Index,
+prev_index: ?Atom.Index,
dbg_info_atom: Dwarf.Atom,
+pub const Index = u32;
+
pub const Binding = struct {
target: SymbolWithLoc,
offset: u64,
@@ -54,22 +57,6 @@ pub const SymbolAtOffset = struct {
offset: u64,
};
-pub const empty = Atom{
- .sym_index = 0,
- .file = null,
- .size = 0,
- .alignment = 0,
- .prev = null,
- .next = null,
- .dbg_info_atom = undefined,
-};
-
-pub fn ensureInitialized(self: *Atom, macho_file: *MachO) !void {
- if (self.getSymbolIndex() != null) return; // Already initialized
- self.sym_index = try macho_file.allocateSymbol();
- try macho_file.atom_by_index_table.putNoClobber(macho_file.base.allocator, self.sym_index, self);
-}
-
pub fn getSymbolIndex(self: Atom) ?u32 {
if (self.sym_index == 0) return null;
return self.sym_index;
@@ -108,7 +95,8 @@ pub fn getName(self: Atom, macho_file: *MachO) []const u8 {
/// this calculation.
pub fn capacity(self: Atom, macho_file: *MachO) u64 {
const self_sym = self.getSymbol(macho_file);
- if (self.next) |next| {
+ if (self.next_index) |next_index| {
+ const next = macho_file.getAtom(next_index);
const next_sym = next.getSymbol(macho_file);
return next_sym.n_value - self_sym.n_value;
} else {
@@ -120,7 +108,8 @@ pub fn capacity(self: Atom, macho_file: *MachO) u64 {
pub fn freeListEligible(self: Atom, macho_file: *MachO) bool {
// No need to keep a free list node for the last atom.
- const next = self.next orelse return false;
+ const next_index = self.next_index orelse return false;
+ const next = macho_file.getAtom(next_index);
const self_sym = self.getSymbol(macho_file);
const next_sym = next.getSymbol(macho_file);
const cap = next_sym.n_value - self_sym.n_value;
@@ -130,19 +119,19 @@ pub fn freeListEligible(self: Atom, macho_file: *MachO) bool {
return surplus >= MachO.min_text_capacity;
}
-pub fn addRelocation(self: *Atom, macho_file: *MachO, reloc: Relocation) !void {
- return self.addRelocations(macho_file, 1, .{reloc});
+pub fn addRelocation(macho_file: *MachO, atom_index: Atom.Index, reloc: Relocation) !void {
+ return addRelocations(macho_file, atom_index, 1, .{reloc});
}
pub fn addRelocations(
- self: *Atom,
macho_file: *MachO,
+ atom_index: Atom.Index,
comptime count: comptime_int,
relocs: [count]Relocation,
) !void {
const gpa = macho_file.base.allocator;
const target = macho_file.base.options.target;
- const gop = try macho_file.relocs.getOrPut(gpa, self);
+ const gop = try macho_file.relocs.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
@@ -156,56 +145,72 @@ pub fn addRelocations(
}
}
-pub fn addRebase(self: *Atom, macho_file: *MachO, offset: u32) !void {
+pub fn addRebase(macho_file: *MachO, atom_index: Atom.Index, offset: u32) !void {
const gpa = macho_file.base.allocator;
- log.debug(" (adding rebase at offset 0x{x} in %{?d})", .{ offset, self.getSymbolIndex() });
- const gop = try macho_file.rebases.getOrPut(gpa, self);
+ const atom = macho_file.getAtom(atom_index);
+ log.debug(" (adding rebase at offset 0x{x} in %{?d})", .{ offset, atom.getSymbolIndex() });
+ const gop = try macho_file.rebases.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, offset);
}
-pub fn addBinding(self: *Atom, macho_file: *MachO, binding: Binding) !void {
+pub fn addBinding(macho_file: *MachO, atom_index: Atom.Index, binding: Binding) !void {
const gpa = macho_file.base.allocator;
+ const atom = macho_file.getAtom(atom_index);
log.debug(" (adding binding to symbol {s} at offset 0x{x} in %{?d})", .{
macho_file.getSymbolName(binding.target),
binding.offset,
- self.getSymbolIndex(),
+ atom.getSymbolIndex(),
});
- const gop = try macho_file.bindings.getOrPut(gpa, self);
+ const gop = try macho_file.bindings.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, binding);
}
-pub fn addLazyBinding(self: *Atom, macho_file: *MachO, binding: Binding) !void {
+pub fn addLazyBinding(macho_file: *MachO, atom_index: Atom.Index, binding: Binding) !void {
const gpa = macho_file.base.allocator;
+ const atom = macho_file.getAtom(atom_index);
log.debug(" (adding lazy binding to symbol {s} at offset 0x{x} in %{?d})", .{
macho_file.getSymbolName(binding.target),
binding.offset,
- self.getSymbolIndex(),
+ atom.getSymbolIndex(),
});
- const gop = try macho_file.lazy_bindings.getOrPut(gpa, self);
+ const gop = try macho_file.lazy_bindings.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, binding);
}
-pub fn resolveRelocations(self: *Atom, macho_file: *MachO) !void {
- const relocs = macho_file.relocs.get(self) orelse return;
- const source_sym = self.getSymbol(macho_file);
+pub fn resolveRelocations(macho_file: *MachO, atom_index: Atom.Index) !void {
+ const atom = macho_file.getAtom(atom_index);
+ const relocs = macho_file.relocs.get(atom_index) orelse return;
+ const source_sym = atom.getSymbol(macho_file);
const source_section = macho_file.sections.get(source_sym.n_sect - 1).header;
const file_offset = source_section.offset + source_sym.n_value - source_section.addr;
- log.debug("relocating '{s}'", .{self.getName(macho_file)});
+ log.debug("relocating '{s}'", .{atom.getName(macho_file)});
for (relocs.items) |*reloc| {
if (!reloc.dirty) continue;
- try reloc.resolve(self, macho_file, file_offset);
+ try reloc.resolve(macho_file, atom_index, file_offset);
reloc.dirty = false;
}
}
+
+pub fn freeRelocations(macho_file: *MachO, atom_index: Atom.Index) void {
+ const gpa = macho_file.base.allocator;
+ var removed_relocs = macho_file.relocs.fetchOrderedRemove(atom_index);
+ if (removed_relocs) |*relocs| relocs.value.deinit(gpa);
+ var removed_rebases = macho_file.rebases.fetchOrderedRemove(atom_index);
+ if (removed_rebases) |*rebases| rebases.value.deinit(gpa);
+ var removed_bindings = macho_file.bindings.fetchOrderedRemove(atom_index);
+ if (removed_bindings) |*bindings| bindings.value.deinit(gpa);
+ var removed_lazy_bindings = macho_file.lazy_bindings.fetchOrderedRemove(atom_index);
+ if (removed_lazy_bindings) |*lazy_bindings| lazy_bindings.value.deinit(gpa);
+}
diff --git a/src/link/MachO/Relocation.zig b/src/link/MachO/Relocation.zig
index ca6bf9d681..07e5cf1aa2 100644
--- a/src/link/MachO/Relocation.zig
+++ b/src/link/MachO/Relocation.zig
@@ -29,33 +29,35 @@ pub fn fmtType(self: Relocation, target: std.Target) []const u8 {
}
}
-pub fn getTargetAtom(self: Relocation, macho_file: *MachO) ?*Atom {
+pub fn getTargetAtomIndex(self: Relocation, macho_file: *MachO) ?Atom.Index {
switch (macho_file.base.options.target.cpu.arch) {
.aarch64 => switch (@intToEnum(macho.reloc_type_arm64, self.type)) {
.ARM64_RELOC_GOT_LOAD_PAGE21,
.ARM64_RELOC_GOT_LOAD_PAGEOFF12,
.ARM64_RELOC_POINTER_TO_GOT,
- => return macho_file.getGotAtomForSymbol(self.target),
+ => return macho_file.getGotAtomIndexForSymbol(self.target),
else => {},
},
.x86_64 => switch (@intToEnum(macho.reloc_type_x86_64, self.type)) {
.X86_64_RELOC_GOT,
.X86_64_RELOC_GOT_LOAD,
- => return macho_file.getGotAtomForSymbol(self.target),
+ => return macho_file.getGotAtomIndexForSymbol(self.target),
else => {},
},
else => unreachable,
}
- if (macho_file.getStubsAtomForSymbol(self.target)) |stubs_atom| return stubs_atom;
- return macho_file.getAtomForSymbol(self.target);
+ if (macho_file.getStubsAtomIndexForSymbol(self.target)) |stubs_atom| return stubs_atom;
+ return macho_file.getAtomIndexForSymbol(self.target);
}
-pub fn resolve(self: Relocation, atom: *Atom, macho_file: *MachO, base_offset: u64) !void {
+pub fn resolve(self: Relocation, macho_file: *MachO, atom_index: Atom.Index, base_offset: u64) !void {
const arch = macho_file.base.options.target.cpu.arch;
+ const atom = macho_file.getAtom(atom_index);
const source_sym = atom.getSymbol(macho_file);
const source_addr = source_sym.n_value + self.offset;
- const target_atom = self.getTargetAtom(macho_file) orelse return;
+ const target_atom_index = self.getTargetAtomIndex(macho_file) orelse return;
+ const target_atom = macho_file.getAtom(target_atom_index);
const target_addr = @intCast(i64, target_atom.getSymbol(macho_file).n_value) + self.addend;
log.debug(" ({x}: [() => 0x{x} ({s})) ({s})", .{
--
cgit v1.2.3
From 1f64432196e51785fe1dba442a0878c1b10f8b06 Mon Sep 17 00:00:00 2001
From: Techatrix <19954306+Techatrix@users.noreply.github.com>
Date: Tue, 31 Jan 2023 00:59:18 +0100
Subject: wasm: correctly handle optional slices
---
src/arch/wasm/CodeGen.zig | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
(limited to 'src')
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 342d6b70cc..d0ff27a3e2 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1706,9 +1706,11 @@ fn isByRef(ty: Type, target: std.Target) bool {
return true;
},
.Optional => {
- if (ty.optionalReprIsPayload()) return false;
+ if (ty.isPtrLikeOptional()) return false;
var buf: Type.Payload.ElemType = undefined;
- return ty.optionalChild(&buf).hasRuntimeBitsIgnoreComptime();
+ const pl_type = ty.optionalChild(&buf);
+ if (pl_type.zigTypeTag() == .ErrorSet) return false;
+ return pl_type.hasRuntimeBitsIgnoreComptime();
},
.Pointer => {
// Slices act like struct and will be passed by reference
@@ -3869,14 +3871,17 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind:
/// NOTE: Leaves the result on the stack
fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue {
try func.emitWValue(operand);
+ var buf: Type.Payload.ElemType = undefined;
+ const payload_ty = optional_ty.optionalChild(&buf);
if (!optional_ty.optionalReprIsPayload()) {
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = optional_ty.optionalChild(&buf);
// When payload is zero-bits, we can treat operand as a value, rather than
// a pointer to the stack value
if (payload_ty.hasRuntimeBitsIgnoreComptime()) {
try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset(), .alignment = 1 });
}
+ } else if (payload_ty.isSlice()) {
+ // move the ptr on top of the stack
+ _ = try func.load(operand, Type.usize, 0);
}
// Compare the null value with '0'
--
cgit v1.2.3
From 17404f8e6edea28fc70e074fd75101e1ed48b620 Mon Sep 17 00:00:00 2001
From: r00ster91
Date: Sun, 29 Jan 2023 23:25:31 +0100
Subject: Sema: emit compile error for comptime or inline call of function
pointer
---
src/Sema.zig | 7 ++++++-
.../cases/compile_errors/comptime_call_of_function_pointer.zig | 10 ++++++++++
2 files changed, 16 insertions(+), 1 deletion(-)
create mode 100644 test/cases/compile_errors/comptime_call_of_function_pointer.zig
(limited to 'src')
diff --git a/src/Sema.zig b/src/Sema.zig
index 9c553a0092..7448fd149c 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -6446,7 +6446,12 @@ fn analyzeCall(
.extern_fn => return sema.fail(block, call_src, "{s} call of extern function", .{
@as([]const u8, if (is_comptime_call) "comptime" else "inline"),
}),
- else => unreachable,
+ else => {
+ assert(callee_ty.isPtrAtRuntime());
+ return sema.fail(block, call_src, "{s} call of function pointer", .{
+ @as([]const u8, if (is_comptime_call) "comptime" else "inline"),
+ });
+ },
};
if (func_ty_info.is_var_args) {
return sema.fail(block, call_src, "{s} call of variadic function", .{
diff --git a/test/cases/compile_errors/comptime_call_of_function_pointer.zig b/test/cases/compile_errors/comptime_call_of_function_pointer.zig
new file mode 100644
index 0000000000..cf01f5ea2c
--- /dev/null
+++ b/test/cases/compile_errors/comptime_call_of_function_pointer.zig
@@ -0,0 +1,10 @@
+export fn entry() void {
+ const fn_ptr = @intToPtr(*align(1) fn () void, 0xffd2);
+ comptime fn_ptr();
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :3:20: error: comptime call of function pointer
--
cgit v1.2.3
From 47ff57ed7ddbf4c4a0f93208fc96851c9033b8b7 Mon Sep 17 00:00:00 2001
From: Techatrix <19954306+Techatrix@users.noreply.github.com>
Date: Tue, 31 Jan 2023 17:01:56 +0100
Subject: wasm: apply requested changes
---
src/arch/wasm/CodeGen.zig | 7 +++++--
test/behavior/cast.zig | 3 ---
test/behavior/optional.zig | 2 --
3 files changed, 5 insertions(+), 7 deletions(-)
(limited to 'src')
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index d0ff27a3e2..c0d0c11b56 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -3880,8 +3880,11 @@ fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcod
try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset(), .alignment = 1 });
}
} else if (payload_ty.isSlice()) {
- // move the ptr on top of the stack
- _ = try func.load(operand, Type.usize, 0);
+ switch (func.arch()) {
+ .wasm32 => try func.addMemArg(.i32_load, .{ .offset = operand.offset(), .alignment = 4 }),
+ .wasm64 => try func.addMemArg(.i64_load, .{ .offset = operand.offset(), .alignment = 8 }),
+ else => unreachable,
+ }
}
// Compare the null value with '0'
diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig
index 7c0746bfcd..dbb4c07f64 100644
--- a/test/behavior/cast.zig
+++ b/test/behavior/cast.zig
@@ -1179,7 +1179,6 @@ fn peerTypeEmptyArrayAndSlice(a: bool, slice: []const u8) []const u8 {
test "implicitly cast from [N]T to ?[]const T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(mem.eql(u8, castToOptionalSlice().?, "hi"));
@@ -1264,7 +1263,6 @@ test "cast from array reference to fn: runtime fn ptr" {
test "*const [N]null u8 to ?[]const u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1413,7 +1411,6 @@ test "cast i8 fn call peers to i32 result" {
test "cast compatible optional types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var a: ?[:0]const u8 = null;
diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig
index 3e91c6807c..3a5b7b008b 100644
--- a/test/behavior/optional.zig
+++ b/test/behavior/optional.zig
@@ -439,7 +439,6 @@ test "Optional slice size is optimized" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(@sizeOf(?[]u8) == @sizeOf([]u8));
@@ -479,7 +478,6 @@ test "cast slice to const slice nested in error union and optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
const S = struct {
fn inner() !?[]u8 {
--
cgit v1.2.3
From 4404c4d20094bb5021aac4a047cd33b6c24b9a9b Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Tue, 31 Jan 2023 17:54:12 +0100
Subject: link: make Elf atoms fully owned by the linker
---
src/Module.zig | 6 +-
src/Sema.zig | 2 +-
src/arch/aarch64/CodeGen.zig | 13 +-
src/arch/arm/CodeGen.zig | 15 +-
src/arch/riscv64/CodeGen.zig | 14 +-
src/arch/sparc64/CodeGen.zig | 12 +-
src/arch/x86_64/CodeGen.zig | 13 +-
src/link.zig | 4 +-
src/link/Dwarf.zig | 26 +-
src/link/Elf.zig | 1555 ++++++++++++++++++++++--------------------
src/link/Elf/Atom.zig | 61 +-
src/link/MachO.zig | 4 +-
12 files changed, 901 insertions(+), 824 deletions(-)
(limited to 'src')
diff --git a/src/Module.zig b/src/Module.zig
index 8301505492..0695a2e98a 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -5275,7 +5275,7 @@ pub fn clearDecl(
// and allow it to be variably sized.
decl.link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = link.File.Coff.Atom.empty },
- .elf => .{ .elf = link.File.Elf.TextBlock.empty },
+ .elf => .{ .elf = {} },
.macho => .{ .macho = {} },
.plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
.c => .{ .c = {} },
@@ -5381,7 +5381,7 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void
}
}
if (mod.comp.bin_file.cast(link.File.Elf)) |elf| {
- elf.deleteExport(exp.link.elf);
+ elf.deleteDeclExport(decl_index, exp.options.name);
}
if (mod.comp.bin_file.cast(link.File.MachO)) |macho| {
try macho.deleteDeclExport(decl_index, exp.options.name);
@@ -5695,7 +5695,7 @@ pub fn allocateNewDecl(
.src_scope = src_scope,
.link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = link.File.Coff.Atom.empty },
- .elf => .{ .elf = link.File.Elf.TextBlock.empty },
+ .elf => .{ .elf = {} },
.macho => .{ .macho = {} },
.plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
.c => .{ .c = {} },
diff --git a/src/Sema.zig b/src/Sema.zig
index 9083cc92ab..28d559f730 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -5566,7 +5566,7 @@ pub fn analyzeExport(
.src = src,
.link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = .{} },
- .elf => .{ .elf = .{} },
+ .elf => .{ .elf = {} },
.macho => .{ .macho = {} },
.plan9 => .{ .plan9 = null },
.c => .{ .c = {} },
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index edbe7905a2..67197c35f8 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -4308,8 +4308,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const fn_owner_decl = mod.declPtr(func.owner_decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try fn_owner_decl.link.elf.ensureInitialized(elf_file);
- const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr });
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
@@ -6138,8 +6139,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try decl.link.elf.ensureInitialized(elf_file);
- return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const atom = try macho_file.getOrCreateAtomForDecl(decl_index);
const sym_index = macho_file.getAtom(atom).getSymbolIndex().?;
@@ -6168,8 +6170,7 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
};
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
- return MCValue{ .memory = vaddr };
+ return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
return MCValue{ .linker_load = .{
.type = .direct,
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 49f979624d..c6ee960e51 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -4256,12 +4256,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
- const mod = self.bin_file.options.module.?;
- const fn_owner_decl = mod.declPtr(func.owner_decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try fn_owner_decl.link.elf.ensureInitialized(elf_file);
- const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr });
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable; // unsupported architecture for MachO
@@ -6084,8 +6083,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try decl.link.elf.ensureInitialized(elf_file);
- return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable; // unsupported architecture for MachO
} else if (self.bin_file.cast(link.File.Coff)) |_| {
@@ -6106,8 +6106,7 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
};
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
- return MCValue{ .memory = vaddr };
+ return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable;
} else if (self.bin_file.cast(link.File.Coff)) |_| {
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 07a8dcd858..a0af1b3cce 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -1721,12 +1721,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
-
- const mod = self.bin_file.options.module.?;
- const fn_owner_decl = mod.declPtr(func.owner_decl);
- try fn_owner_decl.link.elf.ensureInitialized(elf_file);
- const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
-
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr });
_ = try self.addInst(.{
.tag = .jalr,
@@ -2553,8 +2550,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
const decl = mod.declPtr(decl_index);
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try decl.link.elf.ensureInitialized(elf_file);
- return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable;
} else if (self.bin_file.cast(link.File.Coff)) |_| {
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 5e9326d23b..e67244167e 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -1216,11 +1216,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.bin_file.tag == link.File.Elf.base_tag) {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
- const mod = self.bin_file.options.module.?;
- const fn_owner_decl = mod.declPtr(func.owner_decl);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
- try fn_owner_decl.link.elf.ensureInitialized(elf_file);
- break :blk @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ break :blk @intCast(u32, atom.getOffsetTableAddress(elf_file));
} else unreachable;
try self.genSetReg(Type.initTag(.usize), .o7, .{ .memory = got_addr });
@@ -4205,8 +4204,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try decl.link.elf.ensureInitialized(elf_file);
- return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index fc244e3130..23d3ca5514 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -4000,8 +4000,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const fn_owner_decl = mod.declPtr(func.owner_decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try fn_owner_decl.link.elf.ensureInitialized(elf_file);
- const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
@@ -6721,8 +6722,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
module.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try decl.link.elf.ensureInitialized(elf_file);
- return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const atom_index = try macho_file.getOrCreateAtomForDecl(decl_index);
const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
@@ -6751,8 +6753,7 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
};
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
- return MCValue{ .memory = vaddr };
+ return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
return MCValue{ .linker_load = .{
.type = .direct,
diff --git a/src/link.zig b/src/link.zig
index 2a96efe89d..09804add53 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -262,7 +262,7 @@ pub const File = struct {
lock: ?Cache.Lock = null,
pub const LinkBlock = union {
- elf: Elf.TextBlock,
+ elf: void,
coff: Coff.Atom,
macho: void,
plan9: Plan9.DeclBlock,
@@ -284,7 +284,7 @@ pub const File = struct {
};
pub const Export = union {
- elf: Elf.Export,
+ elf: void,
coff: Coff.Export,
macho: void,
plan9: Plan9.Export,
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 2595cd8ba5..8278377095 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -1099,7 +1099,7 @@ pub fn commitDeclState(
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const debug_line_sect = &elf_file.sections.items[elf_file.debug_line_section_index.?];
+ const debug_line_sect = &elf_file.sections.items(.shdr)[elf_file.debug_line_section_index.?];
const file_pos = debug_line_sect.sh_offset + src_fn.off;
try pwriteDbgLineNops(elf_file.base.file.?, file_pos, 0, &[0]u8{}, src_fn.len);
},
@@ -1152,7 +1152,7 @@ pub fn commitDeclState(
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr_index = elf_file.debug_line_section_index.?;
try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true);
- const debug_line_sect = elf_file.sections.items[shdr_index];
+ const debug_line_sect = elf_file.sections.items(.shdr)[shdr_index];
const file_pos = debug_line_sect.sh_offset + src_fn.off;
try pwriteDbgLineNops(
elf_file.base.file.?,
@@ -1332,7 +1332,7 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, atom: *Atom, len: u32) !void {
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const debug_info_sect = &elf_file.sections.items[elf_file.debug_info_section_index.?];
+ const debug_info_sect = &elf_file.sections.items(.shdr)[elf_file.debug_info_section_index.?];
const file_pos = debug_info_sect.sh_offset + atom.off;
try pwriteDbgInfoNops(elf_file.base.file.?, file_pos, 0, &[0]u8{}, atom.len, false);
},
@@ -1399,7 +1399,7 @@ fn writeDeclDebugInfo(self: *Dwarf, atom: *Atom, dbg_info_buf: []const u8) !void
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr_index = elf_file.debug_info_section_index.?;
try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true);
- const debug_info_sect = elf_file.sections.items[shdr_index];
+ const debug_info_sect = elf_file.sections.items(.shdr)[shdr_index];
const file_pos = debug_info_sect.sh_offset + atom.off;
try pwriteDbgInfoNops(
elf_file.base.file.?,
@@ -1475,7 +1475,7 @@ pub fn updateDeclLineNumber(self: *Dwarf, decl: *const Module.Decl) !void {
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const shdr = elf_file.sections.items[elf_file.debug_line_section_index.?];
+ const shdr = elf_file.sections.items(.shdr)[elf_file.debug_line_section_index.?];
const file_pos = shdr.sh_offset + decl.fn_link.elf.off + self.getRelocDbgLineOff();
try elf_file.base.file.?.pwriteAll(&data, file_pos);
},
@@ -1690,7 +1690,7 @@ pub fn writeDbgAbbrev(self: *Dwarf) !void {
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr_index = elf_file.debug_abbrev_section_index.?;
try elf_file.growNonAllocSection(shdr_index, needed_size, 1, false);
- const debug_abbrev_sect = elf_file.sections.items[shdr_index];
+ const debug_abbrev_sect = elf_file.sections.items(.shdr)[shdr_index];
const file_pos = debug_abbrev_sect.sh_offset + abbrev_offset;
try elf_file.base.file.?.pwriteAll(&abbrev_buf, file_pos);
},
@@ -1805,7 +1805,7 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const debug_info_sect = elf_file.sections.items[elf_file.debug_info_section_index.?];
+ const debug_info_sect = elf_file.sections.items(.shdr)[elf_file.debug_info_section_index.?];
const file_pos = debug_info_sect.sh_offset;
try pwriteDbgInfoNops(elf_file.base.file.?, file_pos, 0, di_buf.items, jmp_amt, false);
},
@@ -2124,7 +2124,7 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void {
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr_index = elf_file.debug_aranges_section_index.?;
try elf_file.growNonAllocSection(shdr_index, needed_size, 16, false);
- const debug_aranges_sect = elf_file.sections.items[shdr_index];
+ const debug_aranges_sect = elf_file.sections.items(.shdr)[shdr_index];
const file_pos = debug_aranges_sect.sh_offset;
try elf_file.base.file.?.pwriteAll(di_buf.items, file_pos);
},
@@ -2285,9 +2285,9 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr_index = elf_file.debug_line_section_index.?;
- const needed_size = elf_file.sections.items[shdr_index].sh_size + delta;
+ const needed_size = elf_file.sections.items(.shdr)[shdr_index].sh_size + delta;
try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true);
- const file_pos = elf_file.sections.items[shdr_index].sh_offset + src_fn.off;
+ const file_pos = elf_file.sections.items(.shdr)[shdr_index].sh_offset + src_fn.off;
const amt = try elf_file.base.file.?.preadAll(buffer, file_pos);
if (amt != buffer.len) return error.InputOutput;
@@ -2346,7 +2346,7 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const debug_line_sect = elf_file.sections.items[elf_file.debug_line_section_index.?];
+ const debug_line_sect = elf_file.sections.items(.shdr)[elf_file.debug_line_section_index.?];
const file_pos = debug_line_sect.sh_offset;
try pwriteDbgLineNops(elf_file.base.file.?, file_pos, 0, di_buf.items, jmp_amt);
},
@@ -2487,7 +2487,7 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const debug_info_sect = &elf_file.sections.items[elf_file.debug_info_section_index.?];
+ const debug_info_sect = &elf_file.sections.items(.shdr)[elf_file.debug_info_section_index.?];
break :blk debug_info_sect.sh_offset;
},
.macho => {
@@ -2638,7 +2638,7 @@ fn addDbgInfoErrorSet(
fn getDbgInfoAtom(tag: File.Tag, mod: *Module, decl_index: Module.Decl.Index) *Atom {
const decl = mod.declPtr(decl_index);
return switch (tag) {
- .elf => &decl.link.elf.dbg_info_atom,
+ .elf => unreachable,
.macho => unreachable,
.wasm => &decl.link.wasm.dbg_info_atom,
else => unreachable,
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 2c55e55f83..0b8128aa33 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1,43 +1,89 @@
const Elf = @This();
const std = @import("std");
+const build_options = @import("build_options");
const builtin = @import("builtin");
-const math = std.math;
-const mem = std.mem;
const assert = std.debug.assert;
-const Allocator = std.mem.Allocator;
-const fs = std.fs;
const elf = std.elf;
+const fs = std.fs;
const log = std.log.scoped(.link);
+const math = std.math;
+const mem = std.mem;
-const Atom = @import("Elf/Atom.zig");
-const Module = @import("../Module.zig");
-const Compilation = @import("../Compilation.zig");
-const Dwarf = @import("Dwarf.zig");
const codegen = @import("../codegen.zig");
-const lldMain = @import("../main.zig").lldMain;
-const trace = @import("../tracy.zig").trace;
-const Package = @import("../Package.zig");
-const Value = @import("../value.zig").Value;
-const Type = @import("../type.zig").Type;
-const TypedValue = @import("../TypedValue.zig");
-const link = @import("../link.zig");
-const File = link.File;
-const build_options = @import("build_options");
-const target_util = @import("../target.zig");
const glibc = @import("../glibc.zig");
+const link = @import("../link.zig");
+const lldMain = @import("../main.zig").lldMain;
const musl = @import("../musl.zig");
-const Cache = @import("../Cache.zig");
+const target_util = @import("../target.zig");
+const trace = @import("../tracy.zig").trace;
+
const Air = @import("../Air.zig");
+const Allocator = std.mem.Allocator;
+pub const Atom = @import("Elf/Atom.zig");
+const Cache = @import("../Cache.zig");
+const Compilation = @import("../Compilation.zig");
+const Dwarf = @import("Dwarf.zig");
+const File = link.File;
const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
-
-pub const TextBlock = Atom;
+const Module = @import("../Module.zig");
+const Package = @import("../Package.zig");
+const StringTable = @import("strtab.zig").StringTable;
+const Type = @import("../type.zig").Type;
+const TypedValue = @import("../TypedValue.zig");
+const Value = @import("../value.zig").Value;
const default_entry_addr = 0x8000000;
pub const base_tag: File.Tag = .elf;
+const Section = struct {
+ shdr: elf.Elf64_Shdr,
+ phdr_index: u16,
+
+ /// Index of the last allocated atom in this section.
+ last_atom_index: ?Atom.Index = null,
+
+ /// A list of atoms that have surplus capacity. This list can have false
+ /// positives, as functions grow and shrink over time, only sometimes being added
+ /// or removed from the freelist.
+ ///
+ /// An atom has surplus capacity when its overcapacity value is greater than
+ /// padToIdeal(minimum_atom_size). That is, when it has so
+ /// much extra capacity, that we could fit a small new symbol in it, itself with
+ /// ideal_capacity or more.
+ ///
+ /// Ideal capacity is defined by size + (size / ideal_factor)
+ ///
+ /// Overcapacity is measured by actual_capacity - ideal_capacity. Note that
+ /// overcapacity can be negative. A simple way to have negative overcapacity is to
+ /// allocate a fresh text block, which will have ideal capacity, and then grow it
+ /// by 1 byte. It will then have -1 overcapacity.
+ free_list: std.ArrayListUnmanaged(Atom.Index) = .{},
+};
+
+const DeclMetadata = struct {
+ atom: Atom.Index,
+ shdr: u16,
+ /// A list of all export aliases of this Decl.
+ exports: std.ArrayListUnmanaged(u32) = .{},
+
+ fn getExport(m: DeclMetadata, elf_file: *const Elf, name: []const u8) ?u32 {
+ for (m.exports.items) |exp| {
+ if (mem.eql(u8, name, elf_file.getSymbolName(exp))) return exp;
+ }
+ return null;
+ }
+
+ fn getExportPtr(m: *DeclMetadata, elf_file: *Elf, name: []const u8) ?*u32 {
+ for (m.exports.items) |*exp| {
+ if (mem.eql(u8, name, elf_file.getSymbolName(exp.*))) return exp;
+ }
+ return null;
+ }
+};
+
base: File,
dwarf: ?Dwarf = null,
@@ -48,12 +94,12 @@ llvm_object: ?*LlvmObject = null,
/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
/// Same order as in the file.
-sections: std.ArrayListUnmanaged(elf.Elf64_Shdr) = std.ArrayListUnmanaged(elf.Elf64_Shdr){},
+sections: std.MultiArrayList(Section) = .{},
shdr_table_offset: ?u64 = null,
/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
/// Same order as in the file.
-program_headers: std.ArrayListUnmanaged(elf.Elf64_Phdr) = std.ArrayListUnmanaged(elf.Elf64_Phdr){},
+program_headers: std.ArrayListUnmanaged(elf.Elf64_Phdr) = .{},
phdr_table_offset: ?u64 = null,
/// The index into the program headers of a PT_LOAD program header with Read and Execute flags
phdr_load_re_index: ?u16 = null,
@@ -65,12 +111,10 @@ phdr_load_ro_index: ?u16 = null,
/// The index into the program headers of a PT_LOAD program header with Write flag
phdr_load_rw_index: ?u16 = null,
-phdr_shdr_table: std.AutoHashMapUnmanaged(u16, u16) = .{},
-
entry_addr: ?u64 = null,
page_size: u32,
-shstrtab: std.ArrayListUnmanaged(u8) = std.ArrayListUnmanaged(u8){},
+shstrtab: StringTable(.strtab) = .{},
shstrtab_index: ?u16 = null,
symtab_section_index: ?u16 = null,
@@ -113,39 +157,14 @@ debug_line_header_dirty: bool = false,
error_flags: File.ErrorFlags = File.ErrorFlags{},
-/// Pointer to the last allocated atom
-atoms: std.AutoHashMapUnmanaged(u16, *TextBlock) = .{},
-
-/// A list of text blocks that have surplus capacity. This list can have false
-/// positives, as functions grow and shrink over time, only sometimes being added
-/// or removed from the freelist.
-///
-/// A text block has surplus capacity when its overcapacity value is greater than
-/// padToIdeal(minimum_text_block_size). That is, when it has so
-/// much extra capacity, that we could fit a small new symbol in it, itself with
-/// ideal_capacity or more.
-///
-/// Ideal capacity is defined by size + (size / ideal_factor)
-///
-/// Overcapacity is measured by actual_capacity - ideal_capacity. Note that
-/// overcapacity can be negative. A simple way to have negative overcapacity is to
-/// allocate a fresh text block, which will have ideal capacity, and then grow it
-/// by 1 byte. It will then have -1 overcapacity.
-atom_free_lists: std.AutoHashMapUnmanaged(u16, std.ArrayListUnmanaged(*TextBlock)) = .{},
-
-/// Table of Decls that are currently alive.
-/// We store them here so that we can properly dispose of any allocated
-/// memory within the atom in the incremental linker.
-/// TODO consolidate this.
-decls: std.AutoHashMapUnmanaged(Module.Decl.Index, ?u16) = .{},
+/// Table of tracked Decls.
+decls: std.AutoHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
/// List of atoms that are owned directly by the linker.
-/// Currently these are only atoms that are the result of linking
-/// object files. Atoms which take part in incremental linking are
-/// at present owned by Module.Decl.
-/// TODO consolidate this.
-managed_atoms: std.ArrayListUnmanaged(*TextBlock) = .{},
-atom_by_index_table: std.AutoHashMapUnmanaged(u32, *TextBlock) = .{},
+atoms: std.ArrayListUnmanaged(Atom) = .{},
+
+/// Table of atoms indexed by the symbol index.
+atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},
/// Table of unnamed constants associated with a parent `Decl`.
/// We store them here so that we can free the constants whenever the `Decl`
@@ -173,15 +192,8 @@ unnamed_const_atoms: UnnamedConstTable = .{},
/// this will be a table indexed by index into the list of Atoms.
relocs: RelocTable = .{},
-const Reloc = struct {
- target: u32,
- offset: u64,
- addend: u32,
- prev_vaddr: u64,
-};
-
-const RelocTable = std.AutoHashMapUnmanaged(*TextBlock, std.ArrayListUnmanaged(Reloc));
-const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(*TextBlock));
+const RelocTable = std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Atom.Reloc));
+const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Atom.Index));
/// When allocating, the ideal_capacity is calculated by
/// actual_capacity + (actual_capacity / ideal_factor)
@@ -190,15 +202,11 @@ const ideal_factor = 3;
/// In order for a slice of bytes to be considered eligible to keep metadata pointing at
/// it as a possible place to put new symbols, it must have enough room for this many bytes
/// (plus extra for reserved capacity).
-const minimum_text_block_size = 64;
-pub const min_text_capacity = padToIdeal(minimum_text_block_size);
+const minimum_atom_size = 64;
+pub const min_text_capacity = padToIdeal(minimum_atom_size);
pub const PtrWidth = enum { p32, p64 };
-pub const Export = struct {
- sym_index: ?u32 = null,
-};
-
pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Elf {
assert(options.target.ofmt == .elf);
@@ -230,16 +238,19 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
// There must always be a null section in index 0
try self.sections.append(allocator, .{
- .sh_name = 0,
- .sh_type = elf.SHT_NULL,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = 0,
- .sh_size = 0,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = 0,
- .sh_entsize = 0,
+ .shdr = .{
+ .sh_name = 0,
+ .sh_type = elf.SHT_NULL,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = 0,
+ .sh_size = 0,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = 0,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
});
try self.populateMissingMetadata();
@@ -286,75 +297,67 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Elf {
}
pub fn deinit(self: *Elf) void {
+ const gpa = self.base.allocator;
+
if (build_options.have_llvm) {
- if (self.llvm_object) |llvm_object| llvm_object.destroy(self.base.allocator);
- }
-
- self.sections.deinit(self.base.allocator);
- self.program_headers.deinit(self.base.allocator);
- self.shstrtab.deinit(self.base.allocator);
- self.local_symbols.deinit(self.base.allocator);
- self.global_symbols.deinit(self.base.allocator);
- self.global_symbol_free_list.deinit(self.base.allocator);
- self.local_symbol_free_list.deinit(self.base.allocator);
- self.offset_table_free_list.deinit(self.base.allocator);
- self.offset_table.deinit(self.base.allocator);
- self.phdr_shdr_table.deinit(self.base.allocator);
- self.decls.deinit(self.base.allocator);
-
- self.atoms.deinit(self.base.allocator);
+ if (self.llvm_object) |llvm_object| llvm_object.destroy(gpa);
+ }
+
+ for (self.sections.items(.free_list)) |*free_list| {
+ free_list.deinit(gpa);
+ }
+ self.sections.deinit(gpa);
+
+ self.program_headers.deinit(gpa);
+ self.shstrtab.deinit(gpa);
+ self.local_symbols.deinit(gpa);
+ self.global_symbols.deinit(gpa);
+ self.global_symbol_free_list.deinit(gpa);
+ self.local_symbol_free_list.deinit(gpa);
+ self.offset_table_free_list.deinit(gpa);
+ self.offset_table.deinit(gpa);
+
{
- var it = self.atom_free_lists.valueIterator();
- while (it.next()) |free_list| {
- free_list.deinit(self.base.allocator);
+ var it = self.decls.iterator();
+ while (it.next()) |entry| {
+ entry.value_ptr.exports.deinit(gpa);
}
- self.atom_free_lists.deinit(self.base.allocator);
+ self.decls.deinit(gpa);
}
- for (self.managed_atoms.items) |atom| {
- self.base.allocator.destroy(atom);
- }
- self.managed_atoms.deinit(self.base.allocator);
+ self.atoms.deinit(gpa);
+ self.atom_by_index_table.deinit(gpa);
{
var it = self.unnamed_const_atoms.valueIterator();
while (it.next()) |atoms| {
- atoms.deinit(self.base.allocator);
+ atoms.deinit(gpa);
}
- self.unnamed_const_atoms.deinit(self.base.allocator);
+ self.unnamed_const_atoms.deinit(gpa);
}
{
var it = self.relocs.valueIterator();
while (it.next()) |relocs| {
- relocs.deinit(self.base.allocator);
+ relocs.deinit(gpa);
}
- self.relocs.deinit(self.base.allocator);
+ self.relocs.deinit(gpa);
}
- self.atom_by_index_table.deinit(self.base.allocator);
-
- if (self.dwarf) |*dw| {
- dw.deinit();
- }
+ // if (self.dwarf) |*dw| {
+ // dw.deinit();
+ // }
}
pub fn getDeclVAddr(self: *Elf, decl_index: Module.Decl.Index, reloc_info: File.RelocInfo) !u64 {
- const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
-
assert(self.llvm_object == null);
- try decl.link.elf.ensureInitialized(self);
- const target = decl.link.elf.getSymbolIndex().?;
-
- const vaddr = self.local_symbols.items[target].st_value;
- const atom = self.atom_by_index_table.get(reloc_info.parent_atom_index).?;
- const gop = try self.relocs.getOrPut(self.base.allocator, atom);
- if (!gop.found_existing) {
- gop.value_ptr.* = .{};
- }
- try gop.value_ptr.append(self.base.allocator, .{
+ const this_atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const this_atom = self.getAtom(this_atom_index);
+ const target = this_atom.getSymbolIndex().?;
+ const vaddr = this_atom.getSymbol(self).st_value;
+ const atom_index = self.getAtomIndexForSymbol(reloc_info.parent_atom_index).?;
+ try Atom.addRelocation(self, atom_index, .{
.target = target,
.offset = reloc_info.offset,
.addend = reloc_info.addend,
@@ -375,7 +378,7 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) ?u64 {
if (self.shdr_table_offset) |off| {
const shdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Shdr) else @sizeOf(elf.Elf64_Shdr);
- const tight_size = self.sections.items.len * shdr_size;
+ const tight_size = self.sections.slice().len * shdr_size;
const increased_size = padToIdeal(tight_size);
const test_end = off + increased_size;
if (end > off and start < test_end) {
@@ -385,7 +388,7 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) ?u64 {
if (self.phdr_table_offset) |off| {
const phdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Phdr) else @sizeOf(elf.Elf64_Phdr);
- const tight_size = self.sections.items.len * phdr_size;
+ const tight_size = self.sections.slice().len * phdr_size;
const increased_size = padToIdeal(tight_size);
const test_end = off + increased_size;
if (end > off and start < test_end) {
@@ -393,7 +396,7 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) ?u64 {
}
}
- for (self.sections.items) |section| {
+ for (self.sections.items(.shdr)) |section| {
const increased_size = padToIdeal(section.sh_size);
const test_end = section.sh_offset + increased_size;
if (end > section.sh_offset and start < test_end) {
@@ -420,7 +423,7 @@ pub fn allocatedSize(self: *Elf, start: u64) u64 {
if (self.phdr_table_offset) |off| {
if (off > start and off < min_pos) min_pos = off;
}
- for (self.sections.items) |section| {
+ for (self.sections.items(.shdr)) |section| {
if (section.sh_offset <= start) continue;
if (section.sh_offset < min_pos) min_pos = section.sh_offset;
}
@@ -439,31 +442,10 @@ pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u32) u64 {
return start;
}
-/// TODO Improve this to use a table.
-fn makeString(self: *Elf, bytes: []const u8) !u32 {
- try self.shstrtab.ensureUnusedCapacity(self.base.allocator, bytes.len + 1);
- const result = self.shstrtab.items.len;
- self.shstrtab.appendSliceAssumeCapacity(bytes);
- self.shstrtab.appendAssumeCapacity(0);
- return @intCast(u32, result);
-}
-
-pub fn getString(self: Elf, str_off: u32) []const u8 {
- assert(str_off < self.shstrtab.items.len);
- return mem.sliceTo(@ptrCast([*:0]const u8, self.shstrtab.items.ptr + str_off), 0);
-}
-
-fn updateString(self: *Elf, old_str_off: u32, new_name: []const u8) !u32 {
- const existing_name = self.getString(old_str_off);
- if (mem.eql(u8, existing_name, new_name)) {
- return old_str_off;
- }
- return self.makeString(new_name);
-}
-
pub fn populateMissingMetadata(self: *Elf) !void {
assert(self.llvm_object == null);
+ const gpa = self.base.allocator;
const small_ptr = switch (self.ptr_width) {
.p32 => true,
.p64 => false,
@@ -477,7 +459,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
const off = self.findFreeSpace(file_size, p_align);
log.debug("found PT_LOAD RE free space 0x{x} to 0x{x}", .{ off, off + file_size });
const entry_addr: u64 = self.entry_addr orelse if (self.base.options.target.cpu.arch == .spu_2) @as(u64, 0) else default_entry_addr;
- try self.program_headers.append(self.base.allocator, .{
+ try self.program_headers.append(gpa, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
.p_filesz = file_size,
@@ -487,7 +469,6 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.p_align = p_align,
.p_flags = elf.PF_X | elf.PF_R,
});
- try self.atom_free_lists.putNoClobber(self.base.allocator, self.phdr_load_re_index.?, .{});
self.entry_addr = null;
self.phdr_table_dirty = true;
}
@@ -504,7 +485,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
// we'll need to re-use that function anyway, in case the GOT grows and overlaps something
// else in virtual memory.
const got_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0x4000000 else 0x8000;
- try self.program_headers.append(self.base.allocator, .{
+ try self.program_headers.append(gpa, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
.p_filesz = file_size,
@@ -527,7 +508,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
log.debug("found PT_LOAD RO free space 0x{x} to 0x{x}", .{ off, off + file_size });
// TODO Same as for GOT
const rodata_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0xc000000 else 0xa000;
- try self.program_headers.append(self.base.allocator, .{
+ try self.program_headers.append(gpa, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
.p_filesz = file_size,
@@ -537,7 +518,6 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.p_align = p_align,
.p_flags = elf.PF_R,
});
- try self.atom_free_lists.putNoClobber(self.base.allocator, self.phdr_load_ro_index.?, .{});
self.phdr_table_dirty = true;
}
@@ -551,7 +531,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
log.debug("found PT_LOAD RW free space 0x{x} to 0x{x}", .{ off, off + file_size });
// TODO Same as for GOT
const rwdata_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0x10000000 else 0xc000;
- try self.program_headers.append(self.base.allocator, .{
+ try self.program_headers.append(gpa, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
.p_filesz = file_size,
@@ -561,278 +541,290 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.p_align = p_align,
.p_flags = elf.PF_R | elf.PF_W,
});
- try self.atom_free_lists.putNoClobber(self.base.allocator, self.phdr_load_rw_index.?, .{});
self.phdr_table_dirty = true;
}
if (self.shstrtab_index == null) {
- self.shstrtab_index = @intCast(u16, self.sections.items.len);
- assert(self.shstrtab.items.len == 0);
- try self.shstrtab.append(self.base.allocator, 0); // need a 0 at position 0
- const off = self.findFreeSpace(self.shstrtab.items.len, 1);
- log.debug("found shstrtab free space 0x{x} to 0x{x}", .{ off, off + self.shstrtab.items.len });
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".shstrtab"),
- .sh_type = elf.SHT_STRTAB,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = self.shstrtab.items.len,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = 1,
- .sh_entsize = 0,
+ self.shstrtab_index = @intCast(u16, self.sections.slice().len);
+ assert(self.shstrtab.buffer.items.len == 0);
+ try self.shstrtab.buffer.append(gpa, 0); // need a 0 at position 0
+ const off = self.findFreeSpace(self.shstrtab.buffer.items.len, 1);
+ log.debug("found shstrtab free space 0x{x} to 0x{x}", .{ off, off + self.shstrtab.buffer.items.len });
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".shstrtab"),
+ .sh_type = elf.SHT_STRTAB,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = self.shstrtab.buffer.items.len,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = 1,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
});
self.shstrtab_dirty = true;
self.shdr_table_dirty = true;
}
if (self.text_section_index == null) {
- self.text_section_index = @intCast(u16, self.sections.items.len);
+ self.text_section_index = @intCast(u16, self.sections.slice().len);
const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".text"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
- .sh_addr = phdr.p_vaddr,
- .sh_offset = phdr.p_offset,
- .sh_size = phdr.p_filesz,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = 1,
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".text"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
+ .sh_addr = phdr.p_vaddr,
+ .sh_offset = phdr.p_offset,
+ .sh_size = phdr.p_filesz,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = 1,
+ .sh_entsize = 0,
+ },
+ .phdr_index = self.phdr_load_re_index.?,
});
- try self.phdr_shdr_table.putNoClobber(
- self.base.allocator,
- self.phdr_load_re_index.?,
- self.text_section_index.?,
- );
self.shdr_table_dirty = true;
}
if (self.got_section_index == null) {
- self.got_section_index = @intCast(u16, self.sections.items.len);
+ self.got_section_index = @intCast(u16, self.sections.slice().len);
const phdr = &self.program_headers.items[self.phdr_got_index.?];
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".got"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_ALLOC,
- .sh_addr = phdr.p_vaddr,
- .sh_offset = phdr.p_offset,
- .sh_size = phdr.p_filesz,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = @as(u16, ptr_size),
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".got"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_ALLOC,
+ .sh_addr = phdr.p_vaddr,
+ .sh_offset = phdr.p_offset,
+ .sh_size = phdr.p_filesz,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = @as(u16, ptr_size),
+ .sh_entsize = 0,
+ },
+ .phdr_index = self.phdr_got_index.?,
});
- try self.phdr_shdr_table.putNoClobber(
- self.base.allocator,
- self.phdr_got_index.?,
- self.got_section_index.?,
- );
self.shdr_table_dirty = true;
}
if (self.rodata_section_index == null) {
- self.rodata_section_index = @intCast(u16, self.sections.items.len);
+ self.rodata_section_index = @intCast(u16, self.sections.slice().len);
const phdr = &self.program_headers.items[self.phdr_load_ro_index.?];
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".rodata"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_ALLOC,
- .sh_addr = phdr.p_vaddr,
- .sh_offset = phdr.p_offset,
- .sh_size = phdr.p_filesz,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = 1,
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".rodata"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_ALLOC,
+ .sh_addr = phdr.p_vaddr,
+ .sh_offset = phdr.p_offset,
+ .sh_size = phdr.p_filesz,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = 1,
+ .sh_entsize = 0,
+ },
+ .phdr_index = self.phdr_load_ro_index.?,
});
- try self.phdr_shdr_table.putNoClobber(
- self.base.allocator,
- self.phdr_load_ro_index.?,
- self.rodata_section_index.?,
- );
self.shdr_table_dirty = true;
}
if (self.data_section_index == null) {
- self.data_section_index = @intCast(u16, self.sections.items.len);
+ self.data_section_index = @intCast(u16, self.sections.slice().len);
const phdr = &self.program_headers.items[self.phdr_load_rw_index.?];
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".data"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_WRITE | elf.SHF_ALLOC,
- .sh_addr = phdr.p_vaddr,
- .sh_offset = phdr.p_offset,
- .sh_size = phdr.p_filesz,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = @as(u16, ptr_size),
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".data"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_WRITE | elf.SHF_ALLOC,
+ .sh_addr = phdr.p_vaddr,
+ .sh_offset = phdr.p_offset,
+ .sh_size = phdr.p_filesz,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = @as(u16, ptr_size),
+ .sh_entsize = 0,
+ },
+ .phdr_index = self.phdr_load_rw_index.?,
});
- try self.phdr_shdr_table.putNoClobber(
- self.base.allocator,
- self.phdr_load_rw_index.?,
- self.data_section_index.?,
- );
self.shdr_table_dirty = true;
}
if (self.symtab_section_index == null) {
- self.symtab_section_index = @intCast(u16, self.sections.items.len);
+ self.symtab_section_index = @intCast(u16, self.sections.slice().len);
const min_align: u16 = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym);
const each_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym);
const file_size = self.base.options.symbol_count_hint * each_size;
const off = self.findFreeSpace(file_size, min_align);
log.debug("found symtab free space 0x{x} to 0x{x}", .{ off, off + file_size });
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".symtab"),
- .sh_type = elf.SHT_SYMTAB,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = file_size,
- // The section header index of the associated string table.
- .sh_link = self.shstrtab_index.?,
- .sh_info = @intCast(u32, self.local_symbols.items.len),
- .sh_addralign = min_align,
- .sh_entsize = each_size,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".symtab"),
+ .sh_type = elf.SHT_SYMTAB,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = file_size,
+ // The section header index of the associated string table.
+ .sh_link = self.shstrtab_index.?,
+ .sh_info = @intCast(u32, self.local_symbols.items.len),
+ .sh_addralign = min_align,
+ .sh_entsize = each_size,
+ },
+ .phdr_index = undefined,
});
self.shdr_table_dirty = true;
try self.writeSymbol(0);
}
- if (self.dwarf) |*dw| {
- if (self.debug_str_section_index == null) {
- self.debug_str_section_index = @intCast(u16, self.sections.items.len);
- assert(dw.strtab.items.len == 0);
- try dw.strtab.append(self.base.allocator, 0);
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".debug_str"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_MERGE | elf.SHF_STRINGS,
- .sh_addr = 0,
- .sh_offset = 0,
- .sh_size = 0,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = 1,
- .sh_entsize = 1,
- });
- self.debug_strtab_dirty = true;
- self.shdr_table_dirty = true;
- }
-
- if (self.debug_info_section_index == null) {
- self.debug_info_section_index = @intCast(u16, self.sections.items.len);
-
- const file_size_hint = 200;
- const p_align = 1;
- const off = self.findFreeSpace(file_size_hint, p_align);
- log.debug("found .debug_info free space 0x{x} to 0x{x}", .{
- off,
- off + file_size_hint,
- });
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".debug_info"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = file_size_hint,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = p_align,
- .sh_entsize = 0,
- });
- self.shdr_table_dirty = true;
- self.debug_info_header_dirty = true;
- }
-
- if (self.debug_abbrev_section_index == null) {
- self.debug_abbrev_section_index = @intCast(u16, self.sections.items.len);
-
- const file_size_hint = 128;
- const p_align = 1;
- const off = self.findFreeSpace(file_size_hint, p_align);
- log.debug("found .debug_abbrev free space 0x{x} to 0x{x}", .{
- off,
- off + file_size_hint,
- });
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".debug_abbrev"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = file_size_hint,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = p_align,
- .sh_entsize = 0,
- });
- self.shdr_table_dirty = true;
- self.debug_abbrev_section_dirty = true;
- }
-
- if (self.debug_aranges_section_index == null) {
- self.debug_aranges_section_index = @intCast(u16, self.sections.items.len);
-
- const file_size_hint = 160;
- const p_align = 16;
- const off = self.findFreeSpace(file_size_hint, p_align);
- log.debug("found .debug_aranges free space 0x{x} to 0x{x}", .{
- off,
- off + file_size_hint,
- });
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".debug_aranges"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = file_size_hint,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = p_align,
- .sh_entsize = 0,
- });
- self.shdr_table_dirty = true;
- self.debug_aranges_section_dirty = true;
- }
-
- if (self.debug_line_section_index == null) {
- self.debug_line_section_index = @intCast(u16, self.sections.items.len);
-
- const file_size_hint = 250;
- const p_align = 1;
- const off = self.findFreeSpace(file_size_hint, p_align);
- log.debug("found .debug_line free space 0x{x} to 0x{x}", .{
- off,
- off + file_size_hint,
- });
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".debug_line"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = file_size_hint,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = p_align,
- .sh_entsize = 0,
- });
- self.shdr_table_dirty = true;
- self.debug_line_header_dirty = true;
- }
- }
+ // if (self.dwarf) |*dw| {
+ // if (self.debug_str_section_index == null) {
+ // self.debug_str_section_index = @intCast(u16, self.sections.slice().len);
+ // assert(dw.strtab.items.len == 0);
+ // try dw.strtab.append(gpa, 0);
+ // try self.sections.append(gpa, .{
+ // .shdr = .{
+ // .sh_name = try self.shstrtab.insert(gpa, ".debug_str"),
+ // .sh_type = elf.SHT_PROGBITS,
+ // .sh_flags = elf.SHF_MERGE | elf.SHF_STRINGS,
+ // .sh_addr = 0,
+ // .sh_offset = 0,
+ // .sh_size = 0,
+ // .sh_link = 0,
+ // .sh_info = 0,
+ // .sh_addralign = 1,
+ // .sh_entsize = 1,
+ // },
+ // .phdr_index = undefined,
+ // });
+ // self.debug_strtab_dirty = true;
+ // self.shdr_table_dirty = true;
+ // }
+
+ // if (self.debug_info_section_index == null) {
+ // self.debug_info_section_index = @intCast(u16, self.sections.slice().len);
+
+ // const file_size_hint = 200;
+ // const p_align = 1;
+ // const off = self.findFreeSpace(file_size_hint, p_align);
+ // log.debug("found .debug_info free space 0x{x} to 0x{x}", .{
+ // off,
+ // off + file_size_hint,
+ // });
+ // try self.sections.append(gpa, .{
+ // .shdr = .{
+ // .sh_name = try self.shstrtab.insert(gpa, ".debug_info"),
+ // .sh_type = elf.SHT_PROGBITS,
+ // .sh_flags = 0,
+ // .sh_addr = 0,
+ // .sh_offset = off,
+ // .sh_size = file_size_hint,
+ // .sh_link = 0,
+ // .sh_info = 0,
+ // .sh_addralign = p_align,
+ // .sh_entsize = 0,
+ // },
+ // .phdr_index = undefined,
+ // });
+ // self.shdr_table_dirty = true;
+ // self.debug_info_header_dirty = true;
+ // }
+
+ // if (self.debug_abbrev_section_index == null) {
+ // self.debug_abbrev_section_index = @intCast(u16, self.sections.slice().len);
+
+ // const file_size_hint = 128;
+ // const p_align = 1;
+ // const off = self.findFreeSpace(file_size_hint, p_align);
+ // log.debug("found .debug_abbrev free space 0x{x} to 0x{x}", .{
+ // off,
+ // off + file_size_hint,
+ // });
+ // try self.sections.append(gpa, .{
+ // .shdr = .{
+ // .sh_name = try self.shstrtab.insert(gpa, ".debug_abbrev"),
+ // .sh_type = elf.SHT_PROGBITS,
+ // .sh_flags = 0,
+ // .sh_addr = 0,
+ // .sh_offset = off,
+ // .sh_size = file_size_hint,
+ // .sh_link = 0,
+ // .sh_info = 0,
+ // .sh_addralign = p_align,
+ // .sh_entsize = 0,
+ // },
+ // .phdr_index = undefined,
+ // });
+ // self.shdr_table_dirty = true;
+ // self.debug_abbrev_section_dirty = true;
+ // }
+
+ // if (self.debug_aranges_section_index == null) {
+ // self.debug_aranges_section_index = @intCast(u16, self.sections.slice().len);
+
+ // const file_size_hint = 160;
+ // const p_align = 16;
+ // const off = self.findFreeSpace(file_size_hint, p_align);
+ // log.debug("found .debug_aranges free space 0x{x} to 0x{x}", .{
+ // off,
+ // off + file_size_hint,
+ // });
+ // try self.sections.append(gpa, .{
+ // .shdr = .{
+ // .sh_name = try self.shstrtab.insert(gpa, ".debug_aranges"),
+ // .sh_type = elf.SHT_PROGBITS,
+ // .sh_flags = 0,
+ // .sh_addr = 0,
+ // .sh_offset = off,
+ // .sh_size = file_size_hint,
+ // .sh_link = 0,
+ // .sh_info = 0,
+ // .sh_addralign = p_align,
+ // .sh_entsize = 0,
+ // },
+ // .phdr_index = undefined,
+ // });
+ // self.shdr_table_dirty = true;
+ // self.debug_aranges_section_dirty = true;
+ // }
+
+ // if (self.debug_line_section_index == null) {
+ // self.debug_line_section_index = @intCast(u16, self.sections.slice().len);
+
+ // const file_size_hint = 250;
+ // const p_align = 1;
+ // const off = self.findFreeSpace(file_size_hint, p_align);
+ // log.debug("found .debug_line free space 0x{x} to 0x{x}", .{
+ // off,
+ // off + file_size_hint,
+ // });
+ // try self.sections.append(gpa, .{
+ // .shdr = .{
+ // .sh_name = try self.shstrtab.insert(gpa, ".debug_line"),
+ // .sh_type = elf.SHT_PROGBITS,
+ // .sh_flags = 0,
+ // .sh_addr = 0,
+ // .sh_offset = off,
+ // .sh_size = file_size_hint,
+ // .sh_link = 0,
+ // .sh_info = 0,
+ // .sh_addralign = p_align,
+ // .sh_entsize = 0,
+ // },
+ // .phdr_index = undefined,
+ // });
+ // self.shdr_table_dirty = true;
+ // self.debug_line_header_dirty = true;
+ // }
+ // }
const shsize: u64 = switch (self.ptr_width) {
.p32 => @sizeOf(elf.Elf32_Shdr),
@@ -843,7 +835,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.p64 => @alignOf(elf.Elf64_Shdr),
};
if (self.shdr_table_offset == null) {
- self.shdr_table_offset = self.findFreeSpace(self.sections.items.len * shsize, shalign);
+ self.shdr_table_offset = self.findFreeSpace(self.sections.slice().len * shsize, shalign);
self.shdr_table_dirty = true;
}
@@ -874,7 +866,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
// offset + it's filesize.
var max_file_offset: u64 = 0;
- for (self.sections.items) |shdr| {
+ for (self.sections.items(.shdr)) |shdr| {
if (shdr.sh_offset + shdr.sh_size > max_file_offset) {
max_file_offset = shdr.sh_offset + shdr.sh_size;
}
@@ -884,15 +876,18 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
}
-fn growAllocSection(self: *Elf, shdr_index: u16, phdr_index: u16, needed_size: u64) !void {
+fn growAllocSection(self: *Elf, shdr_index: u16, needed_size: u64) !void {
// TODO Also detect virtual address collisions.
- const shdr = &self.sections.items[shdr_index];
+ const shdr = &self.sections.items(.shdr)[shdr_index];
+ const phdr_index = self.sections.items(.phdr_index)[shdr_index];
const phdr = &self.program_headers.items[phdr_index];
+ const maybe_last_atom_index = self.sections.items(.last_atom_index)[shdr_index];
if (needed_size > self.allocatedSize(shdr.sh_offset)) {
// Must move the entire section.
const new_offset = self.findFreeSpace(needed_size, self.page_size);
- const existing_size = if (self.atoms.get(phdr_index)) |last| blk: {
+ const existing_size = if (maybe_last_atom_index) |last_atom_index| blk: {
+ const last = self.getAtom(last_atom_index);
const sym = last.getSymbol(self);
break :blk (sym.st_value + sym.st_size) - phdr.p_vaddr;
} else if (shdr_index == self.got_section_index.?) blk: {
@@ -900,8 +895,8 @@ fn growAllocSection(self: *Elf, shdr_index: u16, phdr_index: u16, needed_size: u
} else 0;
shdr.sh_size = 0;
- log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{
- self.getString(shdr.sh_name),
+ log.debug("new '{?s}' file offset 0x{x} to 0x{x}", .{
+ self.shstrtab.get(shdr.sh_name),
new_offset,
new_offset + existing_size,
});
@@ -927,7 +922,7 @@ pub fn growNonAllocSection(
min_alignment: u32,
requires_file_copy: bool,
) !void {
- const shdr = &self.sections.items[shdr_index];
+ const shdr = &self.sections.items(.shdr)[shdr_index];
if (needed_size > self.allocatedSize(shdr.sh_offset)) {
const existing_size = if (self.symtab_section_index.? == shdr_index) blk: {
@@ -940,7 +935,7 @@ pub fn growNonAllocSection(
shdr.sh_size = 0;
// Move all the symbols to a new file location.
const new_offset = self.findFreeSpace(needed_size, min_alignment);
- log.debug("moving '{s}' from 0x{x} to 0x{x}", .{ self.getString(shdr.sh_name), shdr.sh_offset, new_offset });
+ log.debug("moving '{?s}' from 0x{x} to 0x{x}", .{ self.shstrtab.get(shdr.sh_name), shdr.sh_offset, new_offset });
if (requires_file_copy) {
const amt = try self.base.file.?.copyRangeAll(
@@ -961,25 +956,26 @@ pub fn growNonAllocSection(
}
pub fn markDirty(self: *Elf, shdr_index: u16, phdr_index: ?u16) void {
+ _ = shdr_index;
self.shdr_table_dirty = true; // TODO look into only writing one section
if (phdr_index) |_| {
self.phdr_table_dirty = true; // TODO look into making only the one program header dirty
}
- if (self.dwarf) |_| {
- if (self.debug_info_section_index.? == shdr_index) {
- self.debug_info_header_dirty = true;
- } else if (self.debug_line_section_index.? == shdr_index) {
- self.debug_line_header_dirty = true;
- } else if (self.debug_abbrev_section_index.? == shdr_index) {
- self.debug_abbrev_section_dirty = true;
- } else if (self.debug_str_section_index.? == shdr_index) {
- self.debug_strtab_dirty = true;
- } else if (self.debug_aranges_section_index.? == shdr_index) {
- self.debug_aranges_section_dirty = true;
- }
- }
+ // if (self.dwarf) |_| {
+ // if (self.debug_info_section_index.? == shdr_index) {
+ // self.debug_info_header_dirty = true;
+ // } else if (self.debug_line_section_index.? == shdr_index) {
+ // self.debug_line_header_dirty = true;
+ // } else if (self.debug_abbrev_section_index.? == shdr_index) {
+ // self.debug_abbrev_section_dirty = true;
+ // } else if (self.debug_str_section_index.? == shdr_index) {
+ // self.debug_strtab_dirty = true;
+ // } else if (self.debug_aranges_section_index.? == shdr_index) {
+ // self.debug_aranges_section_dirty = true;
+ // }
+ // }
}
pub fn flush(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) link.File.FlushError!void {
@@ -1011,6 +1007,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
}
}
+ const gpa = self.base.allocator;
var sub_prog_node = prog_node.start("ELF Flush", 0);
sub_prog_node.activate();
defer sub_prog_node.end();
@@ -1018,23 +1015,25 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
// TODO This linker code currently assumes there is only 1 compilation unit and it
// corresponds to the Zig source code.
const module = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented;
+ _ = module;
const target_endian = self.base.options.target.cpu.arch.endian();
const foreign_endian = target_endian != builtin.cpu.arch.endian();
- if (self.dwarf) |*dw| {
- try dw.flushModule(module);
- }
+ // if (self.dwarf) |*dw| {
+ // try dw.flushModule(module);
+ // }
{
var it = self.relocs.iterator();
while (it.next()) |entry| {
- const atom = entry.key_ptr.*;
+ const atom_index = entry.key_ptr.*;
const relocs = entry.value_ptr.*;
+ const atom = self.getAtom(atom_index);
const source_sym = atom.getSymbol(self);
- const source_shdr = self.sections.items[source_sym.st_shndx];
+ const source_shdr = self.sections.items(.shdr)[source_sym.st_shndx];
- log.debug("relocating '{s}'", .{self.getString(source_sym.st_name)});
+ log.debug("relocating '{?s}'", .{self.shstrtab.get(source_sym.st_name)});
for (relocs.items) |*reloc| {
const target_sym = self.local_symbols.items[reloc.target];
@@ -1045,10 +1044,10 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
const section_offset = (source_sym.st_value + reloc.offset) - source_shdr.sh_addr;
const file_offset = source_shdr.sh_offset + section_offset;
- log.debug(" ({x}: [() => 0x{x}] ({s}))", .{
+ log.debug(" ({x}: [() => 0x{x}] ({?s}))", .{
reloc.offset,
target_vaddr,
- self.getString(target_sym.st_name),
+ self.shstrtab.get(target_sym.st_name),
});
switch (self.ptr_width) {
@@ -1069,43 +1068,43 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
self.logSymtab();
}
- if (self.dwarf) |*dw| {
- if (self.debug_abbrev_section_dirty) {
- try dw.writeDbgAbbrev();
- if (!self.shdr_table_dirty) {
- // Then it won't get written with the others and we need to do it.
- try self.writeSectHeader(self.debug_abbrev_section_index.?);
- }
- self.debug_abbrev_section_dirty = false;
- }
-
- if (self.debug_info_header_dirty) {
- // Currently only one compilation unit is supported, so the address range is simply
- // identical to the main program header virtual address and memory size.
- const text_phdr = &self.program_headers.items[self.phdr_load_re_index.?];
- const low_pc = text_phdr.p_vaddr;
- const high_pc = text_phdr.p_vaddr + text_phdr.p_memsz;
- try dw.writeDbgInfoHeader(module, low_pc, high_pc);
- self.debug_info_header_dirty = false;
- }
-
- if (self.debug_aranges_section_dirty) {
- // Currently only one compilation unit is supported, so the address range is simply
- // identical to the main program header virtual address and memory size.
- const text_phdr = &self.program_headers.items[self.phdr_load_re_index.?];
- try dw.writeDbgAranges(text_phdr.p_vaddr, text_phdr.p_memsz);
- if (!self.shdr_table_dirty) {
- // Then it won't get written with the others and we need to do it.
- try self.writeSectHeader(self.debug_aranges_section_index.?);
- }
- self.debug_aranges_section_dirty = false;
- }
-
- if (self.debug_line_header_dirty) {
- try dw.writeDbgLineHeader();
- self.debug_line_header_dirty = false;
- }
- }
+ // if (self.dwarf) |*dw| {
+ // if (self.debug_abbrev_section_dirty) {
+ // try dw.writeDbgAbbrev();
+ // if (!self.shdr_table_dirty) {
+ // // Then it won't get written with the others and we need to do it.
+ // try self.writeSectHeader(self.debug_abbrev_section_index.?);
+ // }
+ // self.debug_abbrev_section_dirty = false;
+ // }
+
+ // if (self.debug_info_header_dirty) {
+ // // Currently only one compilation unit is supported, so the address range is simply
+ // // identical to the main program header virtual address and memory size.
+ // const text_phdr = &self.program_headers.items[self.phdr_load_re_index.?];
+ // const low_pc = text_phdr.p_vaddr;
+ // const high_pc = text_phdr.p_vaddr + text_phdr.p_memsz;
+ // try dw.writeDbgInfoHeader(module, low_pc, high_pc);
+ // self.debug_info_header_dirty = false;
+ // }
+
+ // if (self.debug_aranges_section_dirty) {
+ // // Currently only one compilation unit is supported, so the address range is simply
+ // // identical to the main program header virtual address and memory size.
+ // const text_phdr = &self.program_headers.items[self.phdr_load_re_index.?];
+ // try dw.writeDbgAranges(text_phdr.p_vaddr, text_phdr.p_memsz);
+ // if (!self.shdr_table_dirty) {
+ // // Then it won't get written with the others and we need to do it.
+ // try self.writeSectHeader(self.debug_aranges_section_index.?);
+ // }
+ // self.debug_aranges_section_dirty = false;
+ // }
+
+ // if (self.debug_line_header_dirty) {
+ // try dw.writeDbgLineHeader();
+ // self.debug_line_header_dirty = false;
+ // }
+ // }
if (self.phdr_table_dirty) {
const phsize: u64 = switch (self.ptr_width) {
@@ -1126,8 +1125,8 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
switch (self.ptr_width) {
.p32 => {
- const buf = try self.base.allocator.alloc(elf.Elf32_Phdr, self.program_headers.items.len);
- defer self.base.allocator.free(buf);
+ const buf = try gpa.alloc(elf.Elf32_Phdr, self.program_headers.items.len);
+ defer gpa.free(buf);
for (buf) |*phdr, i| {
phdr.* = progHeaderTo32(self.program_headers.items[i]);
@@ -1138,8 +1137,8 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.phdr_table_offset.?);
},
.p64 => {
- const buf = try self.base.allocator.alloc(elf.Elf64_Phdr, self.program_headers.items.len);
- defer self.base.allocator.free(buf);
+ const buf = try gpa.alloc(elf.Elf64_Phdr, self.program_headers.items.len);
+ defer gpa.free(buf);
for (buf) |*phdr, i| {
phdr.* = self.program_headers.items[i];
@@ -1155,23 +1154,23 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
{
const shdr_index = self.shstrtab_index.?;
- if (self.shstrtab_dirty or self.shstrtab.items.len != self.sections.items[shdr_index].sh_size) {
- try self.growNonAllocSection(shdr_index, self.shstrtab.items.len, 1, false);
- const shstrtab_sect = self.sections.items[shdr_index];
- try self.base.file.?.pwriteAll(self.shstrtab.items, shstrtab_sect.sh_offset);
+ if (self.shstrtab_dirty or self.shstrtab.buffer.items.len != self.sections.items(.shdr)[shdr_index].sh_size) {
+ try self.growNonAllocSection(shdr_index, self.shstrtab.buffer.items.len, 1, false);
+ const shstrtab_sect = self.sections.items(.shdr)[shdr_index];
+ try self.base.file.?.pwriteAll(self.shstrtab.buffer.items, shstrtab_sect.sh_offset);
self.shstrtab_dirty = false;
}
}
- if (self.dwarf) |dwarf| {
- const shdr_index = self.debug_str_section_index.?;
- if (self.debug_strtab_dirty or dwarf.strtab.items.len != self.sections.items[shdr_index].sh_size) {
- try self.growNonAllocSection(shdr_index, dwarf.strtab.items.len, 1, false);
- const debug_strtab_sect = self.sections.items[shdr_index];
- try self.base.file.?.pwriteAll(dwarf.strtab.items, debug_strtab_sect.sh_offset);
- self.debug_strtab_dirty = false;
- }
- }
+ // if (self.dwarf) |dwarf| {
+ // const shdr_index = self.debug_str_section_index.?;
+ // if (self.debug_strtab_dirty or dwarf.strtab.items.len != self.sections.items(.shdr)[shdr_index].sh_size) {
+ // try self.growNonAllocSection(shdr_index, dwarf.strtab.items.len, 1, false);
+ // const debug_strtab_sect = self.sections.items(.shdr)[shdr_index];
+ // try self.base.file.?.pwriteAll(dwarf.strtab.items, debug_strtab_sect.sh_offset);
+ // self.debug_strtab_dirty = false;
+ // }
+ // }
if (self.shdr_table_dirty) {
const shsize: u64 = switch (self.ptr_width) {
@@ -1183,7 +1182,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
.p64 => @alignOf(elf.Elf64_Shdr),
};
const allocated_size = self.allocatedSize(self.shdr_table_offset.?);
- const needed_size = self.sections.items.len * shsize;
+ const needed_size = self.sections.slice().len * shsize;
if (needed_size > allocated_size) {
self.shdr_table_offset = null; // free the space
@@ -1192,12 +1191,13 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
switch (self.ptr_width) {
.p32 => {
- const buf = try self.base.allocator.alloc(elf.Elf32_Shdr, self.sections.items.len);
- defer self.base.allocator.free(buf);
+ const slice = self.sections.slice();
+ const buf = try gpa.alloc(elf.Elf32_Shdr, slice.len);
+ defer gpa.free(buf);
for (buf) |*shdr, i| {
- shdr.* = sectHeaderTo32(self.sections.items[i]);
- log.debug("writing section {s}: {}", .{ self.getString(shdr.sh_name), shdr.* });
+ shdr.* = sectHeaderTo32(slice.items(.shdr)[i]);
+ log.debug("writing section {?s}: {}", .{ self.shstrtab.get(shdr.sh_name), shdr.* });
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf32_Shdr, shdr);
}
@@ -1205,12 +1205,13 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
},
.p64 => {
- const buf = try self.base.allocator.alloc(elf.Elf64_Shdr, self.sections.items.len);
- defer self.base.allocator.free(buf);
+ const slice = self.sections.slice();
+ const buf = try gpa.alloc(elf.Elf64_Shdr, slice.len);
+ defer gpa.free(buf);
for (buf) |*shdr, i| {
- shdr.* = self.sections.items[i];
- log.debug("writing section {s}: {}", .{ self.getString(shdr.sh_name), shdr.* });
+ shdr.* = slice.items(.shdr)[i];
+ log.debug("writing section {?s}: {}", .{ self.shstrtab.get(shdr.sh_name), shdr.* });
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf64_Shdr, shdr);
}
@@ -2021,7 +2022,7 @@ fn writeElfHeader(self: *Elf) !void {
mem.writeInt(u16, hdr_buf[index..][0..2], e_shentsize, endian);
index += 2;
- const e_shnum = @intCast(u16, self.sections.items.len);
+ const e_shnum = @intCast(u16, self.sections.slice().len);
mem.writeInt(u16, hdr_buf[index..][0..2], e_shnum, endian);
index += 2;
@@ -2033,124 +2034,150 @@ fn writeElfHeader(self: *Elf) !void {
try self.base.file.?.pwriteAll(hdr_buf[0..index], 0);
}
-fn freeTextBlock(self: *Elf, text_block: *TextBlock, phdr_index: u16) void {
- const local_sym = text_block.getSymbol(self);
- const name_str_index = local_sym.st_name;
- const name = self.getString(name_str_index);
- log.debug("freeTextBlock {*} ({s})", .{ text_block, name });
+fn freeAtom(self: *Elf, atom_index: Atom.Index) void {
+ const atom = self.getAtom(atom_index);
+ log.debug("freeAtom {d} ({s})", .{ atom_index, atom.getName(self) });
- self.freeRelocationsForTextBlock(text_block);
+ Atom.freeRelocations(self, atom_index);
- const free_list = self.atom_free_lists.getPtr(phdr_index).?;
+ const gpa = self.base.allocator;
+ const shndx = atom.getSymbol(self).st_shndx;
+ const free_list = &self.sections.items(.free_list)[shndx];
var already_have_free_list_node = false;
{
var i: usize = 0;
// TODO turn free_list into a hash map
while (i < free_list.items.len) {
- if (free_list.items[i] == text_block) {
+ if (free_list.items[i] == atom_index) {
_ = free_list.swapRemove(i);
continue;
}
- if (free_list.items[i] == text_block.prev) {
+ if (free_list.items[i] == atom.prev_index) {
already_have_free_list_node = true;
}
i += 1;
}
}
- if (self.atoms.getPtr(phdr_index)) |last_block| {
- if (last_block.* == text_block) {
- if (text_block.prev) |prev| {
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[shndx];
+ if (maybe_last_atom_index.*) |last_atom_index| {
+ if (last_atom_index == atom_index) {
+ if (atom.prev_index) |prev_index| {
// TODO shrink the section size here
- last_block.* = prev;
+ maybe_last_atom_index.* = prev_index;
} else {
- _ = self.atoms.fetchRemove(phdr_index);
+ maybe_last_atom_index.* = null;
}
}
}
- if (text_block.prev) |prev| {
- prev.next = text_block.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
- if (!already_have_free_list_node and prev.freeListEligible(self)) {
+ if (!already_have_free_list_node and prev.*.freeListEligible(self)) {
// The free list is heuristics, it doesn't have to be perfect, so we can
// ignore the OOM here.
- free_list.append(self.base.allocator, prev) catch {};
+ free_list.append(gpa, prev_index) catch {};
}
} else {
- text_block.prev = null;
+ self.getAtomPtr(atom_index).prev_index = null;
}
- if (text_block.next) |next| {
- next.prev = text_block.prev;
+ if (atom.next_index) |next_index| {
+ self.getAtomPtr(next_index).prev_index = atom.prev_index;
} else {
- text_block.next = null;
+ self.getAtomPtr(atom_index).next_index = null;
}
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
- const local_sym_index = text_block.getSymbolIndex().?;
- self.local_symbol_free_list.append(self.base.allocator, local_sym_index) catch {};
+ const local_sym_index = atom.getSymbolIndex().?;
+
+ self.local_symbol_free_list.append(gpa, local_sym_index) catch {};
self.local_symbols.items[local_sym_index].st_info = 0;
+ self.local_symbols.items[local_sym_index].st_shndx = 0;
_ = self.atom_by_index_table.remove(local_sym_index);
- text_block.local_sym_index = 0;
+ self.getAtomPtr(atom_index).local_sym_index = 0;
- self.offset_table_free_list.append(self.base.allocator, text_block.offset_table_index) catch {};
+ self.offset_table_free_list.append(self.base.allocator, atom.offset_table_index) catch {};
- if (self.dwarf) |*dw| {
- dw.freeAtom(&text_block.dbg_info_atom);
- }
+ // if (self.dwarf) |*dw| {
+ // dw.freeAtom(&atom.dbg_info_atom);
+ // }
}
-fn shrinkTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, phdr_index: u16) void {
+fn shrinkAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64) void {
_ = self;
- _ = text_block;
+ _ = atom_index;
_ = new_block_size;
- _ = phdr_index;
}
-fn growTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, alignment: u64, phdr_index: u16) !u64 {
- const sym = text_block.getSymbol(self);
+fn growAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignment: u64) !u64 {
+ const atom = self.getAtom(atom_index);
+ const sym = atom.getSymbol(self);
const align_ok = mem.alignBackwardGeneric(u64, sym.st_value, alignment) == sym.st_value;
- const need_realloc = !align_ok or new_block_size > text_block.capacity(self);
+ const need_realloc = !align_ok or new_block_size > atom.capacity(self);
if (!need_realloc) return sym.st_value;
- return self.allocateTextBlock(text_block, new_block_size, alignment, phdr_index);
+ return self.allocateAtom(atom_index, new_block_size, alignment);
+}
+
+pub fn createAtom(self: *Elf) !Atom.Index {
+ const gpa = self.base.allocator;
+ const atom_index = @intCast(Atom.Index, self.atoms.items.len);
+ const atom = try self.atoms.addOne(gpa);
+ const local_sym_index = try self.allocateLocalSymbol();
+ const offset_table_index = try self.allocateGotOffset();
+ try self.atom_by_index_table.putNoClobber(gpa, local_sym_index, atom_index);
+ atom.* = .{
+ .local_sym_index = local_sym_index,
+ .offset_table_index = offset_table_index,
+ .prev_index = null,
+ .next_index = null,
+ .dbg_info_atom = undefined,
+ };
+ log.debug("creating ATOM(%{d}) at index {d}", .{ local_sym_index, atom_index });
+ return atom_index;
}
-fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, alignment: u64, phdr_index: u16) !u64 {
- const shdr_index = self.phdr_shdr_table.get(phdr_index).?;
+fn allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignment: u64) !u64 {
+ const atom = self.getAtom(atom_index);
+ const sym = atom.getSymbol(self);
+ const phdr_index = self.sections.items(.phdr_index)[sym.st_shndx];
const phdr = &self.program_headers.items[phdr_index];
- const shdr = &self.sections.items[shdr_index];
- const new_block_ideal_capacity = padToIdeal(new_block_size);
+ const shdr = &self.sections.items(.shdr)[sym.st_shndx];
+ const free_list = &self.sections.items(.free_list)[sym.st_shndx];
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sym.st_shndx];
+ const new_atom_ideal_capacity = padToIdeal(new_block_size);
- // We use these to indicate our intention to update metadata, placing the new block,
+ // We use these to indicate our intention to update metadata, placing the new atom,
// and possibly removing a free list node.
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
- var block_placement: ?*TextBlock = null;
+ var atom_placement: ?Atom.Index = null;
var free_list_removal: ?usize = null;
- var free_list = self.atom_free_lists.get(phdr_index).?;
// First we look for an appropriately sized free list node.
// The list is unordered. We'll just take the first thing that works.
const vaddr = blk: {
var i: usize = 0;
while (i < free_list.items.len) {
- const big_block = free_list.items[i];
- // We now have a pointer to a live text block that has too much capacity.
- // Is it enough that we could fit this new text block?
- const sym = big_block.getSymbol(self);
- const capacity = big_block.capacity(self);
+ const big_atom_index = free_list.items[i];
+ const big_atom = self.getAtom(big_atom_index);
+ // We now have a pointer to a live atom that has too much capacity.
+ // Is it enough that we could fit this new atom?
+ const big_atom_sym = big_atom.getSymbol(self);
+ const capacity = big_atom.capacity(self);
const ideal_capacity = padToIdeal(capacity);
- const ideal_capacity_end_vaddr = std.math.add(u64, sym.st_value, ideal_capacity) catch ideal_capacity;
- const capacity_end_vaddr = sym.st_value + capacity;
- const new_start_vaddr_unaligned = capacity_end_vaddr - new_block_ideal_capacity;
+ const ideal_capacity_end_vaddr = std.math.add(u64, big_atom_sym.st_value, ideal_capacity) catch ideal_capacity;
+ const capacity_end_vaddr = big_atom_sym.st_value + capacity;
+ const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
const new_start_vaddr = mem.alignBackwardGeneric(u64, new_start_vaddr_unaligned, alignment);
if (new_start_vaddr < ideal_capacity_end_vaddr) {
// Additional bookkeeping here to notice if this free list node
// should be deleted because the block that it points to has grown to take up
// more of the extra capacity.
- if (!big_block.freeListEligible(self)) {
+ if (!big_atom.freeListEligible(self)) {
_ = free_list.swapRemove(i);
} else {
i += 1;
@@ -2164,60 +2191,69 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
const keep_free_list_node = remaining_capacity >= min_text_capacity;
// Set up the metadata to be updated, after errors are no longer possible.
- block_placement = big_block;
+ atom_placement = big_atom_index;
if (!keep_free_list_node) {
free_list_removal = i;
}
break :blk new_start_vaddr;
- } else if (self.atoms.get(phdr_index)) |last| {
- const sym = last.getSymbol(self);
- const ideal_capacity = padToIdeal(sym.st_size);
- const ideal_capacity_end_vaddr = sym.st_value + ideal_capacity;
+ } else if (maybe_last_atom_index.*) |last_index| {
+ const last = self.getAtom(last_index);
+ const last_sym = last.getSymbol(self);
+ const ideal_capacity = padToIdeal(last_sym.st_size);
+ const ideal_capacity_end_vaddr = last_sym.st_value + ideal_capacity;
const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment);
// Set up the metadata to be updated, after errors are no longer possible.
- block_placement = last;
+ atom_placement = last_index;
break :blk new_start_vaddr;
} else {
break :blk phdr.p_vaddr;
}
};
- const expand_text_section = block_placement == null or block_placement.?.next == null;
- if (expand_text_section) {
+ const expand_section = if (atom_placement) |placement_index|
+ self.getAtom(placement_index).next_index == null
+ else
+ true;
+ if (expand_section) {
const needed_size = (vaddr + new_block_size) - phdr.p_vaddr;
- try self.growAllocSection(shdr_index, phdr_index, needed_size);
- _ = try self.atoms.put(self.base.allocator, phdr_index, text_block);
-
- if (self.dwarf) |_| {
- // The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
- // range of the compilation unit. When we expand the text section, this range changes,
- // so the DW_TAG.compile_unit tag of the .debug_info section becomes dirty.
- self.debug_info_header_dirty = true;
- // This becomes dirty for the same reason. We could potentially make this more
- // fine-grained with the addition of support for more compilation units. It is planned to
- // model each package as a different compilation unit.
- self.debug_aranges_section_dirty = true;
- }
+ try self.growAllocSection(sym.st_shndx, needed_size);
+ maybe_last_atom_index.* = atom_index;
+
+ // if (self.dwarf) |_| {
+ // // The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
+ // // range of the compilation unit. When we expand the text section, this range changes,
+ // // so the DW_TAG.compile_unit tag of the .debug_info section becomes dirty.
+ // self.debug_info_header_dirty = true;
+ // // This becomes dirty for the same reason. We could potentially make this more
+ // // fine-grained with the addition of support for more compilation units. It is planned to
+ // // model each package as a different compilation unit.
+ // self.debug_aranges_section_dirty = true;
+ // }
}
shdr.sh_addralign = math.max(shdr.sh_addralign, alignment);
- // This function can also reallocate a text block.
+ // This function can also reallocate an atom.
// In this case we need to "unplug" it from its previous location before
// plugging it in to its new location.
- if (text_block.prev) |prev| {
- prev.next = text_block.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
}
- if (text_block.next) |next| {
- next.prev = text_block.prev;
+ if (atom.next_index) |next_index| {
+ const next = self.getAtomPtr(next_index);
+ next.prev_index = atom.prev_index;
}
- if (block_placement) |big_block| {
- text_block.prev = big_block;
- text_block.next = big_block.next;
- big_block.next = text_block;
+ if (atom_placement) |big_atom_index| {
+ const big_atom = self.getAtomPtr(big_atom_index);
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = big_atom_index;
+ atom_ptr.next_index = big_atom.next_index;
+ big_atom.next_index = atom_index;
} else {
- text_block.prev = null;
- text_block.next = null;
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = null;
+ atom_ptr.next_index = null;
}
if (free_list_removal) |i| {
_ = free_list.swapRemove(i);
@@ -2272,15 +2308,10 @@ pub fn allocateGotOffset(self: *Elf) !u32 {
return index;
}
-fn freeRelocationsForTextBlock(self: *Elf, text_block: *TextBlock) void {
- var removed_relocs = self.relocs.fetchRemove(text_block);
- if (removed_relocs) |*relocs| relocs.value.deinit(self.base.allocator);
-}
-
fn freeUnnamedConsts(self: *Elf, decl_index: Module.Decl.Index) void {
const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return;
for (unnamed_consts.items) |atom| {
- self.freeTextBlock(atom, self.phdr_load_ro_index.?);
+ self.freeAtom(atom);
}
unnamed_consts.clearAndFree(self.base.allocator);
}
@@ -2295,43 +2326,57 @@ pub fn freeDecl(self: *Elf, decl_index: Module.Decl.Index) void {
log.debug("freeDecl {*}", .{decl});
- if (self.decls.fetchRemove(decl_index)) |kv| {
- if (kv.value) |index| {
- self.freeTextBlock(&decl.link.elf, index);
- self.freeUnnamedConsts(decl_index);
- }
+ if (self.decls.fetchRemove(decl_index)) |const_kv| {
+ var kv = const_kv;
+ self.freeAtom(kv.value.atom);
+ self.freeUnnamedConsts(decl_index);
+ kv.value.exports.deinit(self.base.allocator);
}
- if (self.dwarf) |*dw| {
- dw.freeDecl(decl);
+ // if (self.dwarf) |*dw| {
+ // dw.freeDecl(decl);
+ // }
+}
+
+pub fn getOrCreateAtomForDecl(self: *Elf, decl_index: Module.Decl.Index) !Atom.Index {
+ const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{
+ .atom = try self.createAtom(),
+ .shdr = self.getDeclShdrIndex(decl_index),
+ .exports = .{},
+ };
}
+ return gop.value_ptr.atom;
}
-fn getDeclPhdrIndex(self: *Elf, decl: *Module.Decl) !u16 {
+fn getDeclShdrIndex(self: *Elf, decl_index: Module.Decl.Index) u16 {
+ const decl = self.base.options.module.?.declPtr(decl_index);
const ty = decl.ty;
const zig_ty = ty.zigTypeTag();
const val = decl.val;
- const phdr_index: u16 = blk: {
+ const shdr_index: u16 = blk: {
if (val.isUndefDeep()) {
// TODO in release-fast and release-small, we should put undef in .bss
- break :blk self.phdr_load_rw_index.?;
+ break :blk self.data_section_index.?;
}
switch (zig_ty) {
// TODO: what if this is a function pointer?
- .Fn => break :blk self.phdr_load_re_index.?,
+ .Fn => break :blk self.text_section_index.?,
else => {
if (val.castTag(.variable)) |_| {
- break :blk self.phdr_load_rw_index.?;
+ break :blk self.data_section_index.?;
}
- break :blk self.phdr_load_ro_index.?;
+ break :blk self.rodata_section_index.?;
},
}
};
- return phdr_index;
+ return shdr_index;
}
fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, stt_bits: u8) !*elf.Elf64_Sym {
+ const gpa = self.base.allocator;
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
@@ -2341,60 +2386,65 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
const required_alignment = decl.getAlignment(self.base.options.target);
- const decl_ptr = self.decls.getPtr(decl_index).?;
- if (decl_ptr.* == null) {
- decl_ptr.* = try self.getDeclPhdrIndex(decl);
- }
- const phdr_index = decl_ptr.*.?;
- const shdr_index = self.phdr_shdr_table.get(phdr_index).?;
+ const decl_metadata = self.decls.get(decl_index).?;
+ const atom_index = decl_metadata.atom;
+ const atom = self.getAtom(atom_index);
+
+ const shdr_index = decl_metadata.shdr;
+ if (atom.getSymbol(self).st_size != 0) {
+ const local_sym = atom.getSymbolPtr(self);
+ local_sym.st_name = try self.shstrtab.insert(gpa, decl_name);
+ local_sym.st_info = (elf.STB_LOCAL << 4) | stt_bits;
+ local_sym.st_other = 0;
+ local_sym.st_shndx = shdr_index;
- const local_sym = decl.link.elf.getSymbolPtr(self);
- if (local_sym.st_size != 0) {
- const capacity = decl.link.elf.capacity(self);
+ const capacity = atom.capacity(self);
const need_realloc = code.len > capacity or
!mem.isAlignedGeneric(u64, local_sym.st_value, required_alignment);
+
if (need_realloc) {
- const vaddr = try self.growTextBlock(&decl.link.elf, code.len, required_alignment, phdr_index);
+ const vaddr = try self.growAtom(atom_index, code.len, required_alignment);
log.debug("growing {s} from 0x{x} to 0x{x}", .{ decl_name, local_sym.st_value, vaddr });
if (vaddr != local_sym.st_value) {
local_sym.st_value = vaddr;
log.debug(" (writing new offset table entry)", .{});
- self.offset_table.items[decl.link.elf.offset_table_index] = vaddr;
- try self.writeOffsetTableEntry(decl.link.elf.offset_table_index);
+ self.offset_table.items[atom.offset_table_index] = vaddr;
+ try self.writeOffsetTableEntry(atom.offset_table_index);
}
} else if (code.len < local_sym.st_size) {
- self.shrinkTextBlock(&decl.link.elf, code.len, phdr_index);
+ self.shrinkAtom(atom_index, code.len);
}
local_sym.st_size = code.len;
- local_sym.st_name = try self.updateString(local_sym.st_name, decl_name);
- local_sym.st_info = (elf.STB_LOCAL << 4) | stt_bits;
- local_sym.st_other = 0;
- local_sym.st_shndx = shdr_index;
+
// TODO this write could be avoided if no fields of the symbol were changed.
- try self.writeSymbol(decl.link.elf.getSymbolIndex().?);
+ try self.writeSymbol(atom.getSymbolIndex().?);
} else {
- const name_str_index = try self.makeString(decl_name);
- const vaddr = try self.allocateTextBlock(&decl.link.elf, code.len, required_alignment, phdr_index);
- errdefer self.freeTextBlock(&decl.link.elf, phdr_index);
- log.debug("allocated text block for {s} at 0x{x}", .{ decl_name, vaddr });
-
+ const local_sym = atom.getSymbolPtr(self);
local_sym.* = .{
- .st_name = name_str_index,
+ .st_name = try self.shstrtab.insert(gpa, decl_name),
.st_info = (elf.STB_LOCAL << 4) | stt_bits,
.st_other = 0,
.st_shndx = shdr_index,
- .st_value = vaddr,
- .st_size = code.len,
+ .st_value = 0,
+ .st_size = 0,
};
- self.offset_table.items[decl.link.elf.offset_table_index] = vaddr;
+ const vaddr = try self.allocateAtom(atom_index, code.len, required_alignment);
+ errdefer self.freeAtom(atom_index);
+ log.debug("allocated text block for {s} at 0x{x}", .{ decl_name, vaddr });
- try self.writeSymbol(decl.link.elf.getSymbolIndex().?);
- try self.writeOffsetTableEntry(decl.link.elf.offset_table_index);
+ self.offset_table.items[atom.offset_table_index] = vaddr;
+ local_sym.st_value = vaddr;
+ local_sym.st_size = code.len;
+
+ try self.writeSymbol(atom.getSymbolIndex().?);
+ try self.writeOffsetTableEntry(atom.offset_table_index);
}
+ const local_sym = atom.getSymbolPtr(self);
+ const phdr_index = self.sections.items(.phdr_index)[shdr_index];
const section_offset = local_sym.st_value - self.program_headers.items[phdr_index].p_vaddr;
- const file_offset = self.sections.items[shdr_index].sh_offset + section_offset;
+ const file_offset = self.sections.items(.shdr)[shdr_index].sh_offset + section_offset;
try self.base.file.?.pwriteAll(code, file_offset);
return local_sym;
@@ -2413,28 +2463,23 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
- const atom = &decl.link.elf;
- try atom.ensureInitialized(self);
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (gop.found_existing) {
- self.freeUnnamedConsts(decl_index);
- self.freeRelocationsForTextBlock(atom);
- } else {
- gop.value_ptr.* = null;
- }
+
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ self.freeUnnamedConsts(decl_index);
+ Atom.freeRelocations(self, atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
- var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl_index) else null;
- defer if (decl_state) |*ds| ds.deinit();
+ // var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl_index) else null;
+ // defer if (decl_state) |*ds| ds.deinit();
- const res = if (decl_state) |*ds|
- try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{
- .dwarf = ds,
- })
- else
- try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none);
+ // const res = if (decl_state) |*ds|
+ // try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{
+ // .dwarf = ds,
+ // })
+ // else
+ const res = try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none);
const code = switch (res) {
.ok => code_buffer.items,
@@ -2445,15 +2490,16 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
},
};
const local_sym = try self.updateDeclCode(decl_index, code, elf.STT_FUNC);
- if (decl_state) |*ds| {
- try self.dwarf.?.commitDeclState(
- module,
- decl_index,
- local_sym.st_value,
- local_sym.st_size,
- ds,
- );
- }
+ _ = local_sym;
+ // if (decl_state) |*ds| {
+ // try self.dwarf.?.commitDeclState(
+ // module,
+ // decl_index,
+ // local_sym.st_value,
+ // local_sym.st_size,
+ // ds,
+ // );
+ // }
// Since we updated the vaddr and the size, each corresponding export
// symbol also needs to be updated.
@@ -2483,41 +2529,34 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v
}
}
- assert(!self.unnamed_const_atoms.contains(decl_index));
-
- const atom = &decl.link.elf;
- try atom.ensureInitialized(self);
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (gop.found_existing) {
- self.freeRelocationsForTextBlock(atom);
- } else {
- gop.value_ptr.* = null;
- }
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ Atom.freeRelocations(self, atom_index);
+ const atom = self.getAtom(atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
- var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl_index) else null;
- defer if (decl_state) |*ds| ds.deinit();
+ // var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl_index) else null;
+ // defer if (decl_state) |*ds| ds.deinit();
// TODO implement .debug_info for global variables
const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
- const res = if (decl_state) |*ds|
- try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
- .ty = decl.ty,
- .val = decl_val,
- }, &code_buffer, .{
- .dwarf = ds,
- }, .{
- .parent_atom_index = decl.link.elf.getSymbolIndex().?,
- })
- else
- try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
- .ty = decl.ty,
- .val = decl_val,
- }, &code_buffer, .none, .{
- .parent_atom_index = decl.link.elf.getSymbolIndex().?,
- });
+ // const res = if (decl_state) |*ds|
+ // try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+ // .ty = decl.ty,
+ // .val = decl_val,
+ // }, &code_buffer, .{
+ // .dwarf = ds,
+ // }, .{
+ // .parent_atom_index = atom.getSymbolIndex().?,
+ // })
+ // else
+ const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+ .ty = decl.ty,
+ .val = decl_val,
+ }, &code_buffer, .none, .{
+ .parent_atom_index = atom.getSymbolIndex().?,
+ });
const code = switch (res) {
.ok => code_buffer.items,
@@ -2529,15 +2568,16 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v
};
const local_sym = try self.updateDeclCode(decl_index, code, elf.STT_OBJECT);
- if (decl_state) |*ds| {
- try self.dwarf.?.commitDeclState(
- module,
- decl_index,
- local_sym.st_value,
- local_sym.st_size,
- ds,
- );
- }
+ _ = local_sym;
+ // if (decl_state) |*ds| {
+ // try self.dwarf.?.commitDeclState(
+ // module,
+ // decl_index,
+ // local_sym.st_value,
+ // local_sym.st_size,
+ // ds,
+ // );
+ // }
// Since we updated the vaddr and the size, each corresponding export
// symbol also needs to be updated.
@@ -2545,36 +2585,31 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v
}
pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module.Decl.Index) !u32 {
- var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+ const gpa = self.base.allocator;
+
+ var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
-
- const gop = try self.unnamed_const_atoms.getOrPut(self.base.allocator, decl_index);
+ const gop = try self.unnamed_const_atoms.getOrPut(gpa, decl_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
const unnamed_consts = gop.value_ptr;
- const atom = try self.base.allocator.create(TextBlock);
- errdefer self.base.allocator.destroy(atom);
- atom.* = TextBlock.empty;
- // TODO for unnamed consts we don't need GOT offset/entry allocated
- try atom.ensureInitialized(self);
- try self.managed_atoms.append(self.base.allocator, atom);
-
+ const decl = mod.declPtr(decl_index);
const name_str_index = blk: {
const decl_name = try decl.getFullyQualifiedName(mod);
- defer self.base.allocator.free(decl_name);
-
+ defer gpa.free(decl_name);
const index = unnamed_consts.items.len;
- const name = try std.fmt.allocPrint(self.base.allocator, "__unnamed_{s}_{d}", .{ decl_name, index });
- defer self.base.allocator.free(name);
-
- break :blk try self.makeString(name);
+ const name = try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
+ defer gpa.free(name);
+ break :blk try self.shstrtab.insert(gpa, name);
};
- const name = self.getString(name_str_index);
+ const name = self.shstrtab.get(name_str_index).?;
+
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .{
.none = {},
@@ -2592,28 +2627,24 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
};
const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
- const phdr_index = self.phdr_load_ro_index.?;
- const shdr_index = self.phdr_shdr_table.get(phdr_index).?;
- const vaddr = try self.allocateTextBlock(atom, code.len, required_alignment, phdr_index);
- errdefer self.freeTextBlock(atom, phdr_index);
-
- log.debug("allocated text block for {s} at 0x{x}", .{ name, vaddr });
-
+ const shdr_index = self.rodata_section_index.?;
+ const phdr_index = self.sections.items(.phdr_index)[shdr_index];
const local_sym = atom.getSymbolPtr(self);
- local_sym.* = .{
- .st_name = name_str_index,
- .st_info = (elf.STB_LOCAL << 4) | elf.STT_OBJECT,
- .st_other = 0,
- .st_shndx = shdr_index,
- .st_value = vaddr,
- .st_size = code.len,
- };
+ local_sym.st_name = name_str_index;
+ local_sym.st_info = (elf.STB_LOCAL << 4) | elf.STT_OBJECT;
+ local_sym.st_other = 0;
+ local_sym.st_shndx = shdr_index;
+ local_sym.st_size = code.len;
+ local_sym.st_value = try self.allocateAtom(atom_index, code.len, required_alignment);
+ errdefer self.freeAtom(atom_index);
+
+ log.debug("allocated text block for {s} at 0x{x}", .{ name, local_sym.st_value });
try self.writeSymbol(atom.getSymbolIndex().?);
- try unnamed_consts.append(self.base.allocator, atom);
+ try unnamed_consts.append(gpa, atom_index);
const section_offset = local_sym.st_value - self.program_headers.items[phdr_index].p_vaddr;
- const file_offset = self.sections.items[shdr_index].sh_offset + section_offset;
+ const file_offset = self.sections.items(.shdr)[shdr_index].sh_offset + section_offset;
try self.base.file.?.pwriteAll(code, file_offset);
return atom.getSymbolIndex().?;
@@ -2635,20 +2666,16 @@ pub fn updateDeclExports(
const tracy = trace(@src());
defer tracy.end();
- const decl = module.declPtr(decl_index);
- const atom = &decl.link.elf;
-
- if (atom.getSymbolIndex() == null) return;
+ const gpa = self.base.allocator;
+ const decl = module.declPtr(decl_index);
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const atom = self.getAtom(atom_index);
const decl_sym = atom.getSymbol(self);
- try self.global_symbols.ensureUnusedCapacity(self.base.allocator, exports.len);
+ const decl_metadata = self.decls.getPtr(decl_index).?;
+ const shdr_index = decl_metadata.shdr;
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (!gop.found_existing) {
- gop.value_ptr.* = try self.getDeclPhdrIndex(decl);
- }
- const phdr_index = gop.value_ptr.*.?;
- const shdr_index = self.phdr_shdr_table.get(phdr_index).?;
+ try self.global_symbols.ensureUnusedCapacity(gpa, exports.len);
for (exports) |exp| {
if (exp.options.section) |section_name| {
@@ -2681,10 +2708,10 @@ pub fn updateDeclExports(
},
};
const stt_bits: u8 = @truncate(u4, decl_sym.st_info);
- if (exp.link.elf.sym_index) |i| {
+ if (decl_metadata.getExport(self, exp.options.name)) |i| {
const sym = &self.global_symbols.items[i];
sym.* = .{
- .st_name = try self.updateString(sym.st_name, exp.options.name),
+ .st_name = try self.shstrtab.insert(gpa, exp.options.name),
.st_info = (stb_bits << 4) | stt_bits,
.st_other = 0,
.st_shndx = shdr_index,
@@ -2692,21 +2719,19 @@ pub fn updateDeclExports(
.st_size = decl_sym.st_size,
};
} else {
- const name = try self.makeString(exp.options.name);
const i = if (self.global_symbol_free_list.popOrNull()) |i| i else blk: {
_ = self.global_symbols.addOneAssumeCapacity();
break :blk self.global_symbols.items.len - 1;
};
+ try decl_metadata.exports.append(gpa, @intCast(u32, i));
self.global_symbols.items[i] = .{
- .st_name = name,
+ .st_name = try self.shstrtab.insert(gpa, exp.options.name),
.st_info = (stb_bits << 4) | stt_bits,
.st_other = 0,
.st_shndx = shdr_index,
.st_value = decl_sym.st_value,
.st_size = decl_sym.st_size,
};
-
- exp.link.elf.sym_index = @intCast(u32, i);
}
}
}
@@ -2722,17 +2747,19 @@ pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl: *const Module.Decl)
log.debug("updateDeclLineNumber {s}{*}", .{ decl_name, decl });
if (self.llvm_object) |_| return;
- if (self.dwarf) |*dw| {
- try dw.updateDeclLineNumber(decl);
- }
+ // if (self.dwarf) |*dw| {
+ // try dw.updateDeclLineNumber(decl);
+ // }
}
-pub fn deleteExport(self: *Elf, exp: Export) void {
+pub fn deleteDeclExport(self: *Elf, decl_index: Module.Decl.Index, name: []const u8) void {
if (self.llvm_object) |_| return;
-
- const sym_index = exp.sym_index orelse return;
- self.global_symbol_free_list.append(self.base.allocator, sym_index) catch {};
- self.global_symbols.items[sym_index].st_info = 0;
+ const metadata = self.decls.getPtr(decl_index) orelse return;
+ const sym_index = metadata.getExportPtr(self, name) orelse return;
+ log.debug("deleting export '{s}'", .{name});
+ self.global_symbol_free_list.append(self.base.allocator, sym_index.*) catch {};
+ self.global_symbols.items[sym_index.*].st_info = 0;
+ sym_index.* = 0;
}
fn writeProgHeader(self: *Elf, index: usize) !void {
@@ -2761,7 +2788,7 @@ fn writeSectHeader(self: *Elf, index: usize) !void {
switch (self.ptr_width) {
.p32 => {
var shdr: [1]elf.Elf32_Shdr = undefined;
- shdr[0] = sectHeaderTo32(self.sections.items[index]);
+ shdr[0] = sectHeaderTo32(self.sections.items(.shdr)[index]);
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf32_Shdr, &shdr[0]);
}
@@ -2769,7 +2796,7 @@ fn writeSectHeader(self: *Elf, index: usize) !void {
return self.base.file.?.pwriteAll(mem.sliceAsBytes(&shdr), offset);
},
.p64 => {
- var shdr = [1]elf.Elf64_Shdr{self.sections.items[index]};
+ var shdr = [1]elf.Elf64_Shdr{self.sections.items(.shdr)[index]};
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf64_Shdr, &shdr[0]);
}
@@ -2783,11 +2810,11 @@ fn writeOffsetTableEntry(self: *Elf, index: usize) !void {
const entry_size: u16 = self.archPtrWidthBytes();
if (self.offset_table_count_dirty) {
const needed_size = self.offset_table.items.len * entry_size;
- try self.growAllocSection(self.got_section_index.?, self.phdr_got_index.?, needed_size);
+ try self.growAllocSection(self.got_section_index.?, needed_size);
self.offset_table_count_dirty = false;
}
const endian = self.base.options.target.cpu.arch.endian();
- const shdr = &self.sections.items[self.got_section_index.?];
+ const shdr = &self.sections.items(.shdr)[self.got_section_index.?];
const off = shdr.sh_offset + @as(u64, entry_size) * index;
switch (entry_size) {
2 => {
@@ -2813,7 +2840,7 @@ fn writeSymbol(self: *Elf, index: usize) !void {
const tracy = trace(@src());
defer tracy.end();
- const syms_sect = &self.sections.items[self.symtab_section_index.?];
+ const syms_sect = &self.sections.items(.shdr)[self.symtab_section_index.?];
// Make sure we are not pointlessly writing symbol data that will have to get relocated
// due to running out of space.
if (self.local_symbols.items.len != syms_sect.sh_info) {
@@ -2835,7 +2862,7 @@ fn writeSymbol(self: *Elf, index: usize) !void {
.p64 => syms_sect.sh_offset + @sizeOf(elf.Elf64_Sym) * index,
};
const local = self.local_symbols.items[index];
- log.debug("writing symbol {d}, '{s}' at 0x{x}", .{ index, self.getString(local.st_name), off });
+ log.debug("writing symbol {d}, '{?s}' at 0x{x}", .{ index, self.shstrtab.get(local.st_name), off });
log.debug(" ({})", .{local});
switch (self.ptr_width) {
.p32 => {
@@ -2865,7 +2892,7 @@ fn writeSymbol(self: *Elf, index: usize) !void {
}
fn writeAllGlobalSymbols(self: *Elf) !void {
- const syms_sect = &self.sections.items[self.symtab_section_index.?];
+ const syms_sect = &self.sections.items(.shdr)[self.symtab_section_index.?];
const sym_size: u64 = switch (self.ptr_width) {
.p32 => @sizeOf(elf.Elf32_Sym),
.p64 => @sizeOf(elf.Elf64_Sym),
@@ -3215,10 +3242,52 @@ const CsuObjects = struct {
fn logSymtab(self: Elf) void {
log.debug("locals:", .{});
for (self.local_symbols.items) |sym, id| {
- log.debug(" {d}: {s}: @{x} in {d}", .{ id, self.getString(sym.st_name), sym.st_value, sym.st_shndx });
+ log.debug(" {d}: {?s}: @{x} in {d}", .{ id, self.shstrtab.get(sym.st_name), sym.st_value, sym.st_shndx });
}
log.debug("globals:", .{});
for (self.global_symbols.items) |sym, id| {
- log.debug(" {d}: {s}: @{x} in {d}", .{ id, self.getString(sym.st_name), sym.st_value, sym.st_shndx });
+ log.debug(" {d}: {?s}: @{x} in {d}", .{ id, self.shstrtab.get(sym.st_name), sym.st_value, sym.st_shndx });
}
}
+
+pub fn getProgramHeader(self: *const Elf, shdr_index: u16) elf.Elf64_Phdr {
+ const index = self.sections.items(.phdr_index)[shdr_index];
+ return self.program_headers.items[index];
+}
+
+pub fn getProgramHeaderPtr(self: *Elf, shdr_index: u16) *elf.Elf64_Phdr {
+ const index = self.sections.items(.phdr_index)[shdr_index];
+ return &self.program_headers.items[index];
+}
+
+/// Returns pointer-to-symbol described at sym_index.
+pub fn getSymbolPtr(self: *Elf, sym_index: u32) *elf.Elf64_Sym {
+ return &self.local_symbols.items[sym_index];
+}
+
+/// Returns symbol at sym_index.
+pub fn getSymbol(self: *const Elf, sym_index: u32) elf.Elf64_Sym {
+ return self.local_symbols.items[sym_index];
+}
+
+/// Returns name of the symbol at sym_index.
+pub fn getSymbolName(self: *const Elf, sym_index: u32) []const u8 {
+ const sym = self.local_symbols.items[sym_index];
+ return self.shstrtab.get(sym.st_name).?;
+}
+
+pub fn getAtom(self: *const Elf, atom_index: Atom.Index) Atom {
+ assert(atom_index < self.atoms.items.len);
+ return self.atoms.items[atom_index];
+}
+
+pub fn getAtomPtr(self: *Elf, atom_index: Atom.Index) *Atom {
+ assert(atom_index < self.atoms.items.len);
+ return &self.atoms.items[atom_index];
+}
+
+/// Returns atom if there is an atom referenced by the symbol.
+/// Returns null on failure.
+pub fn getAtomIndexForSymbol(self: *Elf, sym_index: u32) ?Atom.Index {
+ return self.atom_by_index_table.get(sym_index);
+}
diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig
index caeb3bfbc5..79b699636f 100644
--- a/src/link/Elf/Atom.zig
+++ b/src/link/Elf/Atom.zig
@@ -20,44 +20,35 @@ offset_table_index: u32,
/// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `TextBlock`.
-prev: ?*Atom,
-next: ?*Atom,
+prev_index: ?Atom.Index,
+next_index: ?Atom.Index,
dbg_info_atom: Dwarf.Atom,
-pub const empty = Atom{
- .local_sym_index = 0,
- .offset_table_index = undefined,
- .prev = null,
- .next = null,
- .dbg_info_atom = undefined,
-};
+pub const Index = u32;
-pub fn ensureInitialized(self: *Atom, elf_file: *Elf) !void {
- if (self.getSymbolIndex() != null) return; // Already initialized
- self.local_sym_index = try elf_file.allocateLocalSymbol();
- self.offset_table_index = try elf_file.allocateGotOffset();
- try elf_file.atom_by_index_table.putNoClobber(elf_file.base.allocator, self.local_sym_index, self);
-}
+pub const Reloc = struct {
+ target: u32,
+ offset: u64,
+ addend: u32,
+ prev_vaddr: u64,
+};
pub fn getSymbolIndex(self: Atom) ?u32 {
if (self.local_sym_index == 0) return null;
return self.local_sym_index;
}
-pub fn getSymbol(self: Atom, elf_file: *Elf) elf.Elf64_Sym {
- const sym_index = self.getSymbolIndex().?;
- return elf_file.local_symbols.items[sym_index];
+pub fn getSymbol(self: Atom, elf_file: *const Elf) elf.Elf64_Sym {
+ return elf_file.getSymbol(self.getSymbolIndex().?);
}
pub fn getSymbolPtr(self: Atom, elf_file: *Elf) *elf.Elf64_Sym {
- const sym_index = self.getSymbolIndex().?;
- return &elf_file.local_symbols.items[sym_index];
+ return elf_file.getSymbolPtr(self.getSymbolIndex().?);
}
-pub fn getName(self: Atom, elf_file: *Elf) []const u8 {
- const sym = self.getSymbol();
- return elf_file.getString(sym.st_name);
+pub fn getName(self: Atom, elf_file: *const Elf) []const u8 {
+ return elf_file.getSymbolName(self.getSymbolIndex().?);
}
pub fn getOffsetTableAddress(self: Atom, elf_file: *Elf) u64 {
@@ -72,9 +63,10 @@ pub fn getOffsetTableAddress(self: Atom, elf_file: *Elf) u64 {
/// Returns how much room there is to grow in virtual address space.
/// File offset relocation happens transparently, so it is not included in
/// this calculation.
-pub fn capacity(self: Atom, elf_file: *Elf) u64 {
+pub fn capacity(self: Atom, elf_file: *const Elf) u64 {
const self_sym = self.getSymbol(elf_file);
- if (self.next) |next| {
+ if (self.next_index) |next_index| {
+ const next = elf_file.getAtom(next_index);
const next_sym = next.getSymbol(elf_file);
return next_sym.st_value - self_sym.st_value;
} else {
@@ -83,9 +75,10 @@ pub fn capacity(self: Atom, elf_file: *Elf) u64 {
}
}
-pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
+pub fn freeListEligible(self: Atom, elf_file: *const Elf) bool {
// No need to keep a free list node for the last block.
- const next = self.next orelse return false;
+ const next_index = self.next_index orelse return false;
+ const next = elf_file.getAtom(next_index);
const self_sym = self.getSymbol(elf_file);
const next_sym = next.getSymbol(elf_file);
const cap = next_sym.st_value - self_sym.st_value;
@@ -94,3 +87,17 @@ pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
const surplus = cap - ideal_cap;
return surplus >= Elf.min_text_capacity;
}
+
+pub fn addRelocation(elf_file: *Elf, atom_index: Index, reloc: Reloc) !void {
+ const gpa = elf_file.base.allocator;
+ const gop = try elf_file.relocs.getOrPut(gpa, atom_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{};
+ }
+ try gop.value_ptr.append(gpa, reloc);
+}
+
+pub fn freeRelocations(elf_file: *Elf, atom_index: Index) void {
+ var removed_relocs = elf_file.relocs.fetchRemove(atom_index);
+ if (removed_relocs) |*relocs| relocs.value.deinit(elf_file.base.allocator);
+}
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 29aed25b31..11a1119449 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -2604,9 +2604,11 @@ pub fn freeDecl(self: *MachO, decl_index: Module.Decl.Index) void {
log.debug("freeDecl {*}", .{decl});
- if (self.decls.fetchSwapRemove(decl_index)) |kv| {
+ if (self.decls.fetchSwapRemove(decl_index)) |const_kv| {
+ var kv = const_kv;
self.freeAtom(kv.value.atom);
self.freeUnnamedConsts(decl_index);
+ kv.value.exports.deinit(self.base.allocator);
}
// if (self.d_sym) |*d_sym| {
--
cgit v1.2.3
From c430e9afa7b050400b9703360a0af4ab824335ce Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Tue, 31 Jan 2023 20:27:17 +0100
Subject: link: make Coff atoms fully owned by the linker
---
src/Module.zig | 6 +-
src/Sema.zig | 2 +-
src/arch/aarch64/CodeGen.zig | 42 ++--
src/arch/aarch64/Emit.zig | 6 +-
src/arch/x86_64/CodeGen.zig | 16 +-
src/arch/x86_64/Emit.zig | 8 +-
src/link.zig | 4 +-
src/link/Coff.zig | 460 ++++++++++++++++++++++++-------------------
src/link/Coff/Atom.zig | 48 ++---
src/link/Coff/Relocation.zig | 18 +-
src/link/Elf.zig | 10 +-
src/link/MachO.zig | 5 +
12 files changed, 350 insertions(+), 275 deletions(-)
(limited to 'src')
diff --git a/src/Module.zig b/src/Module.zig
index 0695a2e98a..b39fd2bab2 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -5274,7 +5274,7 @@ pub fn clearDecl(
// TODO instead of a union, put this memory trailing Decl objects,
// and allow it to be variably sized.
decl.link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = link.File.Coff.Atom.empty },
+ .coff => .{ .coff = {} },
.elf => .{ .elf = {} },
.macho => .{ .macho = {} },
.plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
@@ -5390,7 +5390,7 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void
wasm.deleteExport(exp.link.wasm);
}
if (mod.comp.bin_file.cast(link.File.Coff)) |coff| {
- coff.deleteExport(exp.link.coff);
+ coff.deleteDeclExport(decl_index, exp.options.name);
}
if (mod.failed_exports.fetchSwapRemove(exp)) |failed_kv| {
failed_kv.value.destroy(mod.gpa);
@@ -5694,7 +5694,7 @@ pub fn allocateNewDecl(
.zir_decl_index = 0,
.src_scope = src_scope,
.link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = link.File.Coff.Atom.empty },
+ .coff => .{ .coff = {} },
.elf => .{ .elf = {} },
.macho => .{ .macho = {} },
.plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
diff --git a/src/Sema.zig b/src/Sema.zig
index 28d559f730..82321ef545 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -5565,7 +5565,7 @@ pub fn analyzeExport(
},
.src = src,
.link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = .{} },
+ .coff => .{ .coff = {} },
.elf => .{ .elf = {} },
.macho => .{ .macho = {} },
.plan9 => .{ .plan9 = null },
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 67197c35f8..d0fba2fd0e 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -4019,15 +4019,17 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.direct => .load_memory_ptr_direct,
.import => unreachable,
};
- const mod = self.bin_file.options.module.?;
- const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
.macho => blk: {
const macho_file = self.bin_file.cast(link.File.MachO).?;
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
},
- .coff => owner_decl.link.coff.getSymbolIndex().?,
+ .coff => blk: {
+ const coff_file = self.bin_file.cast(link.File.Coff).?;
+ const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk coff_file.getAtom(atom).getSymbolIndex().?;
+ },
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@@ -4322,11 +4324,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
},
});
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- try fn_owner_decl.link.coff.ensureInitialized(coff_file);
+ const atom = try coff_file.getOrCreateAtomForDecl(func.owner_decl);
+ const sym_index = coff_file.getAtom(atom).getSymbolIndex().?;
try self.genSetReg(Type.initTag(.u64), .x30, .{
.linker_load = .{
.type = .got,
- .sym_index = fn_owner_decl.link.coff.getSymbolIndex().?,
+ .sym_index = sym_index,
},
});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
@@ -5496,15 +5499,17 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
.direct => .load_memory_ptr_direct,
.import => unreachable,
};
- const mod = self.bin_file.options.module.?;
- const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
.macho => blk: {
const macho_file = self.bin_file.cast(link.File.MachO).?;
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
},
- .coff => owner_decl.link.coff.getSymbolIndex().?,
+ .coff => blk: {
+ const coff_file = self.bin_file.cast(link.File.Coff).?;
+ const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk coff_file.getAtom(atom).getSymbolIndex().?;
+ },
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@@ -5614,15 +5619,17 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.direct => .load_memory_direct,
.import => .load_memory_import,
};
- const mod = self.bin_file.options.module.?;
- const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
.macho => blk: {
const macho_file = self.bin_file.cast(link.File.MachO).?;
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
},
- .coff => owner_decl.link.coff.getSymbolIndex().?,
+ .coff => blk: {
+ const coff_file = self.bin_file.cast(link.File.Coff).?;
+ const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk coff_file.getAtom(atom).getSymbolIndex().?;
+ },
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@@ -5812,15 +5819,17 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
.direct => .load_memory_ptr_direct,
.import => unreachable,
};
- const mod = self.bin_file.options.module.?;
- const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
.macho => blk: {
const macho_file = self.bin_file.cast(link.File.MachO).?;
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
},
- .coff => owner_decl.link.coff.getSymbolIndex().?,
+ .coff => blk: {
+ const coff_file = self.bin_file.cast(link.File.Coff).?;
+ const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk coff_file.getAtom(atom).getSymbolIndex().?;
+ },
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@@ -6150,10 +6159,11 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
.sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- try decl.link.coff.ensureInitialized(coff_file);
+ const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
return MCValue{ .linker_load = .{
.type = .got,
- .sym_index = decl.link.coff.getSymbolIndex().?,
+ .sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl_index);
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index f348fb70e3..3c2a81d5d1 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -919,7 +919,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
},
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom = coff_file.getAtomForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
+ const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
const target = switch (tag) {
.load_memory_got,
.load_memory_ptr_got,
@@ -929,7 +929,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
.load_memory_import => coff_file.getGlobalByIndex(data.sym_index),
else => unreachable,
};
- try atom.addRelocation(coff_file, .{
+ try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.target = target,
.offset = offset,
.addend = 0,
@@ -946,7 +946,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
else => unreachable,
},
});
- try atom.addRelocation(coff_file, .{
+ try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.target = target,
.offset = offset + 4,
.addend = 0,
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 23d3ca5514..b41973ea97 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -2668,13 +2668,12 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue
switch (ptr) {
.linker_load => |load_struct| {
const abi_size = @intCast(u32, ptr_ty.abiSize(self.target.*));
- const mod = self.bin_file.options.module.?;
- const fn_owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: {
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
- } else if (self.bin_file.cast(link.File.Coff)) |_| blk: {
- break :blk fn_owner_decl.link.coff.getSymbolIndex().?;
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: {
+ const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk coff_file.getAtom(atom).getSymbolIndex().?;
} else unreachable;
const flags: u2 = switch (load_struct.type) {
.got => 0b00,
@@ -4009,8 +4008,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.data = .{ .imm = got_addr },
});
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- try fn_owner_decl.link.coff.ensureInitialized(coff_file);
- const sym_index = fn_owner_decl.link.coff.getSymbolIndex().?;
+ const atom_index = try coff_file.getOrCreateAtomForDecl(func.owner_decl);
+ const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
try self.genSetReg(Type.initTag(.usize), .rax, .{
.linker_load = .{
.type = .got,
@@ -6733,10 +6732,11 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
.sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- try decl.link.coff.ensureInitialized(coff_file);
+ const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
return MCValue{ .linker_load = .{
.type = .got,
- .sym_index = decl.link.coff.getSymbolIndex().?,
+ .sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl_index);
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index 980dbfd41e..c4f9b4eb42 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -1011,8 +1011,8 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
.length = 2,
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom = coff_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
- try atom.addRelocation(coff_file, .{
+ const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+ try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.type = switch (ops.flags) {
0b00 => .got,
0b01 => .direct,
@@ -1152,9 +1152,9 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
// Add relocation to the decl.
- const atom = coff_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+ const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = coff_file.getGlobalByIndex(relocation.sym_index);
- try atom.addRelocation(coff_file, .{
+ try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.type = .direct,
.target = target,
.offset = offset,
diff --git a/src/link.zig b/src/link.zig
index 09804add53..eb74615492 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -263,7 +263,7 @@ pub const File = struct {
pub const LinkBlock = union {
elf: void,
- coff: Coff.Atom,
+ coff: void,
macho: void,
plan9: Plan9.DeclBlock,
c: void,
@@ -285,7 +285,7 @@ pub const File = struct {
pub const Export = union {
elf: void,
- coff: Coff.Export,
+ coff: void,
macho: void,
plan9: Plan9.Export,
c: void,
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index dee3c7c381..c062276b73 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -79,13 +79,13 @@ entry_addr: ?u32 = null,
/// We store them here so that we can properly dispose of any allocated
/// memory within the atom in the incremental linker.
/// TODO consolidate this.
-decls: std.AutoHashMapUnmanaged(Module.Decl.Index, ?u16) = .{},
+decls: std.AutoHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
/// List of atoms that are either synthetic or map directly to the Zig source program.
-managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
+atoms: std.ArrayListUnmanaged(Atom) = .{},
/// Table of atoms indexed by the symbol index.
-atom_by_index_table: std.AutoHashMapUnmanaged(u32, *Atom) = .{},
+atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},
/// Table of unnamed constants associated with a parent `Decl`.
/// We store them here so that we can free the constants whenever the `Decl`
@@ -124,9 +124,9 @@ const Entry = struct {
sym_index: u32,
};
-const RelocTable = std.AutoHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(Relocation));
-const BaseRelocationTable = std.AutoHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(u32));
-const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(*Atom));
+const RelocTable = std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Relocation));
+const BaseRelocationTable = std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(u32));
+const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Atom.Index));
const default_file_alignment: u16 = 0x200;
const default_size_of_stack_reserve: u32 = 0x1000000;
@@ -137,7 +137,7 @@ const default_size_of_heap_commit: u32 = 0x1000;
const Section = struct {
header: coff.SectionHeader,
- last_atom: ?*Atom = null,
+ last_atom_index: ?Atom.Index = null,
/// A list of atoms that have surplus capacity. This list can have false
/// positives, as functions grow and shrink over time, only sometimes being added
@@ -154,7 +154,34 @@ const Section = struct {
/// overcapacity can be negative. A simple way to have negative overcapacity is to
/// allocate a fresh atom, which will have ideal capacity, and then grow it
/// by 1 byte. It will then have -1 overcapacity.
- free_list: std.ArrayListUnmanaged(*Atom) = .{},
+ free_list: std.ArrayListUnmanaged(Atom.Index) = .{},
+};
+
+const DeclMetadata = struct {
+ atom: Atom.Index,
+ section: u16,
+ /// A list of all exports aliases of this Decl.
+ exports: std.ArrayListUnmanaged(u32) = .{},
+
+ fn getExport(m: DeclMetadata, coff_file: *const Coff, name: []const u8) ?u32 {
+ for (m.exports.items) |exp| {
+ if (mem.eql(u8, name, coff_file.getSymbolName(.{
+ .sym_index = exp,
+ .file = null,
+ }))) return exp;
+ }
+ return null;
+ }
+
+ fn getExportPtr(m: *DeclMetadata, coff_file: *Coff, name: []const u8) ?*u32 {
+ for (m.exports.items) |*exp| {
+ if (mem.eql(u8, name, coff_file.getSymbolName(.{
+ .sym_index = exp.*,
+ .file = null,
+ }))) return exp;
+ }
+ return null;
+ }
};
pub const PtrWidth = enum {
@@ -170,10 +197,6 @@ pub const PtrWidth = enum {
};
pub const SrcFn = void;
-pub const Export = struct {
- sym_index: ?u32 = null,
-};
-
pub const SymbolWithLoc = struct {
// Index into the respective symbol table.
sym_index: u32,
@@ -271,11 +294,7 @@ pub fn deinit(self: *Coff) void {
}
self.sections.deinit(gpa);
- for (self.managed_atoms.items) |atom| {
- gpa.destroy(atom);
- }
- self.managed_atoms.deinit(gpa);
-
+ self.atoms.deinit(gpa);
self.locals.deinit(gpa);
self.globals.deinit(gpa);
@@ -297,7 +316,15 @@ pub fn deinit(self: *Coff) void {
self.imports.deinit(gpa);
self.imports_free_list.deinit(gpa);
self.imports_table.deinit(gpa);
- self.decls.deinit(gpa);
+
+ {
+ var it = self.decls.iterator();
+ while (it.next()) |entry| {
+ entry.value_ptr.exports.deinit(gpa);
+ }
+ self.decls.deinit(gpa);
+ }
+
self.atom_by_index_table.deinit(gpa);
{
@@ -461,17 +488,18 @@ fn growSectionVM(self: *Coff, sect_id: u32, needed_size: u32) !void {
// TODO: enforce order by increasing VM addresses in self.sections container.
// This is required by the loader anyhow as far as I can tell.
for (self.sections.items(.header)[sect_id + 1 ..]) |*next_header, next_sect_id| {
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id + 1 + next_sect_id];
+ const maybe_last_atom_index = self.sections.items(.last_atom_index)[sect_id + 1 + next_sect_id];
next_header.virtual_address += diff;
- if (maybe_last_atom.*) |last_atom| {
- var atom = last_atom;
+ if (maybe_last_atom_index) |last_atom_index| {
+ var atom_index = last_atom_index;
while (true) {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbolPtr(self);
sym.value += diff;
- if (atom.prev) |prev| {
- atom = prev;
+ if (atom.prev_index) |prev_index| {
+ atom_index = prev_index;
} else break;
}
}
@@ -480,14 +508,15 @@ fn growSectionVM(self: *Coff, sect_id: u32, needed_size: u32) !void {
header.virtual_size = increased_size;
}
-fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u32 {
+fn allocateAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 {
const tracy = trace(@src());
defer tracy.end();
+ const atom = self.getAtom(atom_index);
const sect_id = @enumToInt(atom.getSymbol(self).section_number) - 1;
const header = &self.sections.items(.header)[sect_id];
const free_list = &self.sections.items(.free_list)[sect_id];
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
const new_atom_ideal_capacity = if (header.isCode()) padToIdeal(new_atom_size) else new_atom_size;
// We use these to indicate our intention to update metadata, placing the new atom,
@@ -495,7 +524,7 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
- var atom_placement: ?*Atom = null;
+ var atom_placement: ?Atom.Index = null;
var free_list_removal: ?usize = null;
// First we look for an appropriately sized free list node.
@@ -503,7 +532,8 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
var vaddr = blk: {
var i: usize = 0;
while (i < free_list.items.len) {
- const big_atom = free_list.items[i];
+ const big_atom_index = free_list.items[i];
+ const big_atom = self.getAtom(big_atom_index);
// We now have a pointer to a live atom that has too much capacity.
// Is it enough that we could fit this new atom?
const sym = big_atom.getSymbol(self);
@@ -531,34 +561,43 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
const keep_free_list_node = remaining_capacity >= min_text_capacity;
// Set up the metadata to be updated, after errors are no longer possible.
- atom_placement = big_atom;
+ atom_placement = big_atom_index;
if (!keep_free_list_node) {
free_list_removal = i;
}
break :blk new_start_vaddr;
- } else if (maybe_last_atom.*) |last| {
+ } else if (maybe_last_atom_index.*) |last_index| {
+ const last = self.getAtom(last_index);
const last_symbol = last.getSymbol(self);
const ideal_capacity = if (header.isCode()) padToIdeal(last.size) else last.size;
const ideal_capacity_end_vaddr = last_symbol.value + ideal_capacity;
const new_start_vaddr = mem.alignForwardGeneric(u32, ideal_capacity_end_vaddr, alignment);
- atom_placement = last;
+ atom_placement = last_index;
break :blk new_start_vaddr;
} else {
break :blk mem.alignForwardGeneric(u32, header.virtual_address, alignment);
}
};
- const expand_section = atom_placement == null or atom_placement.?.next == null;
+ const expand_section = if (atom_placement) |placement_index|
+ self.getAtom(placement_index).next_index == null
+ else
+ true;
if (expand_section) {
const sect_capacity = self.allocatedSize(header.pointer_to_raw_data);
const needed_size: u32 = (vaddr + new_atom_size) - header.virtual_address;
if (needed_size > sect_capacity) {
const new_offset = self.findFreeSpace(needed_size, default_file_alignment);
- const current_size = if (maybe_last_atom.*) |last_atom| blk: {
+ const current_size = if (maybe_last_atom_index.*) |last_atom_index| blk: {
+ const last_atom = self.getAtom(last_atom_index);
const sym = last_atom.getSymbol(self);
break :blk (sym.value + last_atom.size) - header.virtual_address;
} else 0;
- log.debug("moving {s} from 0x{x} to 0x{x}", .{ self.getSectionName(header), header.pointer_to_raw_data, new_offset });
+ log.debug("moving {s} from 0x{x} to 0x{x}", .{
+ self.getSectionName(header),
+ header.pointer_to_raw_data,
+ new_offset,
+ });
const amt = try self.base.file.?.copyRangeAll(
header.pointer_to_raw_data,
self.base.file.?,
@@ -577,26 +616,34 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
header.virtual_size = @max(header.virtual_size, needed_size);
header.size_of_raw_data = needed_size;
- maybe_last_atom.* = atom;
+ maybe_last_atom_index.* = atom_index;
}
- atom.size = new_atom_size;
- atom.alignment = alignment;
+ {
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.size = new_atom_size;
+ atom_ptr.alignment = alignment;
+ }
- if (atom.prev) |prev| {
- prev.next = atom.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
}
- if (atom.next) |next| {
- next.prev = atom.prev;
+ if (atom.next_index) |next_index| {
+ const next = self.getAtomPtr(next_index);
+ next.prev_index = atom.prev_index;
}
- if (atom_placement) |big_atom| {
- atom.prev = big_atom;
- atom.next = big_atom.next;
- big_atom.next = atom;
+ if (atom_placement) |big_atom_index| {
+ const big_atom = self.getAtomPtr(big_atom_index);
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = big_atom_index;
+ atom_ptr.next_index = big_atom.next_index;
+ big_atom.next_index = atom_index;
} else {
- atom.prev = null;
- atom.next = null;
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = null;
+ atom_ptr.next_index = null;
}
if (free_list_removal) |i| {
_ = free_list.swapRemove(i);
@@ -701,24 +748,37 @@ pub fn allocateImportEntry(self: *Coff, target: SymbolWithLoc) !u32 {
return index;
}
-fn createGotAtom(self: *Coff, target: SymbolWithLoc) !*Atom {
+pub fn createAtom(self: *Coff) !Atom.Index {
const gpa = self.base.allocator;
- const atom = try gpa.create(Atom);
- errdefer gpa.destroy(atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+ const atom_index = @intCast(Atom.Index, self.atoms.items.len);
+ const atom = try self.atoms.addOne(gpa);
+ const sym_index = try self.allocateSymbol();
+ try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
+ atom.* = .{
+ .sym_index = sym_index,
+ .file = null,
+ .size = 0,
+ .alignment = 0,
+ .prev_index = null,
+ .next_index = null,
+ };
+ log.debug("creating ATOM(%{d}) at index {d}", .{ sym_index, atom_index });
+ return atom_index;
+}
+
+fn createGotAtom(self: *Coff, target: SymbolWithLoc) !Atom.Index {
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
- try self.managed_atoms.append(gpa, atom);
-
const sym = atom.getSymbolPtr(self);
sym.section_number = @intToEnum(coff.SectionNumber, self.got_section_index.? + 1);
- sym.value = try self.allocateAtom(atom, atom.size, atom.alignment);
+ sym.value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
log.debug("allocated GOT atom at 0x{x}", .{sym.value});
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = .direct,
.target = target,
.offset = 0,
@@ -732,49 +792,46 @@ fn createGotAtom(self: *Coff, target: SymbolWithLoc) !*Atom {
.UNDEFINED => @panic("TODO generate a binding for undefined GOT target"),
.ABSOLUTE => {},
.DEBUG => unreachable, // not possible
- else => try atom.addBaseRelocation(self, 0),
+ else => try Atom.addBaseRelocation(self, atom_index, 0),
}
- return atom;
+ return atom_index;
}
-fn createImportAtom(self: *Coff) !*Atom {
- const gpa = self.base.allocator;
- const atom = try gpa.create(Atom);
- errdefer gpa.destroy(atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+fn createImportAtom(self: *Coff) !Atom.Index {
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
- try self.managed_atoms.append(gpa, atom);
-
const sym = atom.getSymbolPtr(self);
sym.section_number = @intToEnum(coff.SectionNumber, self.idata_section_index.? + 1);
- sym.value = try self.allocateAtom(atom, atom.size, atom.alignment);
+ sym.value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
log.debug("allocated import atom at 0x{x}", .{sym.value});
- return atom;
+ return atom_index;
}
-fn growAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u32 {
+fn growAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const align_ok = mem.alignBackwardGeneric(u32, sym.value, alignment) == sym.value;
const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
if (!need_realloc) return sym.value;
- return self.allocateAtom(atom, new_atom_size, alignment);
+ return self.allocateAtom(atom_index, new_atom_size, alignment);
}
-fn shrinkAtom(self: *Coff, atom: *Atom, new_block_size: u32) void {
+fn shrinkAtom(self: *Coff, atom_index: Atom.Index, new_block_size: u32) void {
_ = self;
- _ = atom;
+ _ = atom_index;
_ = new_block_size;
// TODO check the new capacity, and if it crosses the size threshold into a big enough
// capacity, insert a free list node for it.
}
-fn writeAtom(self: *Coff, atom: *Atom, code: []const u8) !void {
+fn writeAtom(self: *Coff, atom_index: Atom.Index, code: []const u8) !void {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const section = self.sections.get(@enumToInt(sym.section_number) - 1);
const file_offset = section.header.pointer_to_raw_data + sym.value - section.header.virtual_address;
@@ -784,18 +841,18 @@ fn writeAtom(self: *Coff, atom: *Atom, code: []const u8) !void {
file_offset + code.len,
});
try self.base.file.?.pwriteAll(code, file_offset);
- try self.resolveRelocs(atom);
+ try self.resolveRelocs(atom_index);
}
-fn writePtrWidthAtom(self: *Coff, atom: *Atom) !void {
+fn writePtrWidthAtom(self: *Coff, atom_index: Atom.Index) !void {
switch (self.ptr_width) {
.p32 => {
var buffer: [@sizeOf(u32)]u8 = [_]u8{0} ** @sizeOf(u32);
- try self.writeAtom(atom, &buffer);
+ try self.writeAtom(atom_index, &buffer);
},
.p64 => {
var buffer: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64);
- try self.writeAtom(atom, &buffer);
+ try self.writeAtom(atom_index, &buffer);
},
}
}
@@ -815,7 +872,8 @@ fn markRelocsDirtyByAddress(self: *Coff, addr: u32) void {
var it = self.relocs.valueIterator();
while (it.next()) |relocs| {
for (relocs.items) |*reloc| {
- const target_atom = reloc.getTargetAtom(self) orelse continue;
+ const target_atom_index = reloc.getTargetAtomIndex(self) orelse continue;
+ const target_atom = self.getAtom(target_atom_index);
const target_sym = target_atom.getSymbol(self);
if (target_sym.value < addr) continue;
reloc.dirty = true;
@@ -823,24 +881,26 @@ fn markRelocsDirtyByAddress(self: *Coff, addr: u32) void {
}
}
-fn resolveRelocs(self: *Coff, atom: *Atom) !void {
- const relocs = self.relocs.get(atom) orelse return;
+fn resolveRelocs(self: *Coff, atom_index: Atom.Index) !void {
+ const relocs = self.relocs.get(atom_index) orelse return;
- log.debug("relocating '{s}'", .{atom.getName(self)});
+ log.debug("relocating '{s}'", .{self.getAtom(atom_index).getName(self)});
for (relocs.items) |*reloc| {
if (!reloc.dirty) continue;
- try reloc.resolve(atom, self);
+ try reloc.resolve(atom_index, self);
}
}
-fn freeAtom(self: *Coff, atom: *Atom) void {
- log.debug("freeAtom {*}", .{atom});
+fn freeAtom(self: *Coff, atom_index: Atom.Index) void {
+ log.debug("freeAtom {d}", .{atom_index});
+
+ const gpa = self.base.allocator;
// Remove any relocs and base relocs associated with this Atom
- self.freeRelocationsForAtom(atom);
+ Atom.freeRelocations(self, atom_index);
- const gpa = self.base.allocator;
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const sect_id = @enumToInt(sym.section_number) - 1;
const free_list = &self.sections.items(.free_list)[sect_id];
@@ -849,45 +909,46 @@ fn freeAtom(self: *Coff, atom: *Atom) void {
var i: usize = 0;
// TODO turn free_list into a hash map
while (i < free_list.items.len) {
- if (free_list.items[i] == atom) {
+ if (free_list.items[i] == atom_index) {
_ = free_list.swapRemove(i);
continue;
}
- if (free_list.items[i] == atom.prev) {
+ if (free_list.items[i] == atom.prev_index) {
already_have_free_list_node = true;
}
i += 1;
}
}
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
- if (maybe_last_atom.*) |last_atom| {
- if (last_atom == atom) {
- if (atom.prev) |prev| {
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
+ if (maybe_last_atom_index.*) |last_atom_index| {
+ if (last_atom_index == atom_index) {
+ if (atom.prev_index) |prev_index| {
// TODO shrink the section size here
- maybe_last_atom.* = prev;
+ maybe_last_atom_index.* = prev_index;
} else {
- maybe_last_atom.* = null;
+ maybe_last_atom_index.* = null;
}
}
}
- if (atom.prev) |prev| {
- prev.next = atom.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
- if (!already_have_free_list_node and prev.freeListEligible(self)) {
+ if (!already_have_free_list_node and prev.*.freeListEligible(self)) {
// The free list is heuristics, it doesn't have to be perfect, so we can
// ignore the OOM here.
- free_list.append(gpa, prev) catch {};
+ free_list.append(gpa, prev_index) catch {};
}
} else {
- atom.prev = null;
+ self.getAtomPtr(atom_index).prev_index = null;
}
- if (atom.next) |next| {
- next.prev = atom.prev;
+ if (atom.next_index) |next_index| {
+ self.getAtomPtr(next_index).prev_index = atom.prev_index;
} else {
- atom.next = null;
+ self.getAtomPtr(atom_index).next_index = null;
}
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
@@ -910,7 +971,7 @@ fn freeAtom(self: *Coff, atom: *Atom) void {
self.locals.items[sym_index].section_number = .UNDEFINED;
_ = self.atom_by_index_table.remove(sym_index);
log.debug(" adding local symbol index {d} to free list", .{sym_index});
- atom.sym_index = 0;
+ self.getAtomPtr(atom_index).sym_index = 0;
}
pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
@@ -927,15 +988,10 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
- const atom = &decl.link.coff;
- try atom.ensureInitialized(self);
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (gop.found_existing) {
- self.freeUnnamedConsts(decl_index);
- self.freeRelocationsForAtom(&decl.link.coff);
- } else {
- gop.value_ptr.* = null;
- }
+
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ self.freeUnnamedConsts(decl_index);
+ Atom.freeRelocations(self, atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -979,11 +1035,8 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
}
const unnamed_consts = gop.value_ptr;
- const atom = try gpa.create(Atom);
- errdefer gpa.destroy(atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
- try self.managed_atoms.append(gpa, atom);
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
const sym_name = blk: {
const decl_name = try decl.getFullyQualifiedName(mod);
@@ -1012,15 +1065,15 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
const required_alignment = tv.ty.abiAlignment(self.base.options.target);
atom.alignment = required_alignment;
atom.size = @intCast(u32, code.len);
- atom.getSymbolPtr(self).value = try self.allocateAtom(atom, atom.size, atom.alignment);
- errdefer self.freeAtom(atom);
+ atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
+ errdefer self.freeAtom(atom_index);
- try unnamed_consts.append(gpa, atom);
+ try unnamed_consts.append(gpa, atom_index);
log.debug("allocated atom for {s} at 0x{x}", .{ sym_name, atom.getSymbol(self).value });
log.debug(" (required alignment 0x{x})", .{required_alignment});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
return atom.getSymbolIndex().?;
}
@@ -1047,14 +1100,9 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
}
}
- const atom = &decl.link.coff;
- try atom.ensureInitialized(self);
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (gop.found_existing) {
- self.freeRelocationsForAtom(atom);
- } else {
- gop.value_ptr.* = null;
- }
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ Atom.freeRelocations(self, atom_index);
+ const atom = self.getAtom(atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -1064,7 +1112,7 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none, .{
- .parent_atom_index = decl.link.coff.getSymbolIndex().?,
+ .parent_atom_index = atom.getSymbolIndex().?,
});
const code = switch (res) {
.ok => code_buffer.items,
@@ -1082,7 +1130,20 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}
-fn getDeclOutputSection(self: *Coff, decl: *Module.Decl) u16 {
+pub fn getOrCreateAtomForDecl(self: *Coff, decl_index: Module.Decl.Index) !Atom.Index {
+ const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{
+ .atom = try self.createAtom(),
+ .section = self.getDeclOutputSection(decl_index),
+ .exports = .{},
+ };
+ }
+ return gop.value_ptr.atom;
+}
+
+fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 {
+ const decl = self.base.options.module.?.declPtr(decl_index);
const ty = decl.ty;
const zig_ty = ty.zigTypeTag();
const val = decl.val;
@@ -1117,14 +1178,11 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
const required_alignment = decl.getAlignment(self.base.options.target);
- const decl_ptr = self.decls.getPtr(decl_index).?;
- if (decl_ptr.* == null) {
- decl_ptr.* = self.getDeclOutputSection(decl);
- }
- const sect_index = decl_ptr.*.?;
-
+ const decl_metadata = self.decls.get(decl_index).?;
+ const atom_index = decl_metadata.atom;
+ const atom = self.getAtom(atom_index);
+ const sect_index = decl_metadata.section;
const code_len = @intCast(u32, code.len);
- const atom = &decl.link.coff;
if (atom.size != 0) {
const sym = atom.getSymbolPtr(self);
@@ -1135,7 +1193,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
const capacity = atom.capacity(self);
const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, sym.value, required_alignment);
if (need_realloc) {
- const vaddr = try self.growAtom(atom, code_len, required_alignment);
+ const vaddr = try self.growAtom(atom_index, code_len, required_alignment);
log.debug("growing {s} from 0x{x} to 0x{x}", .{ decl_name, sym.value, vaddr });
log.debug(" (required alignment 0x{x}", .{required_alignment});
@@ -1143,49 +1201,43 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
sym.value = vaddr;
log.debug(" (updating GOT entry)", .{});
const got_target = SymbolWithLoc{ .sym_index = atom.getSymbolIndex().?, .file = null };
- const got_atom = self.getGotAtomForSymbol(got_target).?;
+ const got_atom_index = self.getGotAtomIndexForSymbol(got_target).?;
self.markRelocsDirtyByTarget(got_target);
- try self.writePtrWidthAtom(got_atom);
+ try self.writePtrWidthAtom(got_atom_index);
}
} else if (code_len < atom.size) {
- self.shrinkAtom(atom, code_len);
+ self.shrinkAtom(atom_index, code_len);
}
- atom.size = code_len;
+ self.getAtomPtr(atom_index).size = code_len;
} else {
const sym = atom.getSymbolPtr(self);
try self.setSymbolName(sym, decl_name);
sym.section_number = @intToEnum(coff.SectionNumber, sect_index + 1);
sym.type = .{ .complex_type = complex_type, .base_type = .NULL };
- const vaddr = try self.allocateAtom(atom, code_len, required_alignment);
- errdefer self.freeAtom(atom);
+ const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
+ errdefer self.freeAtom(atom_index);
log.debug("allocated atom for {s} at 0x{x}", .{ decl_name, vaddr });
- atom.size = code_len;
+ self.getAtomPtr(atom_index).size = code_len;
sym.value = vaddr;
const got_target = SymbolWithLoc{ .sym_index = atom.getSymbolIndex().?, .file = null };
const got_index = try self.allocateGotEntry(got_target);
- const got_atom = try self.createGotAtom(got_target);
+ const got_atom_index = try self.createGotAtom(got_target);
+ const got_atom = self.getAtom(got_atom_index);
self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
- try self.writePtrWidthAtom(got_atom);
+ try self.writePtrWidthAtom(got_atom_index);
}
self.markRelocsDirtyByTarget(atom.getSymbolWithLoc());
- try self.writeAtom(atom, code);
-}
-
-fn freeRelocationsForAtom(self: *Coff, atom: *Atom) void {
- var removed_relocs = self.relocs.fetchRemove(atom);
- if (removed_relocs) |*relocs| relocs.value.deinit(self.base.allocator);
- var removed_base_relocs = self.base_relocs.fetchRemove(atom);
- if (removed_base_relocs) |*base_relocs| base_relocs.value.deinit(self.base.allocator);
+ try self.writeAtom(atom_index, code);
}
fn freeUnnamedConsts(self: *Coff, decl_index: Module.Decl.Index) void {
const gpa = self.base.allocator;
const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return;
- for (unnamed_consts.items) |atom| {
- self.freeAtom(atom);
+ for (unnamed_consts.items) |atom_index| {
+ self.freeAtom(atom_index);
}
unnamed_consts.clearAndFree(gpa);
}
@@ -1200,11 +1252,11 @@ pub fn freeDecl(self: *Coff, decl_index: Module.Decl.Index) void {
log.debug("freeDecl {*}", .{decl});
- if (self.decls.fetchRemove(decl_index)) |kv| {
- if (kv.value) |_| {
- self.freeAtom(&decl.link.coff);
- self.freeUnnamedConsts(decl_index);
- }
+ if (self.decls.fetchRemove(decl_index)) |const_kv| {
+ var kv = const_kv;
+ self.freeAtom(kv.value.atom);
+ self.freeUnnamedConsts(decl_index);
+ kv.value.exports.deinit(self.base.allocator);
}
}
@@ -1257,16 +1309,10 @@ pub fn updateDeclExports(
const gpa = self.base.allocator;
const decl = module.declPtr(decl_index);
- const atom = &decl.link.coff;
-
- if (atom.getSymbolIndex() == null) return;
-
- const gop = try self.decls.getOrPut(gpa, decl_index);
- if (!gop.found_existing) {
- gop.value_ptr.* = self.getDeclOutputSection(decl);
- }
-
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const atom = self.getAtom(atom_index);
const decl_sym = atom.getSymbol(self);
+ const decl_metadata = self.decls.getPtr(decl_index).?;
for (exports) |exp| {
log.debug("adding new export '{s}'", .{exp.options.name});
@@ -1301,9 +1347,9 @@ pub fn updateDeclExports(
continue;
}
- const sym_index = exp.link.coff.sym_index orelse blk: {
+ const sym_index = decl_metadata.getExport(self, exp.options.name) orelse blk: {
const sym_index = try self.allocateSymbol();
- exp.link.coff.sym_index = sym_index;
+ try decl_metadata.exports.append(gpa, sym_index);
break :blk sym_index;
};
const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
@@ -1326,16 +1372,15 @@ pub fn updateDeclExports(
}
}
-pub fn deleteExport(self: *Coff, exp: Export) void {
+pub fn deleteDeclExport(self: *Coff, decl_index: Module.Decl.Index, name: []const u8) void {
if (self.llvm_object) |_| return;
- const sym_index = exp.sym_index orelse return;
+ const metadata = self.decls.getPtr(decl_index) orelse return;
+ const sym_index = metadata.getExportPtr(self, name) orelse return;
const gpa = self.base.allocator;
-
- const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
+ const sym_loc = SymbolWithLoc{ .sym_index = sym_index.*, .file = null };
const sym = self.getSymbolPtr(sym_loc);
- const sym_name = self.getSymbolName(sym_loc);
- log.debug("deleting export '{s}'", .{sym_name});
+ log.debug("deleting export '{s}'", .{name});
assert(sym.storage_class == .EXTERNAL and sym.section_number != .UNDEFINED);
sym.* = .{
.name = [_]u8{0} ** 8,
@@ -1345,9 +1390,9 @@ pub fn deleteExport(self: *Coff, exp: Export) void {
.storage_class = .NULL,
.number_of_aux_symbols = 0,
};
- self.locals_free_list.append(gpa, sym_index) catch {};
+ self.locals_free_list.append(gpa, sym_index.*) catch {};
- if (self.resolver.fetchRemove(sym_name)) |entry| {
+ if (self.resolver.fetchRemove(name)) |entry| {
defer gpa.free(entry.key);
self.globals_free_list.append(gpa, entry.value) catch {};
self.globals.items[entry.value] = .{
@@ -1355,6 +1400,8 @@ pub fn deleteExport(self: *Coff, exp: Export) void {
.file = null,
};
}
+
+ sym_index.* = 0;
}
fn resolveGlobalSymbol(self: *Coff, current: SymbolWithLoc) !void {
@@ -1419,9 +1466,10 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
if (self.imports_table.contains(global)) continue;
const import_index = try self.allocateImportEntry(global);
- const import_atom = try self.createImportAtom();
+ const import_atom_index = try self.createImportAtom();
+ const import_atom = self.getAtom(import_atom_index);
self.imports.items[import_index].sym_index = import_atom.getSymbolIndex().?;
- try self.writePtrWidthAtom(import_atom);
+ try self.writePtrWidthAtom(import_atom_index);
}
if (build_options.enable_logging) {
@@ -1455,22 +1503,14 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
}
}
-pub fn getDeclVAddr(
- self: *Coff,
- decl_index: Module.Decl.Index,
- reloc_info: link.File.RelocInfo,
-) !u64 {
- const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
-
+pub fn getDeclVAddr(self: *Coff, decl_index: Module.Decl.Index, reloc_info: link.File.RelocInfo) !u64 {
assert(self.llvm_object == null);
- try decl.link.coff.ensureInitialized(self);
- const sym_index = decl.link.coff.getSymbolIndex().?;
-
- const atom = self.getAtomForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
+ const this_atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const sym_index = self.getAtom(this_atom_index).getSymbolIndex().?;
+ const atom_index = self.getAtomIndexForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
const target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = .direct,
.target = target,
.offset = @intCast(u32, reloc_info.offset),
@@ -1478,7 +1518,7 @@ pub fn getDeclVAddr(
.pcrel = false,
.length = 3,
});
- try atom.addBaseRelocation(self, @intCast(u32, reloc_info.offset));
+ try Atom.addBaseRelocation(self, atom_index, @intCast(u32, reloc_info.offset));
return 0;
}
@@ -1529,7 +1569,8 @@ fn writeBaseRelocations(self: *Coff) !void {
var it = self.base_relocs.iterator();
while (it.next()) |entry| {
- const atom = entry.key_ptr.*;
+ const atom_index = entry.key_ptr.*;
+ const atom = self.getAtom(atom_index);
const offsets = entry.value_ptr.*;
for (offsets.items) |offset| {
@@ -1613,7 +1654,8 @@ fn writeImportTable(self: *Coff) !void {
const gpa = self.base.allocator;
const section = self.sections.get(self.idata_section_index.?);
- const last_atom = section.last_atom orelse return;
+ const last_atom_index = section.last_atom_index orelse return;
+ const last_atom = self.getAtom(last_atom_index);
const iat_rva = section.header.virtual_address;
const iat_size = last_atom.getSymbol(self).value + last_atom.size * 2 - iat_rva; // account for sentinel zero pointer
@@ -2051,27 +2093,37 @@ pub fn getOrPutGlobalPtr(self: *Coff, name: []const u8) !GetOrPutGlobalPtrResult
return GetOrPutGlobalPtrResult{ .found_existing = false, .value_ptr = ptr };
}
+pub fn getAtom(self: *const Coff, atom_index: Atom.Index) Atom {
+ assert(atom_index < self.atoms.items.len);
+ return self.atoms.items[atom_index];
+}
+
+pub fn getAtomPtr(self: *Coff, atom_index: Atom.Index) *Atom {
+ assert(atom_index < self.atoms.items.len);
+ return &self.atoms.items[atom_index];
+}
+
/// Returns atom if there is an atom referenced by the symbol described by `sym_loc` descriptor.
/// Returns null on failure.
-pub fn getAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom {
+pub fn getAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
assert(sym_loc.file == null); // TODO linking with object files
return self.atom_by_index_table.get(sym_loc.sym_index);
}
/// Returns GOT atom that references `sym_loc` if one exists.
/// Returns null otherwise.
-pub fn getGotAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom {
+pub fn getGotAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
const got_index = self.got_entries_table.get(sym_loc) orelse return null;
const got_entry = self.got_entries.items[got_index];
- return self.getAtomForSymbol(.{ .sym_index = got_entry.sym_index, .file = null });
+ return self.getAtomIndexForSymbol(.{ .sym_index = got_entry.sym_index, .file = null });
}
/// Returns import atom that references `sym_loc` if one exists.
/// Returns null otherwise.
-pub fn getImportAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom {
+pub fn getImportAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
const imports_index = self.imports_table.get(sym_loc) orelse return null;
const imports_entry = self.imports.items[imports_index];
- return self.getAtomForSymbol(.{ .sym_index = imports_entry.sym_index, .file = null });
+ return self.getAtomIndexForSymbol(.{ .sym_index = imports_entry.sym_index, .file = null });
}
fn setSectionName(self: *Coff, header: *coff.SectionHeader, name: []const u8) !void {
diff --git a/src/link/Coff/Atom.zig b/src/link/Coff/Atom.zig
index 78824eac1d..1ee31cccaa 100644
--- a/src/link/Coff/Atom.zig
+++ b/src/link/Coff/Atom.zig
@@ -27,23 +27,10 @@ alignment: u32,
/// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `Atom`.
-prev: ?*Atom,
-next: ?*Atom,
-
-pub const empty = Atom{
- .sym_index = 0,
- .file = null,
- .size = 0,
- .alignment = 0,
- .prev = null,
- .next = null,
-};
-
-pub fn ensureInitialized(self: *Atom, coff_file: *Coff) !void {
- if (self.getSymbolIndex() != null) return; // Already initialized
- self.sym_index = try coff_file.allocateSymbol();
- try coff_file.atom_by_index_table.putNoClobber(coff_file.base.allocator, self.sym_index, self);
-}
+prev_index: ?Index,
+next_index: ?Index,
+
+pub const Index = u32;
pub fn getSymbolIndex(self: Atom) ?u32 {
if (self.sym_index == 0) return null;
@@ -85,7 +72,8 @@ pub fn getName(self: Atom, coff_file: *const Coff) []const u8 {
/// Returns how much room there is to grow in virtual address space.
pub fn capacity(self: Atom, coff_file: *const Coff) u32 {
const self_sym = self.getSymbol(coff_file);
- if (self.next) |next| {
+ if (self.next_index) |next_index| {
+ const next = coff_file.getAtom(next_index);
const next_sym = next.getSymbol(coff_file);
return next_sym.value - self_sym.value;
} else {
@@ -97,7 +85,8 @@ pub fn capacity(self: Atom, coff_file: *const Coff) u32 {
pub fn freeListEligible(self: Atom, coff_file: *const Coff) bool {
// No need to keep a free list node for the last atom.
- const next = self.next orelse return false;
+ const next_index = self.next_index orelse return false;
+ const next = coff_file.getAtom(next_index);
const self_sym = self.getSymbol(coff_file);
const next_sym = next.getSymbol(coff_file);
const cap = next_sym.value - self_sym.value;
@@ -107,22 +96,33 @@ pub fn freeListEligible(self: Atom, coff_file: *const Coff) bool {
return surplus >= Coff.min_text_capacity;
}
-pub fn addRelocation(self: *Atom, coff_file: *Coff, reloc: Relocation) !void {
+pub fn addRelocation(coff_file: *Coff, atom_index: Index, reloc: Relocation) !void {
const gpa = coff_file.base.allocator;
log.debug(" (adding reloc of type {s} to target %{d})", .{ @tagName(reloc.type), reloc.target.sym_index });
- const gop = try coff_file.relocs.getOrPut(gpa, self);
+ const gop = try coff_file.relocs.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, reloc);
}
-pub fn addBaseRelocation(self: *Atom, coff_file: *Coff, offset: u32) !void {
+pub fn addBaseRelocation(coff_file: *Coff, atom_index: Index, offset: u32) !void {
const gpa = coff_file.base.allocator;
- log.debug(" (adding base relocation at offset 0x{x} in %{d})", .{ offset, self.sym_index });
- const gop = try coff_file.base_relocs.getOrPut(gpa, self);
+ log.debug(" (adding base relocation at offset 0x{x} in %{d})", .{
+ offset,
+ coff_file.getAtom(atom_index).getSymbolIndex().?,
+ });
+ const gop = try coff_file.base_relocs.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, offset);
}
+
+pub fn freeRelocations(coff_file: *Coff, atom_index: Atom.Index) void {
+ const gpa = coff_file.base.allocator;
+ var removed_relocs = coff_file.relocs.fetchRemove(atom_index);
+ if (removed_relocs) |*relocs| relocs.value.deinit(gpa);
+ var removed_base_relocs = coff_file.base_relocs.fetchRemove(atom_index);
+ if (removed_base_relocs) |*base_relocs| base_relocs.value.deinit(gpa);
+}
diff --git a/src/link/Coff/Relocation.zig b/src/link/Coff/Relocation.zig
index 12a34b332d..1ba1d7a1c1 100644
--- a/src/link/Coff/Relocation.zig
+++ b/src/link/Coff/Relocation.zig
@@ -46,33 +46,35 @@ length: u2,
dirty: bool = true,
/// Returns an Atom which is the target node of this relocation edge (if any).
-pub fn getTargetAtom(self: Relocation, coff_file: *Coff) ?*Atom {
+pub fn getTargetAtomIndex(self: Relocation, coff_file: *const Coff) ?Atom.Index {
switch (self.type) {
.got,
.got_page,
.got_pageoff,
- => return coff_file.getGotAtomForSymbol(self.target),
+ => return coff_file.getGotAtomIndexForSymbol(self.target),
.direct,
.page,
.pageoff,
- => return coff_file.getAtomForSymbol(self.target),
+ => return coff_file.getAtomIndexForSymbol(self.target),
.import,
.import_page,
.import_pageoff,
- => return coff_file.getImportAtomForSymbol(self.target),
+ => return coff_file.getImportAtomIndexForSymbol(self.target),
}
}
-pub fn resolve(self: *Relocation, atom: *Atom, coff_file: *Coff) !void {
+pub fn resolve(self: *Relocation, atom_index: Atom.Index, coff_file: *Coff) !void {
+ const atom = coff_file.getAtom(atom_index);
const source_sym = atom.getSymbol(coff_file);
const source_section = coff_file.sections.get(@enumToInt(source_sym.section_number) - 1).header;
const source_vaddr = source_sym.value + self.offset;
const file_offset = source_section.pointer_to_raw_data + source_sym.value - source_section.virtual_address;
- const target_atom = self.getTargetAtom(coff_file) orelse return;
+ const target_atom_index = self.getTargetAtomIndex(coff_file) orelse return;
+ const target_atom = coff_file.getAtom(target_atom_index);
const target_vaddr = target_atom.getSymbol(coff_file).value;
const target_vaddr_with_addend = target_vaddr + self.addend;
@@ -107,7 +109,7 @@ const Context = struct {
image_base: u64,
};
-fn resolveAarch64(self: *Relocation, ctx: Context, coff_file: *Coff) !void {
+fn resolveAarch64(self: Relocation, ctx: Context, coff_file: *Coff) !void {
var buffer: [@sizeOf(u64)]u8 = undefined;
switch (self.length) {
2 => {
@@ -197,7 +199,7 @@ fn resolveAarch64(self: *Relocation, ctx: Context, coff_file: *Coff) !void {
}
}
-fn resolveX86(self: *Relocation, ctx: Context, coff_file: *Coff) !void {
+fn resolveX86(self: Relocation, ctx: Context, coff_file: *Coff) !void {
switch (self.type) {
.got_page => unreachable,
.got_pageoff => unreachable,
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 0b8128aa33..747120ac5d 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -71,14 +71,14 @@ const DeclMetadata = struct {
fn getExport(m: DeclMetadata, elf_file: *const Elf, name: []const u8) ?u32 {
for (m.exports.items) |exp| {
- if (mem.eql(u8, name, elf_file.getSymbolName(exp))) return exp;
+ if (mem.eql(u8, name, elf_file.getGlobalName(exp))) return exp;
}
return null;
}
fn getExportPtr(m: *DeclMetadata, elf_file: *Elf, name: []const u8) ?*u32 {
for (m.exports.items) |*exp| {
- if (mem.eql(u8, name, elf_file.getSymbolName(exp.*))) return exp;
+ if (mem.eql(u8, name, elf_file.getGlobalName(exp.*))) return exp;
}
return null;
}
@@ -3276,6 +3276,12 @@ pub fn getSymbolName(self: *const Elf, sym_index: u32) []const u8 {
return self.shstrtab.get(sym.st_name).?;
}
+/// Returns name of the global symbol at index.
+pub fn getGlobalName(self: *const Elf, index: u32) []const u8 {
+ const sym = self.global_symbols.items[index];
+ return self.shstrtab.get(sym.st_name).?;
+}
+
pub fn getAtom(self: *const Elf, atom_index: Atom.Index) Atom {
assert(atom_index < self.atoms.items.len);
return self.atoms.items[atom_index];
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 11a1119449..42aaa3a275 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -3015,6 +3015,11 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm
if (header.@"align" < align_pow) {
header.@"align" = align_pow;
}
+ {
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.size = new_atom_size;
+ atom_ptr.alignment = @intCast(u32, alignment);
+ }
if (atom.prev_index) |prev_index| {
const prev = self.getAtomPtr(prev_index);
--
cgit v1.2.3
From 73cf7b64291ed8b5dcb4cb52df103be08f15a347 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 30 Jan 2023 21:39:43 -0700
Subject: update build.zig API usage
---
build.zig | 72 ++++++-----
doc/langref.html.in | 42 ++++--
lib/init-exe/build.zig | 51 ++++++--
lib/init-lib/build.zig | 41 +++++-
lib/std/build.zig | 19 ++-
lib/std/build/TranslateCStep.zig | 40 +++++-
lib/std/builtin.zig | 5 +-
src/link/MachO/zld.zig | 11 +-
test/link/bss/build.zig | 9 +-
test/link/common_symbols/build.zig | 15 ++-
test/link/common_symbols_alignment/build.zig | 17 ++-
test/link/interdependent_static_c_libs/build.zig | 24 +++-
test/link/macho/bugs/13056/build.zig | 8 +-
test/link/macho/bugs/13457/build.zig | 11 +-
test/link/macho/dead_strip/build.zig | 16 ++-
test/link/macho/dead_strip_dylibs/build.zig | 14 +-
test/link/macho/dylib/build.zig | 19 ++-
test/link/macho/empty/build.zig | 10 +-
test/link/macho/entry/build.zig | 10 +-
test/link/macho/headerpad/build.zig | 18 +--
test/link/macho/linksection/build.zig | 13 +-
test/link/macho/needed_framework/build.zig | 8 +-
test/link/macho/needed_library/build.zig | 19 ++-
test/link/macho/objc/build.zig | 8 +-
test/link/macho/objcpp/build.zig | 8 +-
test/link/macho/pagezero/build.zig | 18 ++-
test/link/macho/search_strategy/build.zig | 33 +++--
test/link/macho/stack_size/build.zig | 10 +-
test/link/macho/strict_validation/build.zig | 11 +-
test/link/macho/tls/build.zig | 19 ++-
test/link/macho/unwind_info/build.zig | 20 +--
test/link/macho/uuid/build.zig | 17 ++-
test/link/macho/weak_framework/build.zig | 8 +-
test/link/macho/weak_library/build.zig | 19 ++-
test/link/static_lib_as_system_lib/build.zig | 17 ++-
test/link/wasm/archive/build.zig | 11 +-
test/link/wasm/basic-features/build.zig | 18 ++-
test/link/wasm/bss/build.zig | 11 +-
test/link/wasm/export-data/build.zig | 9 +-
test/link/wasm/export/build.zig | 31 +++--
test/link/wasm/extern-mangle/build.zig | 11 +-
test/link/wasm/extern/build.zig | 10 +-
test/link/wasm/function-table/build.zig | 29 +++--
test/link/wasm/infer-features/build.zig | 29 +++--
test/link/wasm/producers/build.zig | 11 +-
test/link/wasm/segments/build.zig | 11 +-
test/link/wasm/stack_pointer/build.zig | 11 +-
test/link/wasm/type/build.zig | 11 +-
test/src/compare_output.zig | 31 +++--
test/src/run_translated_c.zig | 9 +-
test/src/translate_c.zig | 7 +-
test/standalone/brace_expansion/build.zig | 6 +-
test/standalone/c_compiler/build.zig | 18 ++-
test/standalone/emit_asm_and_bin/build.zig | 6 +-
test/standalone/empty_env/build.zig | 7 +-
test/standalone/global_linkage/build.zig | 24 +++-
test/standalone/install_raw_hex/build.zig | 11 +-
test/standalone/issue_11595/build.zig | 11 +-
test/standalone/issue_12588/build.zig | 11 +-
test/standalone/issue_12706/build.zig | 11 +-
test/standalone/issue_13030/build.zig | 12 +-
test/standalone/issue_339/build.zig | 7 +-
test/standalone/issue_5825/build.zig | 19 ++-
test/standalone/issue_7030/build.zig | 11 +-
test/standalone/issue_794/build.zig | 4 +-
test/standalone/issue_8550/build.zig | 11 +-
test/standalone/issue_9812/build.zig | 8 +-
test/standalone/load_dynamic_library/build.zig | 20 ++-
test/standalone/main_pkg_path/build.zig | 4 +-
test/standalone/mix_c_files/build.zig | 11 +-
test/standalone/mix_o_files/build.zig | 14 +-
test/standalone/options/build.zig | 10 +-
test/standalone/pie/build.zig | 6 +-
test/standalone/pkg_import/build.zig | 13 +-
test/standalone/shared_library/build.zig | 17 ++-
test/standalone/static_c_lib/build.zig | 15 ++-
test/standalone/test_runner_path/build.zig | 5 +-
test/standalone/use_alias/build.zig | 6 +-
test/standalone/windows_spawn/build.zig | 17 ++-
test/tests.zig | 156 +++++++++++++----------
80 files changed, 906 insertions(+), 495 deletions(-)
(limited to 'src')
diff --git a/build.zig b/build.zig
index 3a7468243f..98da9f31ee 100644
--- a/build.zig
+++ b/build.zig
@@ -23,7 +23,7 @@ pub fn build(b: *Builder) !void {
}
break :t b.standardTargetOptions(.{ .default_target = default_target });
};
- const mode: std.builtin.Mode = if (release) switch (target.getCpuArch()) {
+ const optimize: std.builtin.OptimizeMode = if (release) switch (target.getCpuArch()) {
.wasm32 => .ReleaseSmall,
else => .ReleaseFast,
} else .Debug;
@@ -33,7 +33,12 @@ pub fn build(b: *Builder) !void {
const test_step = b.step("test", "Run all the tests");
- const docgen_exe = b.addExecutable("docgen", "doc/docgen.zig");
+ const docgen_exe = b.addExecutable(.{
+ .name = "docgen",
+ .root_source_file = .{ .path = "doc/docgen.zig" },
+ .target = .{},
+ .optimize = .Debug,
+ });
docgen_exe.single_threaded = single_threaded;
const rel_zig_exe = try fs.path.relative(b.allocator, b.build_root, b.zig_exe);
@@ -53,10 +58,12 @@ pub fn build(b: *Builder) !void {
const docs_step = b.step("docs", "Build documentation");
docs_step.dependOn(&docgen_cmd.step);
- const test_cases = b.addTest("src/test.zig");
+ const test_cases = b.addTest(.{
+ .root_source_file = .{ .path = "src/test.zig" },
+ .optimize = optimize,
+ });
test_cases.main_pkg_path = ".";
test_cases.stack_size = stack_size;
- test_cases.setBuildMode(mode);
test_cases.single_threaded = single_threaded;
const fmt_build_zig = b.addFmt(&[_][]const u8{"build.zig"});
@@ -149,17 +156,15 @@ pub fn build(b: *Builder) !void {
const mem_leak_frames: u32 = b.option(u32, "mem-leak-frames", "How many stack frames to print when a memory leak occurs. Tests get 2x this amount.") orelse blk: {
if (strip == true) break :blk @as(u32, 0);
- if (mode != .Debug) break :blk 0;
+ if (optimize != .Debug) break :blk 0;
break :blk 4;
};
- const exe = addCompilerStep(b);
+ const exe = addCompilerStep(b, optimize, target);
exe.strip = strip;
exe.sanitize_thread = sanitize_thread;
exe.build_id = b.option(bool, "build-id", "Include a build id note") orelse false;
exe.install();
- exe.setBuildMode(mode);
- exe.setTarget(target);
const compile_step = b.step("compile", "Build the self-hosted compiler");
compile_step.dependOn(&exe.step);
@@ -195,7 +200,7 @@ pub fn build(b: *Builder) !void {
test_cases.linkLibC();
}
- const is_debug = mode == .Debug;
+ const is_debug = optimize == .Debug;
const enable_logging = b.option(bool, "log", "Enable debug logging with --debug-log") orelse is_debug;
const enable_link_snapshots = b.option(bool, "link-snapshot", "Whether to enable linker state snapshots") orelse false;
@@ -360,25 +365,25 @@ pub fn build(b: *Builder) !void {
test_step.dependOn(test_cases_step);
}
- var chosen_modes: [4]builtin.Mode = undefined;
+ var chosen_opt_modes_buf: [4]builtin.Mode = undefined;
var chosen_mode_index: usize = 0;
if (!skip_debug) {
- chosen_modes[chosen_mode_index] = builtin.Mode.Debug;
+ chosen_opt_modes_buf[chosen_mode_index] = builtin.Mode.Debug;
chosen_mode_index += 1;
}
if (!skip_release_safe) {
- chosen_modes[chosen_mode_index] = builtin.Mode.ReleaseSafe;
+ chosen_opt_modes_buf[chosen_mode_index] = builtin.Mode.ReleaseSafe;
chosen_mode_index += 1;
}
if (!skip_release_fast) {
- chosen_modes[chosen_mode_index] = builtin.Mode.ReleaseFast;
+ chosen_opt_modes_buf[chosen_mode_index] = builtin.Mode.ReleaseFast;
chosen_mode_index += 1;
}
if (!skip_release_small) {
- chosen_modes[chosen_mode_index] = builtin.Mode.ReleaseSmall;
+ chosen_opt_modes_buf[chosen_mode_index] = builtin.Mode.ReleaseSmall;
chosen_mode_index += 1;
}
- const modes = chosen_modes[0..chosen_mode_index];
+ const optimization_modes = chosen_opt_modes_buf[0..chosen_mode_index];
// run stage1 `zig fmt` on this build.zig file just to make sure it works
test_step.dependOn(&fmt_build_zig.step);
@@ -391,7 +396,7 @@ pub fn build(b: *Builder) !void {
"test/behavior.zig",
"behavior",
"Run the behavior tests",
- modes,
+ optimization_modes,
skip_single_threaded,
skip_non_native,
skip_libc,
@@ -405,7 +410,7 @@ pub fn build(b: *Builder) !void {
"lib/compiler_rt.zig",
"compiler-rt",
"Run the compiler_rt tests",
- modes,
+ optimization_modes,
true, // skip_single_threaded
skip_non_native,
true, // skip_libc
@@ -419,7 +424,7 @@ pub fn build(b: *Builder) !void {
"lib/c.zig",
"universal-libc",
"Run the universal libc tests",
- modes,
+ optimization_modes,
true, // skip_single_threaded
skip_non_native,
true, // skip_libc
@@ -427,11 +432,11 @@ pub fn build(b: *Builder) !void {
skip_stage2_tests or true, // TODO get these all passing
));
- test_step.dependOn(tests.addCompareOutputTests(b, test_filter, modes));
+ test_step.dependOn(tests.addCompareOutputTests(b, test_filter, optimization_modes));
test_step.dependOn(tests.addStandaloneTests(
b,
test_filter,
- modes,
+ optimization_modes,
skip_non_native,
enable_macos_sdk,
target,
@@ -444,10 +449,10 @@ pub fn build(b: *Builder) !void {
enable_symlinks_windows,
));
test_step.dependOn(tests.addCAbiTests(b, skip_non_native, skip_release));
- test_step.dependOn(tests.addLinkTests(b, test_filter, modes, enable_macos_sdk, skip_stage2_tests, enable_symlinks_windows));
- test_step.dependOn(tests.addStackTraceTests(b, test_filter, modes));
- test_step.dependOn(tests.addCliTests(b, test_filter, modes));
- test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, modes));
+ test_step.dependOn(tests.addLinkTests(b, test_filter, optimization_modes, enable_macos_sdk, skip_stage2_tests, enable_symlinks_windows));
+ test_step.dependOn(tests.addStackTraceTests(b, test_filter, optimization_modes));
+ test_step.dependOn(tests.addCliTests(b, test_filter, optimization_modes));
+ test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, optimization_modes));
test_step.dependOn(tests.addTranslateCTests(b, test_filter));
if (!skip_run_translated_c) {
test_step.dependOn(tests.addRunTranslatedCTests(b, test_filter, target));
@@ -461,7 +466,7 @@ pub fn build(b: *Builder) !void {
"lib/std/std.zig",
"std",
"Run the standard library tests",
- modes,
+ optimization_modes,
skip_single_threaded,
skip_non_native,
skip_libc,
@@ -481,9 +486,7 @@ fn addWasiUpdateStep(b: *Builder, version: [:0]const u8) !void {
};
target.cpu_features_add.addFeature(@enumToInt(std.Target.wasm.Feature.bulk_memory));
- const exe = addCompilerStep(b);
- exe.setBuildMode(.ReleaseSmall);
- exe.setTarget(target);
+ const exe = addCompilerStep(b, .ReleaseSmall, target);
const exe_options = b.addOptions();
exe.addOptions("build_options", exe_options);
@@ -510,8 +513,17 @@ fn addWasiUpdateStep(b: *Builder, version: [:0]const u8) !void {
update_zig1_step.dependOn(&run_opt.step);
}
-fn addCompilerStep(b: *Builder) *std.build.LibExeObjStep {
- const exe = b.addExecutable("zig", "src/main.zig");
+fn addCompilerStep(
+ b: *Builder,
+ optimize: std.builtin.OptimizeMode,
+ target: std.zig.CrossTarget,
+) *std.build.LibExeObjStep {
+ const exe = b.addExecutable(.{
+ .name = "zig",
+ .root_source_file = .{ .path = "src/main.zig" },
+ .target = target,
+ .optimize = optimize,
+ });
exe.stack_size = stack_size;
return exe;
}
diff --git a/doc/langref.html.in b/doc/langref.html.in
index fd4aa8ae76..c008149f41 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -9531,8 +9531,12 @@ fn foo(comptime T: type, ptr: *T) T {
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const exe = b.addExecutable("example", "example.zig");
- exe.setBuildMode(b.standardReleaseOptions());
+ const optimize = b.standardOptimizeOption(.{});
+ const exe = b.addExecutable(.{
+ .name = "example",
+ .root_source_file = .{ .path = "example.zig" },
+ .optimize = optimize,
+ });
b.default_step.dependOn(&exe.step);
}
{#code_end#}
@@ -10558,11 +10562,14 @@ pub fn build(b: *Builder) void {
// Standard release options allow the person running `zig build` to select
// between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
- const exe = b.addExecutable("example", "src/main.zig");
- exe.setTarget(target);
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "example",
+ .root_source_file = .{ .path = "src/main.zig" },
+ .target = target,
+ .optimize = optimize,
+ });
exe.install();
const run_cmd = exe.run();
@@ -10584,13 +10591,18 @@ pub fn build(b: *Builder) void {
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
- const lib = b.addStaticLibrary("example", "src/main.zig");
- lib.setBuildMode(mode);
+ const optimize = b.standardOptimizeOption(.{});
+ const lib = b.addStaticLibrary(.{
+ .name = "example",
+ .root_source_file = .{ .path = "src/main.zig" },
+ .optimize = optimize,
+ });
lib.install();
- var main_tests = b.addTest("src/main.zig");
- main_tests.setBuildMode(mode);
+ const main_tests = b.addTest(.{
+ .root_source_file = .{ .path = "src/main.zig" },
+ .optimize = optimize,
+ });
const test_step = b.step("test", "Run library tests");
test_step.dependOn(&main_tests.step);
@@ -10954,7 +10966,9 @@ const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
const lib = b.addSharedLibrary("mathtest", "mathtest.zig", b.version(1, 0, 0));
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ });
exe.addCSourceFile("test.c", &[_][]const u8{"-std=c99"});
exe.linkLibrary(lib);
exe.linkSystemLibrary("c");
@@ -11016,7 +11030,9 @@ const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
const obj = b.addObject("base64", "base64.zig");
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ });
exe.addCSourceFile("test.c", &[_][]const u8{"-std=c99"});
exe.addObject(obj);
exe.linkSystemLibrary("c");
diff --git a/lib/init-exe/build.zig b/lib/init-exe/build.zig
index 29b50b5cc4..36e5feddec 100644
--- a/lib/init-exe/build.zig
+++ b/lib/init-exe/build.zig
@@ -1,5 +1,8 @@
const std = @import("std");
+// Although this function looks imperative, note that its job is to
+// declaratively construct a build graph that will be executed by an external
+// runner.
pub fn build(b: *std.build.Builder) void {
// Standard target options allows the person running `zig build` to choose
// what target to build for. Here we do not override the defaults, which
@@ -7,28 +10,58 @@ pub fn build(b: *std.build.Builder) void {
// for restricting supported target set are available.
const target = b.standardTargetOptions(.{});
- // Standard release options allow the person running `zig build` to select
- // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
- const mode = b.standardReleaseOptions();
+ // Standard optimization options allow the person running `zig build` to select
+ // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
+ // set a preferred release mode, allowing the user to decide how to optimize.
+ const optimize = b.standardOptimizeOption();
- const exe = b.addExecutable("$", "src/main.zig");
- exe.setTarget(target);
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "$",
+ // In this case the main source file is merely a path, however, in more
+ // complicated build scripts, this could be a generated file.
+ .root_source_file = .{ .path = "src/main.zig" },
+ .target = target,
+ .optimize = optimize,
+ });
+
+ // This declares intent for the executable to be installed into the
+ // standard location when the user invokes the "install" step (the default
+ // step when running `zig build`).
exe.install();
+ // This *creates* a RunStep in the build graph, to be executed when another
+ // step is evaluated that depends on it. The next line below will establish
+ // such a dependency.
const run_cmd = exe.run();
+
+ // By making the run step depend on the install step, it will be run from the
+ // installation directory rather than directly from within the cache directory.
+ // This is not necessary, however, if the application depends on other installed
+ // files, this ensures they will be present and in the expected location.
run_cmd.step.dependOn(b.getInstallStep());
+
+ // This allows the user to pass arguments to the application in the build
+ // command itself, like this: `zig build run -- arg1 arg2 etc`
if (b.args) |args| {
run_cmd.addArgs(args);
}
+ // This creates a build step. It will be visible in the `zig build --help` menu,
+ // and can be selected like this: `zig build run`
+ // This will evaluate the `run` step rather than the default, which is "install".
const run_step = b.step("run", "Run the app");
run_step.dependOn(&run_cmd.step);
- const exe_tests = b.addTest("src/main.zig");
- exe_tests.setTarget(target);
- exe_tests.setBuildMode(mode);
+ // Creates a step for unit testing.
+ const exe_tests = b.addTest(.{
+ .root_source_file = .{ .path = "src/main.zig" },
+ .target = target,
+ .optimize = optimize,
+ });
+ // Similar to creating the run step earlier, this exposes a `test` step to
+ // the `zig build --help` menu, providing a way for the user to request
+ // running the unit tests.
const test_step = b.step("test", "Run unit tests");
test_step.dependOn(&exe_tests.step);
}
diff --git a/lib/init-lib/build.zig b/lib/init-lib/build.zig
index b3876691a2..4a7b700dc2 100644
--- a/lib/init-lib/build.zig
+++ b/lib/init-lib/build.zig
@@ -1,17 +1,44 @@
const std = @import("std");
+// Although this function looks imperative, note that its job is to
+// declaratively construct a build graph that will be executed by an external
+// runner.
pub fn build(b: *std.build.Builder) void {
- // Standard release options allow the person running `zig build` to select
- // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
- const mode = b.standardReleaseOptions();
+ // Standard target options allows the person running `zig build` to choose
+ // what target to build for. Here we do not override the defaults, which
+ // means any target is allowed, and the default is native. Other options
+ // for restricting supported target set are available.
+ const target = b.standardTargetOptions(.{});
- const lib = b.addStaticLibrary("$", "src/main.zig");
- lib.setBuildMode(mode);
+ // Standard optimization options allow the person running `zig build` to select
+ // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
+ // set a preferred release mode, allowing the user to decide how to optimize.
+ const optimize = b.standardOptimizeOption();
+
+ const lib = b.addStaticLibrary(.{
+ .name = "$",
+ // In this case the main source file is merely a path, however, in more
+ // complicated build scripts, this could be a generated file.
+ .root_source_file = .{ .path = "src/main.zig" },
+ .target = target,
+ .optimize = optimize,
+ });
+
+ // This declares intent for the library to be installed into the standard
+ // location when the user invokes the "install" step (the default step when
+ // running `zig build`).
lib.install();
- const main_tests = b.addTest("src/main.zig");
- main_tests.setBuildMode(mode);
+ // Creates a step for unit testing.
+ const main_tests = b.addTest(.{
+ .root_source_file = .{ .path = "src/main.zig" },
+ .target = target,
+ .optimize = optimize,
+ });
+ // This creates a build step. It will be visible in the `zig build --help` menu,
+ // and can be selected like this: `zig build test`
+ // This will evaluate the `test` step rather than the default, which is "install".
const test_step = b.step("test", "Run library tests");
test_step.dependOn(&main_tests.step);
}
diff --git a/lib/std/build.zig b/lib/std/build.zig
index a4ad599d93..4ee00a4710 100644
--- a/lib/std/build.zig
+++ b/lib/std/build.zig
@@ -420,10 +420,10 @@ pub const Builder = struct {
pub const ExecutableOptions = struct {
name: []const u8,
- root_source_file: ?FileSource,
+ root_source_file: ?FileSource = null,
version: ?std.builtin.Version = null,
- target: CrossTarget,
- optimize: std.builtin.Mode,
+ target: CrossTarget = .{},
+ optimize: std.builtin.Mode = .Debug,
linkage: ?LibExeObjStep.Linkage = null,
};
@@ -436,13 +436,12 @@ pub const Builder = struct {
.optimize = options.optimize,
.kind = .exe,
.linkage = options.linkage,
- .version = options.version,
});
}
pub const ObjectOptions = struct {
name: []const u8,
- root_source_file: ?FileSource,
+ root_source_file: ?FileSource = null,
target: CrossTarget,
optimize: std.builtin.Mode,
};
@@ -459,7 +458,7 @@ pub const Builder = struct {
pub const SharedLibraryOptions = struct {
name: []const u8,
- root_source_file: ?FileSource,
+ root_source_file: ?FileSource = null,
version: ?std.builtin.Version = null,
target: CrossTarget,
optimize: std.builtin.Mode,
@@ -501,8 +500,8 @@ pub const Builder = struct {
name: []const u8 = "test",
kind: LibExeObjStep.Kind = .@"test",
root_source_file: FileSource,
- target: CrossTarget,
- optimize: std.builtin.Mode,
+ target: CrossTarget = .{},
+ optimize: std.builtin.Mode = .Debug,
version: ?std.builtin.Version = null,
};
@@ -630,8 +629,8 @@ pub const Builder = struct {
return FmtStep.create(self, paths);
}
- pub fn addTranslateC(self: *Builder, source: FileSource) *TranslateCStep {
- return TranslateCStep.create(self, source.dupe(self));
+ pub fn addTranslateC(self: *Builder, options: TranslateCStep.Options) *TranslateCStep {
+ return TranslateCStep.create(self, options);
}
pub fn make(self: *Builder, step_names: []const []const u8) !void {
diff --git a/lib/std/build/TranslateCStep.zig b/lib/std/build/TranslateCStep.zig
index 1f9bee463c..9f45d606a1 100644
--- a/lib/std/build/TranslateCStep.zig
+++ b/lib/std/build/TranslateCStep.zig
@@ -19,11 +19,19 @@ include_dirs: std.ArrayList([]const u8),
c_macros: std.ArrayList([]const u8),
output_dir: ?[]const u8,
out_basename: []const u8,
-target: CrossTarget = CrossTarget{},
+target: CrossTarget,
+optimize: std.builtin.OptimizeMode,
output_file: build.GeneratedFile,
-pub fn create(builder: *Builder, source: build.FileSource) *TranslateCStep {
+pub const Options = struct {
+ source_file: build.FileSource,
+ target: CrossTarget,
+ optimize: std.builtin.OptimizeMode,
+};
+
+pub fn create(builder: *Builder, options: Options) *TranslateCStep {
const self = builder.allocator.create(TranslateCStep) catch unreachable;
+ const source = options.source_file.dupe(builder);
self.* = TranslateCStep{
.step = Step.init(.translate_c, "translate-c", builder.allocator, make),
.builder = builder,
@@ -32,19 +40,32 @@ pub fn create(builder: *Builder, source: build.FileSource) *TranslateCStep {
.c_macros = std.ArrayList([]const u8).init(builder.allocator),
.output_dir = null,
.out_basename = undefined,
+ .target = options.target,
+ .optimize = options.optimize,
.output_file = build.GeneratedFile{ .step = &self.step },
};
source.addStepDependencies(&self.step);
return self;
}
-pub fn setTarget(self: *TranslateCStep, target: CrossTarget) void {
- self.target = target;
-}
+pub const AddExecutableOptions = struct {
+ name: ?[]const u8 = null,
+ version: ?std.builtin.Version = null,
+ target: ?CrossTarget = null,
+ optimize: ?std.builtin.Mode = null,
+ linkage: ?LibExeObjStep.Linkage = null,
+};
/// Creates a step to build an executable from the translated source.
-pub fn addExecutable(self: *TranslateCStep) *LibExeObjStep {
- return self.builder.addExecutableSource("translated_c", build.FileSource{ .generated = &self.output_file });
+pub fn addExecutable(self: *TranslateCStep, options: AddExecutableOptions) *LibExeObjStep {
+ return self.builder.addExecutable(.{
+ .root_source_file = .{ .generated = &self.output_file },
+ .name = options.name orelse "translated_c",
+ .version = options.version,
+ .target = options.target orelse self.target,
+ .optimize = options.optimize orelse self.optimize,
+ .linkage = options.linkage,
+ });
}
pub fn addIncludeDir(self: *TranslateCStep, include_dir: []const u8) void {
@@ -82,6 +103,11 @@ fn make(step: *Step) !void {
try argv_list.append(try self.target.zigTriple(self.builder.allocator));
}
+ switch (self.optimize) {
+ .Debug => {}, // Skip since it's the default.
+ else => try argv_list.append(self.builder.fmt("-O{s}", .{@tagName(self.optimize)})),
+ }
+
for (self.include_dirs.items) |include_dir| {
try argv_list.append("-I");
try argv_list.append(include_dir);
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 4d949946d8..74c61d229b 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -131,13 +131,16 @@ pub const CodeModel = enum {
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
-pub const Mode = enum {
+pub const OptimizeMode = enum {
Debug,
ReleaseSafe,
ReleaseFast,
ReleaseSmall,
};
+/// Deprecated; use OptimizeMode.
+pub const Mode = OptimizeMode;
+
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const CallingConvention = enum {
diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig
index 4cb346aa47..81fae399ef 100644
--- a/src/link/MachO/zld.zig
+++ b/src/link/MachO/zld.zig
@@ -3596,7 +3596,8 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
man.hash.addOptionalBytes(options.sysroot);
try man.addOptionalFile(options.entitlements);
- // We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
+ // We don't actually care whether it's a cache hit or miss; we just
+ // need the digest and the lock.
_ = try man.hit();
digest = man.final();
@@ -4177,9 +4178,11 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
log.debug("failed to save linking hash digest file: {s}", .{@errorName(err)});
};
// Again failure here only means an unnecessary cache miss.
- man.writeManifest() catch |err| {
- log.debug("failed to write cache manifest when linking: {s}", .{@errorName(err)});
- };
+ if (man.have_exclusive_lock) {
+ man.writeManifest() catch |err| {
+ log.debug("failed to write cache manifest when linking: {s}", .{@errorName(err)});
+ };
+ }
// We hang on to this lock so that the output file path can be used without
// other processes clobbering it.
macho_file.base.lock = man.toOwnedLock();
diff --git a/test/link/bss/build.zig b/test/link/bss/build.zig
index 76e9bdb305..c31fa7faf5 100644
--- a/test/link/bss/build.zig
+++ b/test/link/bss/build.zig
@@ -1,12 +1,15 @@
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const test_step = b.step("test", "Test");
- const exe = b.addExecutable("bss", "main.zig");
+ const exe = b.addExecutable(.{
+ .name = "bss",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ });
b.default_step.dependOn(&exe.step);
- exe.setBuildMode(mode);
const run = exe.run();
run.expectStdOutEqual("0, 1, 0\n");
diff --git a/test/link/common_symbols/build.zig b/test/link/common_symbols/build.zig
index 2f9f892e86..068c3f9c57 100644
--- a/test/link/common_symbols/build.zig
+++ b/test/link/common_symbols/build.zig
@@ -1,14 +1,19 @@
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
- const lib_a = b.addStaticLibrary("a", null);
+ const lib_a = b.addStaticLibrary(.{
+ .name = "a",
+ .optimize = optimize,
+ .target = .{},
+ });
lib_a.addCSourceFiles(&.{ "c.c", "a.c", "b.c" }, &.{"-fcommon"});
- lib_a.setBuildMode(mode);
- const test_exe = b.addTest("main.zig");
- test_exe.setBuildMode(mode);
+ const test_exe = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ });
test_exe.linkLibrary(lib_a);
const test_step = b.step("test", "Test it");
diff --git a/test/link/common_symbols_alignment/build.zig b/test/link/common_symbols_alignment/build.zig
index a62d86af4f..b6dd39801c 100644
--- a/test/link/common_symbols_alignment/build.zig
+++ b/test/link/common_symbols_alignment/build.zig
@@ -1,14 +1,21 @@
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
+ const target = b.standardTargetOptions(.{});
- const lib_a = b.addStaticLibrary("a", null);
+ const lib_a = b.addStaticLibrary(.{
+ .name = "a",
+ .optimize = optimize,
+ .target = target,
+ });
lib_a.addCSourceFiles(&.{"a.c"}, &.{"-fcommon"});
- lib_a.setBuildMode(mode);
- const test_exe = b.addTest("main.zig");
- test_exe.setBuildMode(mode);
+ const test_exe = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
test_exe.linkLibrary(lib_a);
const test_step = b.step("test", "Test it");
diff --git a/test/link/interdependent_static_c_libs/build.zig b/test/link/interdependent_static_c_libs/build.zig
index bd1b6100da..50a214490d 100644
--- a/test/link/interdependent_static_c_libs/build.zig
+++ b/test/link/interdependent_static_c_libs/build.zig
@@ -1,20 +1,30 @@
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
+ const target = b.standardTargetOptions(.{});
- const lib_a = b.addStaticLibrary("a", null);
+ const lib_a = b.addStaticLibrary(.{
+ .name = "a",
+ .optimize = optimize,
+ .target = target,
+ });
lib_a.addCSourceFile("a.c", &[_][]const u8{});
- lib_a.setBuildMode(mode);
lib_a.addIncludePath(".");
- const lib_b = b.addStaticLibrary("b", null);
+ const lib_b = b.addStaticLibrary(.{
+ .name = "b",
+ .optimize = optimize,
+ .target = target,
+ });
lib_b.addCSourceFile("b.c", &[_][]const u8{});
- lib_b.setBuildMode(mode);
lib_b.addIncludePath(".");
- const test_exe = b.addTest("main.zig");
- test_exe.setBuildMode(mode);
+ const test_exe = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
test_exe.linkLibrary(lib_a);
test_exe.linkLibrary(lib_b);
test_exe.addIncludePath(".");
diff --git a/test/link/macho/bugs/13056/build.zig b/test/link/macho/bugs/13056/build.zig
index 751a7c4db6..a65cd60766 100644
--- a/test/link/macho/bugs/13056/build.zig
+++ b/test/link/macho/bugs/13056/build.zig
@@ -2,7 +2,7 @@ const std = @import("std");
const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const target_info = std.zig.system.NativeTargetInfo.detect(target) catch unreachable;
@@ -11,7 +11,10 @@ pub fn build(b: *Builder) void {
const test_step = b.step("test", "Test the program");
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ });
b.default_step.dependOn(&exe.step);
exe.addIncludePath(std.fs.path.join(b.allocator, &.{ sdk.path, "/usr/include" }) catch unreachable);
exe.addIncludePath(std.fs.path.join(b.allocator, &.{ sdk.path, "/usr/include/c++/v1" }) catch unreachable);
@@ -20,7 +23,6 @@ pub fn build(b: *Builder) void {
"-nostdinc++",
});
exe.addObjectFile(std.fs.path.join(b.allocator, &.{ sdk.path, "/usr/lib/libc++.tbd" }) catch unreachable);
- exe.setBuildMode(mode);
const run_cmd = exe.run();
run_cmd.expectStdErrEqual("x: 5\n");
diff --git a/test/link/macho/bugs/13457/build.zig b/test/link/macho/bugs/13457/build.zig
index 2de8c01c6a..4c1ce89261 100644
--- a/test/link/macho/bugs/13457/build.zig
+++ b/test/link/macho/bugs/13457/build.zig
@@ -3,14 +3,17 @@ const Builder = std.build.Builder;
const LibExeObjectStep = std.build.LibExeObjStep;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test the program");
- const exe = b.addExecutable("test", "main.zig");
- exe.setBuildMode(mode);
- exe.setTarget(target);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
const run = exe.runEmulatable();
test_step.dependOn(&run.step);
diff --git a/test/link/macho/dead_strip/build.zig b/test/link/macho/dead_strip/build.zig
index 25759f5619..a4c3575e45 100644
--- a/test/link/macho/dead_strip/build.zig
+++ b/test/link/macho/dead_strip/build.zig
@@ -3,7 +3,7 @@ const Builder = std.build.Builder;
const LibExeObjectStep = std.build.LibExeObjStep;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test the program");
@@ -11,7 +11,7 @@ pub fn build(b: *Builder) void {
{
// Without -dead_strip, we expect `iAmUnused` symbol present
- const exe = createScenario(b, mode, target);
+ const exe = createScenario(b, optimize, target);
const check = exe.checkObject(.macho);
check.checkInSymtab();
@@ -24,7 +24,7 @@ pub fn build(b: *Builder) void {
{
// With -dead_strip, no `iAmUnused` symbol should be present
- const exe = createScenario(b, mode, target);
+ const exe = createScenario(b, optimize, target);
exe.link_gc_sections = true;
const check = exe.checkObject(.macho);
@@ -37,11 +37,13 @@ pub fn build(b: *Builder) void {
}
}
-fn createScenario(b: *Builder, mode: std.builtin.Mode, target: std.zig.CrossTarget) *LibExeObjectStep {
- const exe = b.addExecutable("test", null);
+fn createScenario(b: *Builder, optimize: std.builtin.OptimizeMode, target: std.zig.CrossTarget) *LibExeObjectStep {
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ .target = target,
+ });
exe.addCSourceFile("main.c", &[0][]const u8{});
- exe.setBuildMode(mode);
- exe.setTarget(target);
exe.linkLibC();
return exe;
}
diff --git a/test/link/macho/dead_strip_dylibs/build.zig b/test/link/macho/dead_strip_dylibs/build.zig
index efdaf191bd..0127b575fc 100644
--- a/test/link/macho/dead_strip_dylibs/build.zig
+++ b/test/link/macho/dead_strip_dylibs/build.zig
@@ -3,14 +3,14 @@ const Builder = std.build.Builder;
const LibExeObjectStep = std.build.LibExeObjStep;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const test_step = b.step("test", "Test the program");
test_step.dependOn(b.getInstallStep());
{
// Without -dead_strip_dylibs we expect `-la` to include liba.dylib in the final executable
- const exe = createScenario(b, mode);
+ const exe = createScenario(b, optimize);
const check = exe.checkObject(.macho);
check.checkStart("cmd LOAD_DYLIB");
@@ -27,7 +27,7 @@ pub fn build(b: *Builder) void {
{
// With -dead_strip_dylibs, we should include liba.dylib as it's unreachable
- const exe = createScenario(b, mode);
+ const exe = createScenario(b, optimize);
exe.dead_strip_dylibs = true;
const run_cmd = exe.run();
@@ -36,10 +36,12 @@ pub fn build(b: *Builder) void {
}
}
-fn createScenario(b: *Builder, mode: std.builtin.Mode) *LibExeObjectStep {
- const exe = b.addExecutable("test", null);
+fn createScenario(b: *Builder, optimize: std.builtin.OptimizeMode) *LibExeObjectStep {
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ });
exe.addCSourceFile("main.c", &[0][]const u8{});
- exe.setBuildMode(mode);
exe.linkLibC();
exe.linkFramework("Cocoa");
return exe;
diff --git a/test/link/macho/dylib/build.zig b/test/link/macho/dylib/build.zig
index a5baf255c6..acd27a507f 100644
--- a/test/link/macho/dylib/build.zig
+++ b/test/link/macho/dylib/build.zig
@@ -2,15 +2,18 @@ const std = @import("std");
const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const dylib = b.addSharedLibrary("a", null, b.version(1, 0, 0));
- dylib.setBuildMode(mode);
- dylib.setTarget(target);
+ const dylib = b.addSharedLibrary(.{
+ .name = "a",
+ .version = .{ .major = 1, .minor = 0 },
+ .optimize = optimize,
+ .target = target,
+ });
dylib.addCSourceFile("a.c", &.{});
dylib.linkLibC();
dylib.install();
@@ -24,9 +27,11 @@ pub fn build(b: *Builder) void {
test_step.dependOn(&check_dylib.step);
- const exe = b.addExecutable("main", null);
- exe.setTarget(target);
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "main",
+ .optimize = optimize,
+ .target = target,
+ });
exe.addCSourceFile("main.c", &.{});
exe.linkSystemLibrary("a");
exe.linkLibC();
diff --git a/test/link/macho/empty/build.zig b/test/link/macho/empty/build.zig
index ab016fd4bd..8b2d047371 100644
--- a/test/link/macho/empty/build.zig
+++ b/test/link/macho/empty/build.zig
@@ -2,17 +2,19 @@ const std = @import("std");
const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test the program");
test_step.dependOn(b.getInstallStep());
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ .target = target,
+ });
exe.addCSourceFile("main.c", &[0][]const u8{});
exe.addCSourceFile("empty.c", &[0][]const u8{});
- exe.setBuildMode(mode);
- exe.setTarget(target);
exe.linkLibC();
const run_cmd = std.build.EmulatableRunStep.create(b, "run", exe);
diff --git a/test/link/macho/entry/build.zig b/test/link/macho/entry/build.zig
index 0ecca14aa2..87e4d1b5da 100644
--- a/test/link/macho/entry/build.zig
+++ b/test/link/macho/entry/build.zig
@@ -2,14 +2,16 @@ const std = @import("std");
const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const exe = b.addExecutable("main", null);
- exe.setTarget(.{ .os_tag = .macos });
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "main",
+ .optimize = optimize,
+ .target = .{ .os_tag = .macos },
+ });
exe.addCSourceFile("main.c", &.{});
exe.linkLibC();
exe.entry_symbol_name = "_non_main";
diff --git a/test/link/macho/headerpad/build.zig b/test/link/macho/headerpad/build.zig
index 0730a01d44..74efb5d580 100644
--- a/test/link/macho/headerpad/build.zig
+++ b/test/link/macho/headerpad/build.zig
@@ -4,14 +4,14 @@ const Builder = std.build.Builder;
const LibExeObjectStep = std.build.LibExeObjStep;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
{
// Test -headerpad_max_install_names
- const exe = simpleExe(b, mode);
+ const exe = simpleExe(b, optimize);
exe.headerpad_max_install_names = true;
const check = exe.checkObject(.macho);
@@ -36,7 +36,7 @@ pub fn build(b: *Builder) void {
{
// Test -headerpad
- const exe = simpleExe(b, mode);
+ const exe = simpleExe(b, optimize);
exe.headerpad_size = 0x10000;
const check = exe.checkObject(.macho);
@@ -52,7 +52,7 @@ pub fn build(b: *Builder) void {
{
// Test both flags with -headerpad overriding -headerpad_max_install_names
- const exe = simpleExe(b, mode);
+ const exe = simpleExe(b, optimize);
exe.headerpad_max_install_names = true;
exe.headerpad_size = 0x10000;
@@ -69,7 +69,7 @@ pub fn build(b: *Builder) void {
{
// Test both flags with -headerpad_max_install_names overriding -headerpad
- const exe = simpleExe(b, mode);
+ const exe = simpleExe(b, optimize);
exe.headerpad_size = 0x1000;
exe.headerpad_max_install_names = true;
@@ -94,9 +94,11 @@ pub fn build(b: *Builder) void {
}
}
-fn simpleExe(b: *Builder, mode: std.builtin.Mode) *LibExeObjectStep {
- const exe = b.addExecutable("main", null);
- exe.setBuildMode(mode);
+fn simpleExe(b: *Builder, optimize: std.builtin.OptimizeMode) *LibExeObjectStep {
+ const exe = b.addExecutable(.{
+ .name = "main",
+ .optimize = optimize,
+ });
exe.addCSourceFile("main.c", &.{});
exe.linkLibC();
exe.linkFramework("CoreFoundation");
diff --git a/test/link/macho/linksection/build.zig b/test/link/macho/linksection/build.zig
index 9204499803..eebb31a21e 100644
--- a/test/link/macho/linksection/build.zig
+++ b/test/link/macho/linksection/build.zig
@@ -1,15 +1,18 @@
const std = @import("std");
pub fn build(b: *std.build.Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const target = std.zig.CrossTarget{ .os_tag = .macos };
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const obj = b.addObject("test", "main.zig");
- obj.setBuildMode(mode);
- obj.setTarget(target);
+ const obj = b.addObject(.{
+ .name = "test",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
const check = obj.checkObject(.macho);
@@ -19,7 +22,7 @@ pub fn build(b: *std.build.Builder) void {
check.checkInSymtab();
check.checkNext("{*} (__TEXT,__TestFn) external _testFn");
- if (mode == .Debug) {
+ if (optimize == .Debug) {
check.checkInSymtab();
check.checkNext("{*} (__TEXT,__TestGenFnA) _main.testGenericFn__anon_{*}");
}
diff --git a/test/link/macho/needed_framework/build.zig b/test/link/macho/needed_framework/build.zig
index 4315935941..33965a9272 100644
--- a/test/link/macho/needed_framework/build.zig
+++ b/test/link/macho/needed_framework/build.zig
@@ -3,16 +3,18 @@ const Builder = std.build.Builder;
const LibExeObjectStep = std.build.LibExeObjStep;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const test_step = b.step("test", "Test the program");
test_step.dependOn(b.getInstallStep());
// -dead_strip_dylibs
// -needed_framework Cocoa
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ });
exe.addCSourceFile("main.c", &[0][]const u8{});
- exe.setBuildMode(mode);
exe.linkLibC();
exe.linkFrameworkNeeded("Cocoa");
exe.dead_strip_dylibs = true;
diff --git a/test/link/macho/needed_library/build.zig b/test/link/macho/needed_library/build.zig
index a314fd2201..137239d292 100644
--- a/test/link/macho/needed_library/build.zig
+++ b/test/link/macho/needed_library/build.zig
@@ -3,25 +3,30 @@ const Builder = std.build.Builder;
const LibExeObjectStep = std.build.LibExeObjStep;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test the program");
test_step.dependOn(b.getInstallStep());
- const dylib = b.addSharedLibrary("a", null, b.version(1, 0, 0));
- dylib.setTarget(target);
- dylib.setBuildMode(mode);
+ const dylib = b.addSharedLibrary(.{
+ .name = "a",
+ .version = .{ .major = 1, .minor = 0 },
+ .optimize = optimize,
+ .target = target,
+ });
dylib.addCSourceFile("a.c", &.{});
dylib.linkLibC();
dylib.install();
// -dead_strip_dylibs
// -needed-la
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ .target = target,
+ });
exe.addCSourceFile("main.c", &[0][]const u8{});
- exe.setBuildMode(mode);
- exe.setTarget(target);
exe.linkLibC();
exe.linkSystemLibraryNeeded("a");
exe.addLibraryPath(b.pathFromRoot("zig-out/lib"));
diff --git a/test/link/macho/objc/build.zig b/test/link/macho/objc/build.zig
index d7fd872f77..9c38739a5c 100644
--- a/test/link/macho/objc/build.zig
+++ b/test/link/macho/objc/build.zig
@@ -2,15 +2,17 @@ const std = @import("std");
const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const test_step = b.step("test", "Test the program");
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ });
exe.addIncludePath(".");
exe.addCSourceFile("Foo.m", &[0][]const u8{});
exe.addCSourceFile("test.m", &[0][]const u8{});
- exe.setBuildMode(mode);
exe.linkLibC();
// TODO when we figure out how to ship framework stubs for cross-compilation,
// populate paths to the sysroot here.
diff --git a/test/link/macho/objcpp/build.zig b/test/link/macho/objcpp/build.zig
index 767578e225..f4c88b2862 100644
--- a/test/link/macho/objcpp/build.zig
+++ b/test/link/macho/objcpp/build.zig
@@ -2,16 +2,18 @@ const std = @import("std");
const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const test_step = b.step("test", "Test the program");
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ });
b.default_step.dependOn(&exe.step);
exe.addIncludePath(".");
exe.addCSourceFile("Foo.mm", &[0][]const u8{});
exe.addCSourceFile("test.mm", &[0][]const u8{});
- exe.setBuildMode(mode);
exe.linkLibCpp();
// TODO when we figure out how to ship framework stubs for cross-compilation,
// populate paths to the sysroot here.
diff --git a/test/link/macho/pagezero/build.zig b/test/link/macho/pagezero/build.zig
index 5a7044d960..f61aa34a93 100644
--- a/test/link/macho/pagezero/build.zig
+++ b/test/link/macho/pagezero/build.zig
@@ -2,16 +2,18 @@ const std = @import("std");
const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
{
- const exe = b.addExecutable("pagezero", null);
- exe.setTarget(target);
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "pagezero",
+ .optimize = optimize,
+ .target = target,
+ });
exe.addCSourceFile("main.c", &.{});
exe.linkLibC();
exe.pagezero_size = 0x4000;
@@ -29,9 +31,11 @@ pub fn build(b: *Builder) void {
}
{
- const exe = b.addExecutable("no_pagezero", null);
- exe.setTarget(target);
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "no_pagezero",
+ .optimize = optimize,
+ .target = target,
+ });
exe.addCSourceFile("main.c", &.{});
exe.linkLibC();
exe.pagezero_size = 0;
diff --git a/test/link/macho/search_strategy/build.zig b/test/link/macho/search_strategy/build.zig
index e556b5bb23..db894b6ae3 100644
--- a/test/link/macho/search_strategy/build.zig
+++ b/test/link/macho/search_strategy/build.zig
@@ -3,7 +3,7 @@ const Builder = std.build.Builder;
const LibExeObjectStep = std.build.LibExeObjStep;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test");
@@ -11,7 +11,7 @@ pub fn build(b: *Builder) void {
{
// -search_dylibs_first
- const exe = createScenario(b, mode, target);
+ const exe = createScenario(b, optimize, target);
exe.search_strategy = .dylibs_first;
const check = exe.checkObject(.macho);
@@ -26,7 +26,7 @@ pub fn build(b: *Builder) void {
{
// -search_paths_first
- const exe = createScenario(b, mode, target);
+ const exe = createScenario(b, optimize, target);
exe.search_strategy = .paths_first;
const run = std.build.EmulatableRunStep.create(b, "run", exe);
@@ -36,10 +36,12 @@ pub fn build(b: *Builder) void {
}
}
-fn createScenario(b: *Builder, mode: std.builtin.Mode, target: std.zig.CrossTarget) *LibExeObjectStep {
- const static = b.addStaticLibrary("a", null);
- static.setTarget(target);
- static.setBuildMode(mode);
+fn createScenario(b: *Builder, optimize: std.builtin.OptimizeMode, target: std.zig.CrossTarget) *LibExeObjectStep {
+ const static = b.addStaticLibrary(.{
+ .name = "a",
+ .optimize = optimize,
+ .target = target,
+ });
static.addCSourceFile("a.c", &.{});
static.linkLibC();
static.override_dest_dir = std.build.InstallDir{
@@ -47,9 +49,12 @@ fn createScenario(b: *Builder, mode: std.builtin.Mode, target: std.zig.CrossTarg
};
static.install();
- const dylib = b.addSharedLibrary("a", null, b.version(1, 0, 0));
- dylib.setTarget(target);
- dylib.setBuildMode(mode);
+ const dylib = b.addSharedLibrary(.{
+ .name = "a",
+ .version = .{ .major = 1, .minor = 0 },
+ .optimize = optimize,
+ .target = target,
+ });
dylib.addCSourceFile("a.c", &.{});
dylib.linkLibC();
dylib.override_dest_dir = std.build.InstallDir{
@@ -57,9 +62,11 @@ fn createScenario(b: *Builder, mode: std.builtin.Mode, target: std.zig.CrossTarg
};
dylib.install();
- const exe = b.addExecutable("main", null);
- exe.setTarget(target);
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "main",
+ .optimize = optimize,
+ .target = target,
+ });
exe.addCSourceFile("main.c", &.{});
exe.linkSystemLibraryName("a");
exe.linkLibC();
diff --git a/test/link/macho/stack_size/build.zig b/test/link/macho/stack_size/build.zig
index 91c44baf52..74e9a86e94 100644
--- a/test/link/macho/stack_size/build.zig
+++ b/test/link/macho/stack_size/build.zig
@@ -2,15 +2,17 @@ const std = @import("std");
const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const exe = b.addExecutable("main", null);
- exe.setTarget(target);
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "main",
+ .optimize = optimize,
+ .target = target,
+ });
exe.addCSourceFile("main.c", &.{});
exe.linkLibC();
exe.stack_size = 0x100000000;
diff --git a/test/link/macho/strict_validation/build.zig b/test/link/macho/strict_validation/build.zig
index 0ea150252c..b6baf63c11 100644
--- a/test/link/macho/strict_validation/build.zig
+++ b/test/link/macho/strict_validation/build.zig
@@ -4,15 +4,18 @@ const Builder = std.build.Builder;
const LibExeObjectStep = std.build.LibExeObjStep;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const exe = b.addExecutable("main", "main.zig");
- exe.setBuildMode(mode);
- exe.setTarget(target);
+ const exe = b.addExecutable(.{
+ .name = "main",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
exe.linkLibC();
const check_exe = exe.checkObject(.macho);
diff --git a/test/link/macho/tls/build.zig b/test/link/macho/tls/build.zig
index 031a05cedf..9b2fe952bf 100644
--- a/test/link/macho/tls/build.zig
+++ b/test/link/macho/tls/build.zig
@@ -2,18 +2,23 @@ const std = @import("std");
const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
- const lib = b.addSharedLibrary("a", null, b.version(1, 0, 0));
- lib.setBuildMode(mode);
- lib.setTarget(target);
+ const lib = b.addSharedLibrary(.{
+ .name = "a",
+ .version = .{ .major = 1, .minor = 0 },
+ .optimize = optimize,
+ .target = target,
+ });
lib.addCSourceFile("a.c", &.{});
lib.linkLibC();
- const test_exe = b.addTest("main.zig");
- test_exe.setBuildMode(mode);
- test_exe.setTarget(target);
+ const test_exe = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
test_exe.linkLibrary(lib);
test_exe.linkLibC();
diff --git a/test/link/macho/unwind_info/build.zig b/test/link/macho/unwind_info/build.zig
index cc00854465..dbbdbb3e51 100644
--- a/test/link/macho/unwind_info/build.zig
+++ b/test/link/macho/unwind_info/build.zig
@@ -4,23 +4,23 @@ const Builder = std.build.Builder;
const LibExeObjectStep = std.build.LibExeObjStep;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test the program");
- testUnwindInfo(b, test_step, mode, target, false);
- testUnwindInfo(b, test_step, mode, target, true);
+ testUnwindInfo(b, test_step, optimize, target, false);
+ testUnwindInfo(b, test_step, optimize, target, true);
}
fn testUnwindInfo(
b: *Builder,
test_step: *std.build.Step,
- mode: std.builtin.Mode,
+ optimize: std.builtin.OptimizeMode,
target: std.zig.CrossTarget,
dead_strip: bool,
) void {
- const exe = createScenario(b, mode, target);
+ const exe = createScenario(b, optimize, target);
exe.link_gc_sections = dead_strip;
const check = exe.checkObject(.macho);
@@ -52,8 +52,12 @@ fn testUnwindInfo(
test_step.dependOn(&run_cmd.step);
}
-fn createScenario(b: *Builder, mode: std.builtin.Mode, target: std.zig.CrossTarget) *LibExeObjectStep {
- const exe = b.addExecutable("test", null);
+fn createScenario(b: *Builder, optimize: std.builtin.OptimizeMode, target: std.zig.CrossTarget) *LibExeObjectStep {
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ .target = target,
+ });
b.default_step.dependOn(&exe.step);
exe.addIncludePath(".");
exe.addCSourceFiles(&[_][]const u8{
@@ -61,8 +65,6 @@ fn createScenario(b: *Builder, mode: std.builtin.Mode, target: std.zig.CrossTarg
"simple_string.cpp",
"simple_string_owner.cpp",
}, &[0][]const u8{});
- exe.setBuildMode(mode);
- exe.setTarget(target);
exe.linkLibCpp();
return exe;
}
diff --git a/test/link/macho/uuid/build.zig b/test/link/macho/uuid/build.zig
index 314febdb20..86ff99e8b1 100644
--- a/test/link/macho/uuid/build.zig
+++ b/test/link/macho/uuid/build.zig
@@ -29,21 +29,21 @@ pub fn build(b: *Builder) void {
fn testUuid(
b: *Builder,
test_step: *std.build.Step,
- mode: std.builtin.Mode,
+ optimize: std.builtin.OptimizeMode,
target: std.zig.CrossTarget,
comptime exp: []const u8,
) void {
// The calculated UUID value is independent of debug info and so it should
// stay the same across builds.
{
- const dylib = simpleDylib(b, mode, target);
+ const dylib = simpleDylib(b, optimize, target);
const check_dylib = dylib.checkObject(.macho);
check_dylib.checkStart("cmd UUID");
check_dylib.checkNext("uuid " ++ exp);
test_step.dependOn(&check_dylib.step);
}
{
- const dylib = simpleDylib(b, mode, target);
+ const dylib = simpleDylib(b, optimize, target);
dylib.strip = true;
const check_dylib = dylib.checkObject(.macho);
check_dylib.checkStart("cmd UUID");
@@ -52,10 +52,13 @@ fn testUuid(
}
}
-fn simpleDylib(b: *Builder, mode: std.builtin.Mode, target: std.zig.CrossTarget) *LibExeObjectStep {
- const dylib = b.addSharedLibrary("test", null, b.version(1, 0, 0));
- dylib.setTarget(target);
- dylib.setBuildMode(mode);
+fn simpleDylib(b: *Builder, optimize: std.builtin.OptimizeMode, target: std.zig.CrossTarget) *LibExeObjectStep {
+ const dylib = b.addSharedLibrary(.{
+ .name = "test",
+ .version = .{ .major = 1, .minor = 0 },
+ .optimize = optimize,
+ .target = target,
+ });
dylib.addCSourceFile("test.c", &.{});
dylib.linkLibC();
return dylib;
diff --git a/test/link/macho/weak_framework/build.zig b/test/link/macho/weak_framework/build.zig
index 44675a15f8..f8460c4e82 100644
--- a/test/link/macho/weak_framework/build.zig
+++ b/test/link/macho/weak_framework/build.zig
@@ -3,14 +3,16 @@ const Builder = std.build.Builder;
const LibExeObjectStep = std.build.LibExeObjStep;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const test_step = b.step("test", "Test the program");
test_step.dependOn(b.getInstallStep());
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ });
exe.addCSourceFile("main.c", &[0][]const u8{});
- exe.setBuildMode(mode);
exe.linkLibC();
exe.linkFrameworkWeak("Cocoa");
diff --git a/test/link/macho/weak_library/build.zig b/test/link/macho/weak_library/build.zig
index 79f67bd7df..229d965e48 100644
--- a/test/link/macho/weak_library/build.zig
+++ b/test/link/macho/weak_library/build.zig
@@ -3,23 +3,28 @@ const Builder = std.build.Builder;
const LibExeObjectStep = std.build.LibExeObjStep;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test the program");
test_step.dependOn(b.getInstallStep());
- const dylib = b.addSharedLibrary("a", null, b.version(1, 0, 0));
- dylib.setTarget(target);
- dylib.setBuildMode(mode);
+ const dylib = b.addSharedLibrary(.{
+ .name = "a",
+ .version = .{ .major = 1, .minor = 0, .patch = 0 },
+ .target = target,
+ .optimize = optimize,
+ });
dylib.addCSourceFile("a.c", &.{});
dylib.linkLibC();
dylib.install();
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .target = target,
+ .optimize = optimize,
+ });
exe.addCSourceFile("main.c", &[0][]const u8{});
- exe.setTarget(target);
- exe.setBuildMode(mode);
exe.linkLibC();
exe.linkSystemLibraryWeak("a");
exe.addLibraryPath(b.pathFromRoot("zig-out/lib"));
diff --git a/test/link/static_lib_as_system_lib/build.zig b/test/link/static_lib_as_system_lib/build.zig
index f39f3fac2a..895cdcf316 100644
--- a/test/link/static_lib_as_system_lib/build.zig
+++ b/test/link/static_lib_as_system_lib/build.zig
@@ -2,16 +2,23 @@ const std = @import("std");
const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
+ const target = b.standardTargetOptions(.{});
- const lib_a = b.addStaticLibrary("a", null);
+ const lib_a = b.addStaticLibrary(.{
+ .name = "a",
+ .optimize = optimize,
+ .target = target,
+ });
lib_a.addCSourceFile("a.c", &[_][]const u8{});
- lib_a.setBuildMode(mode);
lib_a.addIncludePath(".");
lib_a.install();
- const test_exe = b.addTest("main.zig");
- test_exe.setBuildMode(mode);
+ const test_exe = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
test_exe.linkSystemLibrary("a"); // force linking liba.a as -la
test_exe.addSystemIncludePath(".");
const search_path = std.fs.path.join(b.allocator, &[_][]const u8{ b.install_path, "lib" }) catch unreachable;
diff --git a/test/link/wasm/archive/build.zig b/test/link/wasm/archive/build.zig
index 7efa88999a..7401ba22dc 100644
--- a/test/link/wasm/archive/build.zig
+++ b/test/link/wasm/archive/build.zig
@@ -2,16 +2,17 @@ const std = @import("std");
const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
-
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
// The code in question will pull-in compiler-rt,
// and therefore link with its archive file.
- const lib = b.addSharedLibrary("main", "main.zig", .unversioned);
- lib.setBuildMode(mode);
- lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const lib = b.addSharedLibrary(.{
+ .name = "main",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = b.standardOptimizeOption(.{}),
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ });
lib.use_llvm = false;
lib.use_lld = false;
lib.strip = false;
diff --git a/test/link/wasm/basic-features/build.zig b/test/link/wasm/basic-features/build.zig
index 2c565f9263..69e88aefae 100644
--- a/test/link/wasm/basic-features/build.zig
+++ b/test/link/wasm/basic-features/build.zig
@@ -1,14 +1,18 @@
const std = @import("std");
pub fn build(b: *std.build.Builder) void {
- const mode = b.standardReleaseOptions();
-
// Library with explicitly set cpu features
- const lib = b.addSharedLibrary("lib", "main.zig", .unversioned);
- lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
- lib.target.cpu_model = .{ .explicit = &std.Target.wasm.cpu.mvp };
- lib.target.cpu_features_add.addFeature(0); // index 0 == atomics (see std.Target.wasm.Features)
- lib.setBuildMode(mode);
+ const lib = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = b.standardOptimizeOption(.{}),
+ .target = .{
+ .cpu_arch = .wasm32,
+ .cpu_model = .{ .explicit = &std.Target.wasm.cpu.mvp },
+ .cpu_features_add = std.Target.wasm.featureSet(&.{.atomics}),
+ .os_tag = .freestanding,
+ },
+ });
lib.use_llvm = false;
lib.use_lld = false;
diff --git a/test/link/wasm/bss/build.zig b/test/link/wasm/bss/build.zig
index e234a3f402..6b29fd0dc3 100644
--- a/test/link/wasm/bss/build.zig
+++ b/test/link/wasm/bss/build.zig
@@ -2,14 +2,15 @@ const std = @import("std");
const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
-
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const lib = b.addSharedLibrary("lib", "lib.zig", .unversioned);
- lib.setBuildMode(mode);
- lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const lib = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib.zig" },
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
lib.use_llvm = false;
lib.use_lld = false;
lib.strip = false;
diff --git a/test/link/wasm/export-data/build.zig b/test/link/wasm/export-data/build.zig
index 283566dab3..8eab283ec2 100644
--- a/test/link/wasm/export-data/build.zig
+++ b/test/link/wasm/export-data/build.zig
@@ -5,9 +5,12 @@ pub fn build(b: *Builder) void {
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const lib = b.addSharedLibrary("lib", "lib.zig", .unversioned);
- lib.setBuildMode(.ReleaseSafe); // to make the output deterministic in address positions
- lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const lib = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib.zig" },
+ .optimize = .ReleaseSafe, // to make the output deterministic in address positions
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ });
lib.use_lld = false;
lib.export_symbol_names = &.{ "foo", "bar" };
lib.global_base = 0; // put data section at address 0 to make data symbols easier to parse
diff --git a/test/link/wasm/export/build.zig b/test/link/wasm/export/build.zig
index 181e77e296..2b9a91d728 100644
--- a/test/link/wasm/export/build.zig
+++ b/test/link/wasm/export/build.zig
@@ -1,24 +1,33 @@
const std = @import("std");
pub fn build(b: *std.build.Builder) void {
- const mode = b.standardReleaseOptions();
-
- const no_export = b.addSharedLibrary("no-export", "main.zig", .unversioned);
- no_export.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
- no_export.setBuildMode(mode);
+ const optimize = b.standardOptimizeOption(.{});
+
+ const no_export = b.addSharedLibrary(.{
+ .name = "no-export",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ });
no_export.use_llvm = false;
no_export.use_lld = false;
- const dynamic_export = b.addSharedLibrary("dynamic", "main.zig", .unversioned);
- dynamic_export.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
- dynamic_export.setBuildMode(mode);
+ const dynamic_export = b.addSharedLibrary(.{
+ .name = "dynamic",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ });
dynamic_export.rdynamic = true;
dynamic_export.use_llvm = false;
dynamic_export.use_lld = false;
- const force_export = b.addSharedLibrary("force", "main.zig", .unversioned);
- force_export.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
- force_export.setBuildMode(mode);
+ const force_export = b.addSharedLibrary(.{
+ .name = "force",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ });
force_export.export_symbol_names = &.{"foo"};
force_export.use_llvm = false;
force_export.use_lld = false;
diff --git a/test/link/wasm/extern-mangle/build.zig b/test/link/wasm/extern-mangle/build.zig
index ae46117f18..71bb986dff 100644
--- a/test/link/wasm/extern-mangle/build.zig
+++ b/test/link/wasm/extern-mangle/build.zig
@@ -2,14 +2,15 @@ const std = @import("std");
const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
-
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const lib = b.addSharedLibrary("lib", "lib.zig", .unversioned);
- lib.setBuildMode(mode);
- lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const lib = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib.zig" },
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
lib.import_symbols = true; // import `a` and `b`
lib.rdynamic = true; // export `foo`
lib.install();
diff --git a/test/link/wasm/extern/build.zig b/test/link/wasm/extern/build.zig
index 88cce88d98..800c76a31c 100644
--- a/test/link/wasm/extern/build.zig
+++ b/test/link/wasm/extern/build.zig
@@ -1,10 +1,12 @@
const std = @import("std");
pub fn build(b: *std.build.Builder) void {
- const mode = b.standardReleaseOptions();
- const exe = b.addExecutable("extern", "main.zig");
- exe.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .wasi });
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "extern",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = b.standardOptimizeOption(.{}),
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .wasi },
+ });
exe.addCSourceFile("foo.c", &.{});
exe.use_llvm = false;
exe.use_lld = false;
diff --git a/test/link/wasm/function-table/build.zig b/test/link/wasm/function-table/build.zig
index f7572bd6b1..804aaf0b09 100644
--- a/test/link/wasm/function-table/build.zig
+++ b/test/link/wasm/function-table/build.zig
@@ -2,28 +2,37 @@ const std = @import("std");
const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const import_table = b.addSharedLibrary("lib", "lib.zig", .unversioned);
- import_table.setBuildMode(mode);
- import_table.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const import_table = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib.zig" },
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ .optimize = optimize,
+ });
import_table.use_llvm = false;
import_table.use_lld = false;
import_table.import_table = true;
- const export_table = b.addSharedLibrary("lib", "lib.zig", .unversioned);
- export_table.setBuildMode(mode);
- export_table.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const export_table = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib.zig" },
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ .optimize = optimize,
+ });
export_table.use_llvm = false;
export_table.use_lld = false;
export_table.export_table = true;
- const regular_table = b.addSharedLibrary("lib", "lib.zig", .unversioned);
- regular_table.setBuildMode(mode);
- regular_table.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const regular_table = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib.zig" },
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ .optimize = optimize,
+ });
regular_table.use_llvm = false;
regular_table.use_lld = false;
diff --git a/test/link/wasm/infer-features/build.zig b/test/link/wasm/infer-features/build.zig
index b50caf7264..147fb55fda 100644
--- a/test/link/wasm/infer-features/build.zig
+++ b/test/link/wasm/infer-features/build.zig
@@ -1,21 +1,32 @@
const std = @import("std");
pub fn build(b: *std.build.Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
// Wasm Object file which we will use to infer the features from
- const c_obj = b.addObject("c_obj", null);
- c_obj.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
- c_obj.target.cpu_model = .{ .explicit = &std.Target.wasm.cpu.bleeding_edge };
+ const c_obj = b.addObject(.{
+ .name = "c_obj",
+ .optimize = optimize,
+ .target = .{
+ .cpu_arch = .wasm32,
+ .cpu_model = .{ .explicit = &std.Target.wasm.cpu.bleeding_edge },
+ .os_tag = .freestanding,
+ },
+ });
c_obj.addCSourceFile("foo.c", &.{});
- c_obj.setBuildMode(mode);
// Wasm library that doesn't have any features specified. This will
// infer its featureset from other linked object files.
- const lib = b.addSharedLibrary("lib", "main.zig", .unversioned);
- lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
- lib.target.cpu_model = .{ .explicit = &std.Target.wasm.cpu.mvp };
- lib.setBuildMode(mode);
+ const lib = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = .{
+ .cpu_arch = .wasm32,
+ .cpu_model = .{ .explicit = &std.Target.wasm.cpu.mvp },
+ .os_tag = .freestanding,
+ },
+ });
lib.use_llvm = false;
lib.use_lld = false;
lib.addObject(c_obj);
diff --git a/test/link/wasm/producers/build.zig b/test/link/wasm/producers/build.zig
index 7557b4fa41..57ee6acd18 100644
--- a/test/link/wasm/producers/build.zig
+++ b/test/link/wasm/producers/build.zig
@@ -3,14 +3,15 @@ const builtin = @import("builtin");
const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
-
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const lib = b.addSharedLibrary("lib", "lib.zig", .unversioned);
- lib.setBuildMode(mode);
- lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const lib = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib.zig" },
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
lib.use_llvm = false;
lib.use_lld = false;
lib.strip = false;
diff --git a/test/link/wasm/segments/build.zig b/test/link/wasm/segments/build.zig
index 1b2cdf87ab..8f7d9e0583 100644
--- a/test/link/wasm/segments/build.zig
+++ b/test/link/wasm/segments/build.zig
@@ -2,14 +2,15 @@ const std = @import("std");
const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
-
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const lib = b.addSharedLibrary("lib", "lib.zig", .unversioned);
- lib.setBuildMode(mode);
- lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const lib = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib.zig" },
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
lib.use_llvm = false;
lib.use_lld = false;
lib.strip = false;
diff --git a/test/link/wasm/stack_pointer/build.zig b/test/link/wasm/stack_pointer/build.zig
index 5b67c3caa3..42971c607d 100644
--- a/test/link/wasm/stack_pointer/build.zig
+++ b/test/link/wasm/stack_pointer/build.zig
@@ -2,14 +2,15 @@ const std = @import("std");
const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
-
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const lib = b.addSharedLibrary("lib", "lib.zig", .unversioned);
- lib.setBuildMode(mode);
- lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const lib = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib.zig" },
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
lib.use_llvm = false;
lib.use_lld = false;
lib.strip = false;
diff --git a/test/link/wasm/type/build.zig b/test/link/wasm/type/build.zig
index fbae6dc741..7fa3849083 100644
--- a/test/link/wasm/type/build.zig
+++ b/test/link/wasm/type/build.zig
@@ -2,14 +2,15 @@ const std = @import("std");
const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
-
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const lib = b.addSharedLibrary("lib", "lib.zig", .unversioned);
- lib.setBuildMode(mode);
- lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const lib = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib.zig" },
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
lib.use_llvm = false;
lib.use_lld = false;
lib.strip = false;
diff --git a/test/src/compare_output.zig b/test/src/compare_output.zig
index 538e4023f0..a885faaadf 100644
--- a/test/src/compare_output.zig
+++ b/test/src/compare_output.zig
@@ -6,14 +6,14 @@ const ArrayList = std.ArrayList;
const fmt = std.fmt;
const mem = std.mem;
const fs = std.fs;
-const Mode = std.builtin.Mode;
+const OptimizeMode = std.builtin.OptimizeMode;
pub const CompareOutputContext = struct {
b: *build.Builder,
step: *build.Step,
test_index: usize,
test_filter: ?[]const u8,
- modes: []const Mode,
+ optimize_modes: []const OptimizeMode,
const Special = enum {
None,
@@ -102,7 +102,11 @@ pub const CompareOutputContext = struct {
if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
}
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .target = .{},
+ .optimize = .Debug,
+ });
exe.addAssemblyFileSource(write_src.getFileSource(case.sources.items[0].filename).?);
const run = exe.run();
@@ -113,19 +117,23 @@ pub const CompareOutputContext = struct {
self.step.dependOn(&run.step);
},
Special.None => {
- for (self.modes) |mode| {
+ for (self.optimize_modes) |optimize| {
const annotated_case_name = fmt.allocPrint(self.b.allocator, "{s} {s} ({s})", .{
"compare-output",
case.name,
- @tagName(mode),
+ @tagName(optimize),
}) catch unreachable;
if (self.test_filter) |filter| {
if (mem.indexOf(u8, annotated_case_name, filter) == null) continue;
}
const basename = case.sources.items[0].filename;
- const exe = b.addExecutableSource("test", write_src.getFileSource(basename).?);
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .root_source_file = write_src.getFileSource(basename).?,
+ .optimize = optimize,
+ .target = .{},
+ });
if (case.link_libc) {
exe.linkSystemLibrary("c");
}
@@ -139,13 +147,20 @@ pub const CompareOutputContext = struct {
}
},
Special.RuntimeSafety => {
+ // TODO iterate over self.optimize_modes and test this in both
+ // debug and release safe mode
const annotated_case_name = fmt.allocPrint(self.b.allocator, "safety {s}", .{case.name}) catch unreachable;
if (self.test_filter) |filter| {
if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
}
const basename = case.sources.items[0].filename;
- const exe = b.addExecutableSource("test", write_src.getFileSource(basename).?);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .root_source_file = write_src.getFileSource(basename).?,
+ .target = .{},
+ .optimize = .Debug,
+ });
if (case.link_libc) {
exe.linkSystemLibrary("c");
}
diff --git a/test/src/run_translated_c.zig b/test/src/run_translated_c.zig
index 0204272f97..0c54655b32 100644
--- a/test/src/run_translated_c.zig
+++ b/test/src/run_translated_c.zig
@@ -85,11 +85,14 @@ pub const RunTranslatedCContext = struct {
for (case.sources.items) |src_file| {
write_src.add(src_file.filename, src_file.source);
}
- const translate_c = b.addTranslateC(write_src.getFileSource(case.sources.items[0].filename).?);
+ const translate_c = b.addTranslateC(.{
+ .source_file = write_src.getFileSource(case.sources.items[0].filename).?,
+ .target = .{},
+ .optimize = .Debug,
+ });
translate_c.step.name = b.fmt("{s} translate-c", .{annotated_case_name});
- const exe = translate_c.addExecutable();
- exe.setTarget(self.target);
+ const exe = translate_c.addExecutable(.{});
exe.step.name = b.fmt("{s} build-exe", .{annotated_case_name});
exe.linkLibC();
const run = exe.run();
diff --git a/test/src/translate_c.zig b/test/src/translate_c.zig
index f0f6f30c57..ad5fbb7091 100644
--- a/test/src/translate_c.zig
+++ b/test/src/translate_c.zig
@@ -108,10 +108,13 @@ pub const TranslateCContext = struct {
write_src.add(src_file.filename, src_file.source);
}
- const translate_c = b.addTranslateC(write_src.getFileSource(case.sources.items[0].filename).?);
+ const translate_c = b.addTranslateC(.{
+ .source_file = write_src.getFileSource(case.sources.items[0].filename).?,
+ .target = case.target,
+ .optimize = .Debug,
+ });
translate_c.step.name = annotated_case_name;
- translate_c.setTarget(case.target);
const check_file = translate_c.addCheckFile(case.expected_lines.items);
diff --git a/test/standalone/brace_expansion/build.zig b/test/standalone/brace_expansion/build.zig
index 64f3c08583..89250ff96f 100644
--- a/test/standalone/brace_expansion/build.zig
+++ b/test/standalone/brace_expansion/build.zig
@@ -1,8 +1,10 @@
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const main = b.addTest("main.zig");
- main.setBuildMode(b.standardReleaseOptions());
+ const main = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
const test_step = b.step("test", "Test it");
test_step.dependOn(&main.step);
diff --git a/test/standalone/c_compiler/build.zig b/test/standalone/c_compiler/build.zig
index 240d535182..6959f810d6 100644
--- a/test/standalone/c_compiler/build.zig
+++ b/test/standalone/c_compiler/build.zig
@@ -12,23 +12,27 @@ fn isRunnableTarget(t: CrossTarget) bool {
}
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const target = b.standardTargetOptions(.{});
const test_step = b.step("test", "Test the program");
- const exe_c = b.addExecutable("test_c", null);
+ const exe_c = b.addExecutable(.{
+ .name = "test_c",
+ .optimize = optimize,
+ .target = target,
+ });
b.default_step.dependOn(&exe_c.step);
exe_c.addCSourceFile("test.c", &[0][]const u8{});
- exe_c.setBuildMode(mode);
- exe_c.setTarget(target);
exe_c.linkLibC();
- const exe_cpp = b.addExecutable("test_cpp", null);
+ const exe_cpp = b.addExecutable(.{
+ .name = "test_cpp",
+ .optimize = optimize,
+ .target = target,
+ });
b.default_step.dependOn(&exe_cpp.step);
exe_cpp.addCSourceFile("test.cpp", &[0][]const u8{});
- exe_cpp.setBuildMode(mode);
- exe_cpp.setTarget(target);
exe_cpp.linkLibCpp();
switch (target.getOsTag()) {
diff --git a/test/standalone/emit_asm_and_bin/build.zig b/test/standalone/emit_asm_and_bin/build.zig
index 43b7bb791d..b8cbd5fc17 100644
--- a/test/standalone/emit_asm_and_bin/build.zig
+++ b/test/standalone/emit_asm_and_bin/build.zig
@@ -1,8 +1,10 @@
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const main = b.addTest("main.zig");
- main.setBuildMode(b.standardReleaseOptions());
+ const main = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
main.emit_asm = .{ .emit_to = b.pathFromRoot("main.s") };
main.emit_bin = .{ .emit_to = b.pathFromRoot("main") };
diff --git a/test/standalone/empty_env/build.zig b/test/standalone/empty_env/build.zig
index 2a184dcd2e..ecdd74aa90 100644
--- a/test/standalone/empty_env/build.zig
+++ b/test/standalone/empty_env/build.zig
@@ -1,8 +1,11 @@
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const main = b.addExecutable("main", "main.zig");
- main.setBuildMode(b.standardReleaseOptions());
+ const main = b.addExecutable(.{
+ .name = "main",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
const run = main.run();
run.clearEnvironment();
diff --git a/test/standalone/global_linkage/build.zig b/test/standalone/global_linkage/build.zig
index e13c0e8873..3064c6cc08 100644
--- a/test/standalone/global_linkage/build.zig
+++ b/test/standalone/global_linkage/build.zig
@@ -1,16 +1,26 @@
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
- const obj1 = b.addStaticLibrary("obj1", "obj1.zig");
- obj1.setBuildMode(mode);
+ const obj1 = b.addStaticLibrary(.{
+ .name = "obj1",
+ .root_source_file = .{ .path = "obj1.zig" },
+ .optimize = optimize,
+ .target = .{},
+ });
- const obj2 = b.addStaticLibrary("obj2", "obj2.zig");
- obj2.setBuildMode(mode);
+ const obj2 = b.addStaticLibrary(.{
+ .name = "obj2",
+ .root_source_file = .{ .path = "obj2.zig" },
+ .optimize = optimize,
+ .target = .{},
+ });
- const main = b.addTest("main.zig");
- main.setBuildMode(mode);
+ const main = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ });
main.linkLibrary(obj1);
main.linkLibrary(obj2);
diff --git a/test/standalone/install_raw_hex/build.zig b/test/standalone/install_raw_hex/build.zig
index 0038c4c298..94016b1d74 100644
--- a/test/standalone/install_raw_hex/build.zig
+++ b/test/standalone/install_raw_hex/build.zig
@@ -10,11 +10,14 @@ pub fn build(b: *std.build.Builder) void {
.abi = .gnueabihf,
};
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
- const elf = b.addExecutable("zig-nrf52-blink.elf", "main.zig");
- elf.setTarget(target);
- elf.setBuildMode(mode);
+ const elf = b.addExecutable(.{
+ .name = "zig-nrf52-blink.elf",
+ .root_source_file = .{ .path = "main.zig" },
+ .target = target,
+ .optimize = optimize,
+ });
const test_step = b.step("test", "Test the program");
b.default_step.dependOn(test_step);
diff --git a/test/standalone/issue_11595/build.zig b/test/standalone/issue_11595/build.zig
index d636f63ebc..b0310947f6 100644
--- a/test/standalone/issue_11595/build.zig
+++ b/test/standalone/issue_11595/build.zig
@@ -12,11 +12,15 @@ fn isRunnableTarget(t: CrossTarget) bool {
}
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const target = b.standardTargetOptions(.{});
- const exe = b.addExecutable("zigtest", "main.zig");
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "zigtest",
+ .root_source_file = .{ .path = "main.zig" },
+ .target = target,
+ .optimize = optimize,
+ });
exe.install();
const c_sources = [_][]const u8{
@@ -39,7 +43,6 @@ pub fn build(b: *Builder) void {
exe.defineCMacro("QUX", "\"Q\" \"UX\"");
exe.defineCMacro("QUUX", "\"QU\\\"UX\"");
- exe.setTarget(target);
b.default_step.dependOn(&exe.step);
const test_step = b.step("test", "Test the program");
diff --git a/test/standalone/issue_12588/build.zig b/test/standalone/issue_12588/build.zig
index 02fa5e1680..27a23d5a76 100644
--- a/test/standalone/issue_12588/build.zig
+++ b/test/standalone/issue_12588/build.zig
@@ -2,12 +2,15 @@ const std = @import("std");
const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const target = b.standardTargetOptions(.{});
- const obj = b.addObject("main", "main.zig");
- obj.setBuildMode(mode);
- obj.setTarget(target);
+ const obj = b.addObject(.{
+ .name = "main",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
obj.emit_llvm_ir = .{ .emit_to = b.pathFromRoot("main.ll") };
obj.emit_llvm_bc = .{ .emit_to = b.pathFromRoot("main.bc") };
obj.emit_bin = .no_emit;
diff --git a/test/standalone/issue_12706/build.zig b/test/standalone/issue_12706/build.zig
index d84160a4f4..e3c40d34c6 100644
--- a/test/standalone/issue_12706/build.zig
+++ b/test/standalone/issue_12706/build.zig
@@ -12,11 +12,15 @@ fn isRunnableTarget(t: CrossTarget) bool {
}
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const target = b.standardTargetOptions(.{});
- const exe = b.addExecutable("main", "main.zig");
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "main",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
exe.install();
const c_sources = [_][]const u8{
@@ -26,7 +30,6 @@ pub fn build(b: *Builder) void {
exe.addCSourceFiles(&c_sources, &.{});
exe.linkLibC();
- exe.setTarget(target);
b.default_step.dependOn(&exe.step);
const test_step = b.step("test", "Test the program");
diff --git a/test/standalone/issue_13030/build.zig b/test/standalone/issue_13030/build.zig
index 8c05e47cf6..510c7610d9 100644
--- a/test/standalone/issue_13030/build.zig
+++ b/test/standalone/issue_13030/build.zig
@@ -4,13 +4,15 @@ const Builder = std.build.Builder;
const CrossTarget = std.zig.CrossTarget;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const target = b.standardTargetOptions(.{});
- const obj = b.addObject("main", "main.zig");
- obj.setBuildMode(mode);
-
- obj.setTarget(target);
+ const obj = b.addObject(.{
+ .name = "main",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
b.default_step.dependOn(&obj.step);
const test_step = b.step("test", "Test the program");
diff --git a/test/standalone/issue_339/build.zig b/test/standalone/issue_339/build.zig
index 733b3729c1..34c555cfdb 100644
--- a/test/standalone/issue_339/build.zig
+++ b/test/standalone/issue_339/build.zig
@@ -1,7 +1,12 @@
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const obj = b.addObject("test", "test.zig");
+ const obj = b.addObject(.{
+ .name = "test",
+ .root_source_file = .{ .path = "test.zig" },
+ .target = b.standardTargetOptions(.{}),
+ .optimize = b.standardOptimizeOption(.{}),
+ });
const test_step = b.step("test", "Test the program");
test_step.dependOn(&obj.step);
diff --git a/test/standalone/issue_5825/build.zig b/test/standalone/issue_5825/build.zig
index 8f43ae1358..8d7acc3e9a 100644
--- a/test/standalone/issue_5825/build.zig
+++ b/test/standalone/issue_5825/build.zig
@@ -6,17 +6,22 @@ pub fn build(b: *Builder) void {
.os_tag = .windows,
.abi = .msvc,
};
- const mode = b.standardReleaseOptions();
- const obj = b.addObject("issue_5825", "main.zig");
- obj.setTarget(target);
- obj.setBuildMode(mode);
+ const optimize = b.standardOptimizeOption(.{});
+ const obj = b.addObject(.{
+ .name = "issue_5825",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
- const exe = b.addExecutable("issue_5825", null);
+ const exe = b.addExecutable(.{
+ .name = "issue_5825",
+ .optimize = optimize,
+ .target = target,
+ });
exe.subsystem = .Console;
exe.linkSystemLibrary("kernel32");
exe.linkSystemLibrary("ntdll");
- exe.setTarget(target);
- exe.setBuildMode(mode);
exe.addObject(obj);
const test_step = b.step("test", "Test the program");
diff --git a/test/standalone/issue_7030/build.zig b/test/standalone/issue_7030/build.zig
index ab3677370a..41a646abe8 100644
--- a/test/standalone/issue_7030/build.zig
+++ b/test/standalone/issue_7030/build.zig
@@ -1,10 +1,13 @@
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const exe = b.addExecutable("issue_7030", "main.zig");
- exe.setTarget(.{
- .cpu_arch = .wasm32,
- .os_tag = .freestanding,
+ const exe = b.addExecutable(.{
+ .name = "issue_7030",
+ .root_source_file = .{ .path = "main.zig" },
+ .target = .{
+ .cpu_arch = .wasm32,
+ .os_tag = .freestanding,
+ },
});
exe.install();
b.default_step.dependOn(&exe.step);
diff --git a/test/standalone/issue_794/build.zig b/test/standalone/issue_794/build.zig
index ece74f0e98..59ff7ea9ab 100644
--- a/test/standalone/issue_794/build.zig
+++ b/test/standalone/issue_794/build.zig
@@ -1,7 +1,9 @@
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const test_artifact = b.addTest("main.zig");
+ const test_artifact = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ });
test_artifact.addIncludePath("a_directory");
b.default_step.dependOn(&test_artifact.step);
diff --git a/test/standalone/issue_8550/build.zig b/test/standalone/issue_8550/build.zig
index 03e8d04bfb..233f701661 100644
--- a/test/standalone/issue_8550/build.zig
+++ b/test/standalone/issue_8550/build.zig
@@ -8,12 +8,15 @@ pub fn build(b: *std.build.Builder) !void {
.explicit = &std.Target.arm.cpu.arm1176jz_s,
},
};
- const mode = b.standardReleaseOptions();
- const kernel = b.addExecutable("kernel", "./main.zig");
+ const optimize = b.standardOptimizeOption(.{});
+ const kernel = b.addExecutable(.{
+ .name = "kernel",
+ .root_source_file = .{ .path = "./main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
kernel.addObjectFile("./boot.S");
kernel.setLinkerScriptPath(.{ .path = "./linker.ld" });
- kernel.setBuildMode(mode);
- kernel.setTarget(target);
kernel.install();
const test_step = b.step("test", "Test it");
diff --git a/test/standalone/issue_9812/build.zig b/test/standalone/issue_9812/build.zig
index 677c589a84..50eefe846c 100644
--- a/test/standalone/issue_9812/build.zig
+++ b/test/standalone/issue_9812/build.zig
@@ -1,9 +1,11 @@
const std = @import("std");
pub fn build(b: *std.build.Builder) !void {
- const mode = b.standardReleaseOptions();
- const zip_add = b.addTest("main.zig");
- zip_add.setBuildMode(mode);
+ const optimize = b.standardOptimizeOption(.{});
+ const zip_add = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ });
zip_add.addCSourceFile("vendor/kuba-zip/zip.c", &[_][]const u8{
"-std=c99",
"-fno-sanitize=undefined",
diff --git a/test/standalone/load_dynamic_library/build.zig b/test/standalone/load_dynamic_library/build.zig
index 109c742c6f..1aca02bc71 100644
--- a/test/standalone/load_dynamic_library/build.zig
+++ b/test/standalone/load_dynamic_library/build.zig
@@ -1,13 +1,23 @@
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const opts = b.standardReleaseOptions();
+ const target = b.standardTargetOptions(.{});
+ const optimize = b.standardOptimizeOption(.{});
- const lib = b.addSharedLibrary("add", "add.zig", b.version(1, 0, 0));
- lib.setBuildMode(opts);
+ const lib = b.addSharedLibrary(.{
+ .name = "add",
+ .root_source_file = .{ .path = "add.zig" },
+ .version = .{ .major = 1, .minor = 0 },
+ .optimize = optimize,
+ .target = target,
+ });
- const main = b.addExecutable("main", "main.zig");
- main.setBuildMode(opts);
+ const main = b.addExecutable(.{
+ .name = "main",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
const run = main.run();
run.addArtifactArg(lib);
diff --git a/test/standalone/main_pkg_path/build.zig b/test/standalone/main_pkg_path/build.zig
index c4ac18f967..baee74052e 100644
--- a/test/standalone/main_pkg_path/build.zig
+++ b/test/standalone/main_pkg_path/build.zig
@@ -1,7 +1,9 @@
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const test_exe = b.addTest("a/test.zig");
+ const test_exe = b.addTest(.{
+ .root_source_file = .{ .path = "a/test.zig" },
+ });
test_exe.setMainPkgPath(".");
const test_step = b.step("test", "Test the program");
diff --git a/test/standalone/mix_c_files/build.zig b/test/standalone/mix_c_files/build.zig
index 68486ea18d..ad69f05ff6 100644
--- a/test/standalone/mix_c_files/build.zig
+++ b/test/standalone/mix_c_files/build.zig
@@ -12,14 +12,17 @@ fn isRunnableTarget(t: CrossTarget) bool {
}
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
const target = b.standardTargetOptions(.{});
- const exe = b.addExecutable("test", "main.zig");
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
exe.addCSourceFile("test.c", &[_][]const u8{"-std=c11"});
- exe.setBuildMode(mode);
exe.linkLibC();
- exe.setTarget(target);
b.default_step.dependOn(&exe.step);
const test_step = b.step("test", "Test the program");
diff --git a/test/standalone/mix_o_files/build.zig b/test/standalone/mix_o_files/build.zig
index d498e2e20a..de37265388 100644
--- a/test/standalone/mix_o_files/build.zig
+++ b/test/standalone/mix_o_files/build.zig
@@ -1,9 +1,19 @@
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const obj = b.addObject("base64", "base64.zig");
+ const optimize = b.standardOptimizeOption(.{});
- const exe = b.addExecutable("test", null);
+ const obj = b.addObject(.{
+ .name = "base64",
+ .root_source_file = .{ .path = "base64.zig" },
+ .optimize = optimize,
+ .target = .{},
+ });
+
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ });
exe.addCSourceFile("test.c", &[_][]const u8{"-std=c99"});
exe.addObject(obj);
exe.linkSystemLibrary("c");
diff --git a/test/standalone/options/build.zig b/test/standalone/options/build.zig
index 087aceff01..87a584a887 100644
--- a/test/standalone/options/build.zig
+++ b/test/standalone/options/build.zig
@@ -2,11 +2,13 @@ const std = @import("std");
pub fn build(b: *std.build.Builder) void {
const target = b.standardTargetOptions(.{});
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
- const main = b.addTest("src/main.zig");
- main.setTarget(target);
- main.setBuildMode(mode);
+ const main = b.addTest(.{
+ .root_source_file = .{ .path = "src/main.zig" },
+ .target = target,
+ .optimize = optimize,
+ });
const options = b.addOptions();
main.addOptions("build_options", options);
diff --git a/test/standalone/pie/build.zig b/test/standalone/pie/build.zig
index d008fd31c9..3f0b8b9f2f 100644
--- a/test/standalone/pie/build.zig
+++ b/test/standalone/pie/build.zig
@@ -1,8 +1,10 @@
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const main = b.addTest("main.zig");
- main.setBuildMode(b.standardReleaseOptions());
+ const main = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
main.pie = true;
const test_step = b.step("test", "Test the program");
diff --git a/test/standalone/pkg_import/build.zig b/test/standalone/pkg_import/build.zig
index 7529d106f9..8dcfaeded0 100644
--- a/test/standalone/pkg_import/build.zig
+++ b/test/standalone/pkg_import/build.zig
@@ -1,13 +1,14 @@
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const exe = b.addExecutable("test", "test.zig");
- exe.addPackagePath("my_pkg", "pkg.zig");
+ const optimize = b.standardOptimizeOption(.{});
- // This is duplicated to test that you are allowed to call
- // b.standardReleaseOptions() twice.
- exe.setBuildMode(b.standardReleaseOptions());
- exe.setBuildMode(b.standardReleaseOptions());
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .root_source_file = .{ .path = "test.zig" },
+ .optimize = optimize,
+ });
+ exe.addPackagePath("my_pkg", "pkg.zig");
const run = exe.run();
diff --git a/test/standalone/shared_library/build.zig b/test/standalone/shared_library/build.zig
index 18188311c7..135be095bc 100644
--- a/test/standalone/shared_library/build.zig
+++ b/test/standalone/shared_library/build.zig
@@ -1,12 +1,21 @@
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
+ const optimize = b.standardOptimizeOption(.{});
const target = b.standardTargetOptions(.{});
- const lib = b.addSharedLibrary("mathtest", "mathtest.zig", b.version(1, 0, 0));
- lib.setTarget(target);
+ const lib = b.addSharedLibrary(.{
+ .name = "mathtest",
+ .root_source_file = .{ .path = "mathtest.zig" },
+ .version = .{ .major = 1, .minor = 0 },
+ .target = target,
+ .optimize = optimize,
+ });
- const exe = b.addExecutable("test", null);
- exe.setTarget(target);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .target = target,
+ .optimize = optimize,
+ });
exe.addCSourceFile("test.c", &[_][]const u8{"-std=c99"});
exe.linkLibrary(lib);
exe.linkSystemLibrary("c");
diff --git a/test/standalone/static_c_lib/build.zig b/test/standalone/static_c_lib/build.zig
index c64ae48dba..81b4349e20 100644
--- a/test/standalone/static_c_lib/build.zig
+++ b/test/standalone/static_c_lib/build.zig
@@ -1,15 +1,20 @@
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
- const foo = b.addStaticLibrary("foo", null);
+ const foo = b.addStaticLibrary(.{
+ .name = "foo",
+ .optimize = optimize,
+ .target = .{},
+ });
foo.addCSourceFile("foo.c", &[_][]const u8{});
- foo.setBuildMode(mode);
foo.addIncludePath(".");
- const test_exe = b.addTest("foo.zig");
- test_exe.setBuildMode(mode);
+ const test_exe = b.addTest(.{
+ .root_source_file = .{ .path = "foo.zig" },
+ .optimize = optimize,
+ });
test_exe.linkLibrary(foo);
test_exe.addIncludePath(".");
diff --git a/test/standalone/test_runner_path/build.zig b/test/standalone/test_runner_path/build.zig
index 738cac9783..9b02da50c1 100644
--- a/test/standalone/test_runner_path/build.zig
+++ b/test/standalone/test_runner_path/build.zig
@@ -1,7 +1,10 @@
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const test_exe = b.addTestExe("test", "test.zig");
+ const test_exe = b.addTest(.{
+ .root_source_file = .{ .path = "test.zig" },
+ .kind = .test_exe,
+ });
test_exe.test_runner = "test_runner.zig";
const test_run = test_exe.run();
diff --git a/test/standalone/use_alias/build.zig b/test/standalone/use_alias/build.zig
index da4e8bef4b..d2ca90f3ab 100644
--- a/test/standalone/use_alias/build.zig
+++ b/test/standalone/use_alias/build.zig
@@ -1,8 +1,10 @@
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const main = b.addTest("main.zig");
- main.setBuildMode(b.standardReleaseOptions());
+ const main = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
main.addIncludePath(".");
const test_step = b.step("test", "Test it");
diff --git a/test/standalone/windows_spawn/build.zig b/test/standalone/windows_spawn/build.zig
index 10a1132d3a..de58a602c3 100644
--- a/test/standalone/windows_spawn/build.zig
+++ b/test/standalone/windows_spawn/build.zig
@@ -1,13 +1,20 @@
const Builder = @import("std").build.Builder;
pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
- const hello = b.addExecutable("hello", "hello.zig");
- hello.setBuildMode(mode);
+ const hello = b.addExecutable(.{
+ .name = "hello",
+ .root_source_file = .{ .path = "hello.zig" },
+ .optimize = optimize,
+ });
+
+ const main = b.addExecutable(.{
+ .name = "main",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ });
- const main = b.addExecutable("main", "main.zig");
- main.setBuildMode(mode);
const run = main.run();
run.addArtifactArg(hello);
diff --git a/test/tests.zig b/test/tests.zig
index 8e972b9ba6..575550be02 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -8,7 +8,7 @@ const fs = std.fs;
const mem = std.mem;
const fmt = std.fmt;
const ArrayList = std.ArrayList;
-const Mode = std.builtin.Mode;
+const OptimizeMode = std.builtin.OptimizeMode;
const LibExeObjStep = build.LibExeObjStep;
const Allocator = mem.Allocator;
const ExecError = build.Builder.ExecError;
@@ -30,7 +30,7 @@ pub const CompareOutputContext = @import("src/compare_output.zig").CompareOutput
const TestTarget = struct {
target: CrossTarget = @as(CrossTarget, .{}),
- mode: std.builtin.Mode = .Debug,
+ optimize_mode: std.builtin.OptimizeMode = .Debug,
link_libc: bool = false,
single_threaded: bool = false,
disable_native: bool = false,
@@ -423,38 +423,38 @@ const test_targets = blk: {
// Do the release tests last because they take a long time
.{
- .mode = .ReleaseFast,
+ .optimize_mode = .ReleaseFast,
},
.{
.link_libc = true,
- .mode = .ReleaseFast,
+ .optimize_mode = .ReleaseFast,
},
.{
- .mode = .ReleaseFast,
+ .optimize_mode = .ReleaseFast,
.single_threaded = true,
},
.{
- .mode = .ReleaseSafe,
+ .optimize_mode = .ReleaseSafe,
},
.{
.link_libc = true,
- .mode = .ReleaseSafe,
+ .optimize_mode = .ReleaseSafe,
},
.{
- .mode = .ReleaseSafe,
+ .optimize_mode = .ReleaseSafe,
.single_threaded = true,
},
.{
- .mode = .ReleaseSmall,
+ .optimize_mode = .ReleaseSmall,
},
.{
.link_libc = true,
- .mode = .ReleaseSmall,
+ .optimize_mode = .ReleaseSmall,
},
.{
- .mode = .ReleaseSmall,
+ .optimize_mode = .ReleaseSmall,
.single_threaded = true,
},
};
@@ -462,14 +462,14 @@ const test_targets = blk: {
const max_stdout_size = 1 * 1024 * 1024; // 1 MB
-pub fn addCompareOutputTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step {
+pub fn addCompareOutputTests(b: *build.Builder, test_filter: ?[]const u8, optimize_modes: []const OptimizeMode) *build.Step {
const cases = b.allocator.create(CompareOutputContext) catch unreachable;
cases.* = CompareOutputContext{
.b = b,
.step = b.step("test-compare-output", "Run the compare output tests"),
.test_index = 0,
.test_filter = test_filter,
- .modes = modes,
+ .optimize_modes = optimize_modes,
};
compare_output.addCases(cases);
@@ -477,14 +477,14 @@ pub fn addCompareOutputTests(b: *build.Builder, test_filter: ?[]const u8, modes:
return cases.step;
}
-pub fn addStackTraceTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step {
+pub fn addStackTraceTests(b: *build.Builder, test_filter: ?[]const u8, optimize_modes: []const OptimizeMode) *build.Step {
const cases = b.allocator.create(StackTracesContext) catch unreachable;
cases.* = StackTracesContext{
.b = b,
.step = b.step("test-stack-traces", "Run the stack trace tests"),
.test_index = 0,
.test_filter = test_filter,
- .modes = modes,
+ .optimize_modes = optimize_modes,
};
stack_traces.addCases(cases);
@@ -495,7 +495,7 @@ pub fn addStackTraceTests(b: *build.Builder, test_filter: ?[]const u8, modes: []
pub fn addStandaloneTests(
b: *build.Builder,
test_filter: ?[]const u8,
- modes: []const Mode,
+ optimize_modes: []const OptimizeMode,
skip_non_native: bool,
enable_macos_sdk: bool,
target: std.zig.CrossTarget,
@@ -513,7 +513,7 @@ pub fn addStandaloneTests(
.step = b.step("test-standalone", "Run the standalone tests"),
.test_index = 0,
.test_filter = test_filter,
- .modes = modes,
+ .optimize_modes = optimize_modes,
.skip_non_native = skip_non_native,
.enable_macos_sdk = enable_macos_sdk,
.target = target,
@@ -534,7 +534,7 @@ pub fn addStandaloneTests(
pub fn addLinkTests(
b: *build.Builder,
test_filter: ?[]const u8,
- modes: []const Mode,
+ optimize_modes: []const OptimizeMode,
enable_macos_sdk: bool,
omit_stage2: bool,
enable_symlinks_windows: bool,
@@ -545,7 +545,7 @@ pub fn addLinkTests(
.step = b.step("test-link", "Run the linker tests"),
.test_index = 0,
.test_filter = test_filter,
- .modes = modes,
+ .optimize_modes = optimize_modes,
.skip_non_native = true,
.enable_macos_sdk = enable_macos_sdk,
.target = .{},
@@ -556,12 +556,17 @@ pub fn addLinkTests(
return cases.step;
}
-pub fn addCliTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step {
+pub fn addCliTests(b: *build.Builder, test_filter: ?[]const u8, optimize_modes: []const OptimizeMode) *build.Step {
_ = test_filter;
- _ = modes;
+ _ = optimize_modes;
const step = b.step("test-cli", "Test the command line interface");
- const exe = b.addExecutable("test-cli", "test/cli.zig");
+ const exe = b.addExecutable(.{
+ .name = "test-cli",
+ .root_source_file = .{ .path = "test/cli.zig" },
+ .target = .{},
+ .optimize = .Debug,
+ });
const run_cmd = exe.run();
run_cmd.addArgs(&[_][]const u8{
fs.realpathAlloc(b.allocator, b.zig_exe) catch unreachable,
@@ -572,14 +577,14 @@ pub fn addCliTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const M
return step;
}
-pub fn addAssembleAndLinkTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step {
+pub fn addAssembleAndLinkTests(b: *build.Builder, test_filter: ?[]const u8, optimize_modes: []const OptimizeMode) *build.Step {
const cases = b.allocator.create(CompareOutputContext) catch unreachable;
cases.* = CompareOutputContext{
.b = b,
.step = b.step("test-asm-link", "Run the assemble and link tests"),
.test_index = 0,
.test_filter = test_filter,
- .modes = modes,
+ .optimize_modes = optimize_modes,
};
assemble_and_link.addCases(cases);
@@ -640,7 +645,7 @@ pub fn addPkgTests(
root_src: []const u8,
name: []const u8,
desc: []const u8,
- modes: []const Mode,
+ optimize_modes: []const OptimizeMode,
skip_single_threaded: bool,
skip_non_native: bool,
skip_libc: bool,
@@ -677,8 +682,8 @@ pub fn addPkgTests(
else => if (skip_stage2) continue,
};
- const want_this_mode = for (modes) |m| {
- if (m == test_target.mode) break true;
+ const want_this_mode = for (optimize_modes) |m| {
+ if (m == test_target.optimize_mode) break true;
} else false;
if (!want_this_mode) continue;
@@ -691,21 +696,23 @@ pub fn addPkgTests(
const triple_prefix = test_target.target.zigTriple(b.allocator) catch unreachable;
- const these_tests = b.addTest(root_src);
+ const these_tests = b.addTest(.{
+ .root_source_file = .{ .path = root_src },
+ .optimize = test_target.optimize_mode,
+ .target = test_target.target,
+ });
const single_threaded_txt = if (test_target.single_threaded) "single" else "multi";
const backend_txt = if (test_target.backend) |backend| @tagName(backend) else "default";
these_tests.setNamePrefix(b.fmt("{s}-{s}-{s}-{s}-{s}-{s} ", .{
name,
triple_prefix,
- @tagName(test_target.mode),
+ @tagName(test_target.optimize_mode),
libc_prefix,
single_threaded_txt,
backend_txt,
}));
these_tests.single_threaded = test_target.single_threaded;
these_tests.setFilter(test_filter);
- these_tests.setBuildMode(test_target.mode);
- these_tests.setTarget(test_target.target);
if (test_target.link_libc) {
these_tests.linkSystemLibrary("c");
}
@@ -739,9 +746,9 @@ pub const StackTracesContext = struct {
step: *build.Step,
test_index: usize,
test_filter: ?[]const u8,
- modes: []const Mode,
+ optimize_modes: []const OptimizeMode,
- const Expect = [@typeInfo(Mode).Enum.fields.len][]const u8;
+ const Expect = [@typeInfo(OptimizeMode).Enum.fields.len][]const u8;
pub fn addCase(self: *StackTracesContext, config: anytype) void {
if (@hasField(@TypeOf(config), "exclude")) {
@@ -755,26 +762,26 @@ pub const StackTracesContext = struct {
const exclude_os: []const std.Target.Os.Tag = &config.exclude_os;
for (exclude_os) |os| if (os == builtin.os.tag) return;
}
- for (self.modes) |mode| {
- switch (mode) {
+ for (self.optimize_modes) |optimize_mode| {
+ switch (optimize_mode) {
.Debug => {
if (@hasField(@TypeOf(config), "Debug")) {
- self.addExpect(config.name, config.source, mode, config.Debug);
+ self.addExpect(config.name, config.source, optimize_mode, config.Debug);
}
},
.ReleaseSafe => {
if (@hasField(@TypeOf(config), "ReleaseSafe")) {
- self.addExpect(config.name, config.source, mode, config.ReleaseSafe);
+ self.addExpect(config.name, config.source, optimize_mode, config.ReleaseSafe);
}
},
.ReleaseFast => {
if (@hasField(@TypeOf(config), "ReleaseFast")) {
- self.addExpect(config.name, config.source, mode, config.ReleaseFast);
+ self.addExpect(config.name, config.source, optimize_mode, config.ReleaseFast);
}
},
.ReleaseSmall => {
if (@hasField(@TypeOf(config), "ReleaseSmall")) {
- self.addExpect(config.name, config.source, mode, config.ReleaseSmall);
+ self.addExpect(config.name, config.source, optimize_mode, config.ReleaseSmall);
}
},
}
@@ -785,7 +792,7 @@ pub const StackTracesContext = struct {
self: *StackTracesContext,
name: []const u8,
source: []const u8,
- mode: Mode,
+ optimize_mode: OptimizeMode,
mode_config: anytype,
) void {
if (@hasField(@TypeOf(mode_config), "exclude")) {
@@ -803,7 +810,7 @@ pub const StackTracesContext = struct {
const annotated_case_name = fmt.allocPrint(self.b.allocator, "{s} {s} ({s})", .{
"stack-trace",
name,
- @tagName(mode),
+ @tagName(optimize_mode),
}) catch unreachable;
if (self.test_filter) |filter| {
if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
@@ -812,14 +819,18 @@ pub const StackTracesContext = struct {
const b = self.b;
const src_basename = "source.zig";
const write_src = b.addWriteFile(src_basename, source);
- const exe = b.addExecutableSource("test", write_src.getFileSource(src_basename).?);
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .root_source_file = write_src.getFileSource(src_basename).?,
+ .optimize = optimize_mode,
+ .target = .{},
+ });
const run_and_compare = RunAndCompareStep.create(
self,
exe,
annotated_case_name,
- mode,
+ optimize_mode,
mode_config.expect,
);
@@ -833,7 +844,7 @@ pub const StackTracesContext = struct {
context: *StackTracesContext,
exe: *LibExeObjStep,
name: []const u8,
- mode: Mode,
+ optimize_mode: OptimizeMode,
expect_output: []const u8,
test_index: usize,
@@ -841,7 +852,7 @@ pub const StackTracesContext = struct {
context: *StackTracesContext,
exe: *LibExeObjStep,
name: []const u8,
- mode: Mode,
+ optimize_mode: OptimizeMode,
expect_output: []const u8,
) *RunAndCompareStep {
const allocator = context.b.allocator;
@@ -851,7 +862,7 @@ pub const StackTracesContext = struct {
.context = context,
.exe = exe,
.name = name,
- .mode = mode,
+ .optimize_mode = optimize_mode,
.expect_output = expect_output,
.test_index = context.test_index,
};
@@ -932,7 +943,7 @@ pub const StackTracesContext = struct {
// process result
// - keep only basename of source file path
// - replace address with symbolic string
- // - replace function name with symbolic string when mode != .Debug
+ // - replace function name with symbolic string when optimize_mode != .Debug
// - skip empty lines
const got: []const u8 = got_result: {
var buf = ArrayList(u8).init(b.allocator);
@@ -968,7 +979,7 @@ pub const StackTracesContext = struct {
// emit substituted line
try buf.appendSlice(line[pos + 1 .. marks[2] + delims[2].len]);
try buf.appendSlice(" [address]");
- if (self.mode == .Debug) {
+ if (self.optimize_mode == .Debug) {
// On certain platforms (windows) or possibly depending on how we choose to link main
// the object file extension may be present so we simply strip any extension.
if (mem.indexOfScalar(u8, line[marks[4]..marks[5]], '.')) |idot| {
@@ -1007,7 +1018,7 @@ pub const StandaloneContext = struct {
step: *build.Step,
test_index: usize,
test_filter: ?[]const u8,
- modes: []const Mode,
+ optimize_modes: []const OptimizeMode,
skip_non_native: bool,
enable_macos_sdk: bool,
target: std.zig.CrossTarget,
@@ -1087,13 +1098,13 @@ pub const StandaloneContext = struct {
}
}
- const modes = if (features.build_modes) self.modes else &[1]Mode{.Debug};
- for (modes) |mode| {
- const arg = switch (mode) {
+ const optimize_modes = if (features.build_modes) self.optimize_modes else &[1]OptimizeMode{.Debug};
+ for (optimize_modes) |optimize_mode| {
+ const arg = switch (optimize_mode) {
.Debug => "",
- .ReleaseFast => "-Drelease-fast",
- .ReleaseSafe => "-Drelease-safe",
- .ReleaseSmall => "-Drelease-small",
+ .ReleaseFast => "-Doptimize=ReleaseFast",
+ .ReleaseSafe => "-Doptimize=ReleaseSafe",
+ .ReleaseSmall => "-Doptimize=ReleaseSmall",
};
const zig_args_base_len = zig_args.items.len;
if (arg.len > 0)
@@ -1101,7 +1112,7 @@ pub const StandaloneContext = struct {
defer zig_args.resize(zig_args_base_len) catch unreachable;
const run_cmd = b.addSystemCommand(zig_args.items);
- const log_step = b.addLog("PASS {s} ({s})", .{ annotated_case_name, @tagName(mode) });
+ const log_step = b.addLog("PASS {s} ({s})", .{ annotated_case_name, @tagName(optimize_mode) });
log_step.step.dependOn(&run_cmd.step);
self.step.dependOn(&log_step.step);
@@ -1111,17 +1122,21 @@ pub const StandaloneContext = struct {
pub fn addAllArgs(self: *StandaloneContext, root_src: []const u8, link_libc: bool) void {
const b = self.b;
- for (self.modes) |mode| {
+ for (self.optimize_modes) |optimize| {
const annotated_case_name = fmt.allocPrint(self.b.allocator, "build {s} ({s})", .{
root_src,
- @tagName(mode),
+ @tagName(optimize),
}) catch unreachable;
if (self.test_filter) |filter| {
if (mem.indexOf(u8, annotated_case_name, filter) == null) continue;
}
- const exe = b.addExecutable("test", root_src);
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .root_source_file = .{ .path = root_src },
+ .optimize = optimize,
+ .target = .{},
+ });
if (link_libc) {
exe.linkSystemLibrary("c");
}
@@ -1247,8 +1262,8 @@ pub const GenHContext = struct {
pub fn addCase(self: *GenHContext, case: *const TestCase) void {
const b = self.b;
- const mode = std.builtin.Mode.Debug;
- const annotated_case_name = fmt.allocPrint(self.b.allocator, "gen-h {s} ({s})", .{ case.name, @tagName(mode) }) catch unreachable;
+ const optimize_mode = std.builtin.OptimizeMode.Debug;
+ const annotated_case_name = fmt.allocPrint(self.b.allocator, "gen-h {s} ({s})", .{ case.name, @tagName(optimize_mode) }) catch unreachable;
if (self.test_filter) |filter| {
if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
}
@@ -1259,7 +1274,7 @@ pub const GenHContext = struct {
}
const obj = b.addObjectFromWriteFileStep("test", write_src, case.sources.items[0].filename);
- obj.setBuildMode(mode);
+ obj.setBuildMode(optimize_mode);
const cmp_h = GenHCmpOutputStep.create(self, obj, annotated_case_name, case);
@@ -1336,14 +1351,16 @@ const c_abi_targets = [_]CrossTarget{
pub fn addCAbiTests(b: *build.Builder, skip_non_native: bool, skip_release: bool) *build.Step {
const step = b.step("test-c-abi", "Run the C ABI tests");
- const modes: [2]Mode = .{ .Debug, .ReleaseFast };
+ const optimize_modes: [2]OptimizeMode = .{ .Debug, .ReleaseFast };
- for (modes[0 .. @as(u8, 1) + @boolToInt(!skip_release)]) |mode| for (c_abi_targets) |c_abi_target| {
+ for (optimize_modes[0 .. @as(u8, 1) + @boolToInt(!skip_release)]) |optimize_mode| for (c_abi_targets) |c_abi_target| {
if (skip_non_native and !c_abi_target.isNative())
continue;
- const test_step = b.addTest("test/c_abi/main.zig");
- test_step.setTarget(c_abi_target);
+ const test_step = b.addTest(.{
+ .root_source_file = .{ .path = "test/c_abi/main.zig" },
+ .optimize = optimize_mode,
+ });
if (c_abi_target.abi != null and c_abi_target.abi.?.isMusl()) {
// TODO NativeTargetInfo insists on dynamically linking musl
// for some reason?
@@ -1351,7 +1368,6 @@ pub fn addCAbiTests(b: *build.Builder, skip_non_native: bool, skip_release: bool
}
test_step.linkLibC();
test_step.addCSourceFile("test/c_abi/cfuncs.c", &.{"-std=c99"});
- test_step.setBuildMode(mode);
if (c_abi_target.isWindows() and (c_abi_target.getCpuArch() == .x86 or builtin.target.os.tag == .linux)) {
// LTO currently incorrectly strips stdcall name-mangled functions
@@ -1363,7 +1379,7 @@ pub fn addCAbiTests(b: *build.Builder, skip_non_native: bool, skip_release: bool
test_step.setNamePrefix(b.fmt("{s}-{s}-{s} ", .{
"test-c-abi",
triple_prefix,
- @tagName(mode),
+ @tagName(optimize_mode),
}));
step.dependOn(&test_step.step);
--
cgit v1.2.3
From 2f5892671e49850070064f689a7d8f93d6a7a0dd Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 31 Jan 2023 14:56:35 -0700
Subject: move compiler's CType logic to std.Target
This API only depends on std.Target and is extremely useful in build
scripts when populating configure files.
---
lib/std/target.zig | 553 ++++++++++++++++++++++++++++++++++++++++++++
src/Sema.zig | 2 +-
src/codegen/c.zig | 1 -
src/codegen/llvm.zig | 5 +-
src/type.zig | 630 ++++-----------------------------------------------
5 files changed, 601 insertions(+), 590 deletions(-)
(limited to 'src')
diff --git a/lib/std/target.zig b/lib/std/target.zig
index 8ae175aac8..4429f8be2d 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -1880,6 +1880,559 @@ pub const Target = struct {
=> 16,
};
}
+
+ pub const CType = enum {
+ short,
+ ushort,
+ int,
+ uint,
+ long,
+ ulong,
+ longlong,
+ ulonglong,
+ float,
+ double,
+ longdouble,
+ };
+
+ pub fn c_type_byte_size(t: Target, c_type: CType) u16 {
+ return switch (c_type) {
+ .short,
+ .ushort,
+ .int,
+ .uint,
+ .long,
+ .ulong,
+ .longlong,
+ .ulonglong,
+ => @divExact(c_type_bit_size(t, c_type), 8),
+
+ .float => 4,
+ .double => 8,
+
+ .longdouble => switch (c_type_bit_size(t, c_type)) {
+ 16 => 2,
+ 32 => 4,
+ 64 => 8,
+ 80 => @intCast(u16, mem.alignForward(10, c_type_alignment(t, .longdouble))),
+ 128 => 16,
+ else => unreachable,
+ },
+ };
+ }
+
+ pub fn c_type_bit_size(target: Target, c_type: CType) u16 {
+ switch (target.os.tag) {
+ .freestanding, .other => switch (target.cpu.arch) {
+ .msp430 => switch (c_type) {
+ .short, .ushort, .int, .uint => return 16,
+ .float, .long, .ulong => return 32,
+ .longlong, .ulonglong, .double, .longdouble => return 64,
+ },
+ .avr => switch (c_type) {
+ .short, .ushort, .int, .uint => return 16,
+ .long, .ulong, .float, .double, .longdouble => return 32,
+ .longlong, .ulonglong => return 64,
+ },
+ .tce, .tcele => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .long, .ulong, .longlong, .ulonglong => return 32,
+ .float, .double, .longdouble => return 32,
+ },
+ .mips64, .mips64el => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => return if (target.abi != .gnuabin32) 64 else 32,
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => return 128,
+ },
+ .x86_64 => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => switch (target.abi) {
+ .gnux32, .muslx32 => return 32,
+ else => return 64,
+ },
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => return 80,
+ },
+ else => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => return target.cpu.arch.ptrBitWidth(),
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => switch (target.cpu.arch) {
+ .x86 => switch (target.abi) {
+ .android => return 64,
+ else => return 80,
+ },
+
+ .powerpc,
+ .powerpcle,
+ .powerpc64,
+ .powerpc64le,
+ => switch (target.abi) {
+ .musl,
+ .musleabi,
+ .musleabihf,
+ .muslx32,
+ => return 64,
+ else => return 128,
+ },
+
+ .riscv32,
+ .riscv64,
+ .aarch64,
+ .aarch64_be,
+ .aarch64_32,
+ .s390x,
+ .sparc,
+ .sparc64,
+ .sparcel,
+ .wasm32,
+ .wasm64,
+ => return 128,
+
+ else => return 64,
+ },
+ },
+ },
+
+ .linux,
+ .freebsd,
+ .netbsd,
+ .dragonfly,
+ .openbsd,
+ .wasi,
+ .emscripten,
+ .plan9,
+ .solaris,
+ .haiku,
+ .ananas,
+ .fuchsia,
+ .minix,
+ => switch (target.cpu.arch) {
+ .msp430 => switch (c_type) {
+ .short, .ushort, .int, .uint => return 16,
+ .long, .ulong, .float => return 32,
+ .longlong, .ulonglong, .double, .longdouble => return 64,
+ },
+ .avr => switch (c_type) {
+ .short, .ushort, .int, .uint => return 16,
+ .long, .ulong, .float, .double, .longdouble => return 32,
+ .longlong, .ulonglong => return 64,
+ },
+ .tce, .tcele => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .long, .ulong, .longlong, .ulonglong => return 32,
+ .float, .double, .longdouble => return 32,
+ },
+ .mips64, .mips64el => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => return if (target.abi != .gnuabin32) 64 else 32,
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => if (target.os.tag == .freebsd) return 64 else return 128,
+ },
+ .x86_64 => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => switch (target.abi) {
+ .gnux32, .muslx32 => return 32,
+ else => return 64,
+ },
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => return 80,
+ },
+ else => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => return target.cpu.arch.ptrBitWidth(),
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => switch (target.cpu.arch) {
+ .x86 => switch (target.abi) {
+ .android => return 64,
+ else => return 80,
+ },
+
+ .powerpc,
+ .powerpcle,
+ => switch (target.abi) {
+ .musl,
+ .musleabi,
+ .musleabihf,
+ .muslx32,
+ => return 64,
+ else => switch (target.os.tag) {
+ .freebsd, .netbsd, .openbsd => return 64,
+ else => return 128,
+ },
+ },
+
+ .powerpc64,
+ .powerpc64le,
+ => switch (target.abi) {
+ .musl,
+ .musleabi,
+ .musleabihf,
+ .muslx32,
+ => return 64,
+ else => switch (target.os.tag) {
+ .freebsd, .openbsd => return 64,
+ else => return 128,
+ },
+ },
+
+ .riscv32,
+ .riscv64,
+ .aarch64,
+ .aarch64_be,
+ .aarch64_32,
+ .s390x,
+ .mips64,
+ .mips64el,
+ .sparc,
+ .sparc64,
+ .sparcel,
+ .wasm32,
+ .wasm64,
+ => return 128,
+
+ else => return 64,
+ },
+ },
+ },
+
+ .windows, .uefi => switch (target.cpu.arch) {
+ .x86 => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => return 32,
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => switch (target.abi) {
+ .gnu, .gnuilp32, .cygnus => return 80,
+ else => return 64,
+ },
+ },
+ .x86_64 => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => switch (target.abi) {
+ .cygnus => return 64,
+ else => return 32,
+ },
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => switch (target.abi) {
+ .gnu, .gnuilp32, .cygnus => return 80,
+ else => return 64,
+ },
+ },
+ else => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => return 32,
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => return 64,
+ },
+ },
+
+ .macos, .ios, .tvos, .watchos => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => switch (target.cpu.arch) {
+ .x86, .arm, .aarch64_32 => return 32,
+ .x86_64 => switch (target.abi) {
+ .gnux32, .muslx32 => return 32,
+ else => return 64,
+ },
+ else => return 64,
+ },
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => switch (target.cpu.arch) {
+ .x86 => switch (target.abi) {
+ .android => return 64,
+ else => return 80,
+ },
+ .x86_64 => return 80,
+ else => return 64,
+ },
+ },
+
+ .nvcl, .cuda => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => switch (target.cpu.arch) {
+ .nvptx => return 32,
+ .nvptx64 => return 64,
+ else => return 64,
+ },
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => return 64,
+ },
+
+ .amdhsa, .amdpal => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong, .longlong, .ulonglong, .double => return 64,
+ .longdouble => return 128,
+ },
+
+ .cloudabi,
+ .kfreebsd,
+ .lv2,
+ .zos,
+ .rtems,
+ .nacl,
+ .aix,
+ .ps4,
+ .ps5,
+ .elfiamcu,
+ .mesa3d,
+ .contiki,
+ .hermit,
+ .hurd,
+ .opencl,
+ .glsl450,
+ .vulkan,
+ .driverkit,
+ .shadermodel,
+ => @panic("TODO specify the C integer and float type sizes for this OS"),
+ }
+ }
+
+ pub fn c_type_alignment(target: Target, c_type: CType) u16 {
+ // Overrides for unusual alignments
+ switch (target.cpu.arch) {
+ .avr => switch (c_type) {
+ .short, .ushort => return 2,
+ else => return 1,
+ },
+ .x86 => switch (target.os.tag) {
+ .windows, .uefi => switch (c_type) {
+ .longlong, .ulonglong, .double => return 8,
+ .longdouble => switch (target.abi) {
+ .gnu, .gnuilp32, .cygnus => return 4,
+ else => return 8,
+ },
+ else => {},
+ },
+ else => {},
+ },
+ else => {},
+ }
+
+ // Next-power-of-two-aligned, up to a maximum.
+ return @min(
+ std.math.ceilPowerOfTwoAssert(u16, (c_type_bit_size(target, c_type) + 7) / 8),
+ switch (target.cpu.arch) {
+ .arm, .armeb, .thumb, .thumbeb => switch (target.os.tag) {
+ .netbsd => switch (target.abi) {
+ .gnueabi,
+ .gnueabihf,
+ .eabi,
+ .eabihf,
+ .android,
+ .musleabi,
+ .musleabihf,
+ => 8,
+
+ else => @as(u16, 4),
+ },
+ .ios, .tvos, .watchos => 4,
+ else => 8,
+ },
+
+ .msp430,
+ .avr,
+ => 2,
+
+ .arc,
+ .csky,
+ .x86,
+ .xcore,
+ .dxil,
+ .loongarch32,
+ .tce,
+ .tcele,
+ .le32,
+ .amdil,
+ .hsail,
+ .spir,
+ .spirv32,
+ .kalimba,
+ .shave,
+ .renderscript32,
+ .ve,
+ .spu_2,
+ => 4,
+
+ .aarch64_32,
+ .amdgcn,
+ .amdil64,
+ .bpfel,
+ .bpfeb,
+ .hexagon,
+ .hsail64,
+ .loongarch64,
+ .m68k,
+ .mips,
+ .mipsel,
+ .sparc,
+ .sparcel,
+ .sparc64,
+ .lanai,
+ .le64,
+ .nvptx,
+ .nvptx64,
+ .r600,
+ .s390x,
+ .spir64,
+ .spirv64,
+ .renderscript64,
+ => 8,
+
+ .aarch64,
+ .aarch64_be,
+ .mips64,
+ .mips64el,
+ .powerpc,
+ .powerpcle,
+ .powerpc64,
+ .powerpc64le,
+ .riscv32,
+ .riscv64,
+ .x86_64,
+ .wasm32,
+ .wasm64,
+ => 16,
+ },
+ );
+ }
+
+ pub fn c_type_preferred_alignment(target: Target, c_type: CType) u16 {
+ // Overrides for unusual alignments
+ switch (target.cpu.arch) {
+ .arm, .armeb, .thumb, .thumbeb => switch (target.os.tag) {
+ .netbsd => switch (target.abi) {
+ .gnueabi,
+ .gnueabihf,
+ .eabi,
+ .eabihf,
+ .android,
+ .musleabi,
+ .musleabihf,
+ => {},
+
+ else => switch (c_type) {
+ .longdouble => return 4,
+ else => {},
+ },
+ },
+ .ios, .tvos, .watchos => switch (c_type) {
+ .longdouble => return 4,
+ else => {},
+ },
+ else => {},
+ },
+ .arc => switch (c_type) {
+ .longdouble => return 4,
+ else => {},
+ },
+ .avr => switch (c_type) {
+ .int, .uint, .long, .ulong, .float, .longdouble => return 1,
+ .short, .ushort => return 2,
+ .double => return 4,
+ .longlong, .ulonglong => return 8,
+ },
+ .x86 => switch (target.os.tag) {
+ .windows, .uefi => switch (c_type) {
+ .longdouble => switch (target.abi) {
+ .gnu, .gnuilp32, .cygnus => return 4,
+ else => return 8,
+ },
+ else => {},
+ },
+ else => switch (c_type) {
+ .longdouble => return 4,
+ else => {},
+ },
+ },
+ else => {},
+ }
+
+ // Next-power-of-two-aligned, up to a maximum.
+ return @min(
+ std.math.ceilPowerOfTwoAssert(u16, (c_type_bit_size(target, c_type) + 7) / 8),
+ switch (target.cpu.arch) {
+ .msp430 => @as(u16, 2),
+
+ .csky,
+ .xcore,
+ .dxil,
+ .loongarch32,
+ .tce,
+ .tcele,
+ .le32,
+ .amdil,
+ .hsail,
+ .spir,
+ .spirv32,
+ .kalimba,
+ .shave,
+ .renderscript32,
+ .ve,
+ .spu_2,
+ => 4,
+
+ .arc,
+ .arm,
+ .armeb,
+ .avr,
+ .thumb,
+ .thumbeb,
+ .aarch64_32,
+ .amdgcn,
+ .amdil64,
+ .bpfel,
+ .bpfeb,
+ .hexagon,
+ .hsail64,
+ .x86,
+ .loongarch64,
+ .m68k,
+ .mips,
+ .mipsel,
+ .sparc,
+ .sparcel,
+ .sparc64,
+ .lanai,
+ .le64,
+ .nvptx,
+ .nvptx64,
+ .r600,
+ .s390x,
+ .spir64,
+ .spirv64,
+ .renderscript64,
+ => 8,
+
+ .aarch64,
+ .aarch64_be,
+ .mips64,
+ .mips64el,
+ .powerpc,
+ .powerpcle,
+ .powerpc64,
+ .powerpc64le,
+ .riscv32,
+ .riscv64,
+ .x86_64,
+ .wasm32,
+ .wasm64,
+ => 16,
+ },
+ );
+ }
};
test {
diff --git a/src/Sema.zig b/src/Sema.zig
index 7448fd149c..87be3de7be 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -26076,7 +26076,7 @@ fn coerceVarArgParam(
.Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}),
.Float => float: {
const target = sema.mod.getTarget();
- const double_bits = @import("type.zig").CType.sizeInBits(.double, target);
+ const double_bits = target.c_type_bit_size(.double);
const inst_bits = uncasted_ty.floatBits(sema.mod.getTarget());
if (inst_bits >= double_bits) break :float inst;
switch (double_bits) {
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index eb0ae1b1f6..2f721e1b4b 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -16,7 +16,6 @@ const trace = @import("../tracy.zig").trace;
const LazySrcLoc = Module.LazySrcLoc;
const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
-const CType = @import("../type.zig").CType;
const target_util = @import("../target.zig");
const libcFloatPrefix = target_util.libcFloatPrefix;
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index c528abdd7c..e19c70f322 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -19,7 +19,6 @@ const Liveness = @import("../Liveness.zig");
const Value = @import("../value.zig").Value;
const Type = @import("../type.zig").Type;
const LazySrcLoc = Module.LazySrcLoc;
-const CType = @import("../type.zig").CType;
const x86_64_abi = @import("../arch/x86_64/abi.zig");
const wasm_c_abi = @import("../arch/wasm/abi.zig");
const aarch64_c_abi = @import("../arch/aarch64/abi.zig");
@@ -11043,8 +11042,8 @@ fn backendSupportsF128(target: std.Target) bool {
fn intrinsicsAllowed(scalar_ty: Type, target: std.Target) bool {
return switch (scalar_ty.tag()) {
.f16 => backendSupportsF16(target),
- .f80 => (CType.longdouble.sizeInBits(target) == 80) and backendSupportsF80(target),
- .f128 => (CType.longdouble.sizeInBits(target) == 128) and backendSupportsF128(target),
+ .f80 => (target.c_type_bit_size(.longdouble) == 80) and backendSupportsF80(target),
+ .f128 => (target.c_type_bit_size(.longdouble) == 128) and backendSupportsF128(target),
else => true,
};
}
diff --git a/src/type.zig b/src/type.zig
index c675cd225d..a13e30cb4c 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -2937,24 +2937,24 @@ pub const Type = extern union {
.anyframe_T,
=> return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) },
- .c_short => return AbiAlignmentAdvanced{ .scalar = CType.short.alignment(target) },
- .c_ushort => return AbiAlignmentAdvanced{ .scalar = CType.ushort.alignment(target) },
- .c_int => return AbiAlignmentAdvanced{ .scalar = CType.int.alignment(target) },
- .c_uint => return AbiAlignmentAdvanced{ .scalar = CType.uint.alignment(target) },
- .c_long => return AbiAlignmentAdvanced{ .scalar = CType.long.alignment(target) },
- .c_ulong => return AbiAlignmentAdvanced{ .scalar = CType.ulong.alignment(target) },
- .c_longlong => return AbiAlignmentAdvanced{ .scalar = CType.longlong.alignment(target) },
- .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = CType.ulonglong.alignment(target) },
- .c_longdouble => return AbiAlignmentAdvanced{ .scalar = CType.longdouble.alignment(target) },
+ .c_short => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.short) },
+ .c_ushort => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ushort) },
+ .c_int => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.int) },
+ .c_uint => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.uint) },
+ .c_long => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.long) },
+ .c_ulong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulong) },
+ .c_longlong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longlong) },
+ .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulonglong) },
+ .c_longdouble => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) },
.f16 => return AbiAlignmentAdvanced{ .scalar = 2 },
- .f32 => return AbiAlignmentAdvanced{ .scalar = CType.float.alignment(target) },
- .f64 => switch (CType.double.sizeInBits(target)) {
- 64 => return AbiAlignmentAdvanced{ .scalar = CType.double.alignment(target) },
+ .f32 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.float) },
+ .f64 => switch (target.c_type_bit_size(.double)) {
+ 64 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.double) },
else => return AbiAlignmentAdvanced{ .scalar = 8 },
},
- .f80 => switch (CType.longdouble.sizeInBits(target)) {
- 80 => return AbiAlignmentAdvanced{ .scalar = CType.longdouble.alignment(target) },
+ .f80 => switch (target.c_type_bit_size(.longdouble)) {
+ 80 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) },
else => {
var payload: Payload.Bits = .{
.base = .{ .tag = .int_unsigned },
@@ -2964,8 +2964,8 @@ pub const Type = extern union {
return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, target) };
},
},
- .f128 => switch (CType.longdouble.sizeInBits(target)) {
- 128 => return AbiAlignmentAdvanced{ .scalar = CType.longdouble.alignment(target) },
+ .f128 => switch (target.c_type_bit_size(.longdouble)) {
+ 128 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) },
else => return AbiAlignmentAdvanced{ .scalar = 16 },
},
@@ -3434,21 +3434,22 @@ pub const Type = extern union {
else => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) },
},
- .c_short => return AbiSizeAdvanced{ .scalar = @divExact(CType.short.sizeInBits(target), 8) },
- .c_ushort => return AbiSizeAdvanced{ .scalar = @divExact(CType.ushort.sizeInBits(target), 8) },
- .c_int => return AbiSizeAdvanced{ .scalar = @divExact(CType.int.sizeInBits(target), 8) },
- .c_uint => return AbiSizeAdvanced{ .scalar = @divExact(CType.uint.sizeInBits(target), 8) },
- .c_long => return AbiSizeAdvanced{ .scalar = @divExact(CType.long.sizeInBits(target), 8) },
- .c_ulong => return AbiSizeAdvanced{ .scalar = @divExact(CType.ulong.sizeInBits(target), 8) },
- .c_longlong => return AbiSizeAdvanced{ .scalar = @divExact(CType.longlong.sizeInBits(target), 8) },
- .c_ulonglong => return AbiSizeAdvanced{ .scalar = @divExact(CType.ulonglong.sizeInBits(target), 8) },
+ .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) },
+ .c_ushort => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ushort) },
+ .c_int => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.int) },
+ .c_uint => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.uint) },
+ .c_long => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.long) },
+ .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) },
+ .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) },
+ .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) },
+ .c_longdouble => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) },
.f16 => return AbiSizeAdvanced{ .scalar = 2 },
.f32 => return AbiSizeAdvanced{ .scalar = 4 },
.f64 => return AbiSizeAdvanced{ .scalar = 8 },
.f128 => return AbiSizeAdvanced{ .scalar = 16 },
- .f80 => switch (CType.longdouble.sizeInBits(target)) {
- 80 => return AbiSizeAdvanced{ .scalar = std.mem.alignForward(10, CType.longdouble.alignment(target)) },
+ .f80 => switch (target.c_type_bit_size(.longdouble)) {
+ 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) },
else => {
var payload: Payload.Bits = .{
.base = .{ .tag = .int_unsigned },
@@ -3458,14 +3459,6 @@ pub const Type = extern union {
return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, target) };
},
},
- .c_longdouble => switch (CType.longdouble.sizeInBits(target)) {
- 16 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f16, target) },
- 32 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f32, target) },
- 64 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f64, target) },
- 80 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f80, target) },
- 128 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f128, target) },
- else => unreachable,
- },
// TODO revisit this when we have the concept of the error tag type
.anyerror_void_error_union,
@@ -3748,15 +3741,15 @@ pub const Type = extern union {
.manyptr_const_u8_sentinel_0,
=> return target.cpu.arch.ptrBitWidth(),
- .c_short => return CType.short.sizeInBits(target),
- .c_ushort => return CType.ushort.sizeInBits(target),
- .c_int => return CType.int.sizeInBits(target),
- .c_uint => return CType.uint.sizeInBits(target),
- .c_long => return CType.long.sizeInBits(target),
- .c_ulong => return CType.ulong.sizeInBits(target),
- .c_longlong => return CType.longlong.sizeInBits(target),
- .c_ulonglong => return CType.ulonglong.sizeInBits(target),
- .c_longdouble => return CType.longdouble.sizeInBits(target),
+ .c_short => return target.c_type_bit_size(.short),
+ .c_ushort => return target.c_type_bit_size(.ushort),
+ .c_int => return target.c_type_bit_size(.int),
+ .c_uint => return target.c_type_bit_size(.uint),
+ .c_long => return target.c_type_bit_size(.long),
+ .c_ulong => return target.c_type_bit_size(.ulong),
+ .c_longlong => return target.c_type_bit_size(.longlong),
+ .c_ulonglong => return target.c_type_bit_size(.ulonglong),
+ .c_longdouble => return target.c_type_bit_size(.longdouble),
.error_set,
.error_set_single,
@@ -4631,14 +4624,14 @@ pub const Type = extern union {
.i128 => return .{ .signedness = .signed, .bits = 128 },
.usize => return .{ .signedness = .unsigned, .bits = target.cpu.arch.ptrBitWidth() },
.isize => return .{ .signedness = .signed, .bits = target.cpu.arch.ptrBitWidth() },
- .c_short => return .{ .signedness = .signed, .bits = CType.short.sizeInBits(target) },
- .c_ushort => return .{ .signedness = .unsigned, .bits = CType.ushort.sizeInBits(target) },
- .c_int => return .{ .signedness = .signed, .bits = CType.int.sizeInBits(target) },
- .c_uint => return .{ .signedness = .unsigned, .bits = CType.uint.sizeInBits(target) },
- .c_long => return .{ .signedness = .signed, .bits = CType.long.sizeInBits(target) },
- .c_ulong => return .{ .signedness = .unsigned, .bits = CType.ulong.sizeInBits(target) },
- .c_longlong => return .{ .signedness = .signed, .bits = CType.longlong.sizeInBits(target) },
- .c_ulonglong => return .{ .signedness = .unsigned, .bits = CType.ulonglong.sizeInBits(target) },
+ .c_short => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) },
+ .c_ushort => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) },
+ .c_int => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.int) },
+ .c_uint => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.uint) },
+ .c_long => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.long) },
+ .c_ulong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) },
+ .c_longlong => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) },
+ .c_ulonglong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) },
.enum_full, .enum_nonexhaustive => ty = ty.cast(Payload.EnumFull).?.data.tag_ty,
.enum_numbered => ty = ty.castTag(.enum_numbered).?.data.tag_ty,
@@ -4724,7 +4717,7 @@ pub const Type = extern union {
.f64 => 64,
.f80 => 80,
.f128, .comptime_float => 128,
- .c_longdouble => CType.longdouble.sizeInBits(target),
+ .c_longdouble => target.c_type_bit_size(.longdouble),
else => unreachable,
};
@@ -6689,536 +6682,3 @@ pub const Type = extern union {
/// to packed struct layout to find out all the places in the codebase you need to edit!
pub const packed_struct_layout_version = 2;
};
-
-pub const CType = enum {
- short,
- ushort,
- int,
- uint,
- long,
- ulong,
- longlong,
- ulonglong,
- longdouble,
-
- // We don't have a `c_float`/`c_double` type in Zig, but these
- // are useful for querying target-correct alignment and checking
- // whether C's double is f64 or f32
- float,
- double,
-
- pub fn sizeInBits(self: CType, target: Target) u16 {
- switch (target.os.tag) {
- .freestanding, .other => switch (target.cpu.arch) {
- .msp430 => switch (self) {
- .short, .ushort, .int, .uint => return 16,
- .float, .long, .ulong => return 32,
- .longlong, .ulonglong, .double, .longdouble => return 64,
- },
- .avr => switch (self) {
- .short, .ushort, .int, .uint => return 16,
- .long, .ulong, .float, .double, .longdouble => return 32,
- .longlong, .ulonglong => return 64,
- },
- .tce, .tcele => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .long, .ulong, .longlong, .ulonglong => return 32,
- .float, .double, .longdouble => return 32,
- },
- .mips64, .mips64el => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => return if (target.abi != .gnuabin32) 64 else 32,
- .longlong, .ulonglong, .double => return 64,
- .longdouble => return 128,
- },
- .x86_64 => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => switch (target.abi) {
- .gnux32, .muslx32 => return 32,
- else => return 64,
- },
- .longlong, .ulonglong, .double => return 64,
- .longdouble => return 80,
- },
- else => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => return target.cpu.arch.ptrBitWidth(),
- .longlong, .ulonglong, .double => return 64,
- .longdouble => switch (target.cpu.arch) {
- .x86 => switch (target.abi) {
- .android => return 64,
- else => return 80,
- },
-
- .powerpc,
- .powerpcle,
- .powerpc64,
- .powerpc64le,
- => switch (target.abi) {
- .musl,
- .musleabi,
- .musleabihf,
- .muslx32,
- => return 64,
- else => return 128,
- },
-
- .riscv32,
- .riscv64,
- .aarch64,
- .aarch64_be,
- .aarch64_32,
- .s390x,
- .sparc,
- .sparc64,
- .sparcel,
- .wasm32,
- .wasm64,
- => return 128,
-
- else => return 64,
- },
- },
- },
-
- .linux,
- .freebsd,
- .netbsd,
- .dragonfly,
- .openbsd,
- .wasi,
- .emscripten,
- .plan9,
- .solaris,
- .haiku,
- .ananas,
- .fuchsia,
- .minix,
- => switch (target.cpu.arch) {
- .msp430 => switch (self) {
- .short, .ushort, .int, .uint => return 16,
- .long, .ulong, .float => return 32,
- .longlong, .ulonglong, .double, .longdouble => return 64,
- },
- .avr => switch (self) {
- .short, .ushort, .int, .uint => return 16,
- .long, .ulong, .float, .double, .longdouble => return 32,
- .longlong, .ulonglong => return 64,
- },
- .tce, .tcele => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .long, .ulong, .longlong, .ulonglong => return 32,
- .float, .double, .longdouble => return 32,
- },
- .mips64, .mips64el => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => return if (target.abi != .gnuabin32) 64 else 32,
- .longlong, .ulonglong, .double => return 64,
- .longdouble => if (target.os.tag == .freebsd) return 64 else return 128,
- },
- .x86_64 => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => switch (target.abi) {
- .gnux32, .muslx32 => return 32,
- else => return 64,
- },
- .longlong, .ulonglong, .double => return 64,
- .longdouble => return 80,
- },
- else => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => return target.cpu.arch.ptrBitWidth(),
- .longlong, .ulonglong, .double => return 64,
- .longdouble => switch (target.cpu.arch) {
- .x86 => switch (target.abi) {
- .android => return 64,
- else => return 80,
- },
-
- .powerpc,
- .powerpcle,
- => switch (target.abi) {
- .musl,
- .musleabi,
- .musleabihf,
- .muslx32,
- => return 64,
- else => switch (target.os.tag) {
- .freebsd, .netbsd, .openbsd => return 64,
- else => return 128,
- },
- },
-
- .powerpc64,
- .powerpc64le,
- => switch (target.abi) {
- .musl,
- .musleabi,
- .musleabihf,
- .muslx32,
- => return 64,
- else => switch (target.os.tag) {
- .freebsd, .openbsd => return 64,
- else => return 128,
- },
- },
-
- .riscv32,
- .riscv64,
- .aarch64,
- .aarch64_be,
- .aarch64_32,
- .s390x,
- .mips64,
- .mips64el,
- .sparc,
- .sparc64,
- .sparcel,
- .wasm32,
- .wasm64,
- => return 128,
-
- else => return 64,
- },
- },
- },
-
- .windows, .uefi => switch (target.cpu.arch) {
- .x86 => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => return 32,
- .longlong, .ulonglong, .double => return 64,
- .longdouble => switch (target.abi) {
- .gnu, .gnuilp32, .cygnus => return 80,
- else => return 64,
- },
- },
- .x86_64 => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => switch (target.abi) {
- .cygnus => return 64,
- else => return 32,
- },
- .longlong, .ulonglong, .double => return 64,
- .longdouble => switch (target.abi) {
- .gnu, .gnuilp32, .cygnus => return 80,
- else => return 64,
- },
- },
- else => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => return 32,
- .longlong, .ulonglong, .double => return 64,
- .longdouble => return 64,
- },
- },
-
- .macos, .ios, .tvos, .watchos => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => switch (target.cpu.arch) {
- .x86, .arm, .aarch64_32 => return 32,
- .x86_64 => switch (target.abi) {
- .gnux32, .muslx32 => return 32,
- else => return 64,
- },
- else => return 64,
- },
- .longlong, .ulonglong, .double => return 64,
- .longdouble => switch (target.cpu.arch) {
- .x86 => switch (target.abi) {
- .android => return 64,
- else => return 80,
- },
- .x86_64 => return 80,
- else => return 64,
- },
- },
-
- .nvcl, .cuda => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => switch (target.cpu.arch) {
- .nvptx => return 32,
- .nvptx64 => return 64,
- else => return 64,
- },
- .longlong, .ulonglong, .double => return 64,
- .longdouble => return 64,
- },
-
- .amdhsa, .amdpal => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong, .longlong, .ulonglong, .double => return 64,
- .longdouble => return 128,
- },
-
- .cloudabi,
- .kfreebsd,
- .lv2,
- .zos,
- .rtems,
- .nacl,
- .aix,
- .ps4,
- .ps5,
- .elfiamcu,
- .mesa3d,
- .contiki,
- .hermit,
- .hurd,
- .opencl,
- .glsl450,
- .vulkan,
- .driverkit,
- .shadermodel,
- => @panic("TODO specify the C integer and float type sizes for this OS"),
- }
- }
-
- pub fn alignment(self: CType, target: Target) u16 {
-
- // Overrides for unusual alignments
- switch (target.cpu.arch) {
- .avr => switch (self) {
- .short, .ushort => return 2,
- else => return 1,
- },
- .x86 => switch (target.os.tag) {
- .windows, .uefi => switch (self) {
- .longlong, .ulonglong, .double => return 8,
- .longdouble => switch (target.abi) {
- .gnu, .gnuilp32, .cygnus => return 4,
- else => return 8,
- },
- else => {},
- },
- else => {},
- },
- else => {},
- }
-
- // Next-power-of-two-aligned, up to a maximum.
- return @min(
- std.math.ceilPowerOfTwoAssert(u16, (self.sizeInBits(target) + 7) / 8),
- switch (target.cpu.arch) {
- .arm, .armeb, .thumb, .thumbeb => switch (target.os.tag) {
- .netbsd => switch (target.abi) {
- .gnueabi,
- .gnueabihf,
- .eabi,
- .eabihf,
- .android,
- .musleabi,
- .musleabihf,
- => 8,
-
- else => @as(u16, 4),
- },
- .ios, .tvos, .watchos => 4,
- else => 8,
- },
-
- .msp430,
- .avr,
- => 2,
-
- .arc,
- .csky,
- .x86,
- .xcore,
- .dxil,
- .loongarch32,
- .tce,
- .tcele,
- .le32,
- .amdil,
- .hsail,
- .spir,
- .spirv32,
- .kalimba,
- .shave,
- .renderscript32,
- .ve,
- .spu_2,
- => 4,
-
- .aarch64_32,
- .amdgcn,
- .amdil64,
- .bpfel,
- .bpfeb,
- .hexagon,
- .hsail64,
- .loongarch64,
- .m68k,
- .mips,
- .mipsel,
- .sparc,
- .sparcel,
- .sparc64,
- .lanai,
- .le64,
- .nvptx,
- .nvptx64,
- .r600,
- .s390x,
- .spir64,
- .spirv64,
- .renderscript64,
- => 8,
-
- .aarch64,
- .aarch64_be,
- .mips64,
- .mips64el,
- .powerpc,
- .powerpcle,
- .powerpc64,
- .powerpc64le,
- .riscv32,
- .riscv64,
- .x86_64,
- .wasm32,
- .wasm64,
- => 16,
- },
- );
- }
-
- pub fn preferredAlignment(self: CType, target: Target) u16 {
-
- // Overrides for unusual alignments
- switch (target.cpu.arch) {
- .arm, .armeb, .thumb, .thumbeb => switch (target.os.tag) {
- .netbsd => switch (target.abi) {
- .gnueabi,
- .gnueabihf,
- .eabi,
- .eabihf,
- .android,
- .musleabi,
- .musleabihf,
- => {},
-
- else => switch (self) {
- .longdouble => return 4,
- else => {},
- },
- },
- .ios, .tvos, .watchos => switch (self) {
- .longdouble => return 4,
- else => {},
- },
- else => {},
- },
- .arc => switch (self) {
- .longdouble => return 4,
- else => {},
- },
- .avr => switch (self) {
- .int, .uint, .long, .ulong, .float, .longdouble => return 1,
- .short, .ushort => return 2,
- .double => return 4,
- .longlong, .ulonglong => return 8,
- },
- .x86 => switch (target.os.tag) {
- .windows, .uefi => switch (self) {
- .longdouble => switch (target.abi) {
- .gnu, .gnuilp32, .cygnus => return 4,
- else => return 8,
- },
- else => {},
- },
- else => switch (self) {
- .longdouble => return 4,
- else => {},
- },
- },
- else => {},
- }
-
- // Next-power-of-two-aligned, up to a maximum.
- return @min(
- std.math.ceilPowerOfTwoAssert(u16, (self.sizeInBits(target) + 7) / 8),
- switch (target.cpu.arch) {
- .msp430 => @as(u16, 2),
-
- .csky,
- .xcore,
- .dxil,
- .loongarch32,
- .tce,
- .tcele,
- .le32,
- .amdil,
- .hsail,
- .spir,
- .spirv32,
- .kalimba,
- .shave,
- .renderscript32,
- .ve,
- .spu_2,
- => 4,
-
- .arc,
- .arm,
- .armeb,
- .avr,
- .thumb,
- .thumbeb,
- .aarch64_32,
- .amdgcn,
- .amdil64,
- .bpfel,
- .bpfeb,
- .hexagon,
- .hsail64,
- .x86,
- .loongarch64,
- .m68k,
- .mips,
- .mipsel,
- .sparc,
- .sparcel,
- .sparc64,
- .lanai,
- .le64,
- .nvptx,
- .nvptx64,
- .r600,
- .s390x,
- .spir64,
- .spirv64,
- .renderscript64,
- => 8,
-
- .aarch64,
- .aarch64_be,
- .mips64,
- .mips64el,
- .powerpc,
- .powerpcle,
- .powerpc64,
- .powerpc64le,
- .riscv32,
- .riscv64,
- .x86_64,
- .wasm32,
- .wasm64,
- => 16,
- },
- );
- }
-};
--
cgit v1.2.3
From 2ccff5115454bab4898bae3de88f5619310bc5c1 Mon Sep 17 00:00:00 2001
From: praschke
Date: Tue, 31 Jan 2023 21:55:16 +0000
Subject: mingw: repair msvcrt-os build flags
__LIBMSVCRT__ is still used and is distinct from __LIBMSVCRT_OS__
---
src/mingw.zig | 1 +
1 file changed, 1 insertion(+)
(limited to 'src')
diff --git a/src/mingw.zig b/src/mingw.zig
index 1fee8e90a4..06880743c6 100644
--- a/src/mingw.zig
+++ b/src/mingw.zig
@@ -106,6 +106,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
.msvcrt_os_lib => {
const extra_flags = try arena.dupe([]const u8, &[_][]const u8{
"-DHAVE_CONFIG_H",
+ "-D__LIBMSVCRT__",
"-D__LIBMSVCRT_OS__",
"-I",
--
cgit v1.2.3
From 9fdc32c96e3961ae2f5287483c9638051df34180 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Wed, 1 Feb 2023 09:13:49 +0100
Subject: link: clean up type resolution in Elf.Atom and MachO.Atom
---
src/link/Coff/Atom.zig | 2 +-
src/link/Elf/Atom.zig | 4 ++--
src/link/MachO/Atom.zig | 18 +++++++++---------
3 files changed, 12 insertions(+), 12 deletions(-)
(limited to 'src')
diff --git a/src/link/Coff/Atom.zig b/src/link/Coff/Atom.zig
index 1ee31cccaa..80c04a8fa1 100644
--- a/src/link/Coff/Atom.zig
+++ b/src/link/Coff/Atom.zig
@@ -119,7 +119,7 @@ pub fn addBaseRelocation(coff_file: *Coff, atom_index: Index, offset: u32) !void
try gop.value_ptr.append(gpa, offset);
}
-pub fn freeRelocations(coff_file: *Coff, atom_index: Atom.Index) void {
+pub fn freeRelocations(coff_file: *Coff, atom_index: Index) void {
const gpa = coff_file.base.allocator;
var removed_relocs = coff_file.relocs.fetchRemove(atom_index);
if (removed_relocs) |*relocs| relocs.value.deinit(gpa);
diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig
index 79b699636f..24cf19432c 100644
--- a/src/link/Elf/Atom.zig
+++ b/src/link/Elf/Atom.zig
@@ -20,8 +20,8 @@ offset_table_index: u32,
/// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `TextBlock`.
-prev_index: ?Atom.Index,
-next_index: ?Atom.Index,
+prev_index: ?Index,
+next_index: ?Index,
dbg_info_atom: Dwarf.Atom,
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index da0115d069..401d71813c 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -40,8 +40,8 @@ alignment: u32,
/// Points to the previous and next neighbours
/// TODO use the same trick as with symbols: reserve index 0 as null atom
-next_index: ?Atom.Index,
-prev_index: ?Atom.Index,
+next_index: ?Index,
+prev_index: ?Index,
dbg_info_atom: Dwarf.Atom,
@@ -119,13 +119,13 @@ pub fn freeListEligible(self: Atom, macho_file: *MachO) bool {
return surplus >= MachO.min_text_capacity;
}
-pub fn addRelocation(macho_file: *MachO, atom_index: Atom.Index, reloc: Relocation) !void {
+pub fn addRelocation(macho_file: *MachO, atom_index: Index, reloc: Relocation) !void {
return addRelocations(macho_file, atom_index, 1, .{reloc});
}
pub fn addRelocations(
macho_file: *MachO,
- atom_index: Atom.Index,
+ atom_index: Index,
comptime count: comptime_int,
relocs: [count]Relocation,
) !void {
@@ -145,7 +145,7 @@ pub fn addRelocations(
}
}
-pub fn addRebase(macho_file: *MachO, atom_index: Atom.Index, offset: u32) !void {
+pub fn addRebase(macho_file: *MachO, atom_index: Index, offset: u32) !void {
const gpa = macho_file.base.allocator;
const atom = macho_file.getAtom(atom_index);
log.debug(" (adding rebase at offset 0x{x} in %{?d})", .{ offset, atom.getSymbolIndex() });
@@ -156,7 +156,7 @@ pub fn addRebase(macho_file: *MachO, atom_index: Atom.Index, offset: u32) !void
try gop.value_ptr.append(gpa, offset);
}
-pub fn addBinding(macho_file: *MachO, atom_index: Atom.Index, binding: Binding) !void {
+pub fn addBinding(macho_file: *MachO, atom_index: Index, binding: Binding) !void {
const gpa = macho_file.base.allocator;
const atom = macho_file.getAtom(atom_index);
log.debug(" (adding binding to symbol {s} at offset 0x{x} in %{?d})", .{
@@ -171,7 +171,7 @@ pub fn addBinding(macho_file: *MachO, atom_index: Atom.Index, binding: Binding)
try gop.value_ptr.append(gpa, binding);
}
-pub fn addLazyBinding(macho_file: *MachO, atom_index: Atom.Index, binding: Binding) !void {
+pub fn addLazyBinding(macho_file: *MachO, atom_index: Index, binding: Binding) !void {
const gpa = macho_file.base.allocator;
const atom = macho_file.getAtom(atom_index);
log.debug(" (adding lazy binding to symbol {s} at offset 0x{x} in %{?d})", .{
@@ -186,7 +186,7 @@ pub fn addLazyBinding(macho_file: *MachO, atom_index: Atom.Index, binding: Bindi
try gop.value_ptr.append(gpa, binding);
}
-pub fn resolveRelocations(macho_file: *MachO, atom_index: Atom.Index) !void {
+pub fn resolveRelocations(macho_file: *MachO, atom_index: Index) !void {
const atom = macho_file.getAtom(atom_index);
const relocs = macho_file.relocs.get(atom_index) orelse return;
const source_sym = atom.getSymbol(macho_file);
@@ -203,7 +203,7 @@ pub fn resolveRelocations(macho_file: *MachO, atom_index: Atom.Index) !void {
}
}
-pub fn freeRelocations(macho_file: *MachO, atom_index: Atom.Index) void {
+pub fn freeRelocations(macho_file: *MachO, atom_index: Index) void {
const gpa = macho_file.base.allocator;
var removed_relocs = macho_file.relocs.fetchOrderedRemove(atom_index);
if (removed_relocs) |*relocs| relocs.value.deinit(gpa);
--
cgit v1.2.3
From b3277c893691c462ec2e82577a78e7baafb42bf6 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Wed, 1 Feb 2023 11:12:53 +0100
Subject: link: make Plan9 atoms fully owned by the linker
---
src/Module.zig | 4 +-
src/Sema.zig | 2 +-
src/arch/aarch64/CodeGen.zig | 11 ++-
src/arch/arm/CodeGen.zig | 5 +-
src/arch/riscv64/CodeGen.zig | 5 +-
src/arch/x86_64/CodeGen.zig | 11 ++-
src/link.zig | 4 +-
src/link/Plan9.zig | 228 ++++++++++++++++++++++++++++---------------
8 files changed, 170 insertions(+), 100 deletions(-)
(limited to 'src')
diff --git a/src/Module.zig b/src/Module.zig
index b39fd2bab2..a914dc90d8 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -5277,7 +5277,7 @@ pub fn clearDecl(
.coff => .{ .coff = {} },
.elf => .{ .elf = {} },
.macho => .{ .macho = {} },
- .plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
+ .plan9 => .{ .plan9 = {} },
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
.spirv => .{ .spirv = {} },
@@ -5697,7 +5697,7 @@ pub fn allocateNewDecl(
.coff => .{ .coff = {} },
.elf => .{ .elf = {} },
.macho => .{ .macho = {} },
- .plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
+ .plan9 => .{ .plan9 = {} },
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
.spirv => .{ .spirv = {} },
diff --git a/src/Sema.zig b/src/Sema.zig
index 82321ef545..e54bfc7bd9 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -5568,7 +5568,7 @@ pub fn analyzeExport(
.coff => .{ .coff = {} },
.elf => .{ .elf = {} },
.macho => .{ .macho = {} },
- .plan9 => .{ .plan9 = null },
+ .plan9 => .{ .plan9 = {} },
.c => .{ .c = {} },
.wasm => .{ .wasm = .{} },
.spirv => .{ .spirv = {} },
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index d0fba2fd0e..aab30b73dc 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -4307,7 +4307,6 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
- const fn_owner_decl = mod.declPtr(func.owner_decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
@@ -4333,11 +4332,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
},
});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(func.owner_decl);
+ const decl_block_index = try p9.seeDecl(func.owner_decl);
+ const decl_block = p9.getDeclBlock(decl_block_index);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
- const got_index = fn_owner_decl.link.plan9.got_index.?;
+ const got_index = decl_block.got_index.?;
const fn_got_addr = got_addr + got_index * ptr_bytes;
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr });
} else unreachable;
@@ -6166,8 +6166,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
.sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(decl_index);
- const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
+ const decl_block_index = try p9.seeDecl(decl_index);
+ const decl_block = p9.getDeclBlock(decl_block_index);
+ const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index c6ee960e51..6574501767 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -6091,8 +6091,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
} else if (self.bin_file.cast(link.File.Coff)) |_| {
return self.fail("TODO codegen COFF const Decl pointer", .{});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(decl_index);
- const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
+ const decl_block_index = try p9.seeDecl(decl_index);
+ const decl_block = p9.getDeclBlock(decl_block_index);
+ const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index a0af1b3cce..423816c0b1 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -2558,8 +2558,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
} else if (self.bin_file.cast(link.File.Coff)) |_| {
return self.fail("TODO codegen COFF const Decl pointer", .{});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(decl_index);
- const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
+ const decl_block_index = try p9.seeDecl(decl_index);
+ const decl_block = p9.getDeclBlock(decl_block_index);
+ const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index b41973ea97..fcae7eaabc 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -3996,7 +3996,6 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
- const fn_owner_decl = mod.declPtr(func.owner_decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
@@ -4042,11 +4041,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.data = undefined,
});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(func.owner_decl);
+ const decl_block_index = try p9.seeDecl(func.owner_decl);
+ const decl_block = p9.getDeclBlock(decl_block_index);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
- const got_index = fn_owner_decl.link.plan9.got_index.?;
+ const got_index = decl_block.got_index.?;
const fn_got_addr = got_addr + got_index * ptr_bytes;
_ = try self.addInst(.{
.tag = .call,
@@ -6739,8 +6739,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
.sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(decl_index);
- const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
+ const decl_block_index = try p9.seeDecl(decl_index);
+ const decl_block = p9.getDeclBlock(decl_block_index);
+ const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
diff --git a/src/link.zig b/src/link.zig
index eb74615492..c0eacf88a0 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -265,7 +265,7 @@ pub const File = struct {
elf: void,
coff: void,
macho: void,
- plan9: Plan9.DeclBlock,
+ plan9: void,
c: void,
wasm: Wasm.DeclBlock,
spirv: void,
@@ -287,7 +287,7 @@ pub const File = struct {
elf: void,
coff: void,
macho: void,
- plan9: Plan9.Export,
+ plan9: void,
c: void,
wasm: Wasm.Export,
spirv: void,
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index a8b8caafab..20f540022a 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -21,14 +21,7 @@ const Allocator = std.mem.Allocator;
const log = std.log.scoped(.link);
const assert = std.debug.assert;
-const FnDeclOutput = struct {
- /// this code is modified when relocated so it is mutable
- code: []u8,
- /// this might have to be modified in the linker, so thats why its mutable
- lineinfo: []u8,
- start_line: u32,
- end_line: u32,
-};
+pub const base_tag = .plan9;
base: link.File,
sixtyfour_bit: bool,
@@ -101,6 +94,9 @@ got_index_free_list: std.ArrayListUnmanaged(usize) = .{},
syms_index_free_list: std.ArrayListUnmanaged(usize) = .{},
+decl_blocks: std.ArrayListUnmanaged(DeclBlock) = .{},
+decls: std.AutoHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
+
const Reloc = struct {
target: Module.Decl.Index,
offset: u64,
@@ -115,6 +111,42 @@ const Bases = struct {
const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(struct { info: DeclBlock, code: []const u8 }));
+pub const PtrWidth = enum { p32, p64 };
+
+pub const DeclBlock = struct {
+ type: aout.Sym.Type,
+ /// offset in the text or data sects
+ offset: ?u64,
+ /// offset into syms
+ sym_index: ?usize,
+ /// offset into got
+ got_index: ?usize,
+
+ pub const Index = u32;
+};
+
+const DeclMetadata = struct {
+ index: DeclBlock.Index,
+ exports: std.ArrayListUnmanaged(usize) = .{},
+
+ fn getExport(m: DeclMetadata, p9: *const Plan9, name: []const u8) ?usize {
+ for (m.exports.items) |exp| {
+ const sym = p9.syms.items[exp];
+ if (mem.eql(u8, name, sym.name)) return exp;
+ }
+ return null;
+ }
+};
+
+const FnDeclOutput = struct {
+ /// this code is modified when relocated so it is mutable
+ code: []u8,
+ /// this might have to be modified in the linker, so thats why its mutable
+ lineinfo: []u8,
+ start_line: u32,
+ end_line: u32,
+};
+
fn getAddr(self: Plan9, addr: u64, t: aout.Sym.Type) u64 {
return addr + switch (t) {
.T, .t, .l, .L => self.bases.text,
@@ -127,22 +159,6 @@ fn getSymAddr(self: Plan9, s: aout.Sym) u64 {
return self.getAddr(s.value, s.type);
}
-pub const DeclBlock = struct {
- type: aout.Sym.Type,
- /// offset in the text or data sects
- offset: ?u64,
- /// offset into syms
- sym_index: ?usize,
- /// offset into got
- got_index: ?usize,
- pub const empty = DeclBlock{
- .type = .t,
- .offset = null,
- .sym_index = null,
- .got_index = null,
- };
-};
-
pub fn defaultBaseAddrs(arch: std.Target.Cpu.Arch) Bases {
return switch (arch) {
.x86_64 => .{
@@ -164,8 +180,6 @@ pub fn defaultBaseAddrs(arch: std.Target.Cpu.Arch) Bases {
};
}
-pub const PtrWidth = enum { p32, p64 };
-
pub fn createEmpty(gpa: Allocator, options: link.Options) !*Plan9 {
if (options.use_llvm)
return error.LLVMBackendDoesNotSupportPlan9;
@@ -271,7 +285,7 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
const decl = module.declPtr(decl_index);
self.freeUnnamedConsts(decl_index);
- try self.seeDecl(decl_index);
+ _ = try self.seeDecl(decl_index);
log.debug("codegen decl {*} ({s})", .{ decl, decl.name });
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
@@ -313,11 +327,11 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
.end_line = end_line,
};
try self.putFn(decl_index, out);
- return self.updateFinish(decl);
+ return self.updateFinish(decl_index);
}
pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.Index) !u32 {
- try self.seeDecl(decl_index);
+ _ = try self.seeDecl(decl_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -387,7 +401,7 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index)
}
}
- try self.seeDecl(decl_index);
+ _ = try self.seeDecl(decl_index);
log.debug("codegen decl {*} ({s}) ({d})", .{ decl, decl.name, decl_index });
@@ -414,28 +428,31 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index)
if (self.data_decl_table.fetchPutAssumeCapacity(decl_index, duped_code)) |old_entry| {
self.base.allocator.free(old_entry.value);
}
- return self.updateFinish(decl);
+ return self.updateFinish(decl_index);
}
/// called at the end of update{Decl,Func}
-fn updateFinish(self: *Plan9, decl: *Module.Decl) !void {
+fn updateFinish(self: *Plan9, decl_index: Module.Decl.Index) !void {
+ const decl = self.base.options.module.?.declPtr(decl_index);
const is_fn = (decl.ty.zigTypeTag() == .Fn);
log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name });
const sym_t: aout.Sym.Type = if (is_fn) .t else .d;
+
+ const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index);
// write the internal linker metadata
- decl.link.plan9.type = sym_t;
+ decl_block.type = sym_t;
// write the symbol
// we already have the got index
const sym: aout.Sym = .{
.value = undefined, // the value of stuff gets filled in in flushModule
- .type = decl.link.plan9.type,
+ .type = decl_block.type,
.name = mem.span(decl.name),
};
- if (decl.link.plan9.sym_index) |s| {
+ if (decl_block.sym_index) |s| {
self.syms.items[s] = sym;
} else {
const s = try self.allocateSymbolIndex();
- decl.link.plan9.sym_index = s;
+ decl_block.sym_index = s;
self.syms.items[s] = sym;
}
}
@@ -550,6 +567,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
while (it.next()) |entry| {
const decl_index = entry.key_ptr.*;
const decl = mod.declPtr(decl_index);
+ const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index);
const out = entry.value_ptr.*;
log.debug("write text decl {*} ({s}), lines {d} to {d}", .{ decl, decl.name, out.start_line + 1, out.end_line });
{
@@ -568,16 +586,16 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
iovecs_i += 1;
const off = self.getAddr(text_i, .t);
text_i += out.code.len;
- decl.link.plan9.offset = off;
+ decl_block.offset = off;
if (!self.sixtyfour_bit) {
- mem.writeIntNative(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off));
- mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
+ mem.writeIntNative(u32, got_table[decl_block.got_index.? * 4 ..][0..4], @intCast(u32, off));
+ mem.writeInt(u32, got_table[decl_block.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
} else {
- mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
+ mem.writeInt(u64, got_table[decl_block.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
}
- self.syms.items[decl.link.plan9.sym_index.?].value = off;
+ self.syms.items[decl_block.sym_index.?].value = off;
if (mod.decl_exports.get(decl_index)) |exports| {
- try self.addDeclExports(mod, decl, exports.items);
+ try self.addDeclExports(mod, decl_index, exports.items);
}
}
}
@@ -598,6 +616,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
while (it.next()) |entry| {
const decl_index = entry.key_ptr.*;
const decl = mod.declPtr(decl_index);
+ const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index);
const code = entry.value_ptr.*;
log.debug("write data decl {*} ({s})", .{ decl, decl.name });
@@ -606,15 +625,15 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
iovecs_i += 1;
const off = self.getAddr(data_i, .d);
data_i += code.len;
- decl.link.plan9.offset = off;
+ decl_block.offset = off;
if (!self.sixtyfour_bit) {
- mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
+ mem.writeInt(u32, got_table[decl_block.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
} else {
- mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
+ mem.writeInt(u64, got_table[decl_block.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
}
- self.syms.items[decl.link.plan9.sym_index.?].value = off;
+ self.syms.items[decl_block.sym_index.?].value = off;
if (mod.decl_exports.get(decl_index)) |exports| {
- try self.addDeclExports(mod, decl, exports.items);
+ try self.addDeclExports(mod, decl_index, exports.items);
}
}
// write the unnamed constants after the other data decls
@@ -676,7 +695,8 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
for (kv.value_ptr.items) |reloc| {
const target_decl_index = reloc.target;
const target_decl = mod.declPtr(target_decl_index);
- const target_decl_offset = target_decl.link.plan9.offset.?;
+ const target_decl_block = self.getDeclBlock(self.decls.get(target_decl_index).?.index);
+ const target_decl_offset = target_decl_block.offset.?;
const offset = reloc.offset;
const addend = reloc.addend;
@@ -709,28 +729,36 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
fn addDeclExports(
self: *Plan9,
module: *Module,
- decl: *Module.Decl,
+ decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
+ const metadata = self.decls.getPtr(decl_index).?;
+ const decl_block = self.getDeclBlock(metadata.index);
+
for (exports) |exp| {
// plan9 does not support custom sections
if (exp.options.section) |section_name| {
if (!mem.eql(u8, section_name, ".text") or !mem.eql(u8, section_name, ".data")) {
- try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "plan9 does not support extra sections", .{}));
+ try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(
+ self.base.allocator,
+ module.declPtr(decl_index).srcLoc(),
+ "plan9 does not support extra sections",
+ .{},
+ ));
break;
}
}
const sym = .{
- .value = decl.link.plan9.offset.?,
- .type = decl.link.plan9.type.toGlobal(),
+ .value = decl_block.offset.?,
+ .type = decl_block.type.toGlobal(),
.name = exp.options.name,
};
- if (exp.link.plan9) |i| {
+ if (metadata.getExport(self, exp.options.name)) |i| {
self.syms.items[i] = sym;
} else {
try self.syms.append(self.base.allocator, sym);
- exp.link.plan9 = self.syms.items.len - 1;
+ try metadata.exports.append(self.base.allocator, self.syms.items.len - 1);
}
}
}
@@ -760,13 +788,18 @@ pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void {
self.base.allocator.free(removed_entry.value);
}
}
- if (decl.link.plan9.got_index) |i| {
- // TODO: if this catch {} is triggered, an assertion in flushModule will be triggered, because got_index_free_list will have the wrong length
- self.got_index_free_list.append(self.base.allocator, i) catch {};
- }
- if (decl.link.plan9.sym_index) |i| {
- self.syms_index_free_list.append(self.base.allocator, i) catch {};
- self.syms.items[i] = aout.Sym.undefined_symbol;
+ if (self.decls.fetchRemove(decl_index)) |const_kv| {
+ var kv = const_kv;
+ const decl_block = self.getDeclBlock(kv.value.index);
+ if (decl_block.got_index) |i| {
+ // TODO: if this catch {} is triggered, an assertion in flushModule will be triggered, because got_index_free_list will have the wrong length
+ self.got_index_free_list.append(self.base.allocator, i) catch {};
+ }
+ if (decl_block.sym_index) |i| {
+ self.syms_index_free_list.append(self.base.allocator, i) catch {};
+ self.syms.items[i] = aout.Sym.undefined_symbol;
+ }
+ kv.value.exports.deinit(self.base.allocator);
}
self.freeUnnamedConsts(decl_index);
{
@@ -786,12 +819,30 @@ fn freeUnnamedConsts(self: *Plan9, decl_index: Module.Decl.Index) void {
unnamed_consts.clearAndFree(self.base.allocator);
}
-pub fn seeDecl(self: *Plan9, decl_index: Module.Decl.Index) !void {
- const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
- if (decl.link.plan9.got_index == null) {
- decl.link.plan9.got_index = self.allocateGotIndex();
+fn createDeclBlock(self: *Plan9) !DeclBlock.Index {
+ const gpa = self.base.allocator;
+ const index = @intCast(DeclBlock.Index, self.decl_blocks.items.len);
+ const decl_block = try self.decl_blocks.addOne(gpa);
+ decl_block.* = .{
+ .type = .t,
+ .offset = null,
+ .sym_index = null,
+ .got_index = null,
+ };
+ return index;
+}
+
+pub fn seeDecl(self: *Plan9, decl_index: Module.Decl.Index) !DeclBlock.Index {
+ const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
+ if (!gop.found_existing) {
+ const index = try self.createDeclBlock();
+ self.getDeclBlockPtr(index).got_index = self.allocateGotIndex();
+ gop.value_ptr.* = .{
+ .index = index,
+ .exports = .{},
+ };
}
+ return gop.value_ptr.index;
}
pub fn updateDeclExports(
@@ -800,7 +851,7 @@ pub fn updateDeclExports(
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
- try self.seeDecl(decl_index);
+ _ = try self.seeDecl(decl_index);
// we do all the things in flush
_ = module;
_ = exports;
@@ -842,10 +893,17 @@ pub fn deinit(self: *Plan9) void {
self.syms_index_free_list.deinit(gpa);
self.file_segments.deinit(gpa);
self.path_arena.deinit();
+ self.decl_blocks.deinit(gpa);
+
+ {
+ var it = self.decls.iterator();
+ while (it.next()) |entry| {
+ entry.value_ptr.exports.deinit(gpa);
+ }
+ self.decls.deinit(gpa);
+ }
}
-pub const Export = ?usize;
-pub const base_tag = .plan9;
pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Plan9 {
if (options.use_llvm)
return error.LLVMBackendDoesNotSupportPlan9;
@@ -911,20 +969,19 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
}
}
- const mod = self.base.options.module.?;
-
// write the data symbols
{
var it = self.data_decl_table.iterator();
while (it.next()) |entry| {
const decl_index = entry.key_ptr.*;
- const decl = mod.declPtr(decl_index);
- const sym = self.syms.items[decl.link.plan9.sym_index.?];
+ const decl_metadata = self.decls.get(decl_index).?;
+ const decl_block = self.getDeclBlock(decl_metadata.index);
+ const sym = self.syms.items[decl_block.sym_index.?];
try self.writeSym(writer, sym);
if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| {
- for (exports.items) |e| {
- try self.writeSym(writer, self.syms.items[e.link.plan9.?]);
- }
+ for (exports.items) |e| if (decl_metadata.getExport(self, e.options.name)) |exp_i| {
+ try self.writeSym(writer, self.syms.items[exp_i]);
+ };
}
}
}
@@ -943,16 +1000,17 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
var submap_it = symidx_and_submap.functions.iterator();
while (submap_it.next()) |entry| {
const decl_index = entry.key_ptr.*;
- const decl = mod.declPtr(decl_index);
- const sym = self.syms.items[decl.link.plan9.sym_index.?];
+ const decl_metadata = self.decls.get(decl_index).?;
+ const decl_block = self.getDeclBlock(decl_metadata.index);
+ const sym = self.syms.items[decl_block.sym_index.?];
try self.writeSym(writer, sym);
if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| {
- for (exports.items) |e| {
- const s = self.syms.items[e.link.plan9.?];
+ for (exports.items) |e| if (decl_metadata.getExport(self, e.options.name)) |exp_i| {
+ const s = self.syms.items[exp_i];
if (mem.eql(u8, s.name, "_start"))
self.entry_val = s.value;
try self.writeSym(writer, s);
- }
+ };
}
}
}
@@ -1004,3 +1062,11 @@ pub fn getDeclVAddr(
});
return undefined;
}
+
+pub fn getDeclBlock(self: *const Plan9, index: DeclBlock.Index) DeclBlock {
+ return self.decl_blocks.items[index];
+}
+
+fn getDeclBlockPtr(self: *Plan9, index: DeclBlock.Index) *DeclBlock {
+ return &self.decl_blocks.items[index];
+}
--
cgit v1.2.3
From d98fc53b8fbe479f828114b0276d5290146cc2a3 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Wed, 1 Feb 2023 11:49:07 +0100
Subject: link: use strtab.StringTable in Dwarf
---
src/link/Dwarf.zig | 20 ++++++--------------
src/link/Elf.zig | 10 +++++-----
src/link/MachO/DebugSymbols.zig | 12 ++++++------
3 files changed, 17 insertions(+), 25 deletions(-)
(limited to 'src')
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 8278377095..61ddda3494 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -18,8 +18,9 @@ const LinkBlock = File.LinkBlock;
const LinkFn = File.LinkFn;
const LinkerLoad = @import("../codegen.zig").LinkerLoad;
const Module = @import("../Module.zig");
-const Value = @import("../value.zig").Value;
+const StringTable = @import("strtab.zig").StringTable;
const Type = @import("../type.zig").Type;
+const Value = @import("../value.zig").Value;
allocator: Allocator,
bin_file: *File,
@@ -42,7 +43,7 @@ abbrev_table_offset: ?u64 = null,
/// TODO replace with InternPool
/// Table of debug symbol names.
-strtab: std.ArrayListUnmanaged(u8) = .{},
+strtab: StringTable(.strtab) = .{},
/// Quick lookup array of all defined source files referenced by at least one Decl.
/// They will end up in the DWARF debug_line header as two lists:
@@ -1770,11 +1771,11 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u
},
}
// Write the form for the compile unit, which must match the abbrev table above.
- const name_strp = try self.makeString(module.root_pkg.root_src_path);
+ const name_strp = try self.strtab.insert(self.allocator, module.root_pkg.root_src_path);
var compile_unit_dir_buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
const compile_unit_dir = resolveCompilationDir(module, &compile_unit_dir_buffer);
- const comp_dir_strp = try self.makeString(compile_unit_dir);
- const producer_strp = try self.makeString(link.producer_string);
+ const comp_dir_strp = try self.strtab.insert(self.allocator, compile_unit_dir);
+ const producer_strp = try self.strtab.insert(self.allocator, link.producer_string);
di_buf.appendAssumeCapacity(@enumToInt(AbbrevKind.compile_unit));
if (self.bin_file.tag == .macho) {
@@ -2435,15 +2436,6 @@ fn getRelocDbgInfoSubprogramHighPC(self: Dwarf) u32 {
return dbg_info_low_pc_reloc_index + self.ptrWidthBytes();
}
-/// TODO Improve this to use a table.
-fn makeString(self: *Dwarf, bytes: []const u8) !u32 {
- try self.strtab.ensureUnusedCapacity(self.allocator, bytes.len + 1);
- const result = self.strtab.items.len;
- self.strtab.appendSliceAssumeCapacity(bytes);
- self.strtab.appendAssumeCapacity(0);
- return @intCast(u32, result);
-}
-
fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
return actual_size +| (actual_size / ideal_factor);
}
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 747120ac5d..01326fb82e 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -688,8 +688,8 @@ pub fn populateMissingMetadata(self: *Elf) !void {
// if (self.dwarf) |*dw| {
// if (self.debug_str_section_index == null) {
// self.debug_str_section_index = @intCast(u16, self.sections.slice().len);
- // assert(dw.strtab.items.len == 0);
- // try dw.strtab.append(gpa, 0);
+ // assert(dw.strtab.buffer.items.len == 0);
+ // try dw.strtab.buffer.append(gpa, 0);
// try self.sections.append(gpa, .{
// .shdr = .{
// .sh_name = try self.shstrtab.insert(gpa, ".debug_str"),
@@ -1164,10 +1164,10 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
// if (self.dwarf) |dwarf| {
// const shdr_index = self.debug_str_section_index.?;
- // if (self.debug_strtab_dirty or dwarf.strtab.items.len != self.sections.items(.shdr)[shdr_index].sh_size) {
- // try self.growNonAllocSection(shdr_index, dwarf.strtab.items.len, 1, false);
+ // if (self.debug_strtab_dirty or dwarf.strtab.buffer.items.len != self.sections.items(.shdr)[shdr_index].sh_size) {
+ // try self.growNonAllocSection(shdr_index, dwarf.strtab.buffer.items.len, 1, false);
// const debug_strtab_sect = self.sections.items(.shdr)[shdr_index];
- // try self.base.file.?.pwriteAll(dwarf.strtab.items, debug_strtab_sect.sh_offset);
+ // try self.base.file.?.pwriteAll(dwarf.strtab.buffer.items, debug_strtab_sect.sh_offset);
// self.debug_strtab_dirty = false;
// }
// }
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index 7c22f441cd..0a5c8b0372 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -82,11 +82,11 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void {
}
if (self.debug_str_section_index == null) {
- assert(self.dwarf.strtab.items.len == 0);
- try self.dwarf.strtab.append(self.allocator, 0);
+ assert(self.dwarf.strtab.buffer.items.len == 0);
+ try self.dwarf.strtab.buffer.append(self.allocator, 0);
self.debug_str_section_index = try self.allocateSection(
"__debug_str",
- @intCast(u32, self.dwarf.strtab.items.len),
+ @intCast(u32, self.dwarf.strtab.buffer.items.len),
0,
);
self.debug_string_table_dirty = true;
@@ -291,10 +291,10 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {
{
const sect_index = self.debug_str_section_index.?;
- if (self.debug_string_table_dirty or self.dwarf.strtab.items.len != self.getSection(sect_index).size) {
- const needed_size = @intCast(u32, self.dwarf.strtab.items.len);
+ if (self.debug_string_table_dirty or self.dwarf.strtab.buffer.items.len != self.getSection(sect_index).size) {
+ const needed_size = @intCast(u32, self.dwarf.strtab.buffer.items.len);
try self.growSection(sect_index, needed_size, false);
- try self.file.pwriteAll(self.dwarf.strtab.items, self.getSection(sect_index).offset);
+ try self.file.pwriteAll(self.dwarf.strtab.buffer.items, self.getSection(sect_index).offset);
self.debug_string_table_dirty = false;
}
}
--
cgit v1.2.3
From 5de2aae63cd75322e58204a6be8df49754e4851a Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Wed, 1 Feb 2023 15:03:55 +0100
Subject: link: decouple DI atoms from linker atoms, and manage them in Dwarf
linker
---
src/Compilation.zig | 2 +-
src/Module.zig | 12 +-
src/arch/aarch64/CodeGen.zig | 17 +-
src/arch/arm/CodeGen.zig | 17 +-
src/arch/riscv64/CodeGen.zig | 10 +-
src/arch/sparc64/CodeGen.zig | 10 +-
src/arch/wasm/CodeGen.zig | 4 +-
src/arch/x86_64/CodeGen.zig | 4 +-
src/link.zig | 23 +-
src/link/C.zig | 4 +-
src/link/Coff.zig | 5 +-
src/link/Dwarf.zig | 516 ++++++++++++++++++++++------------------
src/link/Elf.zig | 542 +++++++++++++++++++++----------------------
src/link/Elf/Atom.zig | 3 -
src/link/MachO.zig | 130 +++++------
src/link/MachO/Atom.zig | 3 -
src/link/Plan9.zig | 4 +-
src/link/Wasm.zig | 17 +-
src/link/Wasm/Atom.zig | 5 -
19 files changed, 663 insertions(+), 665 deletions(-)
(limited to 'src')
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 09c6e1c686..7d42d3b610 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -3299,7 +3299,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
const gpa = comp.gpa;
const module = comp.bin_file.options.module.?;
const decl = module.declPtr(decl_index);
- comp.bin_file.updateDeclLineNumber(module, decl) catch |err| {
+ comp.bin_file.updateDeclLineNumber(module, decl_index) catch |err| {
try module.failed_decls.ensureUnusedCapacity(gpa, 1);
module.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create(
gpa,
diff --git a/src/Module.zig b/src/Module.zig
index a914dc90d8..eb947a6977 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -5186,12 +5186,12 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
.coff => {
// TODO Implement for COFF
},
- .elf => if (decl.fn_link.elf.len != 0) {
+ .elf => {
// TODO Look into detecting when this would be unnecessary by storing enough state
// in `Decl` to notice that the line number did not change.
comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
},
- .macho => if (decl.fn_link.macho.len != 0) {
+ .macho => {
// TODO Look into detecting when this would be unnecessary by storing enough state
// in `Decl` to notice that the line number did not change.
comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
@@ -5285,8 +5285,8 @@ pub fn clearDecl(
};
decl.fn_link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = {} },
- .elf => .{ .elf = link.File.Dwarf.SrcFn.empty },
- .macho => .{ .macho = link.File.Dwarf.SrcFn.empty },
+ .elf => .{ .elf = {} },
+ .macho => .{ .macho = {} },
.plan9 => .{ .plan9 = {} },
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.FnData.empty },
@@ -5705,8 +5705,8 @@ pub fn allocateNewDecl(
},
.fn_link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = {} },
- .elf => .{ .elf = link.File.Dwarf.SrcFn.empty },
- .macho => .{ .macho = link.File.Dwarf.SrcFn.empty },
+ .elf => .{ .elf = {} },
+ .macho => .{ .macho = {} },
.plan9 => .{ .plan9 = {} },
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.FnData.empty },
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index aab30b73dc..473a62fd83 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -203,13 +203,7 @@ const DbgInfoReloc = struct {
else => unreachable, // not a possible argument
};
- try dw.genArgDbgInfo(
- reloc.name,
- reloc.ty,
- function.bin_file.tag,
- function.mod_fn.owner_decl,
- loc,
- );
+ try dw.genArgDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, loc);
},
.plan9 => {},
.none => {},
@@ -255,14 +249,7 @@ const DbgInfoReloc = struct {
break :blk .nop;
},
};
- try dw.genVarDbgInfo(
- reloc.name,
- reloc.ty,
- function.bin_file.tag,
- function.mod_fn.owner_decl,
- is_ptr,
- loc,
- );
+ try dw.genVarDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, is_ptr, loc);
},
.plan9 => {},
.none => {},
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 6574501767..57a8aed699 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -282,13 +282,7 @@ const DbgInfoReloc = struct {
else => unreachable, // not a possible argument
};
- try dw.genArgDbgInfo(
- reloc.name,
- reloc.ty,
- function.bin_file.tag,
- function.mod_fn.owner_decl,
- loc,
- );
+ try dw.genArgDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, loc);
},
.plan9 => {},
.none => {},
@@ -331,14 +325,7 @@ const DbgInfoReloc = struct {
break :blk .nop;
},
};
- try dw.genVarDbgInfo(
- reloc.name,
- reloc.ty,
- function.bin_file.tag,
- function.mod_fn.owner_decl,
- is_ptr,
- loc,
- );
+ try dw.genVarDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, is_ptr, loc);
},
.plan9 => {},
.none => {},
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 423816c0b1..8b8fca4859 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -1615,13 +1615,9 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
switch (self.debug_output) {
.dwarf => |dw| switch (mcv) {
- .register => |reg| try dw.genArgDbgInfo(
- name,
- ty,
- self.bin_file.tag,
- self.mod_fn.owner_decl,
- .{ .register = reg.dwarfLocOp() },
- ),
+ .register => |reg| try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, .{
+ .register = reg.dwarfLocOp(),
+ }),
.stack_offset => {},
else => {},
},
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index e67244167e..418c67c580 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -3412,13 +3412,9 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
switch (self.debug_output) {
.dwarf => |dw| switch (mcv) {
- .register => |reg| try dw.genArgDbgInfo(
- name,
- ty,
- self.bin_file.tag,
- self.mod_fn.owner_decl,
- .{ .register = reg.dwarfLocOp() },
- ),
+ .register => |reg| try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, .{
+ .register = reg.dwarfLocOp(),
+ }),
else => {},
},
else => {},
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 342d6b70cc..8212d281e5 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -2475,7 +2475,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.dwarf => |dwarf| {
const src_index = func.air.instructions.items(.data)[inst].arg.src_index;
const name = func.mod_fn.getParamName(func.bin_file.base.options.module.?, src_index);
- try dwarf.genArgDbgInfo(name, arg_ty, .wasm, func.mod_fn.owner_decl, .{
+ try dwarf.genArgDbgInfo(name, arg_ty, func.mod_fn.owner_decl, .{
.wasm_local = arg.local.value,
});
},
@@ -5539,7 +5539,7 @@ fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) !void {
break :blk .nop;
},
};
- try func.debug_output.dwarf.genVarDbgInfo(name, ty, .wasm, func.mod_fn.owner_decl, is_ptr, loc);
+ try func.debug_output.dwarf.genVarDbgInfo(name, ty, func.mod_fn.owner_decl, is_ptr, loc);
func.finishAir(inst, .none, &.{});
}
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index fcae7eaabc..c11ea4e63e 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -3836,7 +3836,7 @@ fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void {
},
else => unreachable, // not a valid function parameter
};
- try dw.genArgDbgInfo(name, ty, self.bin_file.tag, self.mod_fn.owner_decl, loc);
+ try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, loc);
},
.plan9 => {},
.none => {},
@@ -3876,7 +3876,7 @@ fn genVarDbgInfo(
break :blk .nop;
},
};
- try dw.genVarDbgInfo(name, ty, self.bin_file.tag, self.mod_fn.owner_decl, is_ptr, loc);
+ try dw.genVarDbgInfo(name, ty, self.mod_fn.owner_decl, is_ptr, loc);
},
.plan9 => {},
.none => {},
diff --git a/src/link.zig b/src/link.zig
index c0eacf88a0..3dd182b586 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -273,9 +273,9 @@ pub const File = struct {
};
pub const LinkFn = union {
- elf: Dwarf.SrcFn,
- coff: Coff.SrcFn,
- macho: Dwarf.SrcFn,
+ elf: void,
+ coff: void,
+ macho: void,
plan9: void,
c: void,
wasm: Wasm.FnData,
@@ -580,22 +580,23 @@ pub const File = struct {
}
}
- pub fn updateDeclLineNumber(base: *File, module: *Module, decl: *Module.Decl) UpdateDeclError!void {
+ pub fn updateDeclLineNumber(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void {
+ const decl = module.declPtr(decl_index);
log.debug("updateDeclLineNumber {*} ({s}), line={}", .{
decl, decl.name, decl.src_line + 1,
});
assert(decl.has_tv);
if (build_options.only_c) {
assert(base.tag == .c);
- return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl);
+ return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl_index);
}
switch (base.tag) {
- .coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl),
- .elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl),
- .macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl),
- .c => return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl),
- .wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclLineNumber(module, decl),
- .plan9 => return @fieldParentPtr(Plan9, "base", base).updateDeclLineNumber(module, decl),
+ .coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl_index),
+ .elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl_index),
+ .macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl_index),
+ .c => return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl_index),
+ .wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclLineNumber(module, decl_index),
+ .plan9 => return @fieldParentPtr(Plan9, "base", base).updateDeclLineNumber(module, decl_index),
.spirv, .nvptx => {},
}
}
diff --git a/src/link/C.zig b/src/link/C.zig
index 8b05b8b22d..02e5cadfbc 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -219,12 +219,12 @@ pub fn updateDecl(self: *C, module: *Module, decl_index: Module.Decl.Index) !voi
code.shrinkAndFree(module.gpa, code.items.len);
}
-pub fn updateDeclLineNumber(self: *C, module: *Module, decl: *Module.Decl) !void {
+pub fn updateDeclLineNumber(self: *C, module: *Module, decl_index: Module.Decl.Index) !void {
// The C backend does not have the ability to fix line numbers without re-generating
// the entire Decl.
_ = self;
_ = module;
- _ = decl;
+ _ = decl_index;
}
pub fn flush(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) !void {
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index c062276b73..f563a617c7 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -195,7 +195,6 @@ pub const PtrWidth = enum {
};
}
};
-pub const SrcFn = void;
pub const SymbolWithLoc = struct {
// Index into the respective symbol table.
@@ -1545,10 +1544,10 @@ pub fn getGlobalSymbol(self: *Coff, name: []const u8) !u32 {
return global_index;
}
-pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !void {
+pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !void {
_ = self;
_ = module;
- _ = decl;
+ _ = decl_index;
log.debug("TODO implement updateDeclLineNumber", .{});
}
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 61ddda3494..e90db2d0df 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -27,17 +27,21 @@ bin_file: *File,
ptr_width: PtrWidth,
target: std.Target,
-/// A list of `File.LinkFn` whose Line Number Programs have surplus capacity.
-/// This is the same concept as `text_block_free_list`; see those doc comments.
-dbg_line_fn_free_list: std.AutoHashMapUnmanaged(*SrcFn, void) = .{},
-dbg_line_fn_first: ?*SrcFn = null,
-dbg_line_fn_last: ?*SrcFn = null,
+/// A list of `Atom`s whose Line Number Programs have surplus capacity.
+/// This is the same concept as `Section.free_list` in Elf; see those doc comments.
+src_fn_free_list: std.AutoHashMapUnmanaged(Atom.Index, void) = .{},
+src_fn_first_index: ?Atom.Index = null,
+src_fn_last_index: ?Atom.Index = null,
+src_fns: std.ArrayListUnmanaged(Atom) = .{},
+src_fn_decls: AtomTable = .{},
/// A list of `Atom`s whose corresponding .debug_info tags have surplus capacity.
/// This is the same concept as `text_block_free_list`; see those doc comments.
-atom_free_list: std.AutoHashMapUnmanaged(*Atom, void) = .{},
-atom_first: ?*Atom = null,
-atom_last: ?*Atom = null,
+di_atom_free_list: std.AutoHashMapUnmanaged(Atom.Index, void) = .{},
+di_atom_first_index: ?Atom.Index = null,
+di_atom_last_index: ?Atom.Index = null,
+di_atoms: std.ArrayListUnmanaged(Atom) = .{},
+di_atom_decls: AtomTable = .{},
abbrev_table_offset: ?u64 = null,
@@ -51,22 +55,23 @@ strtab: StringTable(.strtab) = .{},
/// * []file_names
di_files: std.AutoArrayHashMapUnmanaged(*const Module.File, void) = .{},
-/// List of atoms that are owned directly by the DWARF module.
-/// TODO convert links in DebugInfoAtom into indices and make
-/// sure every atom is owned by this module.
-managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
-
global_abbrev_relocs: std.ArrayListUnmanaged(AbbrevRelocation) = .{},
-pub const Atom = struct {
- /// Previous/next linked list pointers.
- /// This is the linked list node for this Decl's corresponding .debug_info tag.
- prev: ?*Atom,
- next: ?*Atom,
- /// Offset into .debug_info pointing to the tag for this Decl.
+const AtomTable = std.AutoHashMapUnmanaged(Module.Decl.Index, Atom.Index);
+
+const Atom = struct {
+ /// Offset into .debug_info pointing to the tag for this Decl, or
+ /// offset from the beginning of the Debug Line Program header that contains this function.
off: u32,
- /// Size of the .debug_info tag for this Decl, not including padding.
+ /// Size of the .debug_info tag for this Decl, not including padding, or
+ /// size of the line number program component belonging to this function, not
+ /// including padding.
len: u32,
+
+ prev_index: ?Index,
+ next_index: ?Index,
+
+ pub const Index = u32;
};
/// Represents state of the analysed Decl.
@@ -76,6 +81,7 @@ pub const Atom = struct {
pub const DeclState = struct {
gpa: Allocator,
mod: *Module,
+ di_atom_decls: *const AtomTable,
dbg_line: std.ArrayList(u8),
dbg_info: std.ArrayList(u8),
abbrev_type_arena: std.heap.ArenaAllocator,
@@ -89,10 +95,11 @@ pub const DeclState = struct {
abbrev_relocs: std.ArrayListUnmanaged(AbbrevRelocation) = .{},
exprloc_relocs: std.ArrayListUnmanaged(ExprlocRelocation) = .{},
- fn init(gpa: Allocator, mod: *Module) DeclState {
+ fn init(gpa: Allocator, mod: *Module, di_atom_decls: *const AtomTable) DeclState {
return .{
.gpa = gpa,
.mod = mod,
+ .di_atom_decls = di_atom_decls,
.dbg_line = std.ArrayList(u8).init(gpa),
.dbg_info = std.ArrayList(u8).init(gpa),
.abbrev_type_arena = std.heap.ArenaAllocator.init(gpa),
@@ -120,11 +127,11 @@ pub const DeclState = struct {
/// Adds local type relocation of the form: @offset => @this + addend
/// @this signifies the offset within the .debug_abbrev section of the containing atom.
- fn addTypeRelocLocal(self: *DeclState, atom: *const Atom, offset: u32, addend: u32) !void {
+ fn addTypeRelocLocal(self: *DeclState, atom_index: Atom.Index, offset: u32, addend: u32) !void {
log.debug("{x}: @this + {x}", .{ offset, addend });
try self.abbrev_relocs.append(self.gpa, .{
.target = null,
- .atom = atom,
+ .atom_index = atom_index,
.offset = offset,
.addend = addend,
});
@@ -133,13 +140,13 @@ pub const DeclState = struct {
/// Adds global type relocation of the form: @offset => @symbol + 0
/// @symbol signifies a type abbreviation posititioned somewhere in the .debug_abbrev section
/// which we use as our target of the relocation.
- fn addTypeRelocGlobal(self: *DeclState, atom: *const Atom, ty: Type, offset: u32) !void {
+ fn addTypeRelocGlobal(self: *DeclState, atom_index: Atom.Index, ty: Type, offset: u32) !void {
const resolv = self.abbrev_resolver.getContext(ty, .{
.mod = self.mod,
}) orelse blk: {
const sym_index = @intCast(u32, self.abbrev_table.items.len);
try self.abbrev_table.append(self.gpa, .{
- .atom = atom,
+ .atom_index = atom_index,
.type = ty,
.offset = undefined,
});
@@ -154,7 +161,7 @@ pub const DeclState = struct {
log.debug("{x}: %{d} + 0", .{ offset, resolv });
try self.abbrev_relocs.append(self.gpa, .{
.target = resolv,
- .atom = atom,
+ .atom_index = atom_index,
.offset = offset,
.addend = 0,
});
@@ -163,7 +170,7 @@ pub const DeclState = struct {
fn addDbgInfoType(
self: *DeclState,
module: *Module,
- atom: *Atom,
+ atom_index: Atom.Index,
ty: Type,
) error{OutOfMemory}!void {
const arena = self.abbrev_type_arena.allocator();
@@ -228,7 +235,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, Type.bool, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, Type.bool, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try dbg_info_buffer.ensureUnusedCapacity(6);
dbg_info_buffer.appendAssumeCapacity(0);
@@ -240,7 +247,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, payload_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
const offset = abi_size - payload_ty.abiSize(target);
try leb128.writeULEB128(dbg_info_buffer.writer(), offset);
@@ -271,7 +278,7 @@ pub const DeclState = struct {
try dbg_info_buffer.resize(index + 4);
var buf = try arena.create(Type.SlicePtrFieldTypeBuffer);
const ptr_ty = ty.slicePtrFieldType(buf);
- try self.addTypeRelocGlobal(atom, ptr_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, ptr_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try dbg_info_buffer.ensureUnusedCapacity(6);
dbg_info_buffer.appendAssumeCapacity(0);
@@ -283,7 +290,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, Type.usize, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try dbg_info_buffer.ensureUnusedCapacity(2);
dbg_info_buffer.appendAssumeCapacity(ptr_bytes);
@@ -295,7 +302,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, ty.childType(), @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, ty.childType(), @intCast(u32, index));
}
},
.Array => {
@@ -306,13 +313,13 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, ty.childType(), @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, ty.childType(), @intCast(u32, index));
// DW.AT.subrange_type
try dbg_info_buffer.append(@enumToInt(AbbrevKind.array_dim));
// DW.AT.type, DW.FORM.ref4
index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, Type.usize, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index));
// DW.AT.count, DW.FORM.udata
const len = ty.arrayLenIncludingSentinel();
try leb128.writeULEB128(dbg_info_buffer.writer(), len);
@@ -340,7 +347,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, field, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, field, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
const field_off = ty.structFieldOffset(field_index, target);
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
@@ -372,7 +379,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, field.ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
const field_off = ty.structFieldOffset(field_index, target);
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
@@ -455,7 +462,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const inner_union_index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(inner_union_index + 4);
- try self.addTypeRelocLocal(atom, @intCast(u32, inner_union_index), 5);
+ try self.addTypeRelocLocal(atom_index, @intCast(u32, inner_union_index), 5);
// DW.AT.data_member_location, DW.FORM.sdata
try leb128.writeULEB128(dbg_info_buffer.writer(), payload_offset);
}
@@ -482,7 +489,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, field.ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try dbg_info_buffer.append(0);
}
@@ -499,7 +506,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, union_obj.tag_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, union_obj.tag_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try leb128.writeULEB128(dbg_info_buffer.writer(), tag_offset);
@@ -542,7 +549,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, payload_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try leb128.writeULEB128(dbg_info_buffer.writer(), payload_off);
@@ -555,7 +562,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, error_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, error_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try leb128.writeULEB128(dbg_info_buffer.writer(), error_off);
@@ -588,12 +595,11 @@ pub const DeclState = struct {
self: *DeclState,
name: [:0]const u8,
ty: Type,
- tag: File.Tag,
owner_decl: Module.Decl.Index,
loc: DbgInfoLoc,
) error{OutOfMemory}!void {
const dbg_info = &self.dbg_info;
- const atom = getDbgInfoAtom(tag, self.mod, owner_decl);
+ const atom_index = self.di_atom_decls.get(owner_decl).?;
const name_with_null = name.ptr[0 .. name.len + 1];
switch (loc) {
@@ -638,7 +644,7 @@ pub const DeclState = struct {
try dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
const index = dbg_info.items.len;
try dbg_info.resize(index + 4); // dw.at.type, dw.form.ref4
- try self.addTypeRelocGlobal(atom, ty, @intCast(u32, index)); // DW.AT.type, DW.FORM.ref4
+ try self.addTypeRelocGlobal(atom_index, ty, @intCast(u32, index)); // DW.AT.type, DW.FORM.ref4
dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
}
@@ -646,13 +652,12 @@ pub const DeclState = struct {
self: *DeclState,
name: [:0]const u8,
ty: Type,
- tag: File.Tag,
owner_decl: Module.Decl.Index,
is_ptr: bool,
loc: DbgInfoLoc,
) error{OutOfMemory}!void {
const dbg_info = &self.dbg_info;
- const atom = getDbgInfoAtom(tag, self.mod, owner_decl);
+ const atom_index = self.di_atom_decls.get(owner_decl).?;
const name_with_null = name.ptr[0 .. name.len + 1];
try dbg_info.append(@enumToInt(AbbrevKind.variable));
const target = self.mod.getTarget();
@@ -782,7 +787,7 @@ pub const DeclState = struct {
try dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
const index = dbg_info.items.len;
try dbg_info.resize(index + 4); // dw.at.type, dw.form.ref4
- try self.addTypeRelocGlobal(atom, child_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, child_ty, @intCast(u32, index));
dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
}
@@ -815,7 +820,7 @@ pub const DeclState = struct {
};
pub const AbbrevEntry = struct {
- atom: *const Atom,
+ atom_index: Atom.Index,
type: Type,
offset: u32,
};
@@ -824,7 +829,7 @@ pub const AbbrevRelocation = struct {
/// If target is null, we deal with a local relocation that is based on simple offset + addend
/// only.
target: ?u32,
- atom: *const Atom,
+ atom_index: Atom.Index,
offset: u32,
addend: u32,
};
@@ -841,26 +846,6 @@ pub const ExprlocRelocation = struct {
offset: u32,
};
-pub const SrcFn = struct {
- /// Offset from the beginning of the Debug Line Program header that contains this function.
- off: u32,
- /// Size of the line number program component belonging to this function, not
- /// including padding.
- len: u32,
-
- /// Points to the previous and next neighbors, based on the offset from .debug_line.
- /// This can be used to find, for example, the capacity of this `SrcFn`.
- prev: ?*SrcFn,
- next: ?*SrcFn,
-
- pub const empty: SrcFn = .{
- .off = 0,
- .len = 0,
- .prev = null,
- .next = null,
- };
-};
-
pub const PtrWidth = enum { p32, p64 };
pub const AbbrevKind = enum(u8) {
@@ -910,16 +895,18 @@ pub fn init(allocator: Allocator, bin_file: *File, target: std.Target) Dwarf {
pub fn deinit(self: *Dwarf) void {
const gpa = self.allocator;
- self.dbg_line_fn_free_list.deinit(gpa);
- self.atom_free_list.deinit(gpa);
+
+ self.src_fn_free_list.deinit(gpa);
+ self.src_fns.deinit(gpa);
+ self.src_fn_decls.deinit(gpa);
+
+ self.di_atom_free_list.deinit(gpa);
+ self.di_atoms.deinit(gpa);
+ self.di_atom_decls.deinit(gpa);
+
self.strtab.deinit(gpa);
self.di_files.deinit(gpa);
self.global_abbrev_relocs.deinit(gpa);
-
- for (self.managed_atoms.items) |atom| {
- gpa.destroy(atom);
- }
- self.managed_atoms.deinit(gpa);
}
/// Initializes Decl's state and its matching output buffers.
@@ -935,15 +922,19 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
log.debug("initDeclState {s}{*}", .{ decl_name, decl });
const gpa = self.allocator;
- var decl_state = DeclState.init(gpa, mod);
+ var decl_state = DeclState.init(gpa, mod, &self.di_atom_decls);
errdefer decl_state.deinit();
const dbg_line_buffer = &decl_state.dbg_line;
const dbg_info_buffer = &decl_state.dbg_info;
+ const di_atom_index = try self.getOrCreateAtomForDecl(.di_atom, decl_index);
+
assert(decl.has_tv);
switch (decl.ty.zigTypeTag()) {
.Fn => {
+ _ = try self.getOrCreateAtomForDecl(.src_fn, decl_index);
+
// For functions we need to add a prologue to the debug line program.
try dbg_line_buffer.ensureTotalCapacity(26);
@@ -1003,8 +994,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
dbg_info_buffer.items.len += 4; // DW.AT.high_pc, DW.FORM.data4
//
if (fn_ret_has_bits) {
- const atom = getDbgInfoAtom(self.bin_file.tag, mod, decl_index);
- try decl_state.addTypeRelocGlobal(atom, fn_ret_type, @intCast(u32, dbg_info_buffer.items.len));
+ try decl_state.addTypeRelocGlobal(di_atom_index, fn_ret_type, @intCast(u32, dbg_info_buffer.items.len));
dbg_info_buffer.items.len += 4; // DW.AT.type, DW.FORM.ref4
}
@@ -1076,26 +1066,23 @@ pub fn commitDeclState(
// This logic is nearly identical to the logic below in `updateDeclDebugInfo` for
// `TextBlock` and the .debug_info. If you are editing this logic, you
// probably need to edit that logic too.
- const src_fn = switch (self.bin_file.tag) {
- .elf => &decl.fn_link.elf,
- .macho => &decl.fn_link.macho,
- .wasm => &decl.fn_link.wasm.src_fn,
- else => unreachable, // TODO
- };
+ const src_fn_index = self.src_fn_decls.get(decl_index).?;
+ const src_fn = self.getAtomPtr(.src_fn, src_fn_index);
src_fn.len = @intCast(u32, dbg_line_buffer.items.len);
- if (self.dbg_line_fn_last) |last| blk: {
- if (src_fn == last) break :blk;
- if (src_fn.next) |next| {
+ if (self.src_fn_last_index) |last_index| blk: {
+ if (src_fn_index == last_index) break :blk;
+ if (src_fn.next_index) |next_index| {
+ const next = self.getAtomPtr(.src_fn, next_index);
// Update existing function - non-last item.
if (src_fn.off + src_fn.len + min_nop_size > next.off) {
// It grew too big, so we move it to a new location.
- if (src_fn.prev) |prev| {
- self.dbg_line_fn_free_list.put(gpa, prev, {}) catch {};
- prev.next = src_fn.next;
+ if (src_fn.prev_index) |prev_index| {
+ self.src_fn_free_list.put(gpa, prev_index, {}) catch {};
+ self.getAtomPtr(.src_fn, prev_index).next_index = src_fn.next_index;
}
- next.prev = src_fn.prev;
- src_fn.next = null;
+ next.prev_index = src_fn.prev_index;
+ src_fn.next_index = null;
// Populate where it used to be with NOPs.
switch (self.bin_file.tag) {
.elf => {
@@ -1118,33 +1105,42 @@ pub fn commitDeclState(
else => unreachable,
}
// TODO Look at the free list before appending at the end.
- src_fn.prev = last;
- last.next = src_fn;
- self.dbg_line_fn_last = src_fn;
+ src_fn.prev_index = last_index;
+ const last = self.getAtomPtr(.src_fn, last_index);
+ last.next_index = src_fn_index;
+ self.src_fn_last_index = src_fn_index;
src_fn.off = last.off + padToIdeal(last.len);
}
- } else if (src_fn.prev == null) {
+ } else if (src_fn.prev_index == null) {
// Append new function.
// TODO Look at the free list before appending at the end.
- src_fn.prev = last;
- last.next = src_fn;
- self.dbg_line_fn_last = src_fn;
+ src_fn.prev_index = last_index;
+ const last = self.getAtomPtr(.src_fn, last_index);
+ last.next_index = src_fn_index;
+ self.src_fn_last_index = src_fn_index;
src_fn.off = last.off + padToIdeal(last.len);
}
} else {
// This is the first function of the Line Number Program.
- self.dbg_line_fn_first = src_fn;
- self.dbg_line_fn_last = src_fn;
+ self.src_fn_first_index = src_fn_index;
+ self.src_fn_last_index = src_fn_index;
src_fn.off = padToIdeal(self.dbgLineNeededHeaderBytes(&[0][]u8{}, &[0][]u8{}));
}
- const last_src_fn = self.dbg_line_fn_last.?;
+ const last_src_fn_index = self.src_fn_last_index.?;
+ const last_src_fn = self.getAtom(.src_fn, last_src_fn_index);
const needed_size = last_src_fn.off + last_src_fn.len;
- const prev_padding_size: u32 = if (src_fn.prev) |prev| src_fn.off - (prev.off + prev.len) else 0;
- const next_padding_size: u32 = if (src_fn.next) |next| next.off - (src_fn.off + src_fn.len) else 0;
+ const prev_padding_size: u32 = if (src_fn.prev_index) |prev_index| blk: {
+ const prev = self.getAtom(.src_fn, prev_index);
+ break :blk src_fn.off - (prev.off + prev.len);
+ } else 0;
+ const next_padding_size: u32 = if (src_fn.next_index) |next_index| blk: {
+ const next = self.getAtom(.src_fn, next_index);
+ break :blk next.off - (src_fn.off + src_fn.len);
+ } else 0;
// We only have support for one compilation unit so far, so the offsets are directly
// from the .debug_line section.
@@ -1213,7 +1209,7 @@ pub fn commitDeclState(
if (dbg_info_buffer.items.len == 0)
return;
- const atom = getDbgInfoAtom(self.bin_file.tag, module, decl_index);
+ const di_atom_index = self.di_atom_decls.get(decl_index).?;
if (decl_state.abbrev_table.items.len > 0) {
// Now we emit the .debug_info types of the Decl. These will count towards the size of
// the buffer, so we have to do it before computing the offset, and we can't perform the actual
@@ -1235,12 +1231,12 @@ pub fn commitDeclState(
if (deferred) continue;
symbol.offset = @intCast(u32, dbg_info_buffer.items.len);
- try decl_state.addDbgInfoType(module, atom, ty);
+ try decl_state.addDbgInfoType(module, di_atom_index, ty);
}
}
log.debug("updateDeclDebugInfoAllocation for '{s}'", .{decl.name});
- try self.updateDeclDebugInfoAllocation(atom, @intCast(u32, dbg_info_buffer.items.len));
+ try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(u32, dbg_info_buffer.items.len));
while (decl_state.abbrev_relocs.popOrNull()) |reloc| {
if (reloc.target) |target| {
@@ -1261,11 +1257,12 @@ pub fn commitDeclState(
try self.global_abbrev_relocs.append(gpa, .{
.target = null,
.offset = reloc.offset,
- .atom = reloc.atom,
+ .atom_index = reloc.atom_index,
.addend = reloc.addend,
});
} else {
- const value = symbol.atom.off + symbol.offset + reloc.addend;
+ const atom = self.getAtom(.di_atom, symbol.atom_index);
+ const value = atom.off + symbol.offset + reloc.addend;
log.debug("{x}: [() => {x}] (%{d}, '{}')", .{ reloc.offset, value, target, ty.fmtDebug() });
mem.writeInt(
u32,
@@ -1275,10 +1272,11 @@ pub fn commitDeclState(
);
}
} else {
+ const atom = self.getAtom(.di_atom, reloc.atom_index);
mem.writeInt(
u32,
dbg_info_buffer.items[reloc.offset..][0..@sizeOf(u32)],
- reloc.atom.off + reloc.offset + reloc.addend,
+ atom.off + reloc.offset + reloc.addend,
target_endian,
);
}
@@ -1294,7 +1292,7 @@ pub fn commitDeclState(
.got_load => .got_load,
},
.target = reloc.target,
- .offset = reloc.offset + atom.off,
+ .offset = reloc.offset + self.getAtom(.di_atom, di_atom_index).off,
.addend = 0,
.prev_vaddr = 0,
});
@@ -1304,10 +1302,10 @@ pub fn commitDeclState(
}
log.debug("writeDeclDebugInfo for '{s}", .{decl.name});
- try self.writeDeclDebugInfo(atom, dbg_info_buffer.items);
+ try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items);
}
-fn updateDeclDebugInfoAllocation(self: *Dwarf, atom: *Atom, len: u32) !void {
+fn updateDeclDebugInfoAllocation(self: *Dwarf, atom_index: Atom.Index, len: u32) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -1316,19 +1314,21 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, atom: *Atom, len: u32) !void {
// probably need to edit that logic too.
const gpa = self.allocator;
+ const atom = self.getAtomPtr(.di_atom, atom_index);
atom.len = len;
- if (self.atom_last) |last| blk: {
- if (atom == last) break :blk;
- if (atom.next) |next| {
+ if (self.di_atom_last_index) |last_index| blk: {
+ if (atom_index == last_index) break :blk;
+ if (atom.next_index) |next_index| {
+ const next = self.getAtomPtr(.di_atom, next_index);
// Update existing Decl - non-last item.
if (atom.off + atom.len + min_nop_size > next.off) {
// It grew too big, so we move it to a new location.
- if (atom.prev) |prev| {
- self.atom_free_list.put(gpa, prev, {}) catch {};
- prev.next = atom.next;
+ if (atom.prev_index) |prev_index| {
+ self.di_atom_free_list.put(gpa, prev_index, {}) catch {};
+ self.getAtomPtr(.di_atom, prev_index).next_index = atom.next_index;
}
- next.prev = atom.prev;
- atom.next = null;
+ next.prev_index = atom.prev_index;
+ atom.next_index = null;
// Populate where it used to be with NOPs.
switch (self.bin_file.tag) {
.elf => {
@@ -1351,31 +1351,33 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, atom: *Atom, len: u32) !void {
else => unreachable,
}
// TODO Look at the free list before appending at the end.
- atom.prev = last;
- last.next = atom;
- self.atom_last = atom;
+ atom.prev_index = last_index;
+ const last = self.getAtomPtr(.di_atom, last_index);
+ last.next_index = atom_index;
+ self.di_atom_last_index = atom_index;
atom.off = last.off + padToIdeal(last.len);
}
- } else if (atom.prev == null) {
+ } else if (atom.prev_index == null) {
// Append new Decl.
// TODO Look at the free list before appending at the end.
- atom.prev = last;
- last.next = atom;
- self.atom_last = atom;
+ atom.prev_index = last_index;
+ const last = self.getAtomPtr(.di_atom, last_index);
+ last.next_index = atom_index;
+ self.di_atom_last_index = atom_index;
atom.off = last.off + padToIdeal(last.len);
}
} else {
// This is the first Decl of the .debug_info
- self.atom_first = atom;
- self.atom_last = atom;
+ self.di_atom_first_index = atom_index;
+ self.di_atom_last_index = atom_index;
atom.off = @intCast(u32, padToIdeal(self.dbgInfoHeaderBytes()));
}
}
-fn writeDeclDebugInfo(self: *Dwarf, atom: *Atom, dbg_info_buf: []const u8) !void {
+fn writeDeclDebugInfo(self: *Dwarf, atom_index: Atom.Index, dbg_info_buf: []const u8) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -1384,14 +1386,22 @@ fn writeDeclDebugInfo(self: *Dwarf, atom: *Atom, dbg_info_buf: []const u8) !void
// probably need to edit that logic too.
const gpa = self.allocator;
- const last_decl = self.atom_last.?;
+ const atom = self.getAtom(.di_atom, atom_index);
+ const last_decl_index = self.di_atom_last_index.?;
+ const last_decl = self.getAtom(.di_atom, last_decl_index);
// +1 for a trailing zero to end the children of the decl tag.
const needed_size = last_decl.off + last_decl.len + 1;
- const prev_padding_size: u32 = if (atom.prev) |prev| atom.off - (prev.off + prev.len) else 0;
- const next_padding_size: u32 = if (atom.next) |next| next.off - (atom.off + atom.len) else 0;
+ const prev_padding_size: u32 = if (atom.prev_index) |prev_index| blk: {
+ const prev = self.getAtom(.di_atom, prev_index);
+ break :blk atom.off - (prev.off + prev.len);
+ } else 0;
+ const next_padding_size: u32 = if (atom.next_index) |next_index| blk: {
+ const next = self.getAtom(.di_atom, next_index);
+ break :blk next.off - (atom.off + atom.len);
+ } else 0;
// To end the children of the decl tag.
- const trailing_zero = atom.next == null;
+ const trailing_zero = atom.next_index == null;
// We only have support for one compilation unit so far, so the offsets are directly
// from the .debug_info section.
@@ -1459,10 +1469,15 @@ fn writeDeclDebugInfo(self: *Dwarf, atom: *Atom, dbg_info_buf: []const u8) !void
}
}
-pub fn updateDeclLineNumber(self: *Dwarf, decl: *const Module.Decl) !void {
+pub fn updateDeclLineNumber(self: *Dwarf, module: *Module, decl_index: Module.Decl.Index) !void {
const tracy = trace(@src());
defer tracy.end();
+ const atom_index = try self.getOrCreateAtomForDecl(.src_fn, decl_index);
+ const atom = self.getAtom(.src_fn, atom_index);
+ if (atom.len == 0) return;
+
+ const decl = module.declPtr(decl_index);
const func = decl.val.castTag(.function).?.data;
log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{
decl.src_line,
@@ -1477,78 +1492,80 @@ pub fn updateDeclLineNumber(self: *Dwarf, decl: *const Module.Decl) !void {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr = elf_file.sections.items(.shdr)[elf_file.debug_line_section_index.?];
- const file_pos = shdr.sh_offset + decl.fn_link.elf.off + self.getRelocDbgLineOff();
+ const file_pos = shdr.sh_offset + atom.off + self.getRelocDbgLineOff();
try elf_file.base.file.?.pwriteAll(&data, file_pos);
},
.macho => {
const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
const sect = d_sym.getSection(d_sym.debug_line_section_index.?);
- const file_pos = sect.offset + decl.fn_link.macho.off + self.getRelocDbgLineOff();
+ const file_pos = sect.offset + atom.off + self.getRelocDbgLineOff();
try d_sym.file.pwriteAll(&data, file_pos);
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const offset = decl.fn_link.wasm.src_fn.off + self.getRelocDbgLineOff();
- const atom = wasm_file.debug_line_atom.?;
- mem.copy(u8, atom.code.items[offset..], &data);
+ const offset = atom.off + self.getRelocDbgLineOff();
+ const atom_ = wasm_file.debug_line_atom.?;
+ mem.copy(u8, atom_.code.items[offset..], &data);
},
else => unreachable,
}
}
-pub fn freeAtom(self: *Dwarf, atom: *Atom) void {
- if (self.atom_first == atom) {
- self.atom_first = atom.next;
- }
- if (self.atom_last == atom) {
- // TODO shrink the .debug_info section size here
- self.atom_last = atom.prev;
- }
-
- if (atom.prev) |prev| {
- prev.next = atom.next;
+pub fn freeDecl(self: *Dwarf, decl_index: Module.Decl.Index) void {
+ const gpa = self.allocator;
- // TODO the free list logic like we do for text blocks above
- } else {
- atom.prev = null;
+ // Free SrcFn atom
+ if (self.src_fn_decls.fetchRemove(decl_index)) |kv| {
+ const src_fn_index = kv.value;
+ const src_fn = self.getAtom(.src_fn, src_fn_index);
+ _ = self.src_fn_free_list.remove(src_fn_index);
+
+ if (src_fn.prev_index) |prev_index| {
+ self.src_fn_free_list.put(gpa, prev_index, {}) catch {};
+ const prev = self.getAtomPtr(.src_fn, prev_index);
+ prev.next_index = src_fn.next_index;
+ if (src_fn.next_index) |next_index| {
+ self.getAtomPtr(.src_fn, next_index).prev_index = prev_index;
+ } else {
+ self.src_fn_last_index = prev_index;
+ }
+ } else if (src_fn.next_index) |next_index| {
+ self.src_fn_first_index = next_index;
+ self.getAtomPtr(.src_fn, next_index).prev_index = null;
+ }
+ if (self.src_fn_first_index == src_fn_index) {
+ self.src_fn_first_index = src_fn.next_index;
+ }
+ if (self.src_fn_last_index == src_fn_index) {
+ self.src_fn_last_index = src_fn.prev_index;
+ }
}
- if (atom.next) |next| {
- next.prev = atom.prev;
- } else {
- atom.next = null;
- }
-}
+ // Free DI atom
+ if (self.di_atom_decls.fetchRemove(decl_index)) |kv| {
+ const di_atom_index = kv.value;
+ const di_atom = self.getAtomPtr(.di_atom, di_atom_index);
-pub fn freeDecl(self: *Dwarf, decl: *Module.Decl) void {
- // TODO make this logic match freeTextBlock. Maybe abstract the logic out since the same thing
- // is desired for both.
- const gpa = self.allocator;
- const fn_link = switch (self.bin_file.tag) {
- .elf => &decl.fn_link.elf,
- .macho => &decl.fn_link.macho,
- .wasm => &decl.fn_link.wasm.src_fn,
- else => unreachable,
- };
- _ = self.dbg_line_fn_free_list.remove(fn_link);
+ if (self.di_atom_first_index == di_atom_index) {
+ self.di_atom_first_index = di_atom.next_index;
+ }
+ if (self.di_atom_last_index == di_atom_index) {
+ // TODO shrink the .debug_info section size here
+ self.di_atom_last_index = di_atom.prev_index;
+ }
- if (fn_link.prev) |prev| {
- self.dbg_line_fn_free_list.put(gpa, prev, {}) catch {};
- prev.next = fn_link.next;
- if (fn_link.next) |next| {
- next.prev = prev;
+ if (di_atom.prev_index) |prev_index| {
+ self.getAtomPtr(.di_atom, prev_index).next_index = di_atom.next_index;
+ // TODO the free list logic like we do for SrcFn above
} else {
- self.dbg_line_fn_last = prev;
+ di_atom.prev_index = null;
+ }
+
+ if (di_atom.next_index) |next_index| {
+ self.getAtomPtr(.di_atom, next_index).prev_index = di_atom.prev_index;
+ } else {
+ di_atom.next_index = null;
}
- } else if (fn_link.next) |next| {
- self.dbg_line_fn_first = next;
- next.prev = null;
- }
- if (self.dbg_line_fn_first == fn_link) {
- self.dbg_line_fn_first = fn_link.next;
- }
- if (self.dbg_line_fn_last == fn_link) {
- self.dbg_line_fn_last = fn_link.prev;
}
}
@@ -2276,10 +2293,14 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
const needed_with_padding = padToIdeal(needed_bytes);
const delta = needed_with_padding - dbg_line_prg_off;
- var src_fn = self.dbg_line_fn_first.?;
- const last_fn = self.dbg_line_fn_last.?;
+ const first_fn_index = self.src_fn_first_index.?;
+ const first_fn = self.getAtom(.src_fn, first_fn_index);
+ const last_fn_index = self.src_fn_last_index.?;
+ const last_fn = self.getAtom(.src_fn, last_fn_index);
+
+ var src_fn_index = first_fn_index;
- var buffer = try gpa.alloc(u8, last_fn.off + last_fn.len - src_fn.off);
+ var buffer = try gpa.alloc(u8, last_fn.off + last_fn.len - first_fn.off);
defer gpa.free(buffer);
switch (self.bin_file.tag) {
@@ -2288,7 +2309,7 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
const shdr_index = elf_file.debug_line_section_index.?;
const needed_size = elf_file.sections.items(.shdr)[shdr_index].sh_size + delta;
try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true);
- const file_pos = elf_file.sections.items(.shdr)[shdr_index].sh_offset + src_fn.off;
+ const file_pos = elf_file.sections.items(.shdr)[shdr_index].sh_offset + first_fn.off;
const amt = try elf_file.base.file.?.preadAll(buffer, file_pos);
if (amt != buffer.len) return error.InputOutput;
@@ -2300,7 +2321,7 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
const sect_index = d_sym.debug_line_section_index.?;
const needed_size = @intCast(u32, d_sym.getSection(sect_index).size + delta);
try d_sym.growSection(sect_index, needed_size, true);
- const file_pos = d_sym.getSection(sect_index).offset + src_fn.off;
+ const file_pos = d_sym.getSection(sect_index).offset + first_fn.off;
const amt = try d_sym.file.preadAll(buffer, file_pos);
if (amt != buffer.len) return error.InputOutput;
@@ -2310,18 +2331,19 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
const debug_line = &wasm_file.debug_line_atom.?.code;
- mem.copy(u8, buffer, debug_line.items[src_fn.off..]);
+ mem.copy(u8, buffer, debug_line.items[first_fn.off..]);
try debug_line.resize(self.allocator, debug_line.items.len + delta);
- mem.copy(u8, debug_line.items[src_fn.off + delta ..], buffer);
+ mem.copy(u8, debug_line.items[first_fn.off + delta ..], buffer);
},
else => unreachable,
}
while (true) {
+ const src_fn = self.getAtomPtr(.src_fn, src_fn_index);
src_fn.off += delta;
- if (src_fn.next) |next| {
- src_fn = next;
+ if (src_fn.next_index) |next_index| {
+ src_fn_index = next_index;
} else break;
}
}
@@ -2367,22 +2389,26 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
}
fn getDebugInfoOff(self: Dwarf) ?u32 {
- const first = self.atom_first orelse return null;
+ const first_index = self.di_atom_first_index orelse return null;
+ const first = self.getAtom(.di_atom, first_index);
return first.off;
}
fn getDebugInfoEnd(self: Dwarf) ?u32 {
- const last = self.atom_last orelse return null;
+ const last_index = self.di_atom_last_index orelse return null;
+ const last = self.getAtom(.di_atom, last_index);
return last.off + last.len;
}
fn getDebugLineProgramOff(self: Dwarf) ?u32 {
- const first = self.dbg_line_fn_first orelse return null;
+ const first_index = self.src_fn_first_index orelse return null;
+ const first = self.getAtom(.src_fn, first_index);
return first.off;
}
fn getDebugLineProgramEnd(self: Dwarf) ?u32 {
- const last = self.dbg_line_fn_last orelse return null;
+ const last_index = self.src_fn_last_index orelse return null;
+ const last = self.getAtom(.src_fn, last_index);
return last.off + last.len;
}
@@ -2457,23 +2483,14 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
}
error_set.names = names;
- const atom = try gpa.create(Atom);
- errdefer gpa.destroy(atom);
- atom.* = .{
- .prev = null,
- .next = null,
- .off = 0,
- .len = 0,
- };
-
var dbg_info_buffer = std.ArrayList(u8).init(arena);
try addDbgInfoErrorSet(arena, module, error_ty, self.target, &dbg_info_buffer);
- try self.managed_atoms.append(gpa, atom);
+ const di_atom_index = try self.createAtom(.di_atom);
log.debug("updateDeclDebugInfoAllocation in flushModule", .{});
- try self.updateDeclDebugInfoAllocation(atom, @intCast(u32, dbg_info_buffer.items.len));
+ try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(u32, dbg_info_buffer.items.len));
log.debug("writeDeclDebugInfo in flushModule", .{});
- try self.writeDeclDebugInfo(atom, dbg_info_buffer.items);
+ try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items);
const file_pos = blk: {
switch (self.bin_file.tag) {
@@ -2494,22 +2511,23 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
};
var buf: [@sizeOf(u32)]u8 = undefined;
- mem.writeInt(u32, &buf, atom.off, self.target.cpu.arch.endian());
+ mem.writeInt(u32, &buf, self.getAtom(.di_atom, di_atom_index).off, self.target.cpu.arch.endian());
while (self.global_abbrev_relocs.popOrNull()) |reloc| {
+ const atom = self.getAtom(.di_atom, reloc.atom_index);
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- try elf_file.base.file.?.pwriteAll(&buf, file_pos + reloc.atom.off + reloc.offset);
+ try elf_file.base.file.?.pwriteAll(&buf, file_pos + atom.off + reloc.offset);
},
.macho => {
const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
- try d_sym.file.pwriteAll(&buf, file_pos + reloc.atom.off + reloc.offset);
+ try d_sym.file.pwriteAll(&buf, file_pos + atom.off + reloc.offset);
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
const debug_info = wasm_file.debug_info_atom.?.code;
- mem.copy(u8, debug_info.items[reloc.atom.off + reloc.offset ..], &buf);
+ mem.copy(u8, debug_info.items[atom.off + reloc.offset ..], &buf);
},
else => unreachable,
}
@@ -2627,12 +2645,62 @@ fn addDbgInfoErrorSet(
try dbg_info_buffer.append(0);
}
-fn getDbgInfoAtom(tag: File.Tag, mod: *Module, decl_index: Module.Decl.Index) *Atom {
- const decl = mod.declPtr(decl_index);
- return switch (tag) {
- .elf => unreachable,
- .macho => unreachable,
- .wasm => &decl.link.wasm.dbg_info_atom,
- else => unreachable,
+const Kind = enum { src_fn, di_atom };
+
+fn createAtom(self: *Dwarf, comptime kind: Kind) !Atom.Index {
+ const index = blk: {
+ switch (kind) {
+ .src_fn => {
+ const index = @intCast(Atom.Index, self.src_fns.items.len);
+ _ = try self.src_fns.addOne(self.allocator);
+ break :blk index;
+ },
+ .di_atom => {
+ const index = @intCast(Atom.Index, self.di_atoms.items.len);
+ _ = try self.di_atoms.addOne(self.allocator);
+ break :blk index;
+ },
+ }
+ };
+ const atom = self.getAtomPtr(kind, index);
+ atom.* = .{
+ .off = 0,
+ .len = 0,
+ .prev_index = null,
+ .next_index = null,
+ };
+ return index;
+}
+
+fn getOrCreateAtomForDecl(self: *Dwarf, comptime kind: Kind, decl_index: Module.Decl.Index) !Atom.Index {
+ switch (kind) {
+ .src_fn => {
+ const gop = try self.src_fn_decls.getOrPut(self.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = try self.createAtom(kind);
+ }
+ return gop.value_ptr.*;
+ },
+ .di_atom => {
+ const gop = try self.di_atom_decls.getOrPut(self.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = try self.createAtom(kind);
+ }
+ return gop.value_ptr.*;
+ },
+ }
+}
+
+fn getAtom(self: *const Dwarf, comptime kind: Kind, index: Atom.Index) Atom {
+ return switch (kind) {
+ .src_fn => self.src_fns.items[index],
+ .di_atom => self.di_atoms.items[index],
+ };
+}
+
+fn getAtomPtr(self: *Dwarf, comptime kind: Kind, index: Atom.Index) *Atom {
+ return switch (kind) {
+ .src_fn => &self.src_fns.items[index],
+ .di_atom => &self.di_atoms.items[index],
};
}
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 01326fb82e..3e0c6d2b57 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -344,9 +344,9 @@ pub fn deinit(self: *Elf) void {
self.relocs.deinit(gpa);
}
- // if (self.dwarf) |*dw| {
- // dw.deinit();
- // }
+ if (self.dwarf) |*dw| {
+ dw.deinit();
+ }
}
pub fn getDeclVAddr(self: *Elf, decl_index: Module.Decl.Index, reloc_info: File.RelocInfo) !u64 {
@@ -685,146 +685,146 @@ pub fn populateMissingMetadata(self: *Elf) !void {
try self.writeSymbol(0);
}
- // if (self.dwarf) |*dw| {
- // if (self.debug_str_section_index == null) {
- // self.debug_str_section_index = @intCast(u16, self.sections.slice().len);
- // assert(dw.strtab.buffer.items.len == 0);
- // try dw.strtab.buffer.append(gpa, 0);
- // try self.sections.append(gpa, .{
- // .shdr = .{
- // .sh_name = try self.shstrtab.insert(gpa, ".debug_str"),
- // .sh_type = elf.SHT_PROGBITS,
- // .sh_flags = elf.SHF_MERGE | elf.SHF_STRINGS,
- // .sh_addr = 0,
- // .sh_offset = 0,
- // .sh_size = 0,
- // .sh_link = 0,
- // .sh_info = 0,
- // .sh_addralign = 1,
- // .sh_entsize = 1,
- // },
- // .phdr_index = undefined,
- // });
- // self.debug_strtab_dirty = true;
- // self.shdr_table_dirty = true;
- // }
-
- // if (self.debug_info_section_index == null) {
- // self.debug_info_section_index = @intCast(u16, self.sections.slice().len);
-
- // const file_size_hint = 200;
- // const p_align = 1;
- // const off = self.findFreeSpace(file_size_hint, p_align);
- // log.debug("found .debug_info free space 0x{x} to 0x{x}", .{
- // off,
- // off + file_size_hint,
- // });
- // try self.sections.append(gpa, .{
- // .shdr = .{
- // .sh_name = try self.shstrtab.insert(gpa, ".debug_info"),
- // .sh_type = elf.SHT_PROGBITS,
- // .sh_flags = 0,
- // .sh_addr = 0,
- // .sh_offset = off,
- // .sh_size = file_size_hint,
- // .sh_link = 0,
- // .sh_info = 0,
- // .sh_addralign = p_align,
- // .sh_entsize = 0,
- // },
- // .phdr_index = undefined,
- // });
- // self.shdr_table_dirty = true;
- // self.debug_info_header_dirty = true;
- // }
-
- // if (self.debug_abbrev_section_index == null) {
- // self.debug_abbrev_section_index = @intCast(u16, self.sections.slice().len);
-
- // const file_size_hint = 128;
- // const p_align = 1;
- // const off = self.findFreeSpace(file_size_hint, p_align);
- // log.debug("found .debug_abbrev free space 0x{x} to 0x{x}", .{
- // off,
- // off + file_size_hint,
- // });
- // try self.sections.append(gpa, .{
- // .shdr = .{
- // .sh_name = try self.shstrtab.insert(gpa, ".debug_abbrev"),
- // .sh_type = elf.SHT_PROGBITS,
- // .sh_flags = 0,
- // .sh_addr = 0,
- // .sh_offset = off,
- // .sh_size = file_size_hint,
- // .sh_link = 0,
- // .sh_info = 0,
- // .sh_addralign = p_align,
- // .sh_entsize = 0,
- // },
- // .phdr_index = undefined,
- // });
- // self.shdr_table_dirty = true;
- // self.debug_abbrev_section_dirty = true;
- // }
-
- // if (self.debug_aranges_section_index == null) {
- // self.debug_aranges_section_index = @intCast(u16, self.sections.slice().len);
-
- // const file_size_hint = 160;
- // const p_align = 16;
- // const off = self.findFreeSpace(file_size_hint, p_align);
- // log.debug("found .debug_aranges free space 0x{x} to 0x{x}", .{
- // off,
- // off + file_size_hint,
- // });
- // try self.sections.append(gpa, .{
- // .shdr = .{
- // .sh_name = try self.shstrtab.insert(gpa, ".debug_aranges"),
- // .sh_type = elf.SHT_PROGBITS,
- // .sh_flags = 0,
- // .sh_addr = 0,
- // .sh_offset = off,
- // .sh_size = file_size_hint,
- // .sh_link = 0,
- // .sh_info = 0,
- // .sh_addralign = p_align,
- // .sh_entsize = 0,
- // },
- // .phdr_index = undefined,
- // });
- // self.shdr_table_dirty = true;
- // self.debug_aranges_section_dirty = true;
- // }
-
- // if (self.debug_line_section_index == null) {
- // self.debug_line_section_index = @intCast(u16, self.sections.slice().len);
-
- // const file_size_hint = 250;
- // const p_align = 1;
- // const off = self.findFreeSpace(file_size_hint, p_align);
- // log.debug("found .debug_line free space 0x{x} to 0x{x}", .{
- // off,
- // off + file_size_hint,
- // });
- // try self.sections.append(gpa, .{
- // .shdr = .{
- // .sh_name = try self.shstrtab.insert(gpa, ".debug_line"),
- // .sh_type = elf.SHT_PROGBITS,
- // .sh_flags = 0,
- // .sh_addr = 0,
- // .sh_offset = off,
- // .sh_size = file_size_hint,
- // .sh_link = 0,
- // .sh_info = 0,
- // .sh_addralign = p_align,
- // .sh_entsize = 0,
- // },
- // .phdr_index = undefined,
- // });
- // self.shdr_table_dirty = true;
- // self.debug_line_header_dirty = true;
- // }
- // }
+ if (self.dwarf) |*dw| {
+ if (self.debug_str_section_index == null) {
+ self.debug_str_section_index = @intCast(u16, self.sections.slice().len);
+ assert(dw.strtab.buffer.items.len == 0);
+ try dw.strtab.buffer.append(gpa, 0);
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".debug_str"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_MERGE | elf.SHF_STRINGS,
+ .sh_addr = 0,
+ .sh_offset = 0,
+ .sh_size = 0,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = 1,
+ .sh_entsize = 1,
+ },
+ .phdr_index = undefined,
+ });
+ self.debug_strtab_dirty = true;
+ self.shdr_table_dirty = true;
+ }
+
+ if (self.debug_info_section_index == null) {
+ self.debug_info_section_index = @intCast(u16, self.sections.slice().len);
+
+ const file_size_hint = 200;
+ const p_align = 1;
+ const off = self.findFreeSpace(file_size_hint, p_align);
+ log.debug("found .debug_info free space 0x{x} to 0x{x}", .{
+ off,
+ off + file_size_hint,
+ });
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".debug_info"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = file_size_hint,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = p_align,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
+ });
+ self.shdr_table_dirty = true;
+ self.debug_info_header_dirty = true;
+ }
+
+ if (self.debug_abbrev_section_index == null) {
+ self.debug_abbrev_section_index = @intCast(u16, self.sections.slice().len);
+
+ const file_size_hint = 128;
+ const p_align = 1;
+ const off = self.findFreeSpace(file_size_hint, p_align);
+ log.debug("found .debug_abbrev free space 0x{x} to 0x{x}", .{
+ off,
+ off + file_size_hint,
+ });
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".debug_abbrev"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = file_size_hint,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = p_align,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
+ });
+ self.shdr_table_dirty = true;
+ self.debug_abbrev_section_dirty = true;
+ }
+
+ if (self.debug_aranges_section_index == null) {
+ self.debug_aranges_section_index = @intCast(u16, self.sections.slice().len);
+
+ const file_size_hint = 160;
+ const p_align = 16;
+ const off = self.findFreeSpace(file_size_hint, p_align);
+ log.debug("found .debug_aranges free space 0x{x} to 0x{x}", .{
+ off,
+ off + file_size_hint,
+ });
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".debug_aranges"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = file_size_hint,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = p_align,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
+ });
+ self.shdr_table_dirty = true;
+ self.debug_aranges_section_dirty = true;
+ }
+
+ if (self.debug_line_section_index == null) {
+ self.debug_line_section_index = @intCast(u16, self.sections.slice().len);
+
+ const file_size_hint = 250;
+ const p_align = 1;
+ const off = self.findFreeSpace(file_size_hint, p_align);
+ log.debug("found .debug_line free space 0x{x} to 0x{x}", .{
+ off,
+ off + file_size_hint,
+ });
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".debug_line"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = file_size_hint,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = p_align,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
+ });
+ self.shdr_table_dirty = true;
+ self.debug_line_header_dirty = true;
+ }
+ }
const shsize: u64 = switch (self.ptr_width) {
.p32 => @sizeOf(elf.Elf32_Shdr),
@@ -956,26 +956,25 @@ pub fn growNonAllocSection(
}
pub fn markDirty(self: *Elf, shdr_index: u16, phdr_index: ?u16) void {
- _ = shdr_index;
self.shdr_table_dirty = true; // TODO look into only writing one section
if (phdr_index) |_| {
self.phdr_table_dirty = true; // TODO look into making only the one program header dirty
}
- // if (self.dwarf) |_| {
- // if (self.debug_info_section_index.? == shdr_index) {
- // self.debug_info_header_dirty = true;
- // } else if (self.debug_line_section_index.? == shdr_index) {
- // self.debug_line_header_dirty = true;
- // } else if (self.debug_abbrev_section_index.? == shdr_index) {
- // self.debug_abbrev_section_dirty = true;
- // } else if (self.debug_str_section_index.? == shdr_index) {
- // self.debug_strtab_dirty = true;
- // } else if (self.debug_aranges_section_index.? == shdr_index) {
- // self.debug_aranges_section_dirty = true;
- // }
- // }
+ if (self.dwarf) |_| {
+ if (self.debug_info_section_index.? == shdr_index) {
+ self.debug_info_header_dirty = true;
+ } else if (self.debug_line_section_index.? == shdr_index) {
+ self.debug_line_header_dirty = true;
+ } else if (self.debug_abbrev_section_index.? == shdr_index) {
+ self.debug_abbrev_section_dirty = true;
+ } else if (self.debug_str_section_index.? == shdr_index) {
+ self.debug_strtab_dirty = true;
+ } else if (self.debug_aranges_section_index.? == shdr_index) {
+ self.debug_aranges_section_dirty = true;
+ }
+ }
}
pub fn flush(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) link.File.FlushError!void {
@@ -1015,14 +1014,13 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
// TODO This linker code currently assumes there is only 1 compilation unit and it
// corresponds to the Zig source code.
const module = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented;
- _ = module;
const target_endian = self.base.options.target.cpu.arch.endian();
const foreign_endian = target_endian != builtin.cpu.arch.endian();
- // if (self.dwarf) |*dw| {
- // try dw.flushModule(module);
- // }
+ if (self.dwarf) |*dw| {
+ try dw.flushModule(module);
+ }
{
var it = self.relocs.iterator();
@@ -1068,43 +1066,43 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
self.logSymtab();
}
- // if (self.dwarf) |*dw| {
- // if (self.debug_abbrev_section_dirty) {
- // try dw.writeDbgAbbrev();
- // if (!self.shdr_table_dirty) {
- // // Then it won't get written with the others and we need to do it.
- // try self.writeSectHeader(self.debug_abbrev_section_index.?);
- // }
- // self.debug_abbrev_section_dirty = false;
- // }
-
- // if (self.debug_info_header_dirty) {
- // // Currently only one compilation unit is supported, so the address range is simply
- // // identical to the main program header virtual address and memory size.
- // const text_phdr = &self.program_headers.items[self.phdr_load_re_index.?];
- // const low_pc = text_phdr.p_vaddr;
- // const high_pc = text_phdr.p_vaddr + text_phdr.p_memsz;
- // try dw.writeDbgInfoHeader(module, low_pc, high_pc);
- // self.debug_info_header_dirty = false;
- // }
-
- // if (self.debug_aranges_section_dirty) {
- // // Currently only one compilation unit is supported, so the address range is simply
- // // identical to the main program header virtual address and memory size.
- // const text_phdr = &self.program_headers.items[self.phdr_load_re_index.?];
- // try dw.writeDbgAranges(text_phdr.p_vaddr, text_phdr.p_memsz);
- // if (!self.shdr_table_dirty) {
- // // Then it won't get written with the others and we need to do it.
- // try self.writeSectHeader(self.debug_aranges_section_index.?);
- // }
- // self.debug_aranges_section_dirty = false;
- // }
-
- // if (self.debug_line_header_dirty) {
- // try dw.writeDbgLineHeader();
- // self.debug_line_header_dirty = false;
- // }
- // }
+ if (self.dwarf) |*dw| {
+ if (self.debug_abbrev_section_dirty) {
+ try dw.writeDbgAbbrev();
+ if (!self.shdr_table_dirty) {
+ // Then it won't get written with the others and we need to do it.
+ try self.writeSectHeader(self.debug_abbrev_section_index.?);
+ }
+ self.debug_abbrev_section_dirty = false;
+ }
+
+ if (self.debug_info_header_dirty) {
+ // Currently only one compilation unit is supported, so the address range is simply
+ // identical to the main program header virtual address and memory size.
+ const text_phdr = &self.program_headers.items[self.phdr_load_re_index.?];
+ const low_pc = text_phdr.p_vaddr;
+ const high_pc = text_phdr.p_vaddr + text_phdr.p_memsz;
+ try dw.writeDbgInfoHeader(module, low_pc, high_pc);
+ self.debug_info_header_dirty = false;
+ }
+
+ if (self.debug_aranges_section_dirty) {
+ // Currently only one compilation unit is supported, so the address range is simply
+ // identical to the main program header virtual address and memory size.
+ const text_phdr = &self.program_headers.items[self.phdr_load_re_index.?];
+ try dw.writeDbgAranges(text_phdr.p_vaddr, text_phdr.p_memsz);
+ if (!self.shdr_table_dirty) {
+ // Then it won't get written with the others and we need to do it.
+ try self.writeSectHeader(self.debug_aranges_section_index.?);
+ }
+ self.debug_aranges_section_dirty = false;
+ }
+
+ if (self.debug_line_header_dirty) {
+ try dw.writeDbgLineHeader();
+ self.debug_line_header_dirty = false;
+ }
+ }
if (self.phdr_table_dirty) {
const phsize: u64 = switch (self.ptr_width) {
@@ -1162,15 +1160,15 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
}
}
- // if (self.dwarf) |dwarf| {
- // const shdr_index = self.debug_str_section_index.?;
- // if (self.debug_strtab_dirty or dwarf.strtab.buffer.items.len != self.sections.items(.shdr)[shdr_index].sh_size) {
- // try self.growNonAllocSection(shdr_index, dwarf.strtab.buffer.items.len, 1, false);
- // const debug_strtab_sect = self.sections.items(.shdr)[shdr_index];
- // try self.base.file.?.pwriteAll(dwarf.strtab.buffer.items, debug_strtab_sect.sh_offset);
- // self.debug_strtab_dirty = false;
- // }
- // }
+ if (self.dwarf) |dwarf| {
+ const shdr_index = self.debug_str_section_index.?;
+ if (self.debug_strtab_dirty or dwarf.strtab.buffer.items.len != self.sections.items(.shdr)[shdr_index].sh_size) {
+ try self.growNonAllocSection(shdr_index, dwarf.strtab.buffer.items.len, 1, false);
+ const debug_strtab_sect = self.sections.items(.shdr)[shdr_index];
+ try self.base.file.?.pwriteAll(dwarf.strtab.buffer.items, debug_strtab_sect.sh_offset);
+ self.debug_strtab_dirty = false;
+ }
+ }
if (self.shdr_table_dirty) {
const shsize: u64 = switch (self.ptr_width) {
@@ -2100,10 +2098,6 @@ fn freeAtom(self: *Elf, atom_index: Atom.Index) void {
self.getAtomPtr(atom_index).local_sym_index = 0;
self.offset_table_free_list.append(self.base.allocator, atom.offset_table_index) catch {};
-
- // if (self.dwarf) |*dw| {
- // dw.freeAtom(&atom.dbg_info_atom);
- // }
}
fn shrinkAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64) void {
@@ -2133,7 +2127,6 @@ pub fn createAtom(self: *Elf) !Atom.Index {
.offset_table_index = offset_table_index,
.prev_index = null,
.next_index = null,
- .dbg_info_atom = undefined,
};
log.debug("creating ATOM(%{d}) at index {d}", .{ local_sym_index, atom_index });
return atom_index;
@@ -2219,16 +2212,16 @@ fn allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignme
try self.growAllocSection(sym.st_shndx, needed_size);
maybe_last_atom_index.* = atom_index;
- // if (self.dwarf) |_| {
- // // The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
- // // range of the compilation unit. When we expand the text section, this range changes,
- // // so the DW_TAG.compile_unit tag of the .debug_info section becomes dirty.
- // self.debug_info_header_dirty = true;
- // // This becomes dirty for the same reason. We could potentially make this more
- // // fine-grained with the addition of support for more compilation units. It is planned to
- // // model each package as a different compilation unit.
- // self.debug_aranges_section_dirty = true;
- // }
+ if (self.dwarf) |_| {
+ // The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
+ // range of the compilation unit. When we expand the text section, this range changes,
+ // so the DW_TAG.compile_unit tag of the .debug_info section becomes dirty.
+ self.debug_info_header_dirty = true;
+ // This becomes dirty for the same reason. We could potentially make this more
+ // fine-grained with the addition of support for more compilation units. It is planned to
+ // model each package as a different compilation unit.
+ self.debug_aranges_section_dirty = true;
+ }
}
shdr.sh_addralign = math.max(shdr.sh_addralign, alignment);
@@ -2333,9 +2326,9 @@ pub fn freeDecl(self: *Elf, decl_index: Module.Decl.Index) void {
kv.value.exports.deinit(self.base.allocator);
}
- // if (self.dwarf) |*dw| {
- // dw.freeDecl(decl);
- // }
+ if (self.dwarf) |*dw| {
+ dw.freeDecl(decl_index);
+ }
}
pub fn getOrCreateAtomForDecl(self: *Elf, decl_index: Module.Decl.Index) !Atom.Index {
@@ -2471,15 +2464,15 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
- // var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl_index) else null;
- // defer if (decl_state) |*ds| ds.deinit();
+ var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl_index) else null;
+ defer if (decl_state) |*ds| ds.deinit();
- // const res = if (decl_state) |*ds|
- // try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{
- // .dwarf = ds,
- // })
- // else
- const res = try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none);
+ const res = if (decl_state) |*ds|
+ try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{
+ .dwarf = ds,
+ })
+ else
+ try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none);
const code = switch (res) {
.ok => code_buffer.items,
@@ -2490,16 +2483,15 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
},
};
const local_sym = try self.updateDeclCode(decl_index, code, elf.STT_FUNC);
- _ = local_sym;
- // if (decl_state) |*ds| {
- // try self.dwarf.?.commitDeclState(
- // module,
- // decl_index,
- // local_sym.st_value,
- // local_sym.st_size,
- // ds,
- // );
- // }
+ if (decl_state) |*ds| {
+ try self.dwarf.?.commitDeclState(
+ module,
+ decl_index,
+ local_sym.st_value,
+ local_sym.st_size,
+ ds,
+ );
+ }
// Since we updated the vaddr and the size, each corresponding export
// symbol also needs to be updated.
@@ -2536,27 +2528,27 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
- // var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl_index) else null;
- // defer if (decl_state) |*ds| ds.deinit();
+ var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl_index) else null;
+ defer if (decl_state) |*ds| ds.deinit();
// TODO implement .debug_info for global variables
const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
- // const res = if (decl_state) |*ds|
- // try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
- // .ty = decl.ty,
- // .val = decl_val,
- // }, &code_buffer, .{
- // .dwarf = ds,
- // }, .{
- // .parent_atom_index = atom.getSymbolIndex().?,
- // })
- // else
- const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
- .ty = decl.ty,
- .val = decl_val,
- }, &code_buffer, .none, .{
- .parent_atom_index = atom.getSymbolIndex().?,
- });
+ const res = if (decl_state) |*ds|
+ try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+ .ty = decl.ty,
+ .val = decl_val,
+ }, &code_buffer, .{
+ .dwarf = ds,
+ }, .{
+ .parent_atom_index = atom.getSymbolIndex().?,
+ })
+ else
+ try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+ .ty = decl.ty,
+ .val = decl_val,
+ }, &code_buffer, .none, .{
+ .parent_atom_index = atom.getSymbolIndex().?,
+ });
const code = switch (res) {
.ok => code_buffer.items,
@@ -2568,16 +2560,15 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v
};
const local_sym = try self.updateDeclCode(decl_index, code, elf.STT_OBJECT);
- _ = local_sym;
- // if (decl_state) |*ds| {
- // try self.dwarf.?.commitDeclState(
- // module,
- // decl_index,
- // local_sym.st_value,
- // local_sym.st_size,
- // ds,
- // );
- // }
+ if (decl_state) |*ds| {
+ try self.dwarf.?.commitDeclState(
+ module,
+ decl_index,
+ local_sym.st_value,
+ local_sym.st_size,
+ ds,
+ );
+ }
// Since we updated the vaddr and the size, each corresponding export
// symbol also needs to be updated.
@@ -2737,19 +2728,20 @@ pub fn updateDeclExports(
}
/// Must be called only after a successful call to `updateDecl`.
-pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl: *const Module.Decl) !void {
+pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: Module.Decl.Index) !void {
const tracy = trace(@src());
defer tracy.end();
+ const decl = mod.declPtr(decl_index);
const decl_name = try decl.getFullyQualifiedName(mod);
defer self.base.allocator.free(decl_name);
log.debug("updateDeclLineNumber {s}{*}", .{ decl_name, decl });
if (self.llvm_object) |_| return;
- // if (self.dwarf) |*dw| {
- // try dw.updateDeclLineNumber(decl);
- // }
+ if (self.dwarf) |*dw| {
+ try dw.updateDeclLineNumber(mod, decl_index);
+ }
}
pub fn deleteDeclExport(self: *Elf, decl_index: Module.Decl.Index, name: []const u8) void {
diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig
index 24cf19432c..4ab304ef71 100644
--- a/src/link/Elf/Atom.zig
+++ b/src/link/Elf/Atom.zig
@@ -4,7 +4,6 @@ const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;
-const Dwarf = @import("../Dwarf.zig");
const Elf = @import("../Elf.zig");
/// Each decl always gets a local symbol with the fully qualified name.
@@ -23,8 +22,6 @@ offset_table_index: u32,
prev_index: ?Index,
next_index: ?Index,
-dbg_info_atom: Dwarf.Atom,
-
pub const Index = u32;
pub const Reloc = struct {
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 42aaa3a275..22eb58775b 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -472,9 +472,9 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
const module = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented;
- // if (self.d_sym) |*d_sym| {
- // try d_sym.dwarf.flushModule(module);
- // }
+ if (self.d_sym) |*d_sym| {
+ try d_sym.dwarf.flushModule(module);
+ }
var libs = std.StringArrayHashMap(link.SystemLib).init(arena);
try resolveLibSystem(
@@ -664,10 +664,10 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
try self.writeCodeSignature(comp, csig); // code signing always comes last
}
- // if (self.d_sym) |*d_sym| {
- // // Flush debug symbols bundle.
- // try d_sym.flushModule(self);
- // }
+ if (self.d_sym) |*d_sym| {
+ // Flush debug symbols bundle.
+ try d_sym.flushModule(self);
+ }
// if (build_options.enable_link_snapshots) {
// if (self.base.options.enable_link_snapshots)
@@ -1089,7 +1089,6 @@ pub fn createAtom(self: *MachO) !Atom.Index {
.alignment = 0,
.prev_index = null,
.next_index = null,
- .dbg_info_atom = undefined,
};
log.debug("creating ATOM(%{d}) at index {d}", .{ sym_index, atom_index });
return atom_index;
@@ -1724,9 +1723,9 @@ pub fn deinit(self: *MachO) void {
if (self.llvm_object) |llvm_object| llvm_object.destroy(gpa);
}
- // if (self.d_sym) |*d_sym| {
- // d_sym.deinit();
- // }
+ if (self.d_sym) |*d_sym| {
+ d_sym.deinit();
+ }
self.got_entries.deinit(gpa);
self.got_entries_free_list.deinit(gpa);
@@ -1804,9 +1803,8 @@ pub fn deinit(self: *MachO) void {
}
fn freeAtom(self: *MachO, atom_index: Atom.Index) void {
- log.debug("freeAtom {d}", .{atom_index});
-
const gpa = self.base.allocator;
+ log.debug("freeAtom {d}", .{atom_index});
// Remove any relocs and base relocs associated with this Atom
Atom.freeRelocations(self, atom_index);
@@ -1876,9 +1874,9 @@ fn freeAtom(self: *MachO, atom_index: Atom.Index) void {
};
_ = self.got_entries_table.remove(got_target);
- // if (self.d_sym) |*d_sym| {
- // d_sym.swapRemoveRelocs(sym_index);
- // }
+ if (self.d_sym) |*d_sym| {
+ d_sym.swapRemoveRelocs(sym_index);
+ }
log.debug(" adding GOT index {d} to free list (target local@{d})", .{ got_index, sym_index });
}
@@ -1887,10 +1885,6 @@ fn freeAtom(self: *MachO, atom_index: Atom.Index) void {
_ = self.atom_by_index_table.remove(sym_index);
log.debug(" adding local symbol index {d} to free list", .{sym_index});
self.getAtomPtr(atom_index).sym_index = 0;
-
- // if (self.d_sym) |*d_sym| {
- // d_sym.dwarf.freeAtom(&atom.dbg_info_atom);
- // }
}
fn shrinkAtom(self: *MachO, atom_index: Atom.Index, new_block_size: u64) void {
@@ -2020,23 +2014,22 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
Atom.freeRelocations(self, atom_index);
const atom = self.getAtom(atom_index);
- _ = atom;
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
- // var decl_state = if (self.d_sym) |*d_sym|
- // try d_sym.dwarf.initDeclState(module, decl_index)
- // else
- // null;
- // defer if (decl_state) |*ds| ds.deinit();
+ var decl_state = if (self.d_sym) |*d_sym|
+ try d_sym.dwarf.initDeclState(module, decl_index)
+ else
+ null;
+ defer if (decl_state) |*ds| ds.deinit();
- // const res = if (decl_state) |*ds|
- // try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{
- // .dwarf = ds,
- // })
- // else
- const res = try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none);
+ const res = if (decl_state) |*ds|
+ try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{
+ .dwarf = ds,
+ })
+ else
+ try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none);
const code = switch (res) {
.ok => code_buffer.items,
@@ -2048,11 +2041,10 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
};
const addr = try self.updateDeclCode(decl_index, code);
- _ = addr;
- // if (decl_state) |*ds| {
- // try self.d_sym.?.dwarf.commitDeclState(module, decl_index, addr, atom.size, ds);
- // }
+ if (decl_state) |*ds| {
+ try self.d_sym.?.dwarf.commitDeclState(module, decl_index, addr, atom.size, ds);
+ }
// Since we updated the vaddr and the size, each corresponding export symbol also
// needs to be updated.
@@ -2154,29 +2146,29 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
- // var decl_state: ?Dwarf.DeclState = if (self.d_sym) |*d_sym|
- // try d_sym.dwarf.initDeclState(module, decl_index)
- // else
- // null;
- // defer if (decl_state) |*ds| ds.deinit();
+ var decl_state: ?Dwarf.DeclState = if (self.d_sym) |*d_sym|
+ try d_sym.dwarf.initDeclState(module, decl_index)
+ else
+ null;
+ defer if (decl_state) |*ds| ds.deinit();
const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
- // const res = if (decl_state) |*ds|
- // try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
- // .ty = decl.ty,
- // .val = decl_val,
- // }, &code_buffer, .{
- // .dwarf = ds,
- // }, .{
- // .parent_atom_index = atom.getSymbolIndex().?,
- // })
- // else
- const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
- .ty = decl.ty,
- .val = decl_val,
- }, &code_buffer, .none, .{
- .parent_atom_index = atom.getSymbolIndex().?,
- });
+ const res = if (decl_state) |*ds|
+ try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+ .ty = decl.ty,
+ .val = decl_val,
+ }, &code_buffer, .{
+ .dwarf = ds,
+ }, .{
+ .parent_atom_index = atom.getSymbolIndex().?,
+ })
+ else
+ try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+ .ty = decl.ty,
+ .val = decl_val,
+ }, &code_buffer, .none, .{
+ .parent_atom_index = atom.getSymbolIndex().?,
+ });
const code = switch (res) {
.ok => code_buffer.items,
@@ -2187,11 +2179,10 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
},
};
const addr = try self.updateDeclCode(decl_index, code);
- _ = addr;
- // if (decl_state) |*ds| {
- // try self.d_sym.?.dwarf.commitDeclState(module, decl_index, addr, atom.size, ds);
- // }
+ if (decl_state) |*ds| {
+ try self.d_sym.?.dwarf.commitDeclState(module, decl_index, addr, atom.size, ds);
+ }
// Since we updated the vaddr and the size, each corresponding export symbol also
// needs to be updated.
@@ -2432,13 +2423,10 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
return atom.getSymbol(self).n_value;
}
-pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void {
- _ = decl;
- _ = self;
- _ = module;
- // if (self.d_sym) |*d_sym| {
- // try d_sym.dwarf.updateDeclLineNumber(decl);
- // }
+pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl_index: Module.Decl.Index) !void {
+ if (self.d_sym) |*d_sym| {
+ try d_sym.dwarf.updateDeclLineNumber(module, decl_index);
+ }
}
pub fn updateDeclExports(
@@ -2611,9 +2599,9 @@ pub fn freeDecl(self: *MachO, decl_index: Module.Decl.Index) void {
kv.value.exports.deinit(self.base.allocator);
}
- // if (self.d_sym) |*d_sym| {
- // d_sym.dwarf.freeDecl(decl);
- // }
+ if (self.d_sym) |*d_sym| {
+ d_sym.dwarf.freeDecl(decl_index);
+ }
}
pub fn getDeclVAddr(self: *MachO, decl_index: Module.Decl.Index, reloc_info: File.RelocInfo) !u64 {
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index 401d71813c..5fb94b7c13 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -13,7 +13,6 @@ const trace = @import("../../tracy.zig").trace;
const Allocator = mem.Allocator;
const Arch = std.Target.Cpu.Arch;
-const Dwarf = @import("../Dwarf.zig");
const MachO = @import("../MachO.zig");
const Relocation = @import("Relocation.zig");
const SymbolWithLoc = MachO.SymbolWithLoc;
@@ -43,8 +42,6 @@ alignment: u32,
next_index: ?Index,
prev_index: ?Index,
-dbg_info_atom: Dwarf.Atom,
-
pub const Index = u32;
pub const Binding = struct {
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index 20f540022a..87e3ca5c22 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -1018,10 +1018,10 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
}
/// Must be called only after a successful call to `updateDecl`.
-pub fn updateDeclLineNumber(self: *Plan9, mod: *Module, decl: *const Module.Decl) !void {
+pub fn updateDeclLineNumber(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !void {
_ = self;
_ = mod;
- _ = decl;
+ _ = decl_index;
}
pub fn getDeclVAddr(
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 31dfb87659..ee4518796e 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -183,13 +183,9 @@ pub const Segment = struct {
pub const FnData = struct {
/// Reference to the wasm type that represents this function.
type_index: u32,
- /// Contains debug information related to this function.
- /// For Wasm, the offset is relative to the code-section.
- src_fn: Dwarf.SrcFn,
pub const empty: FnData = .{
.type_index = undefined,
- .src_fn = Dwarf.SrcFn.empty,
};
};
@@ -1122,17 +1118,18 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi
return wasm.finishUpdateDecl(decl, code);
}
-pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Module, decl: *const Module.Decl) !void {
+pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !void {
if (wasm.llvm_object) |_| return;
if (wasm.dwarf) |*dw| {
const tracy = trace(@src());
defer tracy.end();
+ const decl = mod.declPtr(decl_index);
const decl_name = try decl.getFullyQualifiedName(mod);
defer wasm.base.allocator.free(decl_name);
log.debug("updateDeclLineNumber {s}{*}", .{ decl_name, decl });
- try dw.updateDeclLineNumber(decl);
+ try dw.updateDeclLineNumber(mod, decl_index);
}
}
@@ -1460,10 +1457,9 @@ pub fn freeDecl(wasm: *Wasm, decl_index: Module.Decl.Index) void {
_ = wasm.resolved_symbols.swapRemove(atom.symbolLoc());
_ = wasm.symbol_atom.remove(atom.symbolLoc());
- if (wasm.dwarf) |*dwarf| {
- dwarf.freeDecl(decl);
- dwarf.freeAtom(&atom.dbg_info_atom);
- }
+ // if (wasm.dwarf) |*dwarf| {
+ // dwarf.freeDecl(decl_index);
+ // }
atom.deinit(wasm.base.allocator);
}
@@ -1882,7 +1878,6 @@ fn initializeCallCtorsFunction(wasm: *Wasm) !void {
.next = null,
.prev = null,
.code = function_body.moveToUnmanaged(),
- .dbg_info_atom = undefined,
};
try wasm.managed_atoms.append(wasm.base.allocator, atom);
try wasm.appendAtomAtIndex(wasm.code_section_index.?, atom);
diff --git a/src/link/Wasm/Atom.zig b/src/link/Wasm/Atom.zig
index 20f847e475..554f98b5ca 100644
--- a/src/link/Wasm/Atom.zig
+++ b/src/link/Wasm/Atom.zig
@@ -4,7 +4,6 @@ const std = @import("std");
const types = @import("types.zig");
const Wasm = @import("../Wasm.zig");
const Symbol = @import("Symbol.zig");
-const Dwarf = @import("../Dwarf.zig");
const leb = std.leb;
const log = std.log.scoped(.link);
@@ -39,9 +38,6 @@ prev: ?*Atom,
/// When the parent atom is being freed, it will also do so for all local atoms.
locals: std.ArrayListUnmanaged(Atom) = .{},
-/// Represents the debug Atom that holds all debug information of this Atom.
-dbg_info_atom: Dwarf.Atom,
-
/// Represents a default empty wasm `Atom`
pub const empty: Atom = .{
.alignment = 0,
@@ -51,7 +47,6 @@ pub const empty: Atom = .{
.prev = null,
.size = 0,
.sym_index = 0,
- .dbg_info_atom = undefined,
};
/// Frees all resources owned by this `Atom`.
--
cgit v1.2.3
From e0f3975fc8a7afd8a613802321fd46e64d8970d5 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Wed, 1 Feb 2023 16:01:43 +0100
Subject: link: make SpirV atoms fully owned by the linker
---
src/Module.zig | 19 +++----------------
src/codegen/spirv.zig | 30 +++++++++++++++++++-----------
src/link.zig | 2 +-
src/link/SpirV.zig | 18 +++++++-----------
4 files changed, 30 insertions(+), 39 deletions(-)
(limited to 'src')
diff --git a/src/Module.zig b/src/Module.zig
index eb947a6977..bfeeea51e8 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -5183,20 +5183,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
decl.zir_decl_index = @intCast(u32, decl_sub_index);
if (decl.getFunction()) |_| {
switch (comp.bin_file.tag) {
- .coff => {
- // TODO Implement for COFF
- },
- .elf => {
- // TODO Look into detecting when this would be unnecessary by storing enough state
- // in `Decl` to notice that the line number did not change.
- comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
- },
- .macho => {
- // TODO Look into detecting when this would be unnecessary by storing enough state
- // in `Decl` to notice that the line number did not change.
- comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
- },
- .plan9 => {
+ .coff, .elf, .macho, .plan9 => {
// TODO Look into detecting when this would be unnecessary by storing enough state
// in `Decl` to notice that the line number did not change.
comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
@@ -5290,7 +5277,7 @@ pub fn clearDecl(
.plan9 => .{ .plan9 = {} },
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.FnData.empty },
- .spirv => .{ .spirv = .{} },
+ .spirv => .{ .spirv = {} },
.nvptx => .{ .nvptx = {} },
};
}
@@ -5710,7 +5697,7 @@ pub fn allocateNewDecl(
.plan9 => .{ .plan9 = {} },
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.FnData.empty },
- .spirv => .{ .spirv = .{} },
+ .spirv => .{ .spirv = {} },
.nvptx => .{ .nvptx = {} },
},
.generation = 0,
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index e1af8c847f..c5a3d57d07 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -49,7 +49,7 @@ pub const DeclGen = struct {
spv: *SpvModule,
/// The decl we are currently generating code for.
- decl: *Decl,
+ decl_index: Decl.Index,
/// The intermediate code of the declaration we are currently generating. Note: If
/// the declaration is not a function, this value will be undefined!
@@ -59,6 +59,8 @@ pub const DeclGen = struct {
/// Note: If the declaration is not a function, this value will be undefined!
liveness: Liveness,
+ ids: *const std.AutoHashMap(Decl.Index, IdResult),
+
/// An array of function argument result-ids. Each index corresponds with the
/// function argument of the same index.
args: std.ArrayListUnmanaged(IdRef) = .{},
@@ -133,14 +135,20 @@ pub const DeclGen = struct {
/// Initialize the common resources of a DeclGen. Some fields are left uninitialized,
/// only set when `gen` is called.
- pub fn init(allocator: Allocator, module: *Module, spv: *SpvModule) DeclGen {
+ pub fn init(
+ allocator: Allocator,
+ module: *Module,
+ spv: *SpvModule,
+ ids: *const std.AutoHashMap(Decl.Index, IdResult),
+ ) DeclGen {
return .{
.gpa = allocator,
.module = module,
.spv = spv,
- .decl = undefined,
+ .decl_index = undefined,
.air = undefined,
.liveness = undefined,
+ .ids = ids,
.next_arg_index = undefined,
.current_block_label_id = undefined,
.error_msg = undefined,
@@ -150,9 +158,9 @@ pub const DeclGen = struct {
/// Generate the code for `decl`. If a reportable error occurred during code generation,
/// a message is returned by this function. Callee owns the memory. If this function
/// returns such a reportable error, it is valid to be called again for a different decl.
- pub fn gen(self: *DeclGen, decl: *Decl, air: Air, liveness: Liveness) !?*Module.ErrorMsg {
+ pub fn gen(self: *DeclGen, decl_index: Decl.Index, air: Air, liveness: Liveness) !?*Module.ErrorMsg {
// Reset internal resources, we don't want to re-allocate these.
- self.decl = decl;
+ self.decl_index = decl_index;
self.air = air;
self.liveness = liveness;
self.args.items.len = 0;
@@ -194,7 +202,7 @@ pub const DeclGen = struct {
pub fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
@setCold(true);
const src = LazySrcLoc.nodeOffset(0);
- const src_loc = src.toSrcLoc(self.decl);
+ const src_loc = src.toSrcLoc(self.module.declPtr(self.decl_index));
assert(self.error_msg == null);
self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args);
return error.CodegenFail;
@@ -332,7 +340,7 @@ pub const DeclGen = struct {
};
const decl = self.module.declPtr(fn_decl_index);
self.module.markDeclAlive(decl);
- return decl.fn_link.spirv.id.toRef();
+ return self.ids.get(fn_decl_index).?.toRef();
}
const target = self.getTarget();
@@ -553,8 +561,8 @@ pub const DeclGen = struct {
}
fn genDecl(self: *DeclGen) !void {
- const decl = self.decl;
- const result_id = decl.fn_link.spirv.id;
+ const result_id = self.ids.get(self.decl_index).?;
+ const decl = self.module.declPtr(self.decl_index);
if (decl.val.castTag(.function)) |_| {
assert(decl.ty.zigTypeTag() == .Fn);
@@ -945,7 +953,7 @@ pub const DeclGen = struct {
fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void {
const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
- const src_fname_id = try self.spv.resolveSourceFileName(self.decl);
+ const src_fname_id = try self.spv.resolveSourceFileName(self.module.declPtr(self.decl_index));
try self.func.body.emit(self.spv.gpa, .OpLine, .{
.file = src_fname_id,
.line = dbg_stmt.line,
@@ -1106,7 +1114,7 @@ pub const DeclGen = struct {
assert(as.errors.items.len != 0);
assert(self.error_msg == null);
const loc = LazySrcLoc.nodeOffset(0);
- const src_loc = loc.toSrcLoc(self.decl);
+ const src_loc = loc.toSrcLoc(self.module.declPtr(self.decl_index));
self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{});
const notes = try self.module.gpa.alloc(Module.ErrorMsg, as.errors.items.len);
diff --git a/src/link.zig b/src/link.zig
index 3dd182b586..450a008cea 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -279,7 +279,7 @@ pub const File = struct {
plan9: void,
c: void,
wasm: Wasm.FnData,
- spirv: SpirV.FnData,
+ spirv: void,
nvptx: void,
};
diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig
index 7dbd3a42ce..14a29e4498 100644
--- a/src/link/SpirV.zig
+++ b/src/link/SpirV.zig
@@ -42,13 +42,6 @@ const SpvModule = @import("../codegen/spirv/Module.zig");
const spec = @import("../codegen/spirv/spec.zig");
const IdResult = spec.IdResult;
-// TODO: Should this struct be used at all rather than just a hashmap of aux data for every decl?
-pub const FnData = struct {
- // We're going to fill these in flushModule, and we're going to fill them unconditionally,
- // so just set it to undefined.
- id: IdResult = undefined,
-};
-
base: link.File,
/// This linker backend does not try to incrementally link output SPIR-V code.
@@ -209,16 +202,19 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No
// so that we can access them before processing them.
// TODO: We're allocating an ID unconditionally now, are there
// declarations which don't generate a result?
- // TODO: fn_link is used here, but thats probably not the right field. It will work anyway though.
+ var ids = std.AutoHashMap(Module.Decl.Index, IdResult).init(self.base.allocator);
+ defer ids.deinit();
+ try ids.ensureTotalCapacity(@intCast(u32, self.decl_table.count()));
+
for (self.decl_table.keys()) |decl_index| {
const decl = module.declPtr(decl_index);
if (decl.has_tv) {
- decl.fn_link.spirv.id = spv.allocId();
+ ids.putAssumeCapacityNoClobber(decl_index, spv.allocId());
}
}
// Now, actually generate the code for all declarations.
- var decl_gen = codegen.DeclGen.init(self.base.allocator, module, &spv);
+ var decl_gen = codegen.DeclGen.init(self.base.allocator, module, &spv, &ids);
defer decl_gen.deinit();
var it = self.decl_table.iterator();
@@ -231,7 +227,7 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No
const liveness = entry.value_ptr.liveness;
// Note, if `decl` is not a function, air/liveness may be undefined.
- if (try decl_gen.gen(decl, air, liveness)) |msg| {
+ if (try decl_gen.gen(decl_index, air, liveness)) |msg| {
try module.failed_decls.put(module.gpa, decl_index, msg);
return; // TODO: Attempt to generate more decls?
}
--
cgit v1.2.3
From 1aa0f8aa2f382fb56639ea6833a62c4b8b031247 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Wed, 1 Feb 2023 17:39:07 +0100
Subject: link: fix pointer invalidation issues in Elf, MachO and Coff
---
src/link/Coff.zig | 12 ++++++++----
src/link/Elf.zig | 9 ++++-----
src/link/MachO.zig | 4 ++--
3 files changed, 14 insertions(+), 11 deletions(-)
(limited to 'src')
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index f563a617c7..2922e783e1 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1035,7 +1035,6 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
const unnamed_consts = gop.value_ptr;
const atom_index = try self.createAtom();
- const atom = self.getAtomPtr(atom_index);
const sym_name = blk: {
const decl_name = try decl.getFullyQualifiedName(mod);
@@ -1045,11 +1044,15 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
break :blk try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
};
defer gpa.free(sym_name);
- try self.setSymbolName(atom.getSymbolPtr(self), sym_name);
- atom.getSymbolPtr(self).section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? + 1);
+ {
+ const atom = self.getAtom(atom_index);
+ const sym = atom.getSymbolPtr(self);
+ try self.setSymbolName(sym, sym_name);
+ sym.section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? + 1);
+ }
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), tv, &code_buffer, .none, .{
- .parent_atom_index = atom.getSymbolIndex().?,
+ .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
});
const code = switch (res) {
.ok => code_buffer.items,
@@ -1062,6 +1065,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
};
const required_alignment = tv.ty.abiAlignment(self.base.options.target);
+ const atom = self.getAtomPtr(atom_index);
atom.alignment = required_alignment;
atom.size = @intCast(u32, code.len);
atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 3e0c6d2b57..45952da6c0 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -2600,12 +2600,11 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
const name = self.shstrtab.get(name_str_index).?;
const atom_index = try self.createAtom();
- const atom = self.getAtomPtr(atom_index);
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .{
.none = {},
}, .{
- .parent_atom_index = atom.getSymbolIndex().?,
+ .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
});
const code = switch (res) {
.ok => code_buffer.items,
@@ -2620,7 +2619,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
const shdr_index = self.rodata_section_index.?;
const phdr_index = self.sections.items(.phdr_index)[shdr_index];
- const local_sym = atom.getSymbolPtr(self);
+ const local_sym = self.getAtom(atom_index).getSymbolPtr(self);
local_sym.st_name = name_str_index;
local_sym.st_info = (elf.STB_LOCAL << 4) | elf.STT_OBJECT;
local_sym.st_other = 0;
@@ -2631,14 +2630,14 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
log.debug("allocated text block for {s} at 0x{x}", .{ name, local_sym.st_value });
- try self.writeSymbol(atom.getSymbolIndex().?);
+ try self.writeSymbol(self.getAtom(atom_index).getSymbolIndex().?);
try unnamed_consts.append(gpa, atom_index);
const section_offset = local_sym.st_value - self.program_headers.items[phdr_index].p_vaddr;
const file_offset = self.sections.items(.shdr)[shdr_index].sh_offset + section_offset;
try self.base.file.?.pwriteAll(code, file_offset);
- return atom.getSymbolIndex().?;
+ return self.getAtom(atom_index).getSymbolIndex().?;
}
pub fn updateDeclExports(
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 22eb58775b..24ef275c5b 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -2079,10 +2079,9 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu
log.debug("allocating symbol indexes for {?s}", .{name});
const atom_index = try self.createAtom();
- const atom = self.getAtomPtr(atom_index);
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .none, .{
- .parent_atom_index = atom.getSymbolIndex().?,
+ .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
});
const code = switch (res) {
.ok => code_buffer.items,
@@ -2095,6 +2094,7 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu
};
const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
+ const atom = self.getAtomPtr(atom_index);
atom.size = code.len;
atom.alignment = required_alignment;
// TODO: work out logic for disambiguating functions from function pointers
--
cgit v1.2.3
From 46f54b23ae604c3f99f51ca719d9085530f6b59c Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Wed, 1 Feb 2023 18:55:35 +0100
Subject: link: make Wasm atoms fully owned by the linker
---
src/Module.zig | 6 +-
src/Sema.zig | 2 +-
src/arch/wasm/CodeGen.zig | 30 +-
src/arch/wasm/Emit.zig | 29 +-
src/link.zig | 4 +-
src/link/Dwarf.zig | 25 +-
src/link/Wasm.zig | 522 ++++++++++++++++++-----------------
src/link/Wasm/Atom.zig | 44 ++-
src/link/Wasm/Object.zig | 15 +-
test/link/wasm/export-data/build.zig | 4 +-
10 files changed, 354 insertions(+), 327 deletions(-)
(limited to 'src')
diff --git a/src/Module.zig b/src/Module.zig
index bfeeea51e8..f84d720d1f 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -5266,7 +5266,7 @@ pub fn clearDecl(
.macho => .{ .macho = {} },
.plan9 => .{ .plan9 = {} },
.c => .{ .c = {} },
- .wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
+ .wasm => .{ .wasm = {} },
.spirv => .{ .spirv = {} },
.nvptx => .{ .nvptx = {} },
};
@@ -5374,7 +5374,7 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void
try macho.deleteDeclExport(decl_index, exp.options.name);
}
if (mod.comp.bin_file.cast(link.File.Wasm)) |wasm| {
- wasm.deleteExport(exp.link.wasm);
+ wasm.deleteDeclExport(decl_index);
}
if (mod.comp.bin_file.cast(link.File.Coff)) |coff| {
coff.deleteDeclExport(decl_index, exp.options.name);
@@ -5686,7 +5686,7 @@ pub fn allocateNewDecl(
.macho => .{ .macho = {} },
.plan9 => .{ .plan9 = {} },
.c => .{ .c = {} },
- .wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
+ .wasm => .{ .wasm = {} },
.spirv => .{ .spirv = {} },
.nvptx => .{ .nvptx = {} },
},
diff --git a/src/Sema.zig b/src/Sema.zig
index e54bfc7bd9..4871961753 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -5570,7 +5570,7 @@ pub fn analyzeExport(
.macho => .{ .macho = {} },
.plan9 => .{ .plan9 = {} },
.c => .{ .c = {} },
- .wasm => .{ .wasm = .{} },
+ .wasm => .{ .wasm = {} },
.spirv => .{ .spirv = {} },
.nvptx => .{ .nvptx = {} },
},
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 8212d281e5..8559a728e5 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1269,10 +1269,10 @@ fn genFunc(func: *CodeGen) InnerError!void {
var emit: Emit = .{
.mir = mir,
- .bin_file = &func.bin_file.base,
+ .bin_file = func.bin_file,
.code = func.code,
.locals = func.locals.items,
- .decl = func.decl,
+ .decl_index = func.decl_index,
.dbg_output = func.debug_output,
.prev_di_line = 0,
.prev_di_column = 0,
@@ -2115,21 +2115,20 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const fn_info = fn_ty.fnInfo();
const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, func.target);
- const callee: ?*Decl = blk: {
+ const callee: ?Decl.Index = blk: {
const func_val = func.air.value(pl_op.operand) orelse break :blk null;
const module = func.bin_file.base.options.module.?;
if (func_val.castTag(.function)) |function| {
- const decl = module.declPtr(function.data.owner_decl);
- try decl.link.wasm.ensureInitialized(func.bin_file);
- break :blk decl;
+ _ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl);
+ break :blk function.data.owner_decl;
} else if (func_val.castTag(.extern_fn)) |extern_fn| {
const ext_decl = module.declPtr(extern_fn.data.owner_decl);
const ext_info = ext_decl.ty.fnInfo();
var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, func.target);
defer func_type.deinit(func.gpa);
- const atom = &ext_decl.link.wasm;
- try atom.ensureInitialized(func.bin_file);
+ const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl);
+ const atom = func.bin_file.getAtomPtr(atom_index);
ext_decl.fn_link.wasm.type_index = try func.bin_file.putOrGetFuncType(func_type);
try func.bin_file.addOrUpdateImport(
mem.sliceTo(ext_decl.name, 0),
@@ -2137,11 +2136,10 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
ext_decl.getExternFn().?.lib_name,
ext_decl.fn_link.wasm.type_index,
);
- break :blk ext_decl;
+ break :blk extern_fn.data.owner_decl;
} else if (func_val.castTag(.decl_ref)) |decl_ref| {
- const decl = module.declPtr(decl_ref.data);
- try decl.link.wasm.ensureInitialized(func.bin_file);
- break :blk decl;
+ _ = try func.bin_file.getOrCreateAtomForDecl(decl_ref.data);
+ break :blk decl_ref.data;
}
return func.fail("Expected a function, but instead found type '{}'", .{func_val.tag()});
};
@@ -2162,7 +2160,8 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
}
if (callee) |direct| {
- try func.addLabel(.call, direct.link.wasm.sym_index);
+ const atom_index = func.bin_file.decls.get(direct).?;
+ try func.addLabel(.call, func.bin_file.getAtom(atom_index).sym_index);
} else {
// in this case we call a function pointer
// so load its value onto the stack
@@ -2758,9 +2757,10 @@ fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Ind
}
module.markDeclAlive(decl);
- try decl.link.wasm.ensureInitialized(func.bin_file);
+ const atom_index = try func.bin_file.getOrCreateAtomForDecl(decl_index);
+ const atom = func.bin_file.getAtom(atom_index);
- const target_sym_index = decl.link.wasm.sym_index;
+ const target_sym_index = atom.sym_index;
if (decl.ty.zigTypeTag() == .Fn) {
try func.bin_file.addTableFunction(target_sym_index);
return WValue{ .function_index = target_sym_index };
diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig
index 71d21d2797..a340ac5da8 100644
--- a/src/arch/wasm/Emit.zig
+++ b/src/arch/wasm/Emit.zig
@@ -11,8 +11,8 @@ const leb128 = std.leb;
/// Contains our list of instructions
mir: Mir,
-/// Reference to the file handler
-bin_file: *link.File,
+/// Reference to the Wasm module linker
+bin_file: *link.File.Wasm,
/// Possible error message. When set, the value is allocated and
/// must be freed manually.
error_msg: ?*Module.ErrorMsg = null,
@@ -21,7 +21,7 @@ code: *std.ArrayList(u8),
/// List of allocated locals.
locals: []const u8,
/// The declaration that code is being generated for.
-decl: *Module.Decl,
+decl_index: Module.Decl.Index,
// Debug information
/// Holds the debug information for this emission
@@ -252,8 +252,8 @@ fn offset(self: Emit) u32 {
fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
@setCold(true);
std.debug.assert(emit.error_msg == null);
- // TODO: Determine the source location.
- emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.allocator, emit.decl.srcLoc(), format, args);
+ const mod = emit.bin_file.base.options.module.?;
+ emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.base.allocator, mod.declPtr(emit.decl_index).srcLoc(), format, args);
return error.EmitFail;
}
@@ -304,8 +304,9 @@ fn emitGlobal(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void {
const global_offset = emit.offset();
try emit.code.appendSlice(&buf);
- // globals can have index 0 as it represents the stack pointer
- try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
+ const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
+ const atom = emit.bin_file.getAtomPtr(atom_index);
+ try atom.relocs.append(emit.bin_file.base.allocator, .{
.index = label,
.offset = global_offset,
.relocation_type = .R_WASM_GLOBAL_INDEX_LEB,
@@ -361,7 +362,9 @@ fn emitCall(emit: *Emit, inst: Mir.Inst.Index) !void {
try emit.code.appendSlice(&buf);
if (label != 0) {
- try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
+ const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
+ const atom = emit.bin_file.getAtomPtr(atom_index);
+ try atom.relocs.append(emit.bin_file.base.allocator, .{
.offset = call_offset,
.index = label,
.relocation_type = .R_WASM_FUNCTION_INDEX_LEB,
@@ -387,7 +390,9 @@ fn emitFunctionIndex(emit: *Emit, inst: Mir.Inst.Index) !void {
try emit.code.appendSlice(&buf);
if (symbol_index != 0) {
- try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
+ const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
+ const atom = emit.bin_file.getAtomPtr(atom_index);
+ try atom.relocs.append(emit.bin_file.base.allocator, .{
.offset = index_offset,
.index = symbol_index,
.relocation_type = .R_WASM_TABLE_INDEX_SLEB,
@@ -399,7 +404,7 @@ fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
const extra_index = emit.mir.instructions.items(.data)[inst].payload;
const mem = emit.mir.extraData(Mir.Memory, extra_index).data;
const mem_offset = emit.offset() + 1;
- const is_wasm32 = emit.bin_file.options.target.cpu.arch == .wasm32;
+ const is_wasm32 = emit.bin_file.base.options.target.cpu.arch == .wasm32;
if (is_wasm32) {
try emit.code.append(std.wasm.opcode(.i32_const));
var buf: [5]u8 = undefined;
@@ -413,7 +418,9 @@ fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
}
if (mem.pointer != 0) {
- try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
+ const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
+ const atom = emit.bin_file.getAtomPtr(atom_index);
+ try atom.relocs.append(emit.bin_file.base.allocator, .{
.offset = mem_offset,
.index = mem.pointer,
.relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_LEB else .R_WASM_MEMORY_ADDR_LEB64,
diff --git a/src/link.zig b/src/link.zig
index 450a008cea..0a3226f004 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -267,7 +267,7 @@ pub const File = struct {
macho: void,
plan9: void,
c: void,
- wasm: Wasm.DeclBlock,
+ wasm: void,
spirv: void,
nvptx: void,
};
@@ -289,7 +289,7 @@ pub const File = struct {
macho: void,
plan9: void,
c: void,
- wasm: Wasm.Export,
+ wasm: void,
spirv: void,
nvptx: void,
};
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index e90db2d0df..a3d0aa8a53 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -1099,7 +1099,7 @@ pub fn commitDeclState(
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_line = wasm_file.debug_line_atom.?.code;
+ const debug_line = wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code;
writeDbgLineNopsBuffered(debug_line.items, src_fn.off, 0, &.{}, src_fn.len);
},
else => unreachable,
@@ -1177,7 +1177,7 @@ pub fn commitDeclState(
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const atom = wasm_file.debug_line_atom.?;
+ const atom = wasm_file.getAtomPtr(wasm_file.debug_line_atom.?);
const debug_line = &atom.code;
const segment_size = debug_line.items.len;
if (needed_size != segment_size) {
@@ -1345,7 +1345,8 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, atom_index: Atom.Index, len: u32)
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_info = &wasm_file.debug_info_atom.?.code;
+ const debug_info_index = wasm_file.debug_info_atom.?;
+ const debug_info = &wasm_file.getAtomPtr(debug_info_index).code;
try writeDbgInfoNopsToArrayList(gpa, debug_info, atom.off, 0, &.{0}, atom.len, false);
},
else => unreachable,
@@ -1441,7 +1442,7 @@ fn writeDeclDebugInfo(self: *Dwarf, atom_index: Atom.Index, dbg_info_buf: []cons
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
const info_atom = wasm_file.debug_info_atom.?;
- const debug_info = &info_atom.code;
+ const debug_info = &wasm_file.getAtomPtr(info_atom).code;
const segment_size = debug_info.items.len;
if (needed_size != segment_size) {
log.debug(" needed size does not equal allocated size: {d}", .{needed_size});
@@ -1504,8 +1505,8 @@ pub fn updateDeclLineNumber(self: *Dwarf, module: *Module, decl_index: Module.De
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
const offset = atom.off + self.getRelocDbgLineOff();
- const atom_ = wasm_file.debug_line_atom.?;
- mem.copy(u8, atom_.code.items[offset..], &data);
+ const line_atom_index = wasm_file.debug_line_atom.?;
+ mem.copy(u8, wasm_file.getAtomPtr(line_atom_index).code.items[offset..], &data);
},
else => unreachable,
}
@@ -1722,7 +1723,7 @@ pub fn writeDbgAbbrev(self: *Dwarf) !void {
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_abbrev = &wasm_file.debug_abbrev_atom.?.code;
+ const debug_abbrev = &wasm_file.getAtomPtr(wasm_file.debug_abbrev_atom.?).code;
try debug_abbrev.resize(wasm_file.base.allocator, needed_size);
mem.copy(u8, debug_abbrev.items, &abbrev_buf);
},
@@ -1835,7 +1836,7 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_info = &wasm_file.debug_info_atom.?.code;
+ const debug_info = &wasm_file.getAtomPtr(wasm_file.debug_info_atom.?).code;
try writeDbgInfoNopsToArrayList(self.allocator, debug_info, 0, 0, di_buf.items, jmp_amt, false);
},
else => unreachable,
@@ -2156,7 +2157,7 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void {
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_ranges = &wasm_file.debug_ranges_atom.?.code;
+ const debug_ranges = &wasm_file.getAtomPtr(wasm_file.debug_ranges_atom.?).code;
try debug_ranges.resize(wasm_file.base.allocator, needed_size);
mem.copy(u8, debug_ranges.items, di_buf.items);
},
@@ -2330,7 +2331,7 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_line = &wasm_file.debug_line_atom.?.code;
+ const debug_line = &wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code;
mem.copy(u8, buffer, debug_line.items[first_fn.off..]);
try debug_line.resize(self.allocator, debug_line.items.len + delta);
mem.copy(u8, debug_line.items[first_fn.off + delta ..], buffer);
@@ -2381,7 +2382,7 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_line = wasm_file.debug_line_atom.?.code;
+ const debug_line = &wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code;
writeDbgLineNopsBuffered(debug_line.items, 0, 0, di_buf.items, jmp_amt);
},
else => unreachable,
@@ -2526,7 +2527,7 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_info = wasm_file.debug_info_atom.?.code;
+ const debug_info = wasm_file.getAtomPtr(wasm_file.debug_info_atom.?).code;
mem.copy(u8, debug_info.items[atom.off + reloc.offset ..], &buf);
},
else => unreachable,
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index ee4518796e..b06703ed61 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -9,7 +9,7 @@ const fs = std.fs;
const leb = std.leb;
const log = std.log.scoped(.link);
-const Atom = @import("Wasm/Atom.zig");
+pub const Atom = @import("Wasm/Atom.zig");
const Dwarf = @import("Dwarf.zig");
const Module = @import("../Module.zig");
const Compilation = @import("../Compilation.zig");
@@ -31,10 +31,7 @@ const Object = @import("Wasm/Object.zig");
const Archive = @import("Wasm/Archive.zig");
const types = @import("Wasm/types.zig");
-pub const base_tag = link.File.Tag.wasm;
-
-/// deprecated: Use `@import("Wasm/Atom.zig");`
-pub const DeclBlock = Atom;
+pub const base_tag: link.File.Tag = .wasm;
base: link.File,
/// Output name of the file
@@ -47,18 +44,16 @@ llvm_object: ?*LlvmObject = null,
/// TODO: Allow setting this through a flag?
host_name: []const u8 = "env",
/// List of all `Decl` that are currently alive.
-/// This is ment for bookkeeping so we can safely cleanup all codegen memory
-/// when calling `deinit`
-decls: std.AutoHashMapUnmanaged(Module.Decl.Index, void) = .{},
+/// Each index maps to the corresponding `Atom.Index`.
+decls: std.AutoHashMapUnmanaged(Module.Decl.Index, Atom.Index) = .{},
/// List of all symbols generated by Zig code.
symbols: std.ArrayListUnmanaged(Symbol) = .{},
/// List of symbol indexes which are free to be used.
symbols_free_list: std.ArrayListUnmanaged(u32) = .{},
/// Maps atoms to their segment index
-atoms: std.AutoHashMapUnmanaged(u32, *Atom) = .{},
-/// Atoms managed and created by the linker. This contains atoms
-/// from object files, and not Atoms generated by a Decl.
-managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
+atoms: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},
+/// List of all atoms.
+managed_atoms: std.ArrayListUnmanaged(Atom) = .{},
/// Represents the index into `segments` where the 'code' section
/// lives.
code_section_index: ?u32 = null,
@@ -148,7 +143,7 @@ undefs: std.StringArrayHashMapUnmanaged(SymbolLoc) = .{},
/// Maps a symbol's location to an atom. This can be used to find meta
/// data of a symbol, such as its size, or its offset to perform a relocation.
/// Undefined (and synthetic) symbols do not have an Atom and therefore cannot be mapped.
-symbol_atom: std.AutoHashMapUnmanaged(SymbolLoc, *Atom) = .{},
+symbol_atom: std.AutoHashMapUnmanaged(SymbolLoc, Atom.Index) = .{},
/// Maps a symbol's location to its export name, which may differ from the decl's name
/// which does the exporting.
/// Note: The value represents the offset into the string table, rather than the actual string.
@@ -165,14 +160,14 @@ error_table_symbol: ?u32 = null,
// unit contains Zig code. The lifetime of these atoms are extended
// until the end of the compiler's lifetime. Meaning they're not freed
// during `flush()` in incremental-mode.
-debug_info_atom: ?*Atom = null,
-debug_line_atom: ?*Atom = null,
-debug_loc_atom: ?*Atom = null,
-debug_ranges_atom: ?*Atom = null,
-debug_abbrev_atom: ?*Atom = null,
-debug_str_atom: ?*Atom = null,
-debug_pubnames_atom: ?*Atom = null,
-debug_pubtypes_atom: ?*Atom = null,
+debug_info_atom: ?Atom.Index = null,
+debug_line_atom: ?Atom.Index = null,
+debug_loc_atom: ?Atom.Index = null,
+debug_ranges_atom: ?Atom.Index = null,
+debug_abbrev_atom: ?Atom.Index = null,
+debug_str_atom: ?Atom.Index = null,
+debug_pubnames_atom: ?Atom.Index = null,
+debug_pubtypes_atom: ?Atom.Index = null,
pub const Segment = struct {
alignment: u32,
@@ -430,10 +425,10 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
// at the end during `initializeCallCtorsFunction`.
}
- if (!options.strip and options.module != null) {
- wasm_bin.dwarf = Dwarf.init(allocator, &wasm_bin.base, options.target);
- try wasm_bin.initDebugSections();
- }
+ // if (!options.strip and options.module != null) {
+ // wasm_bin.dwarf = Dwarf.init(allocator, &wasm_bin.base, options.target);
+ // try wasm_bin.initDebugSections();
+ // }
return wasm_bin;
}
@@ -474,6 +469,7 @@ fn createSyntheticSymbol(wasm: *Wasm, name: []const u8, tag: Symbol.Tag) !Symbol
try wasm.globals.put(wasm.base.allocator, name_offset, loc);
return loc;
}
+
/// Initializes symbols and atoms for the debug sections
/// Initialization is only done when compiling Zig code.
/// When Zig is invoked as a linker instead, the atoms
@@ -516,6 +512,36 @@ fn parseObjectFile(wasm: *Wasm, path: []const u8) !bool {
return true;
}
+/// For a given `Module.Decl.Index` returns its corresponding `Atom.Index`.
+/// When the index was not found, a new `Atom` will be created, and its index will be returned.
+/// The newly created Atom is empty with default fields as specified by `Atom.empty`.
+pub fn getOrCreateAtomForDecl(wasm: *Wasm, decl_index: Module.Decl.Index) !Atom.Index {
+ const gop = try wasm.decls.getOrPut(wasm.base.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = try wasm.createAtom();
+ }
+ return gop.value_ptr.*;
+}
+
+/// Creates a new empty `Atom` and returns its `Atom.Index`
+fn createAtom(wasm: *Wasm) !Atom.Index {
+ const index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+ const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
+ atom.* = Atom.empty;
+ atom.sym_index = try wasm.allocateSymbol();
+ try wasm.symbol_atom.putNoClobber(wasm.base.allocator, .{ .file = null, .index = atom.sym_index }, index);
+
+ return index;
+}
+
+pub inline fn getAtom(wasm: *const Wasm, index: Atom.Index) Atom {
+ return wasm.managed_atoms.items[index];
+}
+
+pub inline fn getAtomPtr(wasm: *Wasm, index: Atom.Index) *Atom {
+ return &wasm.managed_atoms.items[index];
+}
+
/// Parses an archive file and will then parse each object file
/// that was found in the archive file.
/// Returns false when the file is not an archive file.
@@ -857,15 +883,16 @@ fn resolveLazySymbols(wasm: *Wasm) !void {
try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
_ = wasm.resolved_symbols.swapRemove(loc); // we don't want to emit this symbol, only use it for relocations.
- const atom = try wasm.base.allocator.create(Atom);
- errdefer wasm.base.allocator.destroy(atom);
- try wasm.managed_atoms.append(wasm.base.allocator, atom);
+ // TODO: Can we use `createAtom` here while also re-using the symbol
+ // from `createSyntheticSymbol`?
+ const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+ const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
atom.* = Atom.empty;
atom.sym_index = loc.index;
atom.alignment = 1;
- try wasm.parseAtom(atom, .{ .data = .synthetic });
- try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom);
+ try wasm.parseAtom(atom_index, .{ .data = .synthetic });
+ try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom_index);
}
if (wasm.undefs.fetchSwapRemove("__heap_end")) |kv| {
@@ -873,15 +900,14 @@ fn resolveLazySymbols(wasm: *Wasm) !void {
try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
_ = wasm.resolved_symbols.swapRemove(loc);
- const atom = try wasm.base.allocator.create(Atom);
- errdefer wasm.base.allocator.destroy(atom);
- try wasm.managed_atoms.append(wasm.base.allocator, atom);
+ const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+ const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
atom.* = Atom.empty;
atom.sym_index = loc.index;
atom.alignment = 1;
- try wasm.parseAtom(atom, .{ .data = .synthetic });
- try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom);
+ try wasm.parseAtom(atom_index, .{ .data = .synthetic });
+ try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom_index);
}
}
@@ -920,16 +946,6 @@ pub fn deinit(wasm: *Wasm) void {
if (wasm.llvm_object) |llvm_object| llvm_object.destroy(gpa);
}
- if (wasm.base.options.module) |mod| {
- var decl_it = wasm.decls.keyIterator();
- while (decl_it.next()) |decl_index_ptr| {
- const decl = mod.declPtr(decl_index_ptr.*);
- decl.link.wasm.deinit(gpa);
- }
- } else {
- assert(wasm.decls.count() == 0);
- }
-
for (wasm.func_types.items) |*func_type| {
func_type.deinit(gpa);
}
@@ -954,9 +970,8 @@ pub fn deinit(wasm: *Wasm) void {
wasm.symbol_atom.deinit(gpa);
wasm.export_names.deinit(gpa);
wasm.atoms.deinit(gpa);
- for (wasm.managed_atoms.items) |managed_atom| {
- managed_atom.deinit(gpa);
- gpa.destroy(managed_atom);
+ for (wasm.managed_atoms.items) |*managed_atom| {
+ managed_atom.deinit(wasm);
}
wasm.managed_atoms.deinit(gpa);
wasm.segments.deinit(gpa);
@@ -1014,18 +1029,24 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
- const atom = &decl.link.wasm;
- try atom.ensureInitialized(wasm);
- const gop = try wasm.decls.getOrPut(wasm.base.allocator, decl_index);
- if (gop.found_existing) {
- atom.clear();
- } else gop.value_ptr.* = {};
+ const atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
+ const atom = wasm.getAtomPtr(atom_index);
+ atom.clear();
- var decl_state: ?Dwarf.DeclState = if (wasm.dwarf) |*dwarf| try dwarf.initDeclState(mod, decl_index) else null;
- defer if (decl_state) |*ds| ds.deinit();
+ // var decl_state: ?Dwarf.DeclState = if (wasm.dwarf) |*dwarf| try dwarf.initDeclState(mod, decl_index) else null;
+ // defer if (decl_state) |*ds| ds.deinit();
var code_writer = std.ArrayList(u8).init(wasm.base.allocator);
defer code_writer.deinit();
+ // const result = try codegen.generateFunction(
+ // &wasm.base,
+ // decl.srcLoc(),
+ // func,
+ // air,
+ // liveness,
+ // &code_writer,
+ // if (decl_state) |*ds| .{ .dwarf = ds } else .none,
+ // );
const result = try codegen.generateFunction(
&wasm.base,
decl.srcLoc(),
@@ -1033,7 +1054,7 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes
air,
liveness,
&code_writer,
- if (decl_state) |*ds| .{ .dwarf = ds } else .none,
+ .none,
);
const code = switch (result) {
@@ -1045,19 +1066,19 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes
},
};
- if (wasm.dwarf) |*dwarf| {
- try dwarf.commitDeclState(
- mod,
- decl_index,
- // Actual value will be written after relocation.
- // For Wasm, this is the offset relative to the code section
- // which isn't known until flush().
- 0,
- code.len,
- &decl_state.?,
- );
- }
- return wasm.finishUpdateDecl(decl, code);
+ // if (wasm.dwarf) |*dwarf| {
+ // try dwarf.commitDeclState(
+ // mod,
+ // decl_index,
+ // // Actual value will be written after relocation.
+ // // For Wasm, this is the offset relative to the code section
+ // // which isn't known until flush().
+ // 0,
+ // code.len,
+ // &decl_state.?,
+ // );
+ // }
+ return wasm.finishUpdateDecl(decl_index, code);
}
// Generate code for the Decl, storing it in memory to be later written to
@@ -1080,17 +1101,14 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi
return;
}
- const atom = &decl.link.wasm;
- try atom.ensureInitialized(wasm);
- const gop = try wasm.decls.getOrPut(wasm.base.allocator, decl_index);
- if (gop.found_existing) {
- atom.clear();
- } else gop.value_ptr.* = {};
+ const atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
+ const atom = wasm.getAtomPtr(atom_index);
+ atom.clear();
if (decl.isExtern()) {
const variable = decl.getVariable().?;
const name = mem.sliceTo(decl.name, 0);
- return wasm.addOrUpdateImport(name, decl.link.wasm.sym_index, variable.lib_name, null);
+ return wasm.addOrUpdateImport(name, atom.sym_index, variable.lib_name, null);
}
const val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
@@ -1103,7 +1121,7 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi
.{ .ty = decl.ty, .val = val },
&code_writer,
.none,
- .{ .parent_atom_index = decl.link.wasm.sym_index },
+ .{ .parent_atom_index = atom.sym_index },
);
const code = switch (res) {
@@ -1115,7 +1133,7 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi
},
};
- return wasm.finishUpdateDecl(decl, code);
+ return wasm.finishUpdateDecl(decl_index, code);
}
pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !void {
@@ -1133,9 +1151,11 @@ pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.I
}
}
-fn finishUpdateDecl(wasm: *Wasm, decl: *Module.Decl, code: []const u8) !void {
+fn finishUpdateDecl(wasm: *Wasm, decl_index: Module.Decl.Index, code: []const u8) !void {
const mod = wasm.base.options.module.?;
- const atom: *Atom = &decl.link.wasm;
+ const decl = mod.declPtr(decl_index);
+ const atom_index = wasm.decls.get(decl_index).?;
+ const atom = wasm.getAtomPtr(atom_index);
const symbol = &wasm.symbols.items[atom.sym_index];
const full_name = try decl.getFullyQualifiedName(mod);
defer wasm.base.allocator.free(full_name);
@@ -1201,48 +1221,51 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In
const decl = mod.declPtr(decl_index);
// Create and initialize a new local symbol and atom
- const local_index = decl.link.wasm.locals.items.len;
+ const atom_index = try wasm.createAtom();
+ const parent_atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
+ const parent_atom = wasm.getAtomPtr(parent_atom_index);
+ const local_index = parent_atom.locals.items.len;
+ try parent_atom.locals.append(wasm.base.allocator, atom_index);
const fqdn = try decl.getFullyQualifiedName(mod);
defer wasm.base.allocator.free(fqdn);
const name = try std.fmt.allocPrintZ(wasm.base.allocator, "__unnamed_{s}_{d}", .{ fqdn, local_index });
defer wasm.base.allocator.free(name);
-
- const atom = try decl.link.wasm.locals.addOne(wasm.base.allocator);
- atom.* = Atom.empty;
- try atom.ensureInitialized(wasm);
- atom.alignment = tv.ty.abiAlignment(wasm.base.options.target);
- wasm.symbols.items[atom.sym_index] = .{
- .name = try wasm.string_table.put(wasm.base.allocator, name),
- .flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
- .tag = .data,
- .index = undefined,
- };
-
- try wasm.resolved_symbols.putNoClobber(wasm.base.allocator, atom.symbolLoc(), {});
-
var value_bytes = std.ArrayList(u8).init(wasm.base.allocator);
defer value_bytes.deinit();
- const result = try codegen.generateSymbol(
- &wasm.base,
- decl.srcLoc(),
- tv,
- &value_bytes,
- .none,
- .{
- .parent_atom_index = atom.sym_index,
- .addend = null,
- },
- );
- const code = switch (result) {
- .ok => value_bytes.items,
- .fail => |em| {
- decl.analysis = .codegen_failure;
- try mod.failed_decls.put(mod.gpa, decl_index, em);
- return error.AnalysisFail;
- },
+ const code = code: {
+ const atom = wasm.getAtomPtr(atom_index);
+ atom.alignment = tv.ty.abiAlignment(wasm.base.options.target);
+ wasm.symbols.items[atom.sym_index] = .{
+ .name = try wasm.string_table.put(wasm.base.allocator, name),
+ .flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
+ .tag = .data,
+ .index = undefined,
+ };
+ try wasm.resolved_symbols.putNoClobber(wasm.base.allocator, atom.symbolLoc(), {});
+
+ const result = try codegen.generateSymbol(
+ &wasm.base,
+ decl.srcLoc(),
+ tv,
+ &value_bytes,
+ .none,
+ .{
+ .parent_atom_index = atom.sym_index,
+ .addend = null,
+ },
+ );
+ break :code switch (result) {
+ .ok => value_bytes.items,
+ .fail => |em| {
+ decl.analysis = .codegen_failure;
+ try mod.failed_decls.put(mod.gpa, decl_index, em);
+ return error.AnalysisFail;
+ },
+ };
};
+ const atom = wasm.getAtomPtr(atom_index);
atom.size = @intCast(u32, code.len);
try atom.code.appendSlice(wasm.base.allocator, code);
return atom.sym_index;
@@ -1290,10 +1313,13 @@ pub fn getDeclVAddr(
) !u64 {
const mod = wasm.base.options.module.?;
const decl = mod.declPtr(decl_index);
- try decl.link.wasm.ensureInitialized(wasm);
- const target_symbol_index = decl.link.wasm.sym_index;
+
+ const target_atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
+ const target_symbol_index = wasm.getAtom(target_atom_index).sym_index;
+
assert(reloc_info.parent_atom_index != 0);
- const atom = wasm.symbol_atom.get(.{ .file = null, .index = reloc_info.parent_atom_index }).?;
+ const atom_index = wasm.symbol_atom.get(.{ .file = null, .index = reloc_info.parent_atom_index }).?;
+ const atom = wasm.getAtomPtr(atom_index);
const is_wasm32 = wasm.base.options.target.cpu.arch == .wasm32;
if (decl.ty.zigTypeTag() == .Fn) {
assert(reloc_info.addend == 0); // addend not allowed for function relocations
@@ -1321,9 +1347,10 @@ pub fn getDeclVAddr(
return target_symbol_index;
}
-pub fn deleteExport(wasm: *Wasm, exp: Export) void {
+pub fn deleteDeclExport(wasm: *Wasm, decl_index: Module.Decl.Index) void {
if (wasm.llvm_object) |_| return;
- const sym_index = exp.sym_index orelse return;
+ const atom_index = wasm.decls.get(decl_index) orelse return;
+ const sym_index = wasm.getAtom(atom_index).sym_index;
const loc: SymbolLoc = .{ .file = null, .index = sym_index };
const symbol = loc.getSymbol(wasm);
const symbol_name = wasm.string_table.get(symbol.name);
@@ -1349,7 +1376,8 @@ pub fn updateDeclExports(
}
const decl = mod.declPtr(decl_index);
- if (decl.link.wasm.getSymbolIndex() == null) return; // unititialized
+ const atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
+ const atom = wasm.getAtom(atom_index);
for (exports) |exp| {
if (exp.options.section) |section| {
@@ -1364,7 +1392,7 @@ pub fn updateDeclExports(
const export_name = try wasm.string_table.put(wasm.base.allocator, exp.options.name);
if (wasm.globals.getPtr(export_name)) |existing_loc| {
- if (existing_loc.index == decl.link.wasm.sym_index) continue;
+ if (existing_loc.index == atom.sym_index) continue;
const existing_sym: Symbol = existing_loc.getSymbol(wasm).*;
const exp_is_weak = exp.options.linkage == .Internal or exp.options.linkage == .Weak;
@@ -1385,15 +1413,16 @@ pub fn updateDeclExports(
} else if (exp_is_weak) {
continue; // to-be-exported symbol is weak, so we keep the existing symbol
} else {
- existing_loc.index = decl.link.wasm.sym_index;
+ // TODO: Revisit this, why was this needed?
+ existing_loc.index = atom.sym_index;
existing_loc.file = null;
- exp.link.wasm.sym_index = existing_loc.index;
+ // exp.link.wasm.sym_index = existing_loc.index;
}
}
- const exported_decl = mod.declPtr(exp.exported_decl);
- const sym_index = exported_decl.link.wasm.sym_index;
- const sym_loc = exported_decl.link.wasm.symbolLoc();
+ const exported_atom_index = try wasm.getOrCreateAtomForDecl(exp.exported_decl);
+ const exported_atom = wasm.getAtom(exported_atom_index);
+ const sym_loc = exported_atom.symbolLoc();
const symbol = sym_loc.getSymbol(wasm);
switch (exp.options.linkage) {
.Internal => {
@@ -1429,7 +1458,6 @@ pub fn updateDeclExports(
// if the symbol was previously undefined, remove it as an import
_ = wasm.imports.remove(sym_loc);
_ = wasm.undefs.swapRemove(exp.options.name);
- exp.link.wasm.sym_index = sym_index;
}
}
@@ -1439,11 +1467,13 @@ pub fn freeDecl(wasm: *Wasm, decl_index: Module.Decl.Index) void {
}
const mod = wasm.base.options.module.?;
const decl = mod.declPtr(decl_index);
- const atom = &decl.link.wasm;
+ const atom_index = wasm.decls.get(decl_index).?;
+ const atom = wasm.getAtomPtr(atom_index);
wasm.symbols_free_list.append(wasm.base.allocator, atom.sym_index) catch {};
_ = wasm.decls.remove(decl_index);
wasm.symbols.items[atom.sym_index].tag = .dead;
- for (atom.locals.items) |local_atom| {
+ for (atom.locals.items) |local_atom_index| {
+ const local_atom = wasm.getAtom(local_atom_index);
const local_symbol = &wasm.symbols.items[local_atom.sym_index];
local_symbol.tag = .dead; // also for any local symbol
wasm.symbols_free_list.append(wasm.base.allocator, local_atom.sym_index) catch {};
@@ -1461,7 +1491,16 @@ pub fn freeDecl(wasm: *Wasm, decl_index: Module.Decl.Index) void {
// dwarf.freeDecl(decl_index);
// }
- atom.deinit(wasm.base.allocator);
+ if (atom.next) |next_atom_index| {
+ const next_atom = wasm.getAtomPtr(next_atom_index);
+ next_atom.prev = atom.prev;
+ atom.next = null;
+ }
+ if (atom.prev) |prev_index| {
+ const prev_atom = wasm.getAtomPtr(prev_index);
+ prev_atom.next = atom.next;
+ atom.prev = null;
+ }
}
/// Appends a new entry to the indirect function table
@@ -1583,7 +1622,8 @@ const Kind = union(enum) {
};
/// Parses an Atom and inserts its metadata into the corresponding sections.
-fn parseAtom(wasm: *Wasm, atom: *Atom, kind: Kind) !void {
+fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
+ const atom = wasm.getAtomPtr(atom_index);
const symbol = (SymbolLoc{ .file = null, .index = atom.sym_index }).getSymbol(wasm);
const final_index: u32 = switch (kind) {
.function => |fn_data| result: {
@@ -1658,18 +1698,20 @@ fn parseAtom(wasm: *Wasm, atom: *Atom, kind: Kind) !void {
const segment: *Segment = &wasm.segments.items[final_index];
segment.alignment = std.math.max(segment.alignment, atom.alignment);
- try wasm.appendAtomAtIndex(final_index, atom);
+ try wasm.appendAtomAtIndex(final_index, atom_index);
}
/// From a given index, append the given `Atom` at the back of the linked list.
/// Simply inserts it into the map of atoms when it doesn't exist yet.
-pub fn appendAtomAtIndex(wasm: *Wasm, index: u32, atom: *Atom) !void {
- if (wasm.atoms.getPtr(index)) |last| {
- last.*.next = atom;
- atom.prev = last.*;
- last.* = atom;
+pub fn appendAtomAtIndex(wasm: *Wasm, index: u32, atom_index: Atom.Index) !void {
+ const atom = wasm.getAtomPtr(atom_index);
+ if (wasm.atoms.getPtr(index)) |last_index_ptr| {
+ const last = wasm.getAtomPtr(last_index_ptr.*);
+ last.*.next = atom_index;
+ atom.prev = last_index_ptr.*;
+ last_index_ptr.* = atom_index;
} else {
- try wasm.atoms.putNoClobber(wasm.base.allocator, index, atom);
+ try wasm.atoms.putNoClobber(wasm.base.allocator, index, atom_index);
}
}
@@ -1679,16 +1721,17 @@ fn allocateDebugAtoms(wasm: *Wasm) !void {
if (wasm.dwarf == null) return;
const allocAtom = struct {
- fn f(bin: *Wasm, maybe_index: *?u32, atom: *Atom) !void {
+ fn f(bin: *Wasm, maybe_index: *?u32, atom_index: Atom.Index) !void {
const index = maybe_index.* orelse idx: {
const index = @intCast(u32, bin.segments.items.len);
try bin.appendDummySegment();
maybe_index.* = index;
break :idx index;
};
+ const atom = bin.getAtomPtr(atom_index);
atom.size = @intCast(u32, atom.code.items.len);
bin.symbols.items[atom.sym_index].index = index;
- try bin.appendAtomAtIndex(index, atom);
+ try bin.appendAtomAtIndex(index, atom_index);
}
}.f;
@@ -1710,15 +1753,16 @@ fn allocateAtoms(wasm: *Wasm) !void {
var it = wasm.atoms.iterator();
while (it.next()) |entry| {
const segment = &wasm.segments.items[entry.key_ptr.*];
- var atom: *Atom = entry.value_ptr.*.getFirst();
+ var atom_index = entry.value_ptr.*;
var offset: u32 = 0;
while (true) {
+ const atom = wasm.getAtomPtr(atom_index);
const symbol_loc = atom.symbolLoc();
if (wasm.code_section_index) |index| {
if (index == entry.key_ptr.*) {
if (!wasm.resolved_symbols.contains(symbol_loc)) {
// only allocate resolved function body's.
- atom = atom.next orelse break;
+ atom_index = atom.prev orelse break;
continue;
}
}
@@ -1732,8 +1776,7 @@ fn allocateAtoms(wasm: *Wasm) !void {
atom.size,
});
offset += atom.size;
- try wasm.symbol_atom.put(wasm.base.allocator, symbol_loc, atom); // Update atom pointers
- atom = atom.next orelse break;
+ atom_index = atom.prev orelse break;
}
segment.size = std.mem.alignForwardGeneric(u32, offset, segment.alignment);
}
@@ -1867,8 +1910,8 @@ fn initializeCallCtorsFunction(wasm: *Wasm) !void {
symbol.index = func_index;
// create the atom that will be output into the final binary
- const atom = try wasm.base.allocator.create(Atom);
- errdefer wasm.base.allocator.destroy(atom);
+ const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+ const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
atom.* = .{
.size = @intCast(u32, function_body.items.len),
.offset = 0,
@@ -1879,13 +1922,13 @@ fn initializeCallCtorsFunction(wasm: *Wasm) !void {
.prev = null,
.code = function_body.moveToUnmanaged(),
};
- try wasm.managed_atoms.append(wasm.base.allocator, atom);
- try wasm.appendAtomAtIndex(wasm.code_section_index.?, atom);
- try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom);
+ try wasm.appendAtomAtIndex(wasm.code_section_index.?, atom_index);
+ try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom_index);
// `allocateAtoms` has already been called, set the atom's offset manually.
// This is fine to do manually as we insert the atom at the very end.
- atom.offset = atom.prev.?.offset + atom.prev.?.size;
+ const prev_atom = wasm.getAtom(atom.prev.?);
+ atom.offset = prev_atom.offset + prev_atom.size;
}
fn setupImports(wasm: *Wasm) !void {
@@ -2088,7 +2131,8 @@ fn setupExports(wasm: *Wasm) !void {
break :blk try wasm.string_table.put(wasm.base.allocator, sym_name);
};
const exp: types.Export = if (symbol.tag == .data) exp: {
- const atom = wasm.symbol_atom.get(sym_loc).?;
+ const atom_index = wasm.symbol_atom.get(sym_loc).?;
+ const atom = wasm.getAtom(atom_index);
const va = atom.getVA(wasm, symbol);
const global_index = @intCast(u32, wasm.imported_globals_count + wasm.wasm_globals.items.len);
try wasm.wasm_globals.append(wasm.base.allocator, .{
@@ -2193,7 +2237,8 @@ fn setupMemory(wasm: *Wasm) !void {
const segment_index = wasm.data_segments.get(".synthetic").?;
const segment = &wasm.segments.items[segment_index];
segment.offset = 0; // for simplicity we store the entire VA into atom's offset.
- const atom = wasm.symbol_atom.get(loc).?;
+ const atom_index = wasm.symbol_atom.get(loc).?;
+ const atom = wasm.getAtomPtr(atom_index);
atom.offset = @intCast(u32, mem.alignForwardGeneric(u64, memory_ptr, heap_alignment));
}
@@ -2226,7 +2271,8 @@ fn setupMemory(wasm: *Wasm) !void {
const segment_index = wasm.data_segments.get(".synthetic").?;
const segment = &wasm.segments.items[segment_index];
segment.offset = 0;
- const atom = wasm.symbol_atom.get(loc).?;
+ const atom_index = wasm.symbol_atom.get(loc).?;
+ const atom = wasm.getAtomPtr(atom_index);
atom.offset = @intCast(u32, memory_ptr);
}
@@ -2352,15 +2398,14 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
// and then return said symbol's index. The final table will be populated
// during `flush` when we know all possible error names.
- // As sym_index '0' is reserved, we use it for our stack pointer symbol
- const symbol_index = wasm.symbols_free_list.popOrNull() orelse blk: {
- const index = @intCast(u32, wasm.symbols.items.len);
- _ = try wasm.symbols.addOne(wasm.base.allocator);
- break :blk index;
- };
+ const atom_index = try wasm.createAtom();
+ const atom = wasm.getAtomPtr(atom_index);
+ const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+ atom.alignment = slice_ty.abiAlignment(wasm.base.options.target);
+ const sym_index = atom.sym_index;
const sym_name = try wasm.string_table.put(wasm.base.allocator, "__zig_err_name_table");
- const symbol = &wasm.symbols.items[symbol_index];
+ const symbol = &wasm.symbols.items[sym_index];
symbol.* = .{
.name = sym_name,
.tag = .data,
@@ -2369,20 +2414,11 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
};
symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
- const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+ try wasm.resolved_symbols.put(wasm.base.allocator, atom.symbolLoc(), {});
- const atom = try wasm.base.allocator.create(Atom);
- atom.* = Atom.empty;
- atom.sym_index = symbol_index;
- atom.alignment = slice_ty.abiAlignment(wasm.base.options.target);
- try wasm.managed_atoms.append(wasm.base.allocator, atom);
- const loc = atom.symbolLoc();
- try wasm.resolved_symbols.put(wasm.base.allocator, loc, {});
- try wasm.symbol_atom.put(wasm.base.allocator, loc, atom);
-
- log.debug("Error name table was created with symbol index: ({d})", .{symbol_index});
- wasm.error_table_symbol = symbol_index;
- return symbol_index;
+ log.debug("Error name table was created with symbol index: ({d})", .{sym_index});
+ wasm.error_table_symbol = sym_index;
+ return sym_index;
}
/// Populates the error name table, when `error_table_symbol` is not null.
@@ -2391,22 +2427,17 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
/// The table is what is being pointed to within the runtime bodies that are generated.
fn populateErrorNameTable(wasm: *Wasm) !void {
const symbol_index = wasm.error_table_symbol orelse return;
- const atom: *Atom = wasm.symbol_atom.get(.{ .file = null, .index = symbol_index }).?;
+ const atom_index = wasm.symbol_atom.get(.{ .file = null, .index = symbol_index }).?;
+ const atom = wasm.getAtomPtr(atom_index);
+
// Rather than creating a symbol for each individual error name,
// we create a symbol for the entire region of error names. We then calculate
// the pointers into the list using addends which are appended to the relocation.
- const names_atom = try wasm.base.allocator.create(Atom);
- names_atom.* = Atom.empty;
- try wasm.managed_atoms.append(wasm.base.allocator, names_atom);
- const names_symbol_index = wasm.symbols_free_list.popOrNull() orelse blk: {
- const index = @intCast(u32, wasm.symbols.items.len);
- _ = try wasm.symbols.addOne(wasm.base.allocator);
- break :blk index;
- };
- names_atom.sym_index = names_symbol_index;
+ const names_atom_index = try wasm.createAtom();
+ const names_atom = wasm.getAtomPtr(names_atom_index);
names_atom.alignment = 1;
const sym_name = try wasm.string_table.put(wasm.base.allocator, "__zig_err_names");
- const names_symbol = &wasm.symbols.items[names_symbol_index];
+ const names_symbol = &wasm.symbols.items[names_atom.sym_index];
names_symbol.* = .{
.name = sym_name,
.tag = .data,
@@ -2430,7 +2461,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
try atom.code.writer(wasm.base.allocator).writeIntLittle(u32, len - 1);
// create relocation to the error name
try atom.relocs.append(wasm.base.allocator, .{
- .index = names_symbol_index,
+ .index = names_atom.sym_index,
.relocation_type = .R_WASM_MEMORY_ADDR_I32,
.offset = offset,
.addend = @intCast(i32, addend),
@@ -2449,61 +2480,53 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
const name_loc = names_atom.symbolLoc();
try wasm.resolved_symbols.put(wasm.base.allocator, name_loc, {});
- try wasm.symbol_atom.put(wasm.base.allocator, name_loc, names_atom);
+ try wasm.symbol_atom.put(wasm.base.allocator, name_loc, names_atom_index);
// link the atoms with the rest of the binary so they can be allocated
// and relocations will be performed.
- try wasm.parseAtom(atom, .{ .data = .read_only });
- try wasm.parseAtom(names_atom, .{ .data = .read_only });
+ try wasm.parseAtom(atom_index, .{ .data = .read_only });
+ try wasm.parseAtom(names_atom_index, .{ .data = .read_only });
}
/// From a given index variable, creates a new debug section.
/// This initializes the index, appends a new segment,
/// and finally, creates a managed `Atom`.
-pub fn createDebugSectionForIndex(wasm: *Wasm, index: *?u32, name: []const u8) !*Atom {
+pub fn createDebugSectionForIndex(wasm: *Wasm, index: *?u32, name: []const u8) !Atom.Index {
const new_index = @intCast(u32, wasm.segments.items.len);
index.* = new_index;
try wasm.appendDummySegment();
- const sym_index = wasm.symbols_free_list.popOrNull() orelse idx: {
- const tmp_index = @intCast(u32, wasm.symbols.items.len);
- _ = try wasm.symbols.addOne(wasm.base.allocator);
- break :idx tmp_index;
- };
- wasm.symbols.items[sym_index] = .{
+ const atom_index = try wasm.createAtom();
+ const atom = wasm.getAtomPtr(atom_index);
+ wasm.symbols.items[atom.sym_index] = .{
.tag = .section,
.name = try wasm.string_table.put(wasm.base.allocator, name),
.index = 0,
.flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
};
- const atom = try wasm.base.allocator.create(Atom);
- atom.* = Atom.empty;
atom.alignment = 1; // debug sections are always 1-byte-aligned
- atom.sym_index = sym_index;
- try wasm.managed_atoms.append(wasm.base.allocator, atom);
- try wasm.symbol_atom.put(wasm.base.allocator, atom.symbolLoc(), atom);
- return atom;
+ return atom_index;
}
fn resetState(wasm: *Wasm) void {
for (wasm.segment_info.values()) |segment_info| {
wasm.base.allocator.free(segment_info.name);
}
- if (wasm.base.options.module) |mod| {
- var decl_it = wasm.decls.keyIterator();
- while (decl_it.next()) |decl_index_ptr| {
- const decl = mod.declPtr(decl_index_ptr.*);
- const atom = &decl.link.wasm;
- atom.next = null;
- atom.prev = null;
-
- for (atom.locals.items) |*local_atom| {
- local_atom.next = null;
- local_atom.prev = null;
- }
+
+ var atom_it = wasm.decls.valueIterator();
+ while (atom_it.next()) |atom_index| {
+ const atom = wasm.getAtomPtr(atom_index.*);
+ atom.next = null;
+ atom.prev = null;
+
+ for (atom.locals.items) |local_atom_index| {
+ const local_atom = wasm.getAtomPtr(local_atom_index);
+ local_atom.next = null;
+ local_atom.prev = null;
}
}
+
wasm.functions.clearRetainingCapacity();
wasm.exports.clearRetainingCapacity();
wasm.segments.clearRetainingCapacity();
@@ -2800,28 +2823,29 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
try wasm.setupStart();
try wasm.setupImports();
if (wasm.base.options.module) |mod| {
- var decl_it = wasm.decls.keyIterator();
- while (decl_it.next()) |decl_index_ptr| {
- const decl = mod.declPtr(decl_index_ptr.*);
+ var decl_it = wasm.decls.iterator();
+ while (decl_it.next()) |entry| {
+ const decl = mod.declPtr(entry.key_ptr.*);
if (decl.isExtern()) continue;
- const atom = &decl.*.link.wasm;
+ const atom_index = entry.value_ptr.*;
if (decl.ty.zigTypeTag() == .Fn) {
- try wasm.parseAtom(atom, .{ .function = decl.fn_link.wasm });
+ try wasm.parseAtom(atom_index, .{ .function = decl.fn_link.wasm });
} else if (decl.getVariable()) |variable| {
if (!variable.is_mutable) {
- try wasm.parseAtom(atom, .{ .data = .read_only });
+ try wasm.parseAtom(atom_index, .{ .data = .read_only });
} else if (variable.init.isUndefDeep()) {
- try wasm.parseAtom(atom, .{ .data = .uninitialized });
+ try wasm.parseAtom(atom_index, .{ .data = .uninitialized });
} else {
- try wasm.parseAtom(atom, .{ .data = .initialized });
+ try wasm.parseAtom(atom_index, .{ .data = .initialized });
}
} else {
- try wasm.parseAtom(atom, .{ .data = .read_only });
+ try wasm.parseAtom(atom_index, .{ .data = .read_only });
}
// also parse atoms for a decl's locals
- for (atom.locals.items) |*local_atom| {
- try wasm.parseAtom(local_atom, .{ .data = .read_only });
+ const atom = wasm.getAtomPtr(atom_index);
+ for (atom.locals.items) |local_atom_index| {
+ try wasm.parseAtom(local_atom_index, .{ .data = .read_only });
}
}
@@ -3066,20 +3090,22 @@ fn writeToFile(
var code_section_size: u32 = 0;
if (wasm.code_section_index) |code_index| {
const header_offset = try reserveVecSectionHeader(&binary_bytes);
- var atom: *Atom = wasm.atoms.get(code_index).?.getFirst();
+ var atom_index = wasm.atoms.get(code_index).?;
// The code section must be sorted in line with the function order.
var sorted_atoms = try std.ArrayList(*Atom).initCapacity(wasm.base.allocator, wasm.functions.count());
defer sorted_atoms.deinit();
while (true) {
+ var atom = wasm.getAtomPtr(atom_index);
if (wasm.resolved_symbols.contains(atom.symbolLoc())) {
if (!is_obj) {
atom.resolveRelocs(wasm);
}
sorted_atoms.appendAssumeCapacity(atom);
}
- atom = atom.next orelse break;
+ // atom = if (atom.prev) |prev| wasm.getAtomPtr(prev) else break;
+ atom_index = atom.prev orelse break;
}
const atom_sort_fn = struct {
@@ -3119,11 +3145,11 @@ fn writeToFile(
// do not output 'bss' section unless we import memory and therefore
// want to guarantee the data is zero initialized
if (!import_memory and std.mem.eql(u8, entry.key_ptr.*, ".bss")) continue;
- const atom_index = entry.value_ptr.*;
- const segment = wasm.segments.items[atom_index];
+ const segment_index = entry.value_ptr.*;
+ const segment = wasm.segments.items[segment_index];
if (segment.size == 0) continue; // do not emit empty segments
segment_count += 1;
- var atom: *Atom = wasm.atoms.getPtr(atom_index).?.*.getFirst();
+ var atom_index = wasm.atoms.get(segment_index).?;
// flag and index to memory section (currently, there can only be 1 memory section in wasm)
try leb.writeULEB128(binary_writer, @as(u32, 0));
@@ -3134,6 +3160,7 @@ fn writeToFile(
// fill in the offset table and the data segments
var current_offset: u32 = 0;
while (true) {
+ const atom = wasm.getAtomPtr(atom_index);
if (!is_obj) {
atom.resolveRelocs(wasm);
}
@@ -3149,8 +3176,8 @@ fn writeToFile(
try binary_writer.writeAll(atom.code.items);
current_offset += atom.size;
- if (atom.next) |next| {
- atom = next;
+ if (atom.prev) |prev| {
+ atom_index = prev;
} else {
// also pad with zeroes when last atom to ensure
// segments are aligned.
@@ -3192,15 +3219,15 @@ fn writeToFile(
}
if (!wasm.base.options.strip) {
- if (wasm.dwarf) |*dwarf| {
- const mod = wasm.base.options.module.?;
- try dwarf.writeDbgAbbrev();
- // for debug info and ranges, the address is always 0,
- // as locations are always offsets relative to 'code' section.
- try dwarf.writeDbgInfoHeader(mod, 0, code_section_size);
- try dwarf.writeDbgAranges(0, code_section_size);
- try dwarf.writeDbgLineHeader();
- }
+ // if (wasm.dwarf) |*dwarf| {
+ // const mod = wasm.base.options.module.?;
+ // try dwarf.writeDbgAbbrev();
+ // // for debug info and ranges, the address is always 0,
+ // // as locations are always offsets relative to 'code' section.
+ // try dwarf.writeDbgInfoHeader(mod, 0, code_section_size);
+ // try dwarf.writeDbgAranges(0, code_section_size);
+ // try dwarf.writeDbgLineHeader();
+ // }
var debug_bytes = std.ArrayList(u8).init(wasm.base.allocator);
defer debug_bytes.deinit();
@@ -3223,11 +3250,11 @@ fn writeToFile(
for (debug_sections) |item| {
if (item.index) |index| {
- var atom = wasm.atoms.get(index).?.getFirst();
+ var atom = wasm.getAtomPtr(wasm.atoms.get(index).?);
while (true) {
atom.resolveRelocs(wasm);
try debug_bytes.appendSlice(atom.code.items);
- atom = atom.next orelse break;
+ atom = if (atom.prev) |prev| wasm.getAtomPtr(prev) else break;
}
try emitDebugSection(&binary_bytes, debug_bytes.items, item.name);
debug_bytes.clearRetainingCapacity();
@@ -3959,7 +3986,8 @@ fn emitSymbolTable(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table:
if (symbol.isDefined()) {
try leb.writeULEB128(writer, symbol.index);
- const atom = wasm.symbol_atom.get(sym_loc).?;
+ const atom_index = wasm.symbol_atom.get(sym_loc).?;
+ const atom = wasm.getAtom(atom_index);
try leb.writeULEB128(writer, @as(u32, atom.offset));
try leb.writeULEB128(writer, @as(u32, atom.size));
}
@@ -4037,7 +4065,7 @@ fn emitCodeRelocations(
const reloc_start = binary_bytes.items.len;
var count: u32 = 0;
- var atom: *Atom = wasm.atoms.get(code_index).?.getFirst();
+ var atom: *Atom = wasm.getAtomPtr(wasm.atoms.get(code_index).?);
// for each atom, we calculate the uleb size and append that
var size_offset: u32 = 5; // account for code section size leb128
while (true) {
@@ -4055,7 +4083,7 @@ fn emitCodeRelocations(
}
log.debug("Emit relocation: {}", .{relocation});
}
- atom = atom.next orelse break;
+ atom = if (atom.prev) |prev| wasm.getAtomPtr(prev) else break;
}
if (count == 0) return;
var buf: [5]u8 = undefined;
@@ -4086,7 +4114,7 @@ fn emitDataRelocations(
// for each atom, we calculate the uleb size and append that
var size_offset: u32 = 5; // account for code section size leb128
for (wasm.data_segments.values()) |segment_index| {
- var atom: *Atom = wasm.atoms.get(segment_index).?.getFirst();
+ var atom: *Atom = wasm.getAtomPtr(wasm.atoms.get(segment_index).?);
while (true) {
size_offset += getULEB128Size(atom.size);
for (atom.relocs.items) |relocation| {
@@ -4105,7 +4133,7 @@ fn emitDataRelocations(
}
log.debug("Emit relocation: {}", .{relocation});
}
- atom = atom.next orelse break;
+ atom = if (atom.prev) |prev| wasm.getAtomPtr(prev) else break;
}
}
if (count == 0) return;
diff --git a/src/link/Wasm/Atom.zig b/src/link/Wasm/Atom.zig
index 554f98b5ca..e719f8dfcc 100644
--- a/src/link/Wasm/Atom.zig
+++ b/src/link/Wasm/Atom.zig
@@ -29,14 +29,17 @@ file: ?u16,
/// Next atom in relation to this atom.
/// When null, this atom is the last atom
-next: ?*Atom,
+next: ?Atom.Index,
/// Previous atom in relation to this atom.
/// is null when this atom is the first in its order
-prev: ?*Atom,
+prev: ?Atom.Index,
/// Contains atoms local to a decl, all managed by this `Atom`.
/// When the parent atom is being freed, it will also do so for all local atoms.
-locals: std.ArrayListUnmanaged(Atom) = .{},
+locals: std.ArrayListUnmanaged(Atom.Index) = .{},
+
+/// Alias to an unsigned 32-bit integer
+pub const Index = u32;
/// Represents a default empty wasm `Atom`
pub const empty: Atom = .{
@@ -50,14 +53,12 @@ pub const empty: Atom = .{
};
/// Frees all resources owned by this `Atom`.
-pub fn deinit(atom: *Atom, gpa: Allocator) void {
+pub fn deinit(atom: *Atom, wasm: *Wasm) void {
+ const gpa = wasm.base.allocator;
atom.relocs.deinit(gpa);
atom.code.deinit(gpa);
-
- for (atom.locals.items) |*local| {
- local.deinit(gpa);
- }
atom.locals.deinit(gpa);
+ atom.* = undefined;
}
/// Sets the length of relocations and code to '0',
@@ -78,24 +79,11 @@ pub fn format(atom: Atom, comptime fmt: []const u8, options: std.fmt.FormatOptio
});
}
-/// Returns the first `Atom` from a given atom
-pub fn getFirst(atom: *Atom) *Atom {
- var tmp = atom;
- while (tmp.prev) |prev| tmp = prev;
- return tmp;
-}
-
/// Returns the location of the symbol that represents this `Atom`
pub fn symbolLoc(atom: Atom) Wasm.SymbolLoc {
return .{ .file = atom.file, .index = atom.sym_index };
}
-pub fn ensureInitialized(atom: *Atom, wasm_bin: *Wasm) !void {
- if (atom.getSymbolIndex() != null) return; // already initialized
- atom.sym_index = try wasm_bin.allocateSymbol();
- try wasm_bin.symbol_atom.putNoClobber(wasm_bin.base.allocator, atom.symbolLoc(), atom);
-}
-
pub fn getSymbolIndex(atom: Atom) ?u32 {
if (atom.sym_index == 0) return null;
return atom.sym_index;
@@ -198,20 +186,28 @@ fn relocationValue(atom: Atom, relocation: types.Relocation, wasm_bin: *const Wa
if (symbol.isUndefined()) {
return 0;
}
- const target_atom = wasm_bin.symbol_atom.get(target_loc).?;
+ const target_atom_index = wasm_bin.symbol_atom.get(target_loc) orelse {
+ // this can only occur during incremental-compilation when a relocation
+ // still points to a freed decl. It is fine to emit the value 0 here
+ // as no actual code will point towards it.
+ return 0;
+ };
+ const target_atom = wasm_bin.getAtom(target_atom_index);
const va = @intCast(i32, target_atom.getVA(wasm_bin, symbol));
return @intCast(u32, va + relocation.addend);
},
.R_WASM_EVENT_INDEX_LEB => return symbol.index,
.R_WASM_SECTION_OFFSET_I32 => {
- const target_atom = wasm_bin.symbol_atom.get(target_loc).?;
+ const target_atom_index = wasm_bin.symbol_atom.get(target_loc).?;
+ const target_atom = wasm_bin.getAtom(target_atom_index);
const rel_value = @intCast(i32, target_atom.offset) + relocation.addend;
return @intCast(u32, rel_value);
},
.R_WASM_FUNCTION_OFFSET_I32 => {
- const target_atom = wasm_bin.symbol_atom.get(target_loc) orelse {
+ const target_atom_index = wasm_bin.symbol_atom.get(target_loc) orelse {
return @bitCast(u32, @as(i32, -1));
};
+ const target_atom = wasm_bin.getAtom(target_atom_index);
const offset: u32 = 11 + Wasm.getULEB128Size(target_atom.size); // Header (11 bytes fixed-size) + body size (leb-encoded)
const rel_value = @intCast(i32, target_atom.offset + offset) + relocation.addend;
return @intCast(u32, rel_value);
diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig
index 8f49d68712..7d4f6a4e36 100644
--- a/src/link/Wasm/Object.zig
+++ b/src/link/Wasm/Object.zig
@@ -901,14 +901,9 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
continue; // found unknown section, so skip parsing into atom as we do not know how to handle it.
};
- const atom = try gpa.create(Atom);
+ const atom_index = @intCast(Atom.Index, wasm_bin.managed_atoms.items.len);
+ const atom = try wasm_bin.managed_atoms.addOne(gpa);
atom.* = Atom.empty;
- errdefer {
- atom.deinit(gpa);
- gpa.destroy(atom);
- }
-
- try wasm_bin.managed_atoms.append(gpa, atom);
atom.file = object_index;
atom.size = relocatable_data.size;
atom.alignment = relocatable_data.getAlignment(object);
@@ -938,12 +933,12 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
.index = relocatable_data.getIndex(),
})) |symbols| {
atom.sym_index = symbols.pop();
- try wasm_bin.symbol_atom.putNoClobber(gpa, atom.symbolLoc(), atom);
+ try wasm_bin.symbol_atom.putNoClobber(gpa, atom.symbolLoc(), atom_index);
// symbols referencing the same atom will be added as alias
// or as 'parent' when they are global.
while (symbols.popOrNull()) |idx| {
- try wasm_bin.symbol_atom.putNoClobber(gpa, .{ .file = atom.file, .index = idx }, atom);
+ try wasm_bin.symbol_atom.putNoClobber(gpa, .{ .file = atom.file, .index = idx }, atom_index);
const alias_symbol = object.symtable[idx];
if (alias_symbol.isGlobal()) {
atom.sym_index = idx;
@@ -956,7 +951,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
segment.alignment = std.math.max(segment.alignment, atom.alignment);
}
- try wasm_bin.appendAtomAtIndex(final_index, atom);
+ try wasm_bin.appendAtomAtIndex(final_index, atom_index);
log.debug("Parsed into atom: '{s}' at segment index {d}", .{ object.string_table.get(object.symtable[atom.sym_index].name), final_index });
}
}
diff --git a/test/link/wasm/export-data/build.zig b/test/link/wasm/export-data/build.zig
index 283566dab3..db2ca804e8 100644
--- a/test/link/wasm/export-data/build.zig
+++ b/test/link/wasm/export-data/build.zig
@@ -23,8 +23,8 @@ pub fn build(b: *Builder) void {
check_lib.checkNext("type i32");
check_lib.checkNext("mutable false");
check_lib.checkNext("i32.const {bar_address}");
- check_lib.checkComputeCompare("foo_address", .{ .op = .eq, .value = .{ .literal = 0 } });
- check_lib.checkComputeCompare("bar_address", .{ .op = .eq, .value = .{ .literal = 4 } });
+ check_lib.checkComputeCompare("foo_address", .{ .op = .eq, .value = .{ .literal = 4 } });
+ check_lib.checkComputeCompare("bar_address", .{ .op = .eq, .value = .{ .literal = 0 } });
check_lib.checkStart("Section export");
check_lib.checkNext("entries 3");
--
cgit v1.2.3
From beb20d29db3fe945746581eba5d2f2cae1403cdb Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Wed, 1 Feb 2023 19:32:54 +0100
Subject: link: remove union types which are now internal to backends
---
src/Module.zig | 56 +++++------------------------------------------
src/Sema.zig | 10 ---------
src/arch/wasm/CodeGen.zig | 6 ++---
src/link.zig | 33 ----------------------------
src/link/Wasm.zig | 2 +-
5 files changed, 10 insertions(+), 97 deletions(-)
(limited to 'src')
diff --git a/src/Module.zig b/src/Module.zig
index f84d720d1f..b395c0a950 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -328,8 +328,6 @@ pub const ErrorInt = u32;
pub const Export = struct {
options: std.builtin.ExportOptions,
src: LazySrcLoc,
- /// Represents the position of the export, if any, in the output file.
- link: link.File.Export,
/// The Decl that performs the export. Note that this is *not* the Decl being exported.
owner_decl: Decl.Index,
/// The Decl containing the export statement. Inline function calls
@@ -533,16 +531,8 @@ pub const Decl = struct {
/// What kind of a declaration is this.
kind: Kind,
- /// Represents the position of the code in the output file.
- /// This is populated regardless of semantic analysis and code generation.
- link: link.File.LinkBlock,
-
- /// Represents the function in the linked output file, if the `Decl` is a function.
- /// This is stored here and not in `Fn` because `Decl` survives across updates but
- /// `Fn` does not.
- /// TODO Look into making `Fn` a longer lived structure and moving this field there
- /// to save on memory usage.
- fn_link: link.File.LinkFn,
+ /// TODO remove this once Wasm backend catches up
+ fn_link: ?link.File.Wasm.FnData = null,
/// The shallow set of other decls whose typed_value could possibly change if this Decl's
/// typed_value is modified.
@@ -5258,27 +5248,9 @@ pub fn clearDecl(
if (decl.ty.isFnOrHasRuntimeBits()) {
mod.comp.bin_file.freeDecl(decl_index);
- // TODO instead of a union, put this memory trailing Decl objects,
- // and allow it to be variably sized.
- decl.link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = {} },
- .elf => .{ .elf = {} },
- .macho => .{ .macho = {} },
- .plan9 => .{ .plan9 = {} },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = {} },
- .spirv => .{ .spirv = {} },
- .nvptx => .{ .nvptx = {} },
- };
decl.fn_link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = {} },
- .elf => .{ .elf = {} },
- .macho => .{ .macho = {} },
- .plan9 => .{ .plan9 = {} },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = link.File.Wasm.FnData.empty },
- .spirv => .{ .spirv = {} },
- .nvptx => .{ .nvptx = {} },
+ .wasm => link.File.Wasm.FnData.empty,
+ else => null,
};
}
if (decl.getInnerNamespace()) |namespace| {
@@ -5680,25 +5652,9 @@ pub fn allocateNewDecl(
.deletion_flag = false,
.zir_decl_index = 0,
.src_scope = src_scope,
- .link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = {} },
- .elf => .{ .elf = {} },
- .macho => .{ .macho = {} },
- .plan9 => .{ .plan9 = {} },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = {} },
- .spirv => .{ .spirv = {} },
- .nvptx => .{ .nvptx = {} },
- },
.fn_link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = {} },
- .elf => .{ .elf = {} },
- .macho => .{ .macho = {} },
- .plan9 => .{ .plan9 = {} },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = link.File.Wasm.FnData.empty },
- .spirv => .{ .spirv = {} },
- .nvptx => .{ .nvptx = {} },
+ .wasm => link.File.Wasm.FnData.empty,
+ else => null,
},
.generation = 0,
.is_pub = false,
diff --git a/src/Sema.zig b/src/Sema.zig
index 4871961753..54c3c6dc38 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -5564,16 +5564,6 @@ pub fn analyzeExport(
.visibility = borrowed_options.visibility,
},
.src = src,
- .link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = {} },
- .elf => .{ .elf = {} },
- .macho => .{ .macho = {} },
- .plan9 => .{ .plan9 = {} },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = {} },
- .spirv => .{ .spirv = {} },
- .nvptx => .{ .nvptx = {} },
- },
.owner_decl = sema.owner_decl_index,
.src_decl = block.src_decl,
.exported_decl = exported_decl_index,
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 8559a728e5..cf9c741513 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1194,7 +1194,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
const fn_info = func.decl.ty.fnInfo();
var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target);
defer func_type.deinit(func.gpa);
- func.decl.fn_link.wasm.type_index = try func.bin_file.putOrGetFuncType(func_type);
+ func.decl.fn_link.?.type_index = try func.bin_file.putOrGetFuncType(func_type);
var cc_result = try func.resolveCallingConventionValues(func.decl.ty);
defer cc_result.deinit(func.gpa);
@@ -2129,12 +2129,12 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
defer func_type.deinit(func.gpa);
const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl);
const atom = func.bin_file.getAtomPtr(atom_index);
- ext_decl.fn_link.wasm.type_index = try func.bin_file.putOrGetFuncType(func_type);
+ ext_decl.fn_link.?.type_index = try func.bin_file.putOrGetFuncType(func_type);
try func.bin_file.addOrUpdateImport(
mem.sliceTo(ext_decl.name, 0),
atom.getSymbolIndex().?,
ext_decl.getExternFn().?.lib_name,
- ext_decl.fn_link.wasm.type_index,
+ ext_decl.fn_link.?.type_index,
);
break :blk extern_fn.data.owner_decl;
} else if (func_val.castTag(.decl_ref)) |decl_ref| {
diff --git a/src/link.zig b/src/link.zig
index 0a3226f004..2b3ce51667 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -261,39 +261,6 @@ pub const File = struct {
/// of this linking operation.
lock: ?Cache.Lock = null,
- pub const LinkBlock = union {
- elf: void,
- coff: void,
- macho: void,
- plan9: void,
- c: void,
- wasm: void,
- spirv: void,
- nvptx: void,
- };
-
- pub const LinkFn = union {
- elf: void,
- coff: void,
- macho: void,
- plan9: void,
- c: void,
- wasm: Wasm.FnData,
- spirv: void,
- nvptx: void,
- };
-
- pub const Export = union {
- elf: void,
- coff: void,
- macho: void,
- plan9: void,
- c: void,
- wasm: void,
- spirv: void,
- nvptx: void,
- };
-
/// Attempts incremental linking, if the file already exists. If
/// incremental linking fails, falls back to truncating the file and
/// rewriting it. A malicious file is detected as incremental link failure
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index b06703ed61..17391b017a 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -2829,7 +2829,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
if (decl.isExtern()) continue;
const atom_index = entry.value_ptr.*;
if (decl.ty.zigTypeTag() == .Fn) {
- try wasm.parseAtom(atom_index, .{ .function = decl.fn_link.wasm });
+ try wasm.parseAtom(atom_index, .{ .function = decl.fn_link.? });
} else if (decl.getVariable()) |variable| {
if (!variable.is_mutable) {
try wasm.parseAtom(atom_index, .{ .data = .read_only });
--
cgit v1.2.3
From 490addde278001694d554a9a9fe2eb8235831143 Mon Sep 17 00:00:00 2001
From: Veikka Tuominen
Date: Wed, 1 Feb 2023 20:50:43 +0200
Subject: Sema: fix error location on comptime arg to typed generic param
Closes #14505
---
src/Sema.zig | 13 ++++++++++++-
.../comptime_arg_to_generic_fn_callee_error.zig | 21 +++++++++++++++++++++
2 files changed, 33 insertions(+), 1 deletion(-)
create mode 100644 test/cases/compile_errors/comptime_arg_to_generic_fn_callee_error.zig
(limited to 'src')
diff --git a/src/Sema.zig b/src/Sema.zig
index d306c68e08..4615c5b162 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -9015,7 +9015,18 @@ fn zirParam(
if (is_comptime and sema.preallocated_new_func != null) {
// We have a comptime value for this parameter so it should be elided from the
// function type of the function instruction in this block.
- const coerced_arg = try sema.coerce(block, param_ty, arg, src);
+ const coerced_arg = sema.coerce(block, param_ty, arg, .unneeded) catch |err| switch (err) {
+ error.NeededSourceLocation => {
+ // We are instantiating a generic function and a comptime arg
+ // cannot be coerced to the param type, but since we don't
+ // have the callee source location return `GenericPoison`
+ // so that the instantiation is failed and the coercion
+ // is handled by comptime call logic instead.
+ assert(sema.is_generic_instantiation);
+ return error.GenericPoison;
+ },
+ else => return err,
+ };
sema.inst_map.putAssumeCapacity(inst, coerced_arg);
return;
}
diff --git a/test/cases/compile_errors/comptime_arg_to_generic_fn_callee_error.zig b/test/cases/compile_errors/comptime_arg_to_generic_fn_callee_error.zig
new file mode 100644
index 0000000000..efc3f556a9
--- /dev/null
+++ b/test/cases/compile_errors/comptime_arg_to_generic_fn_callee_error.zig
@@ -0,0 +1,21 @@
+const std = @import("std");
+const MyStruct = struct {
+ a: i32,
+ b: i32,
+
+ pub fn getA(self: *List) i32 {
+ return self.items(.c);
+ }
+};
+const List = std.MultiArrayList(MyStruct);
+pub export fn entry() void {
+ var list = List{};
+ _ = MyStruct.getA(&list);
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :7:28: error: no field named 'c' in enum 'meta.FieldEnum(tmp.MyStruct)'
+// :?:?: note: enum declared here
--
cgit v1.2.3
From 86ec26b1f00ce9ee2a9d559a1ca0415d05a9b908 Mon Sep 17 00:00:00 2001
From: Evan Typanski
Date: Sat, 28 Jan 2023 11:53:38 -0500
Subject: translate-c: Fix types on assign expression bool
---
src/translate_c.zig | 5 ++++-
test/translate_c.zig | 16 ++++++++++++++++
2 files changed, 20 insertions(+), 1 deletion(-)
(limited to 'src')
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 47a21f5b5c..a6715d161c 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -4519,7 +4519,10 @@ fn transCreateNodeAssign(
defer block_scope.deinit();
const tmp = try block_scope.makeMangledName(c, "tmp");
- const rhs_node = try transExpr(c, &block_scope.base, rhs, .used);
+ var rhs_node = try transExpr(c, &block_scope.base, rhs, .used);
+ if (!exprIsBooleanType(lhs) and isBoolRes(rhs_node)) {
+ rhs_node = try Tag.bool_to_int.create(c.arena, rhs_node);
+ }
const tmp_decl = try Tag.var_simple.create(c.arena, .{ .name = tmp, .init = rhs_node });
try block_scope.statements.append(tmp_decl);
diff --git a/test/translate_c.zig b/test/translate_c.zig
index 4ecb6835f5..d2db895a5a 100644
--- a/test/translate_c.zig
+++ b/test/translate_c.zig
@@ -3900,4 +3900,20 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub const ZERO = @as(c_int, 0);
\\pub const WORLD = @as(c_int, 0o0000123);
});
+
+ cases.add("Assign expression from bool to int",
+ \\void foo(void) {
+ \\ int a;
+ \\ if (a = 1 > 0) {}
+ \\}
+ , &[_][]const u8{
+ \\pub export fn foo() void {
+ \\ var a: c_int = undefined;
+ \\ if ((blk: {
+ \\ const tmp = @boolToInt(@as(c_int, 1) > @as(c_int, 0));
+ \\ a = tmp;
+ \\ break :blk tmp;
+ \\ }) != 0) {}
+ \\}
+ });
}
--
cgit v1.2.3
From 629c3108aa71f94bd26dba8d4f20c9f3a3945bd4 Mon Sep 17 00:00:00 2001
From: Veikka Tuominen
Date: Wed, 1 Feb 2023 21:41:02 +0200
Subject: AstGen: fix orelse type coercion in call arguments
Closes #14506
---
src/AstGen.zig | 8 +++++++-
test/behavior/basic.zig | 18 ++++++++++++++++++
2 files changed, 25 insertions(+), 1 deletion(-)
(limited to 'src')
diff --git a/src/AstGen.zig b/src/AstGen.zig
index a5667ce9e8..10673a2b37 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -8721,6 +8721,7 @@ fn callExpr(
defer arg_block.unstack();
// `call_inst` is reused to provide the param type.
+ arg_block.rl_ty_inst = call_inst;
const arg_ref = try expr(&arg_block, &arg_block.base, .{ .rl = .{ .coerced_ty = call_inst }, .ctx = .fn_arg }, param_node);
_ = try arg_block.addBreak(.break_inline, call_index, arg_ref);
@@ -10869,7 +10870,12 @@ const GenZir = struct {
// we emit ZIR for the block break instructions to have the result values,
// and then rvalue() on that to pass the value to the result location.
switch (parent_ri.rl) {
- .ty, .coerced_ty => |ty_inst| {
+ .coerced_ty => |ty_inst| {
+ // Type coercion needs to happen before breaks.
+ gz.rl_ty_inst = ty_inst;
+ gz.break_result_info = .{ .rl = .{ .ty = ty_inst } };
+ },
+ .ty => |ty_inst| {
gz.rl_ty_inst = ty_inst;
gz.break_result_info = parent_ri;
},
diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig
index 8a97b3cbcd..b82bfab99e 100644
--- a/test/behavior/basic.zig
+++ b/test/behavior/basic.zig
@@ -1125,3 +1125,21 @@ test "returning an opaque type from a function" {
};
try expect(S.foo(123).b == 123);
}
+
+test "orelse coercion as function argument" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const Loc = struct { start: i32 = -1 };
+ const Container = struct {
+ a: ?Loc = null,
+ fn init(a: Loc) @This() {
+ return .{
+ .a = a,
+ };
+ }
+ };
+ var optional: ?Loc = .{};
+ var foo = Container.init(optional orelse .{});
+ try expect(foo.a.?.start == -1);
+}
--
cgit v1.2.3
From ea6e0e33a7630fb78550a5567a98420c3377350c Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 1 Feb 2023 18:42:29 -0700
Subject: zig build: add executable bit and file path to package hash
Unfortunately, due to the Windows equivalent of executable permissions
being a bit tricky, there is follow-up work to be done.
What is done in this commit is the hash modifications. At the fetch
layer, executable bits inside packages are ignored. In the hash
computation layer, executable bit is implemented for POSIX but not yet
for Windows. This means that the hash will not break again in the future
for packages that do not have any executable files, but it will break
for packages that do.
This is a hash-breaking change.
Closes #14308
---
lib/std/fs/file.zig | 3 ++-
lib/std/tar.zig | 23 +++++++++++++++++++++++
src/Package.zig | 24 +++++++++++++++++++++++-
3 files changed, 48 insertions(+), 2 deletions(-)
(limited to 'src')
diff --git a/lib/std/fs/file.zig b/lib/std/fs/file.zig
index a6ecc37d92..1ba4bc18fd 100644
--- a/lib/std/fs/file.zig
+++ b/lib/std/fs/file.zig
@@ -179,7 +179,7 @@ pub const File = struct {
lock_nonblocking: bool = false,
/// For POSIX systems this is the file system mode the file will
- /// be created with.
+ /// be created with. On other systems this is always 0.
mode: Mode = default_mode,
/// Setting this to `.blocking` prevents `O.NONBLOCK` from being passed even
@@ -307,6 +307,7 @@ pub const File = struct {
/// is unique to each filesystem.
inode: INode,
size: u64,
+ /// This is available on POSIX systems and is always 0 otherwise.
mode: Mode,
kind: Kind,
diff --git a/lib/std/tar.zig b/lib/std/tar.zig
index 4f6a77c6ba..91772d7319 100644
--- a/lib/std/tar.zig
+++ b/lib/std/tar.zig
@@ -1,6 +1,18 @@
pub const Options = struct {
/// Number of directory levels to skip when extracting files.
strip_components: u32 = 0,
+ /// How to handle the "mode" property of files from within the tar file.
+ mode_mode: ModeMode = .executable_bit_only,
+
+ const ModeMode = enum {
+ /// The mode from the tar file is completely ignored. Files are created
+ /// with the default mode when creating files.
+ ignore,
+ /// The mode from the tar file is inspected for the owner executable bit
+ /// only. This bit is copied to the group and other executable bits.
+ /// Other bits of the mode are left as the default when creating files.
+ executable_bit_only,
+ };
};
pub const Header = struct {
@@ -72,6 +84,17 @@ pub const Header = struct {
};
pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !void {
+ switch (options.mode_mode) {
+ .ignore => {},
+ .executable_bit_only => {
+ // This code does not look at the mode bits yet. To implement this feature,
+ // the implementation must be adjusted to look at the mode, and check the
+ // user executable bit, then call fchmod on newly created files when
+ // the executable bit is supposed to be set.
+ // It also needs to properly deal with ACLs on Windows.
+ @panic("TODO: unimplemented: tar ModeMode.executable_bit_only");
+ },
+ }
var file_name_buffer: [255]u8 = undefined;
var buffer: [512 * 8]u8 = undefined;
var start: usize = 0;
diff --git a/src/Package.zig b/src/Package.zig
index ebe84b8444..0f036b9ef5 100644
--- a/src/Package.zig
+++ b/src/Package.zig
@@ -1,5 +1,6 @@
const Package = @This();
+const builtin = @import("builtin");
const std = @import("std");
const fs = std.fs;
const mem = std.mem;
@@ -440,6 +441,12 @@ fn unpackTarball(
try std.tar.pipeToFileSystem(out_dir, decompress.reader(), .{
.strip_components = 1,
+ // TODO: we would like to set this to executable_bit_only, but two
+ // things need to happen before that:
+ // 1. the tar implementation needs to support it
+ // 2. the hashing algorithm here needs to support detecting the is_executable
+ // bit on Windows from the ACLs (see the isExecutable function).
+ .mode_mode = .ignore,
});
}
@@ -468,7 +475,7 @@ const HashedFile = struct {
hash: [Hash.digest_length]u8,
failure: Error!void,
- const Error = fs.File.OpenError || fs.File.ReadError;
+ const Error = fs.File.OpenError || fs.File.ReadError || fs.File.StatError;
fn lessThan(context: void, lhs: *const HashedFile, rhs: *const HashedFile) bool {
_ = context;
@@ -544,6 +551,8 @@ fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void
var buf: [8000]u8 = undefined;
var file = try dir.openFile(hashed_file.path, .{});
var hasher = Hash.init(.{});
+ hasher.update(hashed_file.path);
+ hasher.update(&.{ 0, @boolToInt(try isExecutable(file)) });
while (true) {
const bytes_read = try file.read(&buf);
if (bytes_read == 0) break;
@@ -552,6 +561,19 @@ fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void
hasher.final(&hashed_file.hash);
}
+fn isExecutable(file: fs.File) !bool {
+ if (builtin.os.tag == .windows) {
+ // TODO check the ACL on Windows.
+ // Until this is implemented, this could be a false negative on
+ // Windows, which is why we do not yet set executable_bit_only above
+ // when unpacking the tarball.
+ return false;
+ } else {
+ const stat = try file.stat();
+ return (stat.mode & std.os.S.IXUSR) != 0;
+ }
+}
+
const hex_charset = "0123456789abcdef";
fn hex64(x: u64) [16]u8 {
--
cgit v1.2.3
From 24ff8a1a5fc00c405e5506251f11d23653d6c8b5 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 1 Feb 2023 20:02:35 -0700
Subject: zig build: use multihash for the hash field
https://multiformats.io/multihash/
Still, only SHA2-256 is supported. This is only intended to future-proof
the hash field of the manifest.
closes #14284
---
src/Package.zig | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++-------
1 file changed, 67 insertions(+), 9 deletions(-)
(limited to 'src')
diff --git a/src/Package.zig b/src/Package.zig
index 0f036b9ef5..35b1ff5056 100644
--- a/src/Package.zig
+++ b/src/Package.zig
@@ -299,16 +299,37 @@ fn fetchAndUnpack(
// Check if the expected_hash is already present in the global package
// cache, and thereby avoid both fetching and unpacking.
if (expected_hash) |h| cached: {
- if (h.len != 2 * Hash.digest_length) {
+ const hex_multihash_len = 2 * multihash_len;
+ if (h.len >= 2) {
+ const their_multihash_func = std.fmt.parseInt(u8, h[0..2], 16) catch |err| {
+ return reportError(
+ ini,
+ comp_directory,
+ h.ptr,
+ "invalid multihash value: unable to parse hash function: {s}",
+ .{@errorName(err)},
+ );
+ };
+ if (@intToEnum(MultihashFunction, their_multihash_func) != multihash_function) {
+ return reportError(
+ ini,
+ comp_directory,
+ h.ptr,
+ "unsupported hash function: only sha2-256 is supported",
+ .{},
+ );
+ }
+ }
+ if (h.len != hex_multihash_len) {
return reportError(
ini,
comp_directory,
h.ptr,
"wrong hash size. expected: {d}, found: {d}",
- .{ Hash.digest_length, h.len },
+ .{ hex_multihash_len, h.len },
);
}
- const hex_digest = h[0 .. 2 * Hash.digest_length];
+ const hex_digest = h[0..hex_multihash_len];
const pkg_dir_sub_path = "p" ++ s ++ hex_digest;
var pkg_dir = global_cache_directory.handle.openDir(pkg_dir_sub_path, .{}) catch |err| switch (err) {
error.FileNotFound => break :cached,
@@ -397,8 +418,8 @@ fn fetchAndUnpack(
const pkg_dir_sub_path = "p" ++ s ++ hexDigest(actual_hash);
try renameTmpIntoCache(global_cache_directory.handle, tmp_dir_sub_path, pkg_dir_sub_path);
+ const actual_hex = hexDigest(actual_hash);
if (expected_hash) |h| {
- const actual_hex = hexDigest(actual_hash);
if (!mem.eql(u8, h, &actual_hex)) {
return reportError(
ini,
@@ -414,7 +435,7 @@ fn fetchAndUnpack(
comp_directory,
url.ptr,
"url field is missing corresponding hash field: hash={s}",
- .{std.fmt.fmtSliceHexLower(&actual_hash)},
+ .{&actual_hex},
);
}
@@ -592,11 +613,30 @@ test hex64 {
try std.testing.expectEqualStrings("[00efcdab78563412]", s);
}
-fn hexDigest(digest: [Hash.digest_length]u8) [Hash.digest_length * 2]u8 {
- var result: [Hash.digest_length * 2]u8 = undefined;
+const multihash_function: MultihashFunction = switch (Hash) {
+ std.crypto.hash.sha2.Sha256 => .@"sha2-256",
+ else => @compileError("unreachable"),
+};
+comptime {
+ // We avoid unnecessary uleb128 code in hexDigest by asserting here the
+ // values are small enough to be contained in the one-byte encoding.
+ assert(@enumToInt(multihash_function) < 127);
+ assert(Hash.digest_length < 127);
+}
+const multihash_len = 1 + 1 + Hash.digest_length;
+
+fn hexDigest(digest: [Hash.digest_length]u8) [multihash_len * 2]u8 {
+ var result: [multihash_len * 2]u8 = undefined;
+
+ result[0] = hex_charset[@enumToInt(multihash_function) >> 4];
+ result[1] = hex_charset[@enumToInt(multihash_function) & 15];
+
+ result[2] = hex_charset[Hash.digest_length >> 4];
+ result[3] = hex_charset[Hash.digest_length & 15];
+
for (digest) |byte, i| {
- result[i * 2 + 0] = hex_charset[byte >> 4];
- result[i * 2 + 1] = hex_charset[byte & 15];
+ result[4 + i * 2] = hex_charset[byte >> 4];
+ result[5 + i * 2] = hex_charset[byte & 15];
}
return result;
}
@@ -629,3 +669,21 @@ fn renameTmpIntoCache(
break;
}
}
+
+const MultihashFunction = enum(u16) {
+ identity = 0x00,
+ sha1 = 0x11,
+ @"sha2-256" = 0x12,
+ @"sha2-512" = 0x13,
+ @"sha3-512" = 0x14,
+ @"sha3-384" = 0x15,
+ @"sha3-256" = 0x16,
+ @"sha3-224" = 0x17,
+ @"sha2-384" = 0x20,
+ @"sha2-256-trunc254-padded" = 0x1012,
+ @"sha2-224" = 0x1013,
+ @"sha2-512-224" = 0x1014,
+ @"sha2-512-256" = 0x1015,
+ @"blake2b-256" = 0xb220,
+ _,
+};
--
cgit v1.2.3
From c3abb63fe99eb7091e5841f8e65f197b77d868a8 Mon Sep 17 00:00:00 2001
From: Krzysztof Wolicki Der Teufel
Date: Thu, 2 Feb 2023 14:46:42 +0100
Subject: autodoc: Added `@qualCast` builtin function handling
---
lib/docs/main.js | 4 ++++
src/Autodoc.zig | 1 +
2 files changed, 5 insertions(+)
(limited to 'src')
diff --git a/lib/docs/main.js b/lib/docs/main.js
index d488d018a6..62af5866b6 100644
--- a/lib/docs/main.js
+++ b/lib/docs/main.js
@@ -1354,6 +1354,10 @@ const NAV_MODES = {
payloadHtml += "ptrCast";
break;
}
+ case "qual_cast": {
+ payloadHtml += "qualCast";
+ break;
+ }
case "truncate": {
payloadHtml += "truncate";
break;
diff --git a/src/Autodoc.zig b/src/Autodoc.zig
index 8afc9c859b..0798ec198f 100644
--- a/src/Autodoc.zig
+++ b/src/Autodoc.zig
@@ -1400,6 +1400,7 @@ fn walkInstruction(
.float_cast,
.int_cast,
.ptr_cast,
+ .qual_cast,
.truncate,
.align_cast,
.has_decl,
--
cgit v1.2.3
From 873bb29c984b976021fb9ca95ad3298e03a8b3ff Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 2 Feb 2023 17:32:06 -0700
Subject: introduce ZON: Zig Object Notation
* std.zig.parse is moved to std.zig.Ast.parse
* the new function has an additional parameter that requires passing
Mode.zig or Mode.zon
* moved parser.zig code to Parse.zig
* added parseZon function next to parseRoot function
---
CMakeLists.txt | 2 +-
lib/std/Build/OptionsStep.zig | 2 +-
lib/std/zig.zig | 1 -
lib/std/zig/Ast.zig | 78 +-
lib/std/zig/Parse.zig | 3816 ++++++++++++++++++++++++++++++++++++++++
lib/std/zig/parse.zig | 3852 -----------------------------------------
lib/std/zig/parser_test.zig | 4 +-
lib/std/zig/perf_test.zig | 3 +-
src/Module.zig | 6 +-
src/main.zig | 10 +-
10 files changed, 3898 insertions(+), 3876 deletions(-)
create mode 100644 lib/std/zig/Parse.zig
delete mode 100644 lib/std/zig/parse.zig
(limited to 'src')
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8a9de7bdc1..f8029fdcde 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -513,7 +513,7 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/std/zig/Ast.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/CrossTarget.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/c_builtins.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/zig/parse.zig"
+ "${CMAKE_SOURCE_DIR}/lib/std/zig/Parse.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/render.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/string_literal.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/system.zig"
diff --git a/lib/std/Build/OptionsStep.zig b/lib/std/Build/OptionsStep.zig
index a353737512..c1d2c8454a 100644
--- a/lib/std/Build/OptionsStep.zig
+++ b/lib/std/Build/OptionsStep.zig
@@ -367,5 +367,5 @@ test "OptionsStep" {
\\
, options.contents.items);
- _ = try std.zig.parse(arena.allocator(), try options.contents.toOwnedSliceSentinel(0));
+ _ = try std.zig.Ast.parse(arena.allocator(), try options.contents.toOwnedSliceSentinel(0), .zig);
}
diff --git a/lib/std/zig.zig b/lib/std/zig.zig
index bce8f6ce3c..f85cf75e60 100644
--- a/lib/std/zig.zig
+++ b/lib/std/zig.zig
@@ -8,7 +8,6 @@ pub const Tokenizer = tokenizer.Tokenizer;
pub const fmtId = fmt.fmtId;
pub const fmtEscapes = fmt.fmtEscapes;
pub const isValidId = fmt.isValidId;
-pub const parse = @import("zig/parse.zig").parse;
pub const string_literal = @import("zig/string_literal.zig");
pub const number_literal = @import("zig/number_literal.zig");
pub const primitives = @import("zig/primitives.zig");
diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig
index f312093aa3..a9a02606eb 100644
--- a/lib/std/zig/Ast.zig
+++ b/lib/std/zig/Ast.zig
@@ -11,13 +11,6 @@ extra_data: []Node.Index,
errors: []const Error,
-const std = @import("../std.zig");
-const assert = std.debug.assert;
-const testing = std.testing;
-const mem = std.mem;
-const Token = std.zig.Token;
-const Ast = @This();
-
pub const TokenIndex = u32;
pub const ByteOffset = u32;
@@ -34,7 +27,7 @@ pub const Location = struct {
line_end: usize,
};
-pub fn deinit(tree: *Ast, gpa: mem.Allocator) void {
+pub fn deinit(tree: *Ast, gpa: Allocator) void {
tree.tokens.deinit(gpa);
tree.nodes.deinit(gpa);
gpa.free(tree.extra_data);
@@ -48,11 +41,69 @@ pub const RenderError = error{
OutOfMemory,
};
+pub const Mode = enum { zig, zon };
+
+/// Result should be freed with tree.deinit() when there are
+/// no more references to any of the tokens or nodes.
+pub fn parse(gpa: Allocator, source: [:0]const u8, mode: Mode) Allocator.Error!Ast {
+ var tokens = Ast.TokenList{};
+ defer tokens.deinit(gpa);
+
+ // Empirically, the zig std lib has an 8:1 ratio of source bytes to token count.
+ const estimated_token_count = source.len / 8;
+ try tokens.ensureTotalCapacity(gpa, estimated_token_count);
+
+ var tokenizer = std.zig.Tokenizer.init(source);
+ while (true) {
+ const token = tokenizer.next();
+ try tokens.append(gpa, .{
+ .tag = token.tag,
+ .start = @intCast(u32, token.loc.start),
+ });
+ if (token.tag == .eof) break;
+ }
+
+ var parser: Parse = .{
+ .source = source,
+ .gpa = gpa,
+ .token_tags = tokens.items(.tag),
+ .token_starts = tokens.items(.start),
+ .errors = .{},
+ .nodes = .{},
+ .extra_data = .{},
+ .scratch = .{},
+ .tok_i = 0,
+ };
+ defer parser.errors.deinit(gpa);
+ defer parser.nodes.deinit(gpa);
+ defer parser.extra_data.deinit(gpa);
+ defer parser.scratch.deinit(gpa);
+
+ // Empirically, Zig source code has a 2:1 ratio of tokens to AST nodes.
+ // Make sure at least 1 so we can use appendAssumeCapacity on the root node below.
+ const estimated_node_count = (tokens.len + 2) / 2;
+ try parser.nodes.ensureTotalCapacity(gpa, estimated_node_count);
+
+ switch (mode) {
+ .zig => try parser.parseRoot(),
+ .zon => try parser.parseZon(),
+ }
+
+ // TODO experiment with compacting the MultiArrayList slices here
+ return Ast{
+ .source = source,
+ .tokens = tokens.toOwnedSlice(),
+ .nodes = parser.nodes.toOwnedSlice(),
+ .extra_data = try parser.extra_data.toOwnedSlice(gpa),
+ .errors = try parser.errors.toOwnedSlice(gpa),
+ };
+}
+
/// `gpa` is used for allocating the resulting formatted source code, as well as
/// for allocating extra stack memory if needed, because this function utilizes recursion.
/// Note: that's not actually true yet, see https://github.com/ziglang/zig/issues/1006.
/// Caller owns the returned slice of bytes, allocated with `gpa`.
-pub fn render(tree: Ast, gpa: mem.Allocator) RenderError![]u8 {
+pub fn render(tree: Ast, gpa: Allocator) RenderError![]u8 {
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
@@ -3347,3 +3398,12 @@ pub const Node = struct {
rparen: TokenIndex,
};
};
+
+const std = @import("../std.zig");
+const assert = std.debug.assert;
+const testing = std.testing;
+const mem = std.mem;
+const Token = std.zig.Token;
+const Ast = @This();
+const Allocator = std.mem.Allocator;
+const Parse = @import("Parse.zig");
diff --git a/lib/std/zig/Parse.zig b/lib/std/zig/Parse.zig
new file mode 100644
index 0000000000..f599a08f55
--- /dev/null
+++ b/lib/std/zig/Parse.zig
@@ -0,0 +1,3816 @@
+//! Represents in-progress parsing, will be converted to an Ast after completion.
+
+pub const Error = error{ParseError} || Allocator.Error;
+
+gpa: Allocator,
+source: []const u8,
+token_tags: []const Token.Tag,
+token_starts: []const Ast.ByteOffset,
+tok_i: TokenIndex,
+errors: std.ArrayListUnmanaged(AstError),
+nodes: Ast.NodeList,
+extra_data: std.ArrayListUnmanaged(Node.Index),
+scratch: std.ArrayListUnmanaged(Node.Index),
+
+const SmallSpan = union(enum) {
+ zero_or_one: Node.Index,
+ multi: Node.SubRange,
+};
+
+const Members = struct {
+ len: usize,
+ lhs: Node.Index,
+ rhs: Node.Index,
+ trailing: bool,
+
+ fn toSpan(self: Members, p: *Parse) !Node.SubRange {
+ if (self.len <= 2) {
+ const nodes = [2]Node.Index{ self.lhs, self.rhs };
+ return p.listToSpan(nodes[0..self.len]);
+ } else {
+ return Node.SubRange{ .start = self.lhs, .end = self.rhs };
+ }
+ }
+};
+
+fn listToSpan(p: *Parse, list: []const Node.Index) !Node.SubRange {
+ try p.extra_data.appendSlice(p.gpa, list);
+ return Node.SubRange{
+ .start = @intCast(Node.Index, p.extra_data.items.len - list.len),
+ .end = @intCast(Node.Index, p.extra_data.items.len),
+ };
+}
+
+fn addNode(p: *Parse, elem: Ast.NodeList.Elem) Allocator.Error!Node.Index {
+ const result = @intCast(Node.Index, p.nodes.len);
+ try p.nodes.append(p.gpa, elem);
+ return result;
+}
+
+fn setNode(p: *Parse, i: usize, elem: Ast.NodeList.Elem) Node.Index {
+ p.nodes.set(i, elem);
+ return @intCast(Node.Index, i);
+}
+
+fn reserveNode(p: *Parse, tag: Ast.Node.Tag) !usize {
+ try p.nodes.resize(p.gpa, p.nodes.len + 1);
+ p.nodes.items(.tag)[p.nodes.len - 1] = tag;
+ return p.nodes.len - 1;
+}
+
+fn unreserveNode(p: *Parse, node_index: usize) void {
+ if (p.nodes.len == node_index) {
+ p.nodes.resize(p.gpa, p.nodes.len - 1) catch unreachable;
+ } else {
+ // There is a zombie node left in the tree, let's make it as inoffensive as possible
+ // (sadly there's no no-op node)
+ p.nodes.items(.tag)[node_index] = .unreachable_literal;
+ p.nodes.items(.main_token)[node_index] = p.tok_i;
+ }
+}
+
+fn addExtra(p: *Parse, extra: anytype) Allocator.Error!Node.Index {
+ const fields = std.meta.fields(@TypeOf(extra));
+ try p.extra_data.ensureUnusedCapacity(p.gpa, fields.len);
+ const result = @intCast(u32, p.extra_data.items.len);
+ inline for (fields) |field| {
+ comptime assert(field.type == Node.Index);
+ p.extra_data.appendAssumeCapacity(@field(extra, field.name));
+ }
+ return result;
+}
+
+fn warnExpected(p: *Parse, expected_token: Token.Tag) error{OutOfMemory}!void {
+ @setCold(true);
+ try p.warnMsg(.{
+ .tag = .expected_token,
+ .token = p.tok_i,
+ .extra = .{ .expected_tag = expected_token },
+ });
+}
+
+fn warn(p: *Parse, error_tag: AstError.Tag) error{OutOfMemory}!void {
+ @setCold(true);
+ try p.warnMsg(.{ .tag = error_tag, .token = p.tok_i });
+}
+
+fn warnMsg(p: *Parse, msg: Ast.Error) error{OutOfMemory}!void {
+ @setCold(true);
+ switch (msg.tag) {
+ .expected_semi_after_decl,
+ .expected_semi_after_stmt,
+ .expected_comma_after_field,
+ .expected_comma_after_arg,
+ .expected_comma_after_param,
+ .expected_comma_after_initializer,
+ .expected_comma_after_switch_prong,
+ .expected_semi_or_else,
+ .expected_semi_or_lbrace,
+ .expected_token,
+ .expected_block,
+ .expected_block_or_assignment,
+ .expected_block_or_expr,
+ .expected_block_or_field,
+ .expected_expr,
+ .expected_expr_or_assignment,
+ .expected_fn,
+ .expected_inlinable,
+ .expected_labelable,
+ .expected_param_list,
+ .expected_prefix_expr,
+ .expected_primary_type_expr,
+ .expected_pub_item,
+ .expected_return_type,
+ .expected_suffix_op,
+ .expected_type_expr,
+ .expected_var_decl,
+ .expected_var_decl_or_fn,
+ .expected_loop_payload,
+ .expected_container,
+ => if (msg.token != 0 and !p.tokensOnSameLine(msg.token - 1, msg.token)) {
+ var copy = msg;
+ copy.token_is_prev = true;
+ copy.token -= 1;
+ return p.errors.append(p.gpa, copy);
+ },
+ else => {},
+ }
+ try p.errors.append(p.gpa, msg);
+}
+
+fn fail(p: *Parse, tag: Ast.Error.Tag) error{ ParseError, OutOfMemory } {
+ @setCold(true);
+ return p.failMsg(.{ .tag = tag, .token = p.tok_i });
+}
+
+fn failExpected(p: *Parse, expected_token: Token.Tag) error{ ParseError, OutOfMemory } {
+ @setCold(true);
+ return p.failMsg(.{
+ .tag = .expected_token,
+ .token = p.tok_i,
+ .extra = .{ .expected_tag = expected_token },
+ });
+}
+
+fn failMsg(p: *Parse, msg: Ast.Error) error{ ParseError, OutOfMemory } {
+ @setCold(true);
+ try p.warnMsg(msg);
+ return error.ParseError;
+}
+
+/// Root <- skip container_doc_comment? ContainerMembers eof
+pub fn parseRoot(p: *Parse) !void {
+ // Root node must be index 0.
+ p.nodes.appendAssumeCapacity(.{
+ .tag = .root,
+ .main_token = 0,
+ .data = undefined,
+ });
+ const root_members = try p.parseContainerMembers();
+ const root_decls = try root_members.toSpan(p);
+ if (p.token_tags[p.tok_i] != .eof) {
+ try p.warnExpected(.eof);
+ }
+ p.nodes.items(.data)[0] = .{
+ .lhs = root_decls.start,
+ .rhs = root_decls.end,
+ };
+}
+
+/// Parse in ZON mode. Subset of the language.
+/// TODO: set a flag in Parse struct, and honor that flag
+/// by emitting compilation errors when non-zon nodes are encountered.
+pub fn parseZon(p: *Parse) !void {
+ const node_index = p.parseExpr() catch |err| switch (err) {
+ error.ParseError => {
+ assert(p.errors.items.len > 0);
+ return;
+ },
+ else => |e| return e,
+ };
+ assert(node_index == 0);
+ if (p.token_tags[p.tok_i] != .eof) {
+ try p.warnExpected(.eof);
+ }
+}
+
+/// ContainerMembers <- ContainerDeclarations (ContainerField COMMA)* (ContainerField / ContainerDeclarations)
+///
+/// ContainerDeclarations
+///     <- TestDecl ContainerDeclarations
+///      / ComptimeDecl ContainerDeclarations
+///      / doc_comment? KEYWORD_pub? Decl ContainerDeclarations
+///      /
+///
+/// ComptimeDecl <- KEYWORD_comptime Block
+///
+/// Parses all members of a container body, recovering from errors so that
+/// as many members as possible are reported in one pass. Member node
+/// indexes are accumulated on `p.scratch` and returned as a `Members` span.
+fn parseContainerMembers(p: *Parse) !Members {
+    const scratch_top = p.scratch.items.len;
+    defer p.scratch.shrinkRetainingCapacity(scratch_top);
+
+    // Tracks the fields-vs-declarations ordering so that a declaration
+    // appearing between two fields can be diagnosed exactly once.
+    var field_state: union(enum) {
+        /// No fields have been seen.
+        none,
+        /// Currently parsing fields.
+        seen,
+        /// Saw fields and then a declaration after them.
+        /// Payload is first token of previous declaration.
+        end: Node.Index,
+        /// There was a declaration between fields, don't report more errors.
+        err,
+    } = .none;
+
+    // Main token of the most recently parsed field; used in the
+    // decl-between-fields diagnostic notes.
+    var last_field: TokenIndex = undefined;
+
+    // Skip container doc comments.
+    while (p.eatToken(.container_doc_comment)) |_| {}
+
+    var trailing = false;
+    while (true) {
+        const doc_comment = try p.eatDocComments();
+
+        switch (p.token_tags[p.tok_i]) {
+            .keyword_test => {
+                // Doc comments are not allowed on test declarations.
+                if (doc_comment) |some| {
+                    try p.warnMsg(.{ .tag = .test_doc_comment, .token = some });
+                }
+                const test_decl_node = try p.expectTestDeclRecoverable();
+                if (test_decl_node != 0) {
+                    if (field_state == .seen) {
+                        field_state = .{ .end = test_decl_node };
+                    }
+                    try p.scratch.append(p.gpa, test_decl_node);
+                }
+                trailing = false;
+            },
+            .keyword_comptime => switch (p.token_tags[p.tok_i + 1]) {
+                // `comptime { ... }` block declaration.
+                .l_brace => {
+                    if (doc_comment) |some| {
+                        try p.warnMsg(.{ .tag = .comptime_doc_comment, .token = some });
+                    }
+                    const comptime_token = p.nextToken();
+                    const block = p.parseBlock() catch |err| switch (err) {
+                        error.OutOfMemory => return error.OutOfMemory,
+                        error.ParseError => blk: {
+                            p.findNextContainerMember();
+                            break :blk null_node;
+                        },
+                    };
+                    if (block != 0) {
+                        const comptime_node = try p.addNode(.{
+                            .tag = .@"comptime",
+                            .main_token = comptime_token,
+                            .data = .{
+                                .lhs = block,
+                                .rhs = undefined,
+                            },
+                        });
+                        if (field_state == .seen) {
+                            field_state = .{ .end = comptime_node };
+                        }
+                        try p.scratch.append(p.gpa, comptime_node);
+                    }
+                    trailing = false;
+                },
+                // `comptime` field. NOTE: this arm mirrors the field handling
+                // in the catch-all `else` branch of the outer switch below.
+                else => {
+                    const identifier = p.tok_i;
+                    defer last_field = identifier;
+                    const container_field = p.expectContainerField() catch |err| switch (err) {
+                        error.OutOfMemory => return error.OutOfMemory,
+                        error.ParseError => {
+                            p.findNextContainerMember();
+                            continue;
+                        },
+                    };
+                    switch (field_state) {
+                        .none => field_state = .seen,
+                        .err, .seen => {},
+                        // A field after declarations that followed fields:
+                        // report once with notes, then suppress further errors.
+                        .end => |node| {
+                            try p.warnMsg(.{
+                                .tag = .decl_between_fields,
+                                .token = p.nodes.items(.main_token)[node],
+                            });
+                            try p.warnMsg(.{
+                                .tag = .previous_field,
+                                .is_note = true,
+                                .token = last_field,
+                            });
+                            try p.warnMsg(.{
+                                .tag = .next_field,
+                                .is_note = true,
+                                .token = identifier,
+                            });
+                            // Continue parsing; error will be reported later.
+                            field_state = .err;
+                        },
+                    }
+                    try p.scratch.append(p.gpa, container_field);
+                    switch (p.token_tags[p.tok_i]) {
+                        .comma => {
+                            p.tok_i += 1;
+                            trailing = true;
+                            continue;
+                        },
+                        .r_brace, .eof => {
+                            trailing = false;
+                            break;
+                        },
+                        else => {},
+                    }
+                    // There is not allowed to be a decl after a field with no comma.
+                    // Report error but recover parser.
+                    try p.warn(.expected_comma_after_field);
+                    p.findNextContainerMember();
+                },
+            },
+            .keyword_pub => {
+                p.tok_i += 1;
+                const top_level_decl = try p.expectTopLevelDeclRecoverable();
+                if (top_level_decl != 0) {
+                    if (field_state == .seen) {
+                        field_state = .{ .end = top_level_decl };
+                    }
+                    try p.scratch.append(p.gpa, top_level_decl);
+                }
+                trailing = p.token_tags[p.tok_i - 1] == .semicolon;
+            },
+            .keyword_usingnamespace => {
+                const node = try p.expectUsingNamespaceRecoverable();
+                if (node != 0) {
+                    if (field_state == .seen) {
+                        field_state = .{ .end = node };
+                    }
+                    try p.scratch.append(p.gpa, node);
+                }
+                trailing = p.token_tags[p.tok_i - 1] == .semicolon;
+            },
+            // Any keyword that can begin a top-level declaration.
+            .keyword_const,
+            .keyword_var,
+            .keyword_threadlocal,
+            .keyword_export,
+            .keyword_extern,
+            .keyword_inline,
+            .keyword_noinline,
+            .keyword_fn,
+            => {
+                const top_level_decl = try p.expectTopLevelDeclRecoverable();
+                if (top_level_decl != 0) {
+                    if (field_state == .seen) {
+                        field_state = .{ .end = top_level_decl };
+                    }
+                    try p.scratch.append(p.gpa, top_level_decl);
+                }
+                trailing = p.token_tags[p.tok_i - 1] == .semicolon;
+            },
+            .eof, .r_brace => {
+                // End of container: a doc comment with nothing after it is
+                // attached to nothing, which is an error.
+                if (doc_comment) |tok| {
+                    try p.warnMsg(.{
+                        .tag = .unattached_doc_comment,
+                        .token = tok,
+                    });
+                }
+                break;
+            },
+            else => {
+                // First try to recover from a C-style container declaration
+                // (e.g. `struct Foo { ... }`); otherwise treat as a field.
+                const c_container = p.parseCStyleContainer() catch |err| switch (err) {
+                    error.OutOfMemory => return error.OutOfMemory,
+                    error.ParseError => false,
+                };
+                if (c_container) continue;
+
+                const identifier = p.tok_i;
+                defer last_field = identifier;
+                const container_field = p.expectContainerField() catch |err| switch (err) {
+                    error.OutOfMemory => return error.OutOfMemory,
+                    error.ParseError => {
+                        p.findNextContainerMember();
+                        continue;
+                    },
+                };
+                switch (field_state) {
+                    .none => field_state = .seen,
+                    .err, .seen => {},
+                    .end => |node| {
+                        try p.warnMsg(.{
+                            .tag = .decl_between_fields,
+                            .token = p.nodes.items(.main_token)[node],
+                        });
+                        try p.warnMsg(.{
+                            .tag = .previous_field,
+                            .is_note = true,
+                            .token = last_field,
+                        });
+                        try p.warnMsg(.{
+                            .tag = .next_field,
+                            .is_note = true,
+                            .token = identifier,
+                        });
+                        // Continue parsing; error will be reported later.
+                        field_state = .err;
+                    },
+                }
+                try p.scratch.append(p.gpa, container_field);
+                switch (p.token_tags[p.tok_i]) {
+                    .comma => {
+                        p.tok_i += 1;
+                        trailing = true;
+                        continue;
+                    },
+                    .r_brace, .eof => {
+                        trailing = false;
+                        break;
+                    },
+                    else => {},
+                }
+                // There is not allowed to be a decl after a field with no comma.
+                // Report error but recover parser.
+                try p.warn(.expected_comma_after_field);
+                // `name;` looks like a var/const decl missing its keyword —
+                // add a note suggesting that.
+                if (p.token_tags[p.tok_i] == .semicolon and p.token_tags[identifier] == .identifier) {
+                    try p.warnMsg(.{
+                        .tag = .var_const_decl,
+                        .is_note = true,
+                        .token = identifier,
+                    });
+                }
+                p.findNextContainerMember();
+                continue;
+            },
+        }
+    }
+
+    // Pack the collected members: up to two member indexes are stored inline
+    // in lhs/rhs, otherwise they are spilled to extra_data as a span.
+    const items = p.scratch.items[scratch_top..];
+    switch (items.len) {
+        0 => return Members{
+            .len = 0,
+            .lhs = 0,
+            .rhs = 0,
+            .trailing = trailing,
+        },
+        1 => return Members{
+            .len = 1,
+            .lhs = items[0],
+            .rhs = 0,
+            .trailing = trailing,
+        },
+        2 => return Members{
+            .len = 2,
+            .lhs = items[0],
+            .rhs = items[1],
+            .trailing = trailing,
+        },
+        else => {
+            const span = try p.listToSpan(items);
+            return Members{
+                .len = items.len,
+                .lhs = span.start,
+                .rhs = span.end,
+                .trailing = trailing,
+            };
+        },
+    }
+}
+
+/// Attempts to find next container member by searching for certain tokens.
+/// Used for error recovery: skips tokens until something that could begin
+/// (or cleanly end) a container member, leaving `p.tok_i` positioned there.
+/// `level` tracks paren/bracket/brace nesting so tokens inside nested
+/// expressions are not mistaken for member boundaries.
+fn findNextContainerMember(p: *Parse) void {
+    var level: u32 = 0;
+    while (true) {
+        const tok = p.nextToken();
+        switch (p.token_tags[tok]) {
+            // Any of these can start a new top level declaration.
+            .keyword_test,
+            .keyword_comptime,
+            .keyword_pub,
+            .keyword_export,
+            .keyword_extern,
+            .keyword_inline,
+            .keyword_noinline,
+            .keyword_usingnamespace,
+            .keyword_threadlocal,
+            .keyword_const,
+            .keyword_var,
+            .keyword_fn,
+            => {
+                if (level == 0) {
+                    // Back up so the caller re-reads this token.
+                    p.tok_i -= 1;
+                    return;
+                }
+            },
+            .identifier => {
+                // `name,` at top level looks like the next container field.
+                if (p.token_tags[tok + 1] == .comma and level == 0) {
+                    p.tok_i -= 1;
+                    return;
+                }
+            },
+            .comma, .semicolon => {
+                // this decl was likely meant to end here
+                if (level == 0) {
+                    return;
+                }
+            },
+            .l_paren, .l_bracket, .l_brace => level += 1,
+            .r_paren, .r_bracket => {
+                // Guard against going negative on unbalanced closers.
+                if (level != 0) level -= 1;
+            },
+            .r_brace => {
+                if (level == 0) {
+                    // end of container, exit
+                    p.tok_i -= 1;
+                    return;
+                }
+                level -= 1;
+            },
+            .eof => {
+                p.tok_i -= 1;
+                return;
+            },
+            else => {},
+        }
+    }
+}
+
+/// Error recovery: skip tokens until a statement boundary (a `;` at brace
+/// depth zero) or the end of the enclosing block / file, leaving `p.tok_i`
+/// positioned for the caller to resume parsing.
+fn findNextStmt(p: *Parse) void {
+    var depth: u32 = 0;
+    while (true) {
+        switch (p.token_tags[p.nextToken()]) {
+            .l_brace => depth += 1,
+            .r_brace => {
+                if (depth == 0) {
+                    // End of the enclosing block; back up so the caller sees it.
+                    p.tok_i -= 1;
+                    return;
+                }
+                depth -= 1;
+            },
+            .semicolon => if (depth == 0) return,
+            .eof => {
+                p.tok_i -= 1;
+                return;
+            },
+            else => {},
+        }
+    }
+}
+
+/// TestDecl <- KEYWORD_test (STRINGLITERALSINGLE / IDENTIFIER)? Block
+fn expectTestDecl(p: *Parse) !Node.Index {
+    const test_token = p.assertToken(.keyword_test);
+    // The test name (string literal or identifier) is optional.
+    const name_token = switch (p.token_tags[p.nextToken()]) {
+        .string_literal, .identifier => p.tok_i - 1,
+        else => blk: {
+            // Not a name; put the token back.
+            p.tok_i -= 1;
+            break :blk null;
+        },
+    };
+    const block_node = try p.parseBlock();
+    if (block_node == 0) return p.fail(.expected_block);
+    return p.addNode(.{
+        .tag = .test_decl,
+        .main_token = test_token,
+        .data = .{
+            // 0 (null node) encodes "unnamed test".
+            .lhs = name_token orelse 0,
+            .rhs = block_node,
+        },
+    });
+}
+
+/// Like `expectTestDecl`, but on a parse error skips ahead to the next
+/// container member and returns the null node instead of failing.
+fn expectTestDeclRecoverable(p: *Parse) error{OutOfMemory}!Node.Index {
+    if (p.expectTestDecl()) |node| {
+        return node;
+    } else |err| switch (err) {
+        error.OutOfMemory => return error.OutOfMemory,
+        error.ParseError => {
+            p.findNextContainerMember();
+            return null_node;
+        },
+    }
+}
+
+/// Decl
+///     <- (KEYWORD_export / KEYWORD_extern STRINGLITERALSINGLE? / (KEYWORD_inline / KEYWORD_noinline))? FnProto (SEMICOLON / Block)
+///      / (KEYWORD_export / KEYWORD_extern STRINGLITERALSINGLE?)? KEYWORD_threadlocal? VarDecl
+///      / KEYWORD_usingnamespace Expr SEMICOLON
+fn expectTopLevelDecl(p: *Parse) !Node.Index {
+    const extern_export_inline_token = p.nextToken();
+    var is_extern: bool = false;
+    var expect_fn: bool = false;
+    var expect_var_or_fn: bool = false;
+    // Classify the optional leading modifier; the flags constrain what kinds
+    // of declarations are acceptable afterwards.
+    switch (p.token_tags[extern_export_inline_token]) {
+        .keyword_extern => {
+            // Optional library name, e.g. `extern "c"`.
+            _ = p.eatToken(.string_literal);
+            is_extern = true;
+            expect_var_or_fn = true;
+        },
+        .keyword_export => expect_var_or_fn = true,
+        .keyword_inline, .keyword_noinline => expect_fn = true,
+        // No modifier; put the token back.
+        else => p.tok_i -= 1,
+    }
+    const fn_proto = try p.parseFnProto();
+    if (fn_proto != 0) {
+        switch (p.token_tags[p.tok_i]) {
+            // Prototype only (e.g. extern fn declaration).
+            .semicolon => {
+                p.tok_i += 1;
+                return fn_proto;
+            },
+            .l_brace => {
+                // An extern function must not have a body.
+                if (is_extern) {
+                    try p.warnMsg(.{ .tag = .extern_fn_body, .token = extern_export_inline_token });
+                    return null_node;
+                }
+                // Reserve the fn_decl slot so the decl node precedes its body
+                // in the node array.
+                const fn_decl_index = try p.reserveNode(.fn_decl);
+                errdefer p.unreserveNode(fn_decl_index);
+
+                const body_block = try p.parseBlock();
+                assert(body_block != 0);
+                return p.setNode(fn_decl_index, .{
+                    .tag = .fn_decl,
+                    .main_token = p.nodes.items(.main_token)[fn_proto],
+                    .data = .{
+                        .lhs = fn_proto,
+                        .rhs = body_block,
+                    },
+                });
+            },
+            else => {
+                // Since parseBlock only return error.ParseError on
+                // a missing '}' we can assume this function was
+                // supposed to end here.
+                try p.warn(.expected_semi_or_lbrace);
+                return null_node;
+            },
+        }
+    }
+    // `inline`/`noinline` were seen but no fn followed.
+    if (expect_fn) {
+        try p.warn(.expected_fn);
+        return error.ParseError;
+    }
+
+    const thread_local_token = p.eatToken(.keyword_threadlocal);
+    const var_decl = try p.parseVarDecl();
+    if (var_decl != 0) {
+        try p.expectSemicolon(.expected_semi_after_decl, false);
+        return var_decl;
+    }
+    // `threadlocal` must be followed by a variable declaration.
+    if (thread_local_token != null) {
+        return p.fail(.expected_var_decl);
+    }
+    if (expect_var_or_fn) {
+        return p.fail(.expected_var_decl_or_fn);
+    }
+    if (p.token_tags[p.tok_i] != .keyword_usingnamespace) {
+        return p.fail(.expected_pub_item);
+    }
+    return p.expectUsingNamespace();
+}
+
+/// Like `expectTopLevelDecl`, but on a parse error skips ahead to the next
+/// container member and returns the null node instead of failing.
+fn expectTopLevelDeclRecoverable(p: *Parse) error{OutOfMemory}!Node.Index {
+    if (p.expectTopLevelDecl()) |node| {
+        return node;
+    } else |err| switch (err) {
+        error.OutOfMemory => return error.OutOfMemory,
+        error.ParseError => {
+            p.findNextContainerMember();
+            return null_node;
+        },
+    }
+}
+
+/// KEYWORD_usingnamespace Expr SEMICOLON
+fn expectUsingNamespace(p: *Parse) !Node.Index {
+    const main_token = p.assertToken(.keyword_usingnamespace);
+    const target = try p.expectExpr();
+    try p.expectSemicolon(.expected_semi_after_decl, false);
+    return p.addNode(.{
+        .tag = .@"usingnamespace",
+        .main_token = main_token,
+        .data = .{ .lhs = target, .rhs = undefined },
+    });
+}
+
+/// Like `expectUsingNamespace`, but on a parse error skips ahead to the next
+/// container member and returns the null node instead of failing.
+fn expectUsingNamespaceRecoverable(p: *Parse) error{OutOfMemory}!Node.Index {
+    if (p.expectUsingNamespace()) |node| {
+        return node;
+    } else |err| switch (err) {
+        error.OutOfMemory => return error.OutOfMemory,
+        error.ParseError => {
+            p.findNextContainerMember();
+            return null_node;
+        },
+    }
+}
+
+/// FnProto <- KEYWORD_fn IDENTIFIER? LPAREN ParamDeclList RPAREN ByteAlign? AddrSpace? LinkSection? CallConv? EXCLAMATIONMARK? TypeExpr
+fn parseFnProto(p: *Parse) !Node.Index {
+    const fn_token = p.eatToken(.keyword_fn) orelse return null_node;
+
+    // We want the fn proto node to be before its children in the array.
+    const fn_proto_index = try p.reserveNode(.fn_proto);
+    errdefer p.unreserveNode(fn_proto_index);
+
+    _ = p.eatToken(.identifier);
+    const params = try p.parseParamDeclList();
+    const align_expr = try p.parseByteAlign();
+    const addrspace_expr = try p.parseAddrSpace();
+    const section_expr = try p.parseLinkSection();
+    const callconv_expr = try p.parseCallconv();
+    // Optional `!` for an inferred error set on the return type.
+    _ = p.eatToken(.bang);
+
+    const return_type_expr = try p.parseTypeExpr();
+    if (return_type_expr == 0) {
+        // most likely the user forgot to specify the return type.
+        // Mark return type as invalid and try to continue.
+        try p.warn(.expected_return_type);
+    }
+
+    // Pick the most compact of the four fn-proto node encodings: the
+    // "simple"/"multi" forms omit align/addrspace/section/callconv.
+    if (align_expr == 0 and section_expr == 0 and callconv_expr == 0 and addrspace_expr == 0) {
+        switch (params) {
+            .zero_or_one => |param| return p.setNode(fn_proto_index, .{
+                .tag = .fn_proto_simple,
+                .main_token = fn_token,
+                .data = .{
+                    .lhs = param,
+                    .rhs = return_type_expr,
+                },
+            }),
+            .multi => |span| {
+                return p.setNode(fn_proto_index, .{
+                    .tag = .fn_proto_multi,
+                    .main_token = fn_token,
+                    .data = .{
+                        .lhs = try p.addExtra(Node.SubRange{
+                            .start = span.start,
+                            .end = span.end,
+                        }),
+                        .rhs = return_type_expr,
+                    },
+                });
+            },
+        }
+    }
+    // At least one extra attribute present: use the full encodings that
+    // spill all attributes to extra_data.
+    switch (params) {
+        .zero_or_one => |param| return p.setNode(fn_proto_index, .{
+            .tag = .fn_proto_one,
+            .main_token = fn_token,
+            .data = .{
+                .lhs = try p.addExtra(Node.FnProtoOne{
+                    .param = param,
+                    .align_expr = align_expr,
+                    .addrspace_expr = addrspace_expr,
+                    .section_expr = section_expr,
+                    .callconv_expr = callconv_expr,
+                }),
+                .rhs = return_type_expr,
+            },
+        }),
+        .multi => |span| {
+            return p.setNode(fn_proto_index, .{
+                .tag = .fn_proto,
+                .main_token = fn_token,
+                .data = .{
+                    .lhs = try p.addExtra(Node.FnProto{
+                        .params_start = span.start,
+                        .params_end = span.end,
+                        .align_expr = align_expr,
+                        .addrspace_expr = addrspace_expr,
+                        .section_expr = section_expr,
+                        .callconv_expr = callconv_expr,
+                    }),
+                    .rhs = return_type_expr,
+                },
+            });
+        },
+    }
+}
+
+/// VarDecl <- (KEYWORD_const / KEYWORD_var) IDENTIFIER (COLON TypeExpr)? ByteAlign? AddrSpace? LinkSection? (EQUAL Expr)? SEMICOLON
+fn parseVarDecl(p: *Parse) !Node.Index {
+    const mut_token = p.eatToken(.keyword_const) orelse
+        p.eatToken(.keyword_var) orelse
+        return null_node;
+
+    _ = try p.expectToken(.identifier);
+    const type_node: Node.Index = if (p.eatToken(.colon) == null) 0 else try p.expectTypeExpr();
+    const align_node = try p.parseByteAlign();
+    const addrspace_node = try p.parseAddrSpace();
+    const section_node = try p.parseLinkSection();
+    const init_node: Node.Index = switch (p.token_tags[p.tok_i]) {
+        // Recovery: `==` where `=` was intended; warn and parse the
+        // initializer anyway.
+        .equal_equal => blk: {
+            try p.warn(.wrong_equal_var_decl);
+            p.tok_i += 1;
+            break :blk try p.expectExpr();
+        },
+        .equal => blk: {
+            p.tok_i += 1;
+            break :blk try p.expectExpr();
+        },
+        else => 0,
+    };
+    // Pick the most compact of the four var-decl node encodings based on
+    // which optional attributes are present.
+    if (section_node == 0 and addrspace_node == 0) {
+        if (align_node == 0) {
+            return p.addNode(.{
+                .tag = .simple_var_decl,
+                .main_token = mut_token,
+                .data = .{
+                    .lhs = type_node,
+                    .rhs = init_node,
+                },
+            });
+        } else if (type_node == 0) {
+            return p.addNode(.{
+                .tag = .aligned_var_decl,
+                .main_token = mut_token,
+                .data = .{
+                    .lhs = align_node,
+                    .rhs = init_node,
+                },
+            });
+        } else {
+            // Both a type and an alignment: spill both to extra_data.
+            return p.addNode(.{
+                .tag = .local_var_decl,
+                .main_token = mut_token,
+                .data = .{
+                    .lhs = try p.addExtra(Node.LocalVarDecl{
+                        .type_node = type_node,
+                        .align_node = align_node,
+                    }),
+                    .rhs = init_node,
+                },
+            });
+        }
+    } else {
+        // addrspace/linksection present: use the full encoding.
+        return p.addNode(.{
+            .tag = .global_var_decl,
+            .main_token = mut_token,
+            .data = .{
+                .lhs = try p.addExtra(Node.GlobalVarDecl{
+                    .type_node = type_node,
+                    .align_node = align_node,
+                    .addrspace_node = addrspace_node,
+                    .section_node = section_node,
+                }),
+                .rhs = init_node,
+            },
+        });
+    }
+}
+
+/// ContainerField
+///     <- doc_comment? KEYWORD_comptime? IDENTIFIER (COLON TypeExpr)? ByteAlign? (EQUAL Expr)?
+///      / doc_comment? KEYWORD_comptime? (IDENTIFIER COLON)? !KEYWORD_fn TypeExpr ByteAlign? (EQUAL Expr)?
+fn expectContainerField(p: *Parse) !Node.Index {
+    var main_token = p.tok_i;
+    _ = p.eatToken(.keyword_comptime);
+    // A "tuple-like" field has no `name:` prefix — its type expression
+    // stands alone (as in tuple struct fields).
+    const tuple_like = p.token_tags[p.tok_i] != .identifier or p.token_tags[p.tok_i + 1] != .colon;
+    if (!tuple_like) {
+        main_token = p.assertToken(.identifier);
+    }
+
+    var align_expr: Node.Index = 0;
+    var type_expr: Node.Index = 0;
+    // Named fields have a type after the colon; tuple-like fields are
+    // themselves a type expression.
+    if (p.eatToken(.colon) != null or tuple_like) {
+        type_expr = try p.expectTypeExpr();
+        align_expr = try p.parseByteAlign();
+    }
+
+    const value_expr: Node.Index = if (p.eatToken(.equal) == null) 0 else try p.expectExpr();
+
+    // Pick the most compact node encoding for the attribute combination.
+    if (align_expr == 0) {
+        return p.addNode(.{
+            .tag = .container_field_init,
+            .main_token = main_token,
+            .data = .{
+                .lhs = type_expr,
+                .rhs = value_expr,
+            },
+        });
+    } else if (value_expr == 0) {
+        return p.addNode(.{
+            .tag = .container_field_align,
+            .main_token = main_token,
+            .data = .{
+                .lhs = type_expr,
+                .rhs = align_expr,
+            },
+        });
+    } else {
+        // Both alignment and default value: spill them to extra_data.
+        return p.addNode(.{
+            .tag = .container_field,
+            .main_token = main_token,
+            .data = .{
+                .lhs = type_expr,
+                .rhs = try p.addExtra(Node.ContainerField{
+                    .value_expr = value_expr,
+                    .align_expr = align_expr,
+                }),
+            },
+        });
+    }
+}
+
+/// Statement
+///     <- KEYWORD_comptime? VarDecl
+///      / KEYWORD_comptime BlockExprStatement
+///      / KEYWORD_nosuspend BlockExprStatement
+///      / KEYWORD_suspend BlockExprStatement
+///      / KEYWORD_defer BlockExprStatement
+///      / KEYWORD_errdefer Payload? BlockExprStatement
+///      / IfStatement
+///      / LabeledStatement
+///      / SwitchExpr
+///      / AssignExpr SEMICOLON
+///
+/// `allow_defer_var` is false when parsing the trailing statement of an
+/// if/for/while (where `defer` and var decls are not valid).
+fn parseStatement(p: *Parse, allow_defer_var: bool) Error!Node.Index {
+    const comptime_token = p.eatToken(.keyword_comptime);
+
+    if (allow_defer_var) {
+        // A leading `comptime` (already consumed) may prefix a var decl.
+        const var_decl = try p.parseVarDecl();
+        if (var_decl != 0) {
+            try p.expectSemicolon(.expected_semi_after_decl, true);
+            return var_decl;
+        }
+    }
+
+    // `comptime` followed by anything other than a var decl wraps a
+    // block-expression statement.
+    if (comptime_token) |token| {
+        return p.addNode(.{
+            .tag = .@"comptime",
+            .main_token = token,
+            .data = .{
+                .lhs = try p.expectBlockExprStatement(),
+                .rhs = undefined,
+            },
+        });
+    }
+
+    switch (p.token_tags[p.tok_i]) {
+        .keyword_nosuspend => {
+            return p.addNode(.{
+                .tag = .@"nosuspend",
+                .main_token = p.nextToken(),
+                .data = .{
+                    .lhs = try p.expectBlockExprStatement(),
+                    .rhs = undefined,
+                },
+            });
+        },
+        .keyword_suspend => {
+            const token = p.nextToken();
+            const block_expr = try p.expectBlockExprStatement();
+            return p.addNode(.{
+                .tag = .@"suspend",
+                .main_token = token,
+                .data = .{
+                    .lhs = block_expr,
+                    .rhs = undefined,
+                },
+            });
+        },
+        .keyword_defer => if (allow_defer_var) return p.addNode(.{
+            .tag = .@"defer",
+            .main_token = p.nextToken(),
+            .data = .{
+                .lhs = undefined,
+                .rhs = try p.expectBlockExprStatement(),
+            },
+        }),
+        .keyword_errdefer => if (allow_defer_var) return p.addNode(.{
+            .tag = .@"errdefer",
+            .main_token = p.nextToken(),
+            .data = .{
+                // lhs is the optional |payload| capture.
+                .lhs = try p.parsePayload(),
+                .rhs = try p.expectBlockExprStatement(),
+            },
+        }),
+        .keyword_switch => return p.expectSwitchExpr(),
+        .keyword_if => return p.expectIfStatement(),
+        .keyword_enum, .keyword_struct, .keyword_union => {
+            // Recovery for C-style container decls (`struct Foo { ... }`).
+            const identifier = p.tok_i + 1;
+            if (try p.parseCStyleContainer()) {
+                // Return something so that `expectStatement` is happy.
+                return p.addNode(.{
+                    .tag = .identifier,
+                    .main_token = identifier,
+                    .data = .{
+                        .lhs = undefined,
+                        .rhs = undefined,
+                    },
+                });
+            }
+        },
+        else => {},
+    }
+
+    const labeled_statement = try p.parseLabeledStatement();
+    if (labeled_statement != 0) return labeled_statement;
+
+    // Fall back to an expression statement.
+    const assign_expr = try p.parseAssignExpr();
+    if (assign_expr != 0) {
+        try p.expectSemicolon(.expected_semi_after_stmt, true);
+        return assign_expr;
+    }
+
+    return null_node;
+}
+
+/// Same as `parseStatement`, but a missing statement is a hard error.
+fn expectStatement(p: *Parse, allow_defer_var: bool) !Node.Index {
+    const node = try p.parseStatement(allow_defer_var);
+    if (node == 0) return p.fail(.expected_statement);
+    return node;
+}
+
+/// If a parse error occurs, reports an error, but then finds the next statement
+/// and returns that one instead. If a parse error occurs but there is no following
+/// statement, returns 0.
+fn expectStatementRecoverable(p: *Parse) Error!Node.Index {
+    // Loop: after skipping past a bad statement, retry parsing from the
+    // recovered position until a statement parses or the block/file ends.
+    while (true) {
+        return p.expectStatement(true) catch |err| switch (err) {
+            error.OutOfMemory => return error.OutOfMemory,
+            error.ParseError => {
+                p.findNextStmt(); // Try to skip to the next statement.
+                switch (p.token_tags[p.tok_i]) {
+                    .r_brace => return null_node,
+                    .eof => return error.ParseError,
+                    else => continue,
+                }
+            },
+        };
+    }
+}
+
+/// IfStatement
+///     <- IfPrefix BlockExpr ( KEYWORD_else Payload? Statement )?
+///      / IfPrefix AssignExpr ( SEMICOLON / KEYWORD_else Payload? Statement )
+fn expectIfStatement(p: *Parse) !Node.Index {
+    const if_token = p.assertToken(.keyword_if);
+    _ = try p.expectToken(.l_paren);
+    const condition = try p.expectExpr();
+    _ = try p.expectToken(.r_paren);
+    // Optional |payload| capture after the condition.
+    _ = try p.parsePtrPayload();
+
+    // TODO propose to change the syntax so that semicolons are always required
+    // inside if statements, even if there is an `else`.
+    // A non-block then-branch that is not terminated by `;` must be
+    // followed by `else`; `else_required` tracks that.
+    var else_required = false;
+    const then_expr = blk: {
+        const block_expr = try p.parseBlockExpr();
+        if (block_expr != 0) break :blk block_expr;
+        const assign_expr = try p.parseAssignExpr();
+        if (assign_expr == 0) {
+            return p.fail(.expected_block_or_assignment);
+        }
+        // `if (c) expr;` — complete statement, no else branch.
+        if (p.eatToken(.semicolon)) |_| {
+            return p.addNode(.{
+                .tag = .if_simple,
+                .main_token = if_token,
+                .data = .{
+                    .lhs = condition,
+                    .rhs = assign_expr,
+                },
+            });
+        }
+        else_required = true;
+        break :blk assign_expr;
+    };
+    _ = p.eatToken(.keyword_else) orelse {
+        if (else_required) {
+            try p.warn(.expected_semi_or_else);
+        }
+        return p.addNode(.{
+            .tag = .if_simple,
+            .main_token = if_token,
+            .data = .{
+                .lhs = condition,
+                .rhs = then_expr,
+            },
+        });
+    };
+    // Optional |err| capture on the else branch.
+    _ = try p.parsePayload();
+    // `false`: defer/var-decl statements are not valid as an else branch.
+    const else_expr = try p.expectStatement(false);
+    return p.addNode(.{
+        .tag = .@"if",
+        .main_token = if_token,
+        .data = .{
+            .lhs = condition,
+            .rhs = try p.addExtra(Node.If{
+                .then_expr = then_expr,
+                .else_expr = else_expr,
+            }),
+        },
+    });
+}
+
+/// LabeledStatement <- BlockLabel? (Block / LoopStatement)
+fn parseLabeledStatement(p: *Parse) !Node.Index {
+    const label_token = p.parseBlockLabel();
+    const block = try p.parseBlock();
+    if (block != 0) return block;
+
+    const loop_stmt = try p.parseLoopStatement();
+    if (loop_stmt != 0) return loop_stmt;
+
+    // A label followed by neither a block nor a loop is an error. Before
+    // reporting, probe for something shaped like a var decl whose
+    // const/var keyword is missing, to give a more precise diagnostic.
+    if (label_token != 0) {
+        const after_colon = p.tok_i;
+        const node = try p.parseTypeExpr();
+        if (node != 0) {
+            const a = try p.parseByteAlign();
+            const b = try p.parseAddrSpace();
+            const c = try p.parseLinkSection();
+            const d = if (p.eatToken(.equal) == null) 0 else try p.expectExpr();
+            if (a != 0 or b != 0 or c != 0 or d != 0) {
+                return p.failMsg(.{ .tag = .expected_var_const, .token = label_token });
+            }
+        }
+        return p.failMsg(.{ .tag = .expected_labelable, .token = after_colon });
+    }
+
+    return null_node;
+}
+
+/// LoopStatement <- KEYWORD_inline? (ForStatement / WhileStatement)
+fn parseLoopStatement(p: *Parse) !Node.Index {
+    const inline_token = p.eatToken(.keyword_inline);
+
+    const for_node = try p.parseForStatement();
+    if (for_node != 0) return for_node;
+
+    const while_node = try p.parseWhileStatement();
+    if (while_node != 0) return while_node;
+
+    // A bare `inline` must be followed by a `for` or `while`.
+    return if (inline_token == null) null_node else p.fail(.expected_inlinable);
+}
+
+/// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload
+///
+/// ForStatement
+///     <- ForPrefix BlockExpr ( KEYWORD_else Statement )?
+///      / ForPrefix AssignExpr ( SEMICOLON / KEYWORD_else Statement )
+fn parseForStatement(p: *Parse) !Node.Index {
+    const for_token = p.eatToken(.keyword_for) orelse return null_node;
+    _ = try p.expectToken(.l_paren);
+    const array_expr = try p.expectExpr();
+    _ = try p.expectToken(.r_paren);
+    // The |item, index| capture is mandatory for `for`; warn but recover.
+    const found_payload = try p.parsePtrIndexPayload();
+    if (found_payload == 0) try p.warn(.expected_loop_payload);
+
+    // TODO propose to change the syntax so that semicolons are always required
+    // inside while statements, even if there is an `else`.
+    // A non-block body not terminated by `;` must be followed by `else`.
+    var else_required = false;
+    const then_expr = blk: {
+        const block_expr = try p.parseBlockExpr();
+        if (block_expr != 0) break :blk block_expr;
+        const assign_expr = try p.parseAssignExpr();
+        if (assign_expr == 0) {
+            return p.fail(.expected_block_or_assignment);
+        }
+        // `for (...) |x| expr;` — complete statement, no else branch.
+        if (p.eatToken(.semicolon)) |_| {
+            return p.addNode(.{
+                .tag = .for_simple,
+                .main_token = for_token,
+                .data = .{
+                    .lhs = array_expr,
+                    .rhs = assign_expr,
+                },
+            });
+        }
+        else_required = true;
+        break :blk assign_expr;
+    };
+    _ = p.eatToken(.keyword_else) orelse {
+        if (else_required) {
+            try p.warn(.expected_semi_or_else);
+        }
+        return p.addNode(.{
+            .tag = .for_simple,
+            .main_token = for_token,
+            .data = .{
+                .lhs = array_expr,
+                .rhs = then_expr,
+            },
+        });
+    };
+    // `for`/else reuses the Node.If extra-data layout for its two branches.
+    return p.addNode(.{
+        .tag = .@"for",
+        .main_token = for_token,
+        .data = .{
+            .lhs = array_expr,
+            .rhs = try p.addExtra(Node.If{
+                .then_expr = then_expr,
+                .else_expr = try p.expectStatement(false),
+            }),
+        },
+    });
+}
+
+/// WhilePrefix <- KEYWORD_while LPAREN Expr RPAREN PtrPayload? WhileContinueExpr?
+///
+/// WhileStatement
+///     <- WhilePrefix BlockExpr ( KEYWORD_else Payload? Statement )?
+///      / WhilePrefix AssignExpr ( SEMICOLON / KEYWORD_else Payload? Statement )
+fn parseWhileStatement(p: *Parse) !Node.Index {
+    const while_token = p.eatToken(.keyword_while) orelse return null_node;
+    _ = try p.expectToken(.l_paren);
+    const condition = try p.expectExpr();
+    _ = try p.expectToken(.r_paren);
+    // Optional |payload| capture and `: (continue expr)`.
+    _ = try p.parsePtrPayload();
+    const cont_expr = try p.parseWhileContinueExpr();
+
+    // TODO propose to change the syntax so that semicolons are always required
+    // inside while statements, even if there is an `else`.
+    // A non-block body not terminated by `;` must be followed by `else`.
+    var else_required = false;
+    const then_expr = blk: {
+        const block_expr = try p.parseBlockExpr();
+        if (block_expr != 0) break :blk block_expr;
+        const assign_expr = try p.parseAssignExpr();
+        if (assign_expr == 0) {
+            return p.fail(.expected_block_or_assignment);
+        }
+        // `while (c) expr;` — complete statement; pick the node encoding
+        // by whether a continue expression is present.
+        if (p.eatToken(.semicolon)) |_| {
+            if (cont_expr == 0) {
+                return p.addNode(.{
+                    .tag = .while_simple,
+                    .main_token = while_token,
+                    .data = .{
+                        .lhs = condition,
+                        .rhs = assign_expr,
+                    },
+                });
+            } else {
+                return p.addNode(.{
+                    .tag = .while_cont,
+                    .main_token = while_token,
+                    .data = .{
+                        .lhs = condition,
+                        .rhs = try p.addExtra(Node.WhileCont{
+                            .cont_expr = cont_expr,
+                            .then_expr = assign_expr,
+                        }),
+                    },
+                });
+            }
+        }
+        else_required = true;
+        break :blk assign_expr;
+    };
+    _ = p.eatToken(.keyword_else) orelse {
+        if (else_required) {
+            try p.warn(.expected_semi_or_else);
+        }
+        // No else branch: same two encodings as the semicolon case above.
+        if (cont_expr == 0) {
+            return p.addNode(.{
+                .tag = .while_simple,
+                .main_token = while_token,
+                .data = .{
+                    .lhs = condition,
+                    .rhs = then_expr,
+                },
+            });
+        } else {
+            return p.addNode(.{
+                .tag = .while_cont,
+                .main_token = while_token,
+                .data = .{
+                    .lhs = condition,
+                    .rhs = try p.addExtra(Node.WhileCont{
+                        .cont_expr = cont_expr,
+                        .then_expr = then_expr,
+                    }),
+                },
+            });
+        }
+    };
+    // Optional |err| capture on the else branch.
+    _ = try p.parsePayload();
+    const else_expr = try p.expectStatement(false);
+    // Full encoding: continue expr, then branch, and else branch all in
+    // extra_data.
+    return p.addNode(.{
+        .tag = .@"while",
+        .main_token = while_token,
+        .data = .{
+            .lhs = condition,
+            .rhs = try p.addExtra(Node.While{
+                .cont_expr = cont_expr,
+                .then_expr = then_expr,
+                .else_expr = else_expr,
+            }),
+        },
+    });
+}
+
+/// BlockExprStatement
+///     <- BlockExpr
+///      / AssignExpr SEMICOLON
+fn parseBlockExprStatement(p: *Parse) !Node.Index {
+    const block = try p.parseBlockExpr();
+    if (block != 0) return block;
+
+    const assign = try p.parseAssignExpr();
+    if (assign == 0) return null_node;
+    // A bare assignment/expression statement must end with a semicolon.
+    try p.expectSemicolon(.expected_semi_after_stmt, true);
+    return assign;
+}
+
+/// Same as `parseBlockExprStatement`, but a missing statement is a hard error.
+fn expectBlockExprStatement(p: *Parse) !Node.Index {
+    const node = try p.parseBlockExprStatement();
+    return if (node == 0) p.fail(.expected_block_or_expr) else node;
+}
+
+/// BlockExpr <- BlockLabel? Block
+fn parseBlockExpr(p: *Parse) Error!Node.Index {
+    const tag = p.token_tags[p.tok_i];
+    if (tag == .l_brace) return p.parseBlock();
+    // Labeled block: IDENTIFIER COLON LBRACE — skip the label and colon,
+    // then parse the block itself.
+    if (tag == .identifier and
+        p.token_tags[p.tok_i + 1] == .colon and
+        p.token_tags[p.tok_i + 2] == .l_brace)
+    {
+        p.tok_i += 2;
+        return p.parseBlock();
+    }
+    return null_node;
+}
+
+/// AssignExpr <- Expr (AssignOp Expr)?
+///
+/// AssignOp
+///     <- ASTERISKEQUAL
+///      / ASTERISKPIPEEQUAL
+///      / SLASHEQUAL
+///      / PERCENTEQUAL
+///      / PLUSEQUAL
+///      / PLUSPIPEEQUAL
+///      / MINUSEQUAL
+///      / MINUSPIPEEQUAL
+///      / LARROW2EQUAL
+///      / LARROW2PIPEEQUAL
+///      / RARROW2EQUAL
+///      / AMPERSANDEQUAL
+///      / CARETEQUAL
+///      / PIPEEQUAL
+///      / ASTERISKPERCENTEQUAL
+///      / PLUSPERCENTEQUAL
+///      / MINUSPERCENTEQUAL
+///      / EQUAL
+fn parseAssignExpr(p: *Parse) !Node.Index {
+    const expr = try p.parseExpr();
+    if (expr == 0) return null_node;
+
+    // Map the assignment-operator token (if any) to its AST node tag; a
+    // plain expression with no assignment is returned as-is.
+    const tag: Node.Tag = switch (p.token_tags[p.tok_i]) {
+        .asterisk_equal => .assign_mul,
+        .slash_equal => .assign_div,
+        .percent_equal => .assign_mod,
+        .plus_equal => .assign_add,
+        .minus_equal => .assign_sub,
+        .angle_bracket_angle_bracket_left_equal => .assign_shl,
+        .angle_bracket_angle_bracket_left_pipe_equal => .assign_shl_sat,
+        .angle_bracket_angle_bracket_right_equal => .assign_shr,
+        .ampersand_equal => .assign_bit_and,
+        .caret_equal => .assign_bit_xor,
+        .pipe_equal => .assign_bit_or,
+        .asterisk_percent_equal => .assign_mul_wrap,
+        .plus_percent_equal => .assign_add_wrap,
+        .minus_percent_equal => .assign_sub_wrap,
+        .asterisk_pipe_equal => .assign_mul_sat,
+        .plus_pipe_equal => .assign_add_sat,
+        .minus_pipe_equal => .assign_sub_sat,
+        .equal => .assign,
+        else => return expr,
+    };
+    return p.addNode(.{
+        .tag = tag,
+        .main_token = p.nextToken(),
+        .data = .{
+            .lhs = expr,
+            .rhs = try p.expectExpr(),
+        },
+    });
+}
+
+/// Same as `parseAssignExpr`, but a missing expression is a hard error.
+fn expectAssignExpr(p: *Parse) !Node.Index {
+    const node = try p.parseAssignExpr();
+    return if (node == 0) p.fail(.expected_expr_or_assignment) else node;
+}
+
+/// An expression is the precedence climber started at the lowest level.
+fn parseExpr(p: *Parse) Error!Node.Index {
+    return p.parseExprPrecedence(0);
+}
+
+/// Same as `parseExpr`, but a missing expression is a hard error.
+fn expectExpr(p: *Parse) Error!Node.Index {
+    const expr = try p.parseExpr();
+    return if (expr == 0) p.fail(.expected_expr) else expr;
+}
+
+/// Binary-operator associativity. `none` marks operators (the comparisons)
+/// that must not be chained, e.g. `a == b == c` is rejected.
+const Assoc = enum {
+    left,
+    none,
+};
+
+/// One entry of `operTable`: the operator's precedence (-1 means the token
+/// is not a binary operator), the AST node tag it produces, and its
+/// associativity.
+const OperInfo = struct {
+    prec: i8,
+    tag: Node.Tag,
+    assoc: Assoc = Assoc.left,
+};
+
+// A table of binary operator information. Higher precedence numbers are
+// stickier. All operators at the same precedence level should have the same
+// associativity. Indexed by Token.Tag; tokens not listed default to
+// `.prec = -1`, marking them as non-operators for the precedence climber.
+const operTable = std.enums.directEnumArrayDefault(Token.Tag, OperInfo, .{ .prec = -1, .tag = Node.Tag.root }, 0, .{
+    .keyword_or = .{ .prec = 10, .tag = .bool_or },
+
+    .keyword_and = .{ .prec = 20, .tag = .bool_and },
+
+    // Comparisons are non-associative: chains are a parse error.
+    .equal_equal = .{ .prec = 30, .tag = .equal_equal, .assoc = Assoc.none },
+    .bang_equal = .{ .prec = 30, .tag = .bang_equal, .assoc = Assoc.none },
+    .angle_bracket_left = .{ .prec = 30, .tag = .less_than, .assoc = Assoc.none },
+    .angle_bracket_right = .{ .prec = 30, .tag = .greater_than, .assoc = Assoc.none },
+    .angle_bracket_left_equal = .{ .prec = 30, .tag = .less_or_equal, .assoc = Assoc.none },
+    .angle_bracket_right_equal = .{ .prec = 30, .tag = .greater_or_equal, .assoc = Assoc.none },
+
+    .ampersand = .{ .prec = 40, .tag = .bit_and },
+    .caret = .{ .prec = 40, .tag = .bit_xor },
+    .pipe = .{ .prec = 40, .tag = .bit_or },
+    .keyword_orelse = .{ .prec = 40, .tag = .@"orelse" },
+    .keyword_catch = .{ .prec = 40, .tag = .@"catch" },
+
+    .angle_bracket_angle_bracket_left = .{ .prec = 50, .tag = .shl },
+    .angle_bracket_angle_bracket_left_pipe = .{ .prec = 50, .tag = .shl_sat },
+    .angle_bracket_angle_bracket_right = .{ .prec = 50, .tag = .shr },
+
+    .plus = .{ .prec = 60, .tag = .add },
+    .minus = .{ .prec = 60, .tag = .sub },
+    .plus_plus = .{ .prec = 60, .tag = .array_cat },
+    .plus_percent = .{ .prec = 60, .tag = .add_wrap },
+    .minus_percent = .{ .prec = 60, .tag = .sub_wrap },
+    .plus_pipe = .{ .prec = 60, .tag = .add_sat },
+    .minus_pipe = .{ .prec = 60, .tag = .sub_sat },
+
+    .pipe_pipe = .{ .prec = 70, .tag = .merge_error_sets },
+    .asterisk = .{ .prec = 70, .tag = .mul },
+    .slash = .{ .prec = 70, .tag = .div },
+    .percent = .{ .prec = 70, .tag = .mod },
+    .asterisk_asterisk = .{ .prec = 70, .tag = .array_mult },
+    .asterisk_percent = .{ .prec = 70, .tag = .mul_wrap },
+    .asterisk_pipe = .{ .prec = 70, .tag = .mul_sat },
+});
+
+/// Precedence-climbing parser for binary expressions. `min_prec` is the
+/// lowest operator precedence this invocation is allowed to consume;
+/// operators below it are left for an outer call.
+fn parseExprPrecedence(p: *Parse, min_prec: i32) Error!Node.Index {
+    assert(min_prec >= 0);
+    var node = try p.parsePrefixExpr();
+    if (node == 0) {
+        return null_node;
+    }
+
+    // Precedence of a non-associative operator already consumed at this
+    // level; hitting it again means chained comparison operators.
+    var banned_prec: i8 = -1;
+
+    while (true) {
+        const tok_tag = p.token_tags[p.tok_i];
+        const info = operTable[@intCast(usize, @enumToInt(tok_tag))];
+        if (info.prec < min_prec) {
+            break;
+        }
+        if (info.prec == banned_prec) {
+            return p.fail(.chained_comparison_operators);
+        }
+
+        const oper_token = p.nextToken();
+        // Special-case handling for "catch"
+        if (tok_tag == .keyword_catch) {
+            _ = try p.parsePayload();
+        }
+        // prec + 1 makes operators at this level left-associative.
+        const rhs = try p.parseExprPrecedence(info.prec + 1);
+        if (rhs == 0) {
+            try p.warn(.expected_expr);
+            return node;
+        }
+
+        {
+            const tok_len = tok_tag.lexeme().?.len;
+            const char_before = p.source[p.token_starts[oper_token] - 1];
+            const char_after = p.source[p.token_starts[oper_token] + tok_len];
+            if (tok_tag == .ampersand and char_after == '&') {
+                // without types we don't know if '&&' was intended as 'bitwise_and address_of', or a c-style logical_and
+                // The best the parser can do is recommend changing it to 'and' or ' & &'
+                try p.warnMsg(.{ .tag = .invalid_ampersand_ampersand, .token = oper_token });
+            } else if (std.ascii.isWhitespace(char_before) != std.ascii.isWhitespace(char_after)) {
+                try p.warnMsg(.{ .tag = .mismatched_binary_op_whitespace, .token = oper_token });
+            }
+        }
+
+        node = try p.addNode(.{
+            .tag = info.tag,
+            .main_token = oper_token,
+            .data = .{
+                .lhs = node,
+                .rhs = rhs,
+            },
+        });
+
+        if (info.assoc == Assoc.none) {
+            banned_prec = info.prec;
+        }
+    }
+
+    return node;
+}
+
+/// PrefixExpr <- PrefixOp* PrimaryExpr
+///
+/// PrefixOp
+/// <- EXCLAMATIONMARK
+/// / MINUS
+/// / TILDE
+/// / MINUSPERCENT
+/// / AMPERSAND
+/// / KEYWORD_try
+/// / KEYWORD_await
+fn parsePrefixExpr(p: *Parse) Error!Node.Index {
+    // Map the current token to a prefix-operator node tag, or fall
+    // through to a primary expression when no prefix operator is present.
+    const node_tag: Node.Tag = switch (p.token_tags[p.tok_i]) {
+        .bang => .bool_not,
+        .minus => .negation,
+        .tilde => .bit_not,
+        .minus_percent => .negation_wrap,
+        .ampersand => .address_of,
+        .keyword_try => .@"try",
+        .keyword_await => .@"await",
+        else => return p.parsePrimaryExpr(),
+    };
+    // Consume the operator token before parsing the operand so that
+    // main_token refers to the operator itself.
+    const op_token = p.nextToken();
+    const operand = try p.expectPrefixExpr();
+    return p.addNode(.{
+        .tag = node_tag,
+        .main_token = op_token,
+        .data = .{
+            .lhs = operand,
+            .rhs = undefined,
+        },
+    });
+}
+
+/// Same as parsePrefixExpr, except a missing expression is a fatal
+/// parse error.
+fn expectPrefixExpr(p: *Parse) Error!Node.Index {
+    const node = try p.parsePrefixExpr();
+    if (node == 0) return p.fail(.expected_prefix_expr);
+    return node;
+}
+
+/// TypeExpr <- PrefixTypeOp* ErrorUnionExpr
+///
+/// PrefixTypeOp
+/// <- QUESTIONMARK
+/// / KEYWORD_anyframe MINUSRARROW
+/// / SliceTypeStart (ByteAlign / AddrSpace / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
+/// / PtrTypeStart (AddrSpace / KEYWORD_align LPAREN Expr (COLON Expr COLON Expr)? RPAREN / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
+/// / ArrayTypeStart
+///
+/// SliceTypeStart <- LBRACKET (COLON Expr)? RBRACKET
+///
+/// PtrTypeStart
+/// <- ASTERISK
+/// / ASTERISK2
+/// / LBRACKET ASTERISK (LETTERC / COLON Expr)? RBRACKET
+///
+/// ArrayTypeStart <- LBRACKET Expr (COLON Expr)? RBRACKET
+fn parseTypeExpr(p: *Parse) Error!Node.Index {
+    switch (p.token_tags[p.tok_i]) {
+        // "?T": optional type.
+        .question_mark => return p.addNode(.{
+            .tag = .optional_type,
+            .main_token = p.nextToken(),
+            .data = .{
+                .lhs = try p.expectTypeExpr(),
+                .rhs = undefined,
+            },
+        }),
+        // "anyframe->T"; a bare "anyframe" is handled as a primary type expr.
+        .keyword_anyframe => switch (p.token_tags[p.tok_i + 1]) {
+            .arrow => return p.addNode(.{
+                .tag = .anyframe_type,
+                .main_token = p.nextToken(),
+                .data = .{
+                    .lhs = p.nextToken(),
+                    .rhs = try p.expectTypeExpr(),
+                },
+            }),
+            else => return p.parseErrorUnionExpr(),
+        },
+        // "*T": single-item pointer. The node shape chosen depends on which
+        // pointer modifiers were present.
+        .asterisk => {
+            const asterisk = p.nextToken();
+            const mods = try p.parsePtrModifiers();
+            const elem_type = try p.expectTypeExpr();
+            if (mods.bit_range_start != 0) {
+                return p.addNode(.{
+                    .tag = .ptr_type_bit_range,
+                    .main_token = asterisk,
+                    .data = .{
+                        .lhs = try p.addExtra(Node.PtrTypeBitRange{
+                            .sentinel = 0,
+                            .align_node = mods.align_node,
+                            .addrspace_node = mods.addrspace_node,
+                            .bit_range_start = mods.bit_range_start,
+                            .bit_range_end = mods.bit_range_end,
+                        }),
+                        .rhs = elem_type,
+                    },
+                });
+            } else if (mods.addrspace_node != 0) {
+                return p.addNode(.{
+                    .tag = .ptr_type,
+                    .main_token = asterisk,
+                    .data = .{
+                        .lhs = try p.addExtra(Node.PtrType{
+                            .sentinel = 0,
+                            .align_node = mods.align_node,
+                            .addrspace_node = mods.addrspace_node,
+                        }),
+                        .rhs = elem_type,
+                    },
+                });
+            } else {
+                return p.addNode(.{
+                    .tag = .ptr_type_aligned,
+                    .main_token = asterisk,
+                    .data = .{
+                        .lhs = mods.align_node,
+                        .rhs = elem_type,
+                    },
+                });
+            }
+        },
+        // "**T" is tokenized as one token; build the inner pointer node,
+        // then wrap it in an outer unmodified pointer node.
+        .asterisk_asterisk => {
+            const asterisk = p.nextToken();
+            const mods = try p.parsePtrModifiers();
+            const elem_type = try p.expectTypeExpr();
+            const inner: Node.Index = inner: {
+                if (mods.bit_range_start != 0) {
+                    break :inner try p.addNode(.{
+                        .tag = .ptr_type_bit_range,
+                        .main_token = asterisk,
+                        .data = .{
+                            .lhs = try p.addExtra(Node.PtrTypeBitRange{
+                                .sentinel = 0,
+                                .align_node = mods.align_node,
+                                .addrspace_node = mods.addrspace_node,
+                                .bit_range_start = mods.bit_range_start,
+                                .bit_range_end = mods.bit_range_end,
+                            }),
+                            .rhs = elem_type,
+                        },
+                    });
+                } else if (mods.addrspace_node != 0) {
+                    break :inner try p.addNode(.{
+                        .tag = .ptr_type,
+                        .main_token = asterisk,
+                        .data = .{
+                            .lhs = try p.addExtra(Node.PtrType{
+                                .sentinel = 0,
+                                .align_node = mods.align_node,
+                                .addrspace_node = mods.addrspace_node,
+                            }),
+                            .rhs = elem_type,
+                        },
+                    });
+                } else {
+                    break :inner try p.addNode(.{
+                        .tag = .ptr_type_aligned,
+                        .main_token = asterisk,
+                        .data = .{
+                            .lhs = mods.align_node,
+                            .rhs = elem_type,
+                        },
+                    });
+                }
+            };
+            return p.addNode(.{
+                .tag = .ptr_type_aligned,
+                .main_token = asterisk,
+                .data = .{
+                    .lhs = 0,
+                    .rhs = inner,
+                },
+            });
+        },
+        .l_bracket => switch (p.token_tags[p.tok_i + 1]) {
+            // "[*]T", "[*c]T", or "[*:sentinel]T": many-item pointer.
+            .asterisk => {
+                _ = p.nextToken();
+                const asterisk = p.nextToken();
+                var sentinel: Node.Index = 0;
+                if (p.eatToken(.identifier)) |ident| {
+                    // Only the identifier "c" is meaningful here ([*c]T);
+                    // anything else is put back for the caller to diagnose.
+                    const ident_slice = p.source[p.token_starts[ident]..p.token_starts[ident + 1]];
+                    if (!std.mem.eql(u8, std.mem.trimRight(u8, ident_slice, &std.ascii.whitespace), "c")) {
+                        p.tok_i -= 1;
+                    }
+                } else if (p.eatToken(.colon)) |_| {
+                    sentinel = try p.expectExpr();
+                }
+                _ = try p.expectToken(.r_bracket);
+                const mods = try p.parsePtrModifiers();
+                const elem_type = try p.expectTypeExpr();
+                if (mods.bit_range_start == 0) {
+                    if (sentinel == 0 and mods.addrspace_node == 0) {
+                        return p.addNode(.{
+                            .tag = .ptr_type_aligned,
+                            .main_token = asterisk,
+                            .data = .{
+                                .lhs = mods.align_node,
+                                .rhs = elem_type,
+                            },
+                        });
+                    } else if (mods.align_node == 0 and mods.addrspace_node == 0) {
+                        return p.addNode(.{
+                            .tag = .ptr_type_sentinel,
+                            .main_token = asterisk,
+                            .data = .{
+                                .lhs = sentinel,
+                                .rhs = elem_type,
+                            },
+                        });
+                    } else {
+                        return p.addNode(.{
+                            .tag = .ptr_type,
+                            .main_token = asterisk,
+                            .data = .{
+                                .lhs = try p.addExtra(Node.PtrType{
+                                    .sentinel = sentinel,
+                                    .align_node = mods.align_node,
+                                    .addrspace_node = mods.addrspace_node,
+                                }),
+                                .rhs = elem_type,
+                            },
+                        });
+                    }
+                } else {
+                    return p.addNode(.{
+                        .tag = .ptr_type_bit_range,
+                        .main_token = asterisk,
+                        .data = .{
+                            .lhs = try p.addExtra(Node.PtrTypeBitRange{
+                                .sentinel = sentinel,
+                                .align_node = mods.align_node,
+                                .addrspace_node = mods.addrspace_node,
+                                .bit_range_start = mods.bit_range_start,
+                                .bit_range_end = mods.bit_range_end,
+                            }),
+                            .rhs = elem_type,
+                        },
+                    });
+                }
+            },
+            // "[]T" slice (len_expr == 0) or "[len]T" / "[len:sentinel]T" array.
+            else => {
+                const lbracket = p.nextToken();
+                const len_expr = try p.parseExpr();
+                const sentinel: Node.Index = if (p.eatToken(.colon)) |_|
+                    try p.expectExpr()
+                else
+                    0;
+                _ = try p.expectToken(.r_bracket);
+                if (len_expr == 0) {
+                    const mods = try p.parsePtrModifiers();
+                    const elem_type = try p.expectTypeExpr();
+                    if (mods.bit_range_start != 0) {
+                        try p.warnMsg(.{
+                            .tag = .invalid_bit_range,
+                            .token = p.nodes.items(.main_token)[mods.bit_range_start],
+                        });
+                    }
+                    if (sentinel == 0 and mods.addrspace_node == 0) {
+                        return p.addNode(.{
+                            .tag = .ptr_type_aligned,
+                            .main_token = lbracket,
+                            .data = .{
+                                .lhs = mods.align_node,
+                                .rhs = elem_type,
+                            },
+                        });
+                    } else if (mods.align_node == 0 and mods.addrspace_node == 0) {
+                        return p.addNode(.{
+                            .tag = .ptr_type_sentinel,
+                            .main_token = lbracket,
+                            .data = .{
+                                .lhs = sentinel,
+                                .rhs = elem_type,
+                            },
+                        });
+                    } else {
+                        return p.addNode(.{
+                            .tag = .ptr_type,
+                            .main_token = lbracket,
+                            .data = .{
+                                .lhs = try p.addExtra(Node.PtrType{
+                                    .sentinel = sentinel,
+                                    .align_node = mods.align_node,
+                                    .addrspace_node = mods.addrspace_node,
+                                }),
+                                .rhs = elem_type,
+                            },
+                        });
+                    }
+                } else {
+                    // Pointer modifiers are not valid on the child type of
+                    // an array; diagnose them eagerly.
+                    switch (p.token_tags[p.tok_i]) {
+                        .keyword_align,
+                        .keyword_const,
+                        .keyword_volatile,
+                        .keyword_allowzero,
+                        .keyword_addrspace,
+                        => return p.fail(.ptr_mod_on_array_child_type),
+                        else => {},
+                    }
+                    const elem_type = try p.expectTypeExpr();
+                    if (sentinel == 0) {
+                        return p.addNode(.{
+                            .tag = .array_type,
+                            .main_token = lbracket,
+                            .data = .{
+                                .lhs = len_expr,
+                                .rhs = elem_type,
+                            },
+                        });
+                    } else {
+                        return p.addNode(.{
+                            .tag = .array_type_sentinel,
+                            .main_token = lbracket,
+                            .data = .{
+                                .lhs = len_expr,
+                                .rhs = try p.addExtra(.{
+                                    .elem_type = elem_type,
+                                    .sentinel = sentinel,
+                                }),
+                            },
+                        });
+                    }
+                }
+            },
+        },
+        else => return p.parseErrorUnionExpr(),
+    }
+}
+
+/// Same as parseTypeExpr, except a missing type expression is a fatal
+/// parse error.
+fn expectTypeExpr(p: *Parse) Error!Node.Index {
+    const node = try p.parseTypeExpr();
+    if (node == 0) return p.fail(.expected_type_expr);
+    return node;
+}
+
+/// PrimaryExpr
+/// <- AsmExpr
+/// / IfExpr
+/// / KEYWORD_break BreakLabel? Expr?
+/// / KEYWORD_comptime Expr
+/// / KEYWORD_nosuspend Expr
+/// / KEYWORD_continue BreakLabel?
+/// / KEYWORD_resume Expr
+/// / KEYWORD_return Expr?
+/// / BlockLabel? LoopExpr
+/// / Block
+/// / CurlySuffixExpr
+fn parsePrimaryExpr(p: *Parse) !Node.Index {
+    switch (p.token_tags[p.tok_i]) {
+        .keyword_asm => return p.expectAsmExpr(),
+        .keyword_if => return p.parseIfExpr(),
+        .keyword_break => {
+            p.tok_i += 1;
+            return p.addNode(.{
+                .tag = .@"break",
+                .main_token = p.tok_i - 1,
+                .data = .{
+                    .lhs = try p.parseBreakLabel(),
+                    .rhs = try p.parseExpr(),
+                },
+            });
+        },
+        .keyword_continue => {
+            p.tok_i += 1;
+            return p.addNode(.{
+                .tag = .@"continue",
+                .main_token = p.tok_i - 1,
+                .data = .{
+                    .lhs = try p.parseBreakLabel(),
+                    .rhs = undefined,
+                },
+            });
+        },
+        .keyword_comptime => {
+            p.tok_i += 1;
+            return p.addNode(.{
+                .tag = .@"comptime",
+                .main_token = p.tok_i - 1,
+                .data = .{
+                    .lhs = try p.expectExpr(),
+                    .rhs = undefined,
+                },
+            });
+        },
+        .keyword_nosuspend => {
+            p.tok_i += 1;
+            return p.addNode(.{
+                .tag = .@"nosuspend",
+                .main_token = p.tok_i - 1,
+                .data = .{
+                    .lhs = try p.expectExpr(),
+                    .rhs = undefined,
+                },
+            });
+        },
+        .keyword_resume => {
+            p.tok_i += 1;
+            return p.addNode(.{
+                .tag = .@"resume",
+                .main_token = p.tok_i - 1,
+                .data = .{
+                    .lhs = try p.expectExpr(),
+                    .rhs = undefined,
+                },
+            });
+        },
+        .keyword_return => {
+            p.tok_i += 1;
+            return p.addNode(.{
+                .tag = .@"return",
+                .main_token = p.tok_i - 1,
+                .data = .{
+                    .lhs = try p.parseExpr(),
+                    .rhs = undefined,
+                },
+            });
+        },
+        // "label:" may introduce a labeled loop or block; otherwise this
+        // is the start of an ordinary expression.
+        .identifier => {
+            if (p.token_tags[p.tok_i + 1] == .colon) {
+                switch (p.token_tags[p.tok_i + 2]) {
+                    .keyword_inline => {
+                        p.tok_i += 3;
+                        switch (p.token_tags[p.tok_i]) {
+                            .keyword_for => return p.parseForExpr(),
+                            .keyword_while => return p.parseWhileExpr(),
+                            else => return p.fail(.expected_inlinable),
+                        }
+                    },
+                    .keyword_for => {
+                        p.tok_i += 2;
+                        return p.parseForExpr();
+                    },
+                    .keyword_while => {
+                        p.tok_i += 2;
+                        return p.parseWhileExpr();
+                    },
+                    .l_brace => {
+                        p.tok_i += 2;
+                        return p.parseBlock();
+                    },
+                    else => return p.parseCurlySuffixExpr(),
+                }
+            } else {
+                return p.parseCurlySuffixExpr();
+            }
+        },
+        .keyword_inline => {
+            p.tok_i += 1;
+            switch (p.token_tags[p.tok_i]) {
+                .keyword_for => return p.parseForExpr(),
+                .keyword_while => return p.parseWhileExpr(),
+                else => return p.fail(.expected_inlinable),
+            }
+        },
+        .keyword_for => return p.parseForExpr(),
+        .keyword_while => return p.parseWhileExpr(),
+        .l_brace => return p.parseBlock(),
+        else => return p.parseCurlySuffixExpr(),
+    }
+}
+
+/// IfExpr <- IfPrefix Expr (KEYWORD_else Payload? Expr)?
+fn parseIfExpr(p: *Parse) !Node.Index {
+    // The if *expression* form parses its branch bodies with expectExpr.
+    return parseIf(p, expectExpr);
+}
+
+/// Block <- LBRACE Statement* RBRACE
+fn parseBlock(p: *Parse) !Node.Index {
+    const lbrace = p.eatToken(.l_brace) orelse return null_node;
+    // Collect statement node indices on the shared scratch stack;
+    // restored to its previous depth on exit.
+    const scratch_top = p.scratch.items.len;
+    defer p.scratch.shrinkRetainingCapacity(scratch_top);
+    while (true) {
+        if (p.token_tags[p.tok_i] == .r_brace) break;
+        const statement = try p.expectStatementRecoverable();
+        if (statement == 0) break;
+        try p.scratch.append(p.gpa, statement);
+    }
+    _ = try p.expectToken(.r_brace);
+    // tok_i - 2 is the token just before the consumed rbrace; a semicolon
+    // there selects the *_semicolon node tags.
+    const semicolon = (p.token_tags[p.tok_i - 2] == .semicolon);
+    const statements = p.scratch.items[scratch_top..];
+    switch (statements.len) {
+        0 => return p.addNode(.{
+            .tag = .block_two,
+            .main_token = lbrace,
+            .data = .{
+                .lhs = 0,
+                .rhs = 0,
+            },
+        }),
+        1 => return p.addNode(.{
+            .tag = if (semicolon) .block_two_semicolon else .block_two,
+            .main_token = lbrace,
+            .data = .{
+                .lhs = statements[0],
+                .rhs = 0,
+            },
+        }),
+        2 => return p.addNode(.{
+            .tag = if (semicolon) .block_two_semicolon else .block_two,
+            .main_token = lbrace,
+            .data = .{
+                .lhs = statements[0],
+                .rhs = statements[1],
+            },
+        }),
+        else => {
+            const span = try p.listToSpan(statements);
+            return p.addNode(.{
+                .tag = if (semicolon) .block_semicolon else .block,
+                .main_token = lbrace,
+                .data = .{
+                    .lhs = span.start,
+                    .rhs = span.end,
+                },
+            });
+        },
+    }
+}
+
+/// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload
+///
+/// ForExpr <- ForPrefix Expr (KEYWORD_else Expr)?
+fn parseForExpr(p: *Parse) !Node.Index {
+    const for_token = p.eatToken(.keyword_for) orelse return null_node;
+    _ = try p.expectToken(.l_paren);
+    const array_expr = try p.expectExpr();
+    _ = try p.expectToken(.r_paren);
+    // A for expression requires a |payload|; warn (recoverably) if absent.
+    const found_payload = try p.parsePtrIndexPayload();
+    if (found_payload == 0) try p.warn(.expected_loop_payload);
+
+    const then_expr = try p.expectExpr();
+    _ = p.eatToken(.keyword_else) orelse {
+        return p.addNode(.{
+            .tag = .for_simple,
+            .main_token = for_token,
+            .data = .{
+                .lhs = array_expr,
+                .rhs = then_expr,
+            },
+        });
+    };
+    const else_expr = try p.expectExpr();
+    return p.addNode(.{
+        .tag = .@"for",
+        .main_token = for_token,
+        .data = .{
+            .lhs = array_expr,
+            .rhs = try p.addExtra(Node.If{
+                .then_expr = then_expr,
+                .else_expr = else_expr,
+            }),
+        },
+    });
+}
+
+/// WhilePrefix <- KEYWORD_while LPAREN Expr RPAREN PtrPayload? WhileContinueExpr?
+///
+/// WhileExpr <- WhilePrefix Expr (KEYWORD_else Payload? Expr)?
+fn parseWhileExpr(p: *Parse) !Node.Index {
+    const while_token = p.eatToken(.keyword_while) orelse return null_node;
+    _ = try p.expectToken(.l_paren);
+    const condition = try p.expectExpr();
+    _ = try p.expectToken(.r_paren);
+    _ = try p.parsePtrPayload();
+    const cont_expr = try p.parseWhileContinueExpr();
+
+    const then_expr = try p.expectExpr();
+    _ = p.eatToken(.keyword_else) orelse {
+        // No else branch: pick the leaner node shape depending on whether
+        // a ": (continue_expr)" clause was present.
+        if (cont_expr == 0) {
+            return p.addNode(.{
+                .tag = .while_simple,
+                .main_token = while_token,
+                .data = .{
+                    .lhs = condition,
+                    .rhs = then_expr,
+                },
+            });
+        } else {
+            return p.addNode(.{
+                .tag = .while_cont,
+                .main_token = while_token,
+                .data = .{
+                    .lhs = condition,
+                    .rhs = try p.addExtra(Node.WhileCont{
+                        .cont_expr = cont_expr,
+                        .then_expr = then_expr,
+                    }),
+                },
+            });
+        }
+    };
+    _ = try p.parsePayload();
+    const else_expr = try p.expectExpr();
+    return p.addNode(.{
+        .tag = .@"while",
+        .main_token = while_token,
+        .data = .{
+            .lhs = condition,
+            .rhs = try p.addExtra(Node.While{
+                .cont_expr = cont_expr,
+                .then_expr = then_expr,
+                .else_expr = else_expr,
+            }),
+        },
+    });
+}
+
+/// CurlySuffixExpr <- TypeExpr InitList?
+///
+/// InitList
+/// <- LBRACE FieldInit (COMMA FieldInit)* COMMA? RBRACE
+/// / LBRACE Expr (COMMA Expr)* COMMA? RBRACE
+/// / LBRACE RBRACE
+fn parseCurlySuffixExpr(p: *Parse) !Node.Index {
+    const lhs = try p.parseTypeExpr();
+    if (lhs == 0) return null_node;
+    const lbrace = p.eatToken(.l_brace) orelse return lhs;
+
+    // If there are 0 or 1 items, we can use ArrayInitOne/StructInitOne;
+    // otherwise we use the full ArrayInit/StructInit.
+
+    const scratch_top = p.scratch.items.len;
+    defer p.scratch.shrinkRetainingCapacity(scratch_top);
+    // A leading ".field =" initializer selects the struct-init form;
+    // otherwise fall through to the array-init loop below.
+    const field_init = try p.parseFieldInit();
+    if (field_init != 0) {
+        try p.scratch.append(p.gpa, field_init);
+        while (true) {
+            switch (p.token_tags[p.tok_i]) {
+                .comma => p.tok_i += 1,
+                .r_brace => {
+                    p.tok_i += 1;
+                    break;
+                },
+                .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
+                // Likely just a missing comma; give error but continue parsing.
+                else => try p.warn(.expected_comma_after_initializer),
+            }
+            if (p.eatToken(.r_brace)) |_| break;
+            const next = try p.expectFieldInit();
+            try p.scratch.append(p.gpa, next);
+        }
+        // tok_i - 2 is the token before the consumed rbrace: trailing comma?
+        const comma = (p.token_tags[p.tok_i - 2] == .comma);
+        const inits = p.scratch.items[scratch_top..];
+        switch (inits.len) {
+            0 => unreachable,
+            1 => return p.addNode(.{
+                .tag = if (comma) .struct_init_one_comma else .struct_init_one,
+                .main_token = lbrace,
+                .data = .{
+                    .lhs = lhs,
+                    .rhs = inits[0],
+                },
+            }),
+            else => return p.addNode(.{
+                .tag = if (comma) .struct_init_comma else .struct_init,
+                .main_token = lbrace,
+                .data = .{
+                    .lhs = lhs,
+                    .rhs = try p.addExtra(try p.listToSpan(inits)),
+                },
+            }),
+        }
+    }
+
+    while (true) {
+        if (p.eatToken(.r_brace)) |_| break;
+        const elem_init = try p.expectExpr();
+        try p.scratch.append(p.gpa, elem_init);
+        switch (p.token_tags[p.tok_i]) {
+            .comma => p.tok_i += 1,
+            .r_brace => {
+                p.tok_i += 1;
+                break;
+            },
+            .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
+            // Likely just a missing comma; give error but continue parsing.
+            else => try p.warn(.expected_comma_after_initializer),
+        }
+    }
+    const comma = (p.token_tags[p.tok_i - 2] == .comma);
+    const inits = p.scratch.items[scratch_top..];
+    switch (inits.len) {
+        // "T{}" is ambiguous; represent it as an empty struct init.
+        0 => return p.addNode(.{
+            .tag = .struct_init_one,
+            .main_token = lbrace,
+            .data = .{
+                .lhs = lhs,
+                .rhs = 0,
+            },
+        }),
+        1 => return p.addNode(.{
+            .tag = if (comma) .array_init_one_comma else .array_init_one,
+            .main_token = lbrace,
+            .data = .{
+                .lhs = lhs,
+                .rhs = inits[0],
+            },
+        }),
+        else => return p.addNode(.{
+            .tag = if (comma) .array_init_comma else .array_init,
+            .main_token = lbrace,
+            .data = .{
+                .lhs = lhs,
+                .rhs = try p.addExtra(try p.listToSpan(inits)),
+            },
+        }),
+    }
+}
+
+/// ErrorUnionExpr <- SuffixExpr (EXCLAMATIONMARK TypeExpr)?
+fn parseErrorUnionExpr(p: *Parse) !Node.Index {
+    const lhs = try p.parseSuffixExpr();
+    if (lhs == 0) return null_node;
+    // Without a "!" this is just the suffix expression itself.
+    const bang_token = p.eatToken(.bang) orelse return lhs;
+    const payload_type = try p.expectTypeExpr();
+    return p.addNode(.{
+        .tag = .error_union,
+        .main_token = bang_token,
+        .data = .{
+            .lhs = lhs,
+            .rhs = payload_type,
+        },
+    });
+}
+
+/// SuffixExpr
+/// <- KEYWORD_async PrimaryTypeExpr SuffixOp* FnCallArguments
+/// / PrimaryTypeExpr (SuffixOp / FnCallArguments)*
+///
+/// FnCallArguments <- LPAREN ExprList RPAREN
+///
+/// ExprList <- (Expr COMMA)* Expr?
+fn parseSuffixExpr(p: *Parse) !Node.Index {
+    // "async" form: suffix ops are applied first, then exactly one call
+    // (an async expression must end in a function call).
+    if (p.eatToken(.keyword_async)) |_| {
+        var res = try p.expectPrimaryTypeExpr();
+        while (true) {
+            const node = try p.parseSuffixOp(res);
+            if (node == 0) break;
+            res = node;
+        }
+        const lparen = p.eatToken(.l_paren) orelse {
+            try p.warn(.expected_param_list);
+            return res;
+        };
+        const scratch_top = p.scratch.items.len;
+        defer p.scratch.shrinkRetainingCapacity(scratch_top);
+        while (true) {
+            if (p.eatToken(.r_paren)) |_| break;
+            const param = try p.expectExpr();
+            try p.scratch.append(p.gpa, param);
+            switch (p.token_tags[p.tok_i]) {
+                .comma => p.tok_i += 1,
+                .r_paren => {
+                    p.tok_i += 1;
+                    break;
+                },
+                .colon, .r_brace, .r_bracket => return p.failExpected(.r_paren),
+                // Likely just a missing comma; give error but continue parsing.
+                else => try p.warn(.expected_comma_after_arg),
+            }
+        }
+        // tok_i - 2 is the token before the consumed rparen: trailing comma?
+        const comma = (p.token_tags[p.tok_i - 2] == .comma);
+        const params = p.scratch.items[scratch_top..];
+        switch (params.len) {
+            0 => return p.addNode(.{
+                .tag = if (comma) .async_call_one_comma else .async_call_one,
+                .main_token = lparen,
+                .data = .{
+                    .lhs = res,
+                    .rhs = 0,
+                },
+            }),
+            1 => return p.addNode(.{
+                .tag = if (comma) .async_call_one_comma else .async_call_one,
+                .main_token = lparen,
+                .data = .{
+                    .lhs = res,
+                    .rhs = params[0],
+                },
+            }),
+            else => return p.addNode(.{
+                .tag = if (comma) .async_call_comma else .async_call,
+                .main_token = lparen,
+                .data = .{
+                    .lhs = res,
+                    .rhs = try p.addExtra(try p.listToSpan(params)),
+                },
+            }),
+        }
+    }
+
+    // Non-async form: alternate suffix ops and call argument lists until
+    // neither applies.
+    var res = try p.parsePrimaryTypeExpr();
+    if (res == 0) return res;
+    while (true) {
+        const suffix_op = try p.parseSuffixOp(res);
+        if (suffix_op != 0) {
+            res = suffix_op;
+            continue;
+        }
+        const lparen = p.eatToken(.l_paren) orelse return res;
+        const scratch_top = p.scratch.items.len;
+        defer p.scratch.shrinkRetainingCapacity(scratch_top);
+        while (true) {
+            if (p.eatToken(.r_paren)) |_| break;
+            const param = try p.expectExpr();
+            try p.scratch.append(p.gpa, param);
+            switch (p.token_tags[p.tok_i]) {
+                .comma => p.tok_i += 1,
+                .r_paren => {
+                    p.tok_i += 1;
+                    break;
+                },
+                .colon, .r_brace, .r_bracket => return p.failExpected(.r_paren),
+                // Likely just a missing comma; give error but continue parsing.
+                else => try p.warn(.expected_comma_after_arg),
+            }
+        }
+        const comma = (p.token_tags[p.tok_i - 2] == .comma);
+        const params = p.scratch.items[scratch_top..];
+        res = switch (params.len) {
+            0 => try p.addNode(.{
+                .tag = if (comma) .call_one_comma else .call_one,
+                .main_token = lparen,
+                .data = .{
+                    .lhs = res,
+                    .rhs = 0,
+                },
+            }),
+            1 => try p.addNode(.{
+                .tag = if (comma) .call_one_comma else .call_one,
+                .main_token = lparen,
+                .data = .{
+                    .lhs = res,
+                    .rhs = params[0],
+                },
+            }),
+            else => try p.addNode(.{
+                .tag = if (comma) .call_comma else .call,
+                .main_token = lparen,
+                .data = .{
+                    .lhs = res,
+                    .rhs = try p.addExtra(try p.listToSpan(params)),
+                },
+            }),
+        };
+    }
+}
+
+/// PrimaryTypeExpr
+/// <- BUILTINIDENTIFIER FnCallArguments
+/// / CHAR_LITERAL
+/// / ContainerDecl
+/// / DOT IDENTIFIER
+/// / DOT InitList
+/// / ErrorSetDecl
+/// / FLOAT
+/// / FnProto
+/// / GroupedExpr
+/// / LabeledTypeExpr
+/// / IDENTIFIER
+/// / IfTypeExpr
+/// / INTEGER
+/// / KEYWORD_comptime TypeExpr
+/// / KEYWORD_error DOT IDENTIFIER
+/// / KEYWORD_anyframe
+/// / KEYWORD_unreachable
+/// / STRINGLITERAL
+/// / SwitchExpr
+///
+/// ContainerDecl <- (KEYWORD_extern / KEYWORD_packed)? ContainerDeclAuto
+///
+/// ContainerDeclAuto <- ContainerDeclType LBRACE container_doc_comment? ContainerMembers RBRACE
+///
+/// InitList
+/// <- LBRACE FieldInit (COMMA FieldInit)* COMMA? RBRACE
+/// / LBRACE Expr (COMMA Expr)* COMMA? RBRACE
+/// / LBRACE RBRACE
+///
+/// ErrorSetDecl <- KEYWORD_error LBRACE IdentifierList RBRACE
+///
+/// GroupedExpr <- LPAREN Expr RPAREN
+///
+/// IfTypeExpr <- IfPrefix TypeExpr (KEYWORD_else Payload? TypeExpr)?
+///
+/// LabeledTypeExpr
+/// <- BlockLabel Block
+/// / BlockLabel? LoopTypeExpr
+///
+/// LoopTypeExpr <- KEYWORD_inline? (ForTypeExpr / WhileTypeExpr)
+fn parsePrimaryTypeExpr(p: *Parse) !Node.Index {
+    switch (p.token_tags[p.tok_i]) {
+        .char_literal => return p.addNode(.{
+            .tag = .char_literal,
+            .main_token = p.nextToken(),
+            .data = .{
+                .lhs = undefined,
+                .rhs = undefined,
+            },
+        }),
+        .number_literal => return p.addNode(.{
+            .tag = .number_literal,
+            .main_token = p.nextToken(),
+            .data = .{
+                .lhs = undefined,
+                .rhs = undefined,
+            },
+        }),
+        .keyword_unreachable => return p.addNode(.{
+            .tag = .unreachable_literal,
+            .main_token = p.nextToken(),
+            .data = .{
+                .lhs = undefined,
+                .rhs = undefined,
+            },
+        }),
+        .keyword_anyframe => return p.addNode(.{
+            .tag = .anyframe_literal,
+            .main_token = p.nextToken(),
+            .data = .{
+                .lhs = undefined,
+                .rhs = undefined,
+            },
+        }),
+        .string_literal => {
+            const main_token = p.nextToken();
+            return p.addNode(.{
+                .tag = .string_literal,
+                .main_token = main_token,
+                .data = .{
+                    .lhs = undefined,
+                    .rhs = undefined,
+                },
+            });
+        },
+
+        .builtin => return p.parseBuiltinCall(),
+        .keyword_fn => return p.parseFnProto(),
+        .keyword_if => return p.parseIf(expectTypeExpr),
+        .keyword_switch => return p.expectSwitchExpr(),
+
+        .keyword_extern,
+        .keyword_packed,
+        => {
+            p.tok_i += 1;
+            return p.parseContainerDeclAuto();
+        },
+
+        .keyword_struct,
+        .keyword_opaque,
+        .keyword_enum,
+        .keyword_union,
+        => return p.parseContainerDeclAuto(),
+
+        .keyword_comptime => return p.addNode(.{
+            .tag = .@"comptime",
+            .main_token = p.nextToken(),
+            .data = .{
+                .lhs = try p.expectTypeExpr(),
+                .rhs = undefined,
+            },
+        }),
+        // Consecutive multiline string lines form a single literal; the
+        // node stores the first and last line tokens.
+        .multiline_string_literal_line => {
+            const first_line = p.nextToken();
+            while (p.token_tags[p.tok_i] == .multiline_string_literal_line) {
+                p.tok_i += 1;
+            }
+            return p.addNode(.{
+                .tag = .multiline_string_literal,
+                .main_token = first_line,
+                .data = .{
+                    .lhs = first_line,
+                    .rhs = p.tok_i - 1,
+                },
+            });
+        },
+        // "label:" may introduce a labeled block or loop type expression;
+        // a plain identifier is emitted as-is.
+        .identifier => switch (p.token_tags[p.tok_i + 1]) {
+            .colon => switch (p.token_tags[p.tok_i + 2]) {
+                .keyword_inline => {
+                    p.tok_i += 3;
+                    switch (p.token_tags[p.tok_i]) {
+                        .keyword_for => return p.parseForTypeExpr(),
+                        .keyword_while => return p.parseWhileTypeExpr(),
+                        else => return p.fail(.expected_inlinable),
+                    }
+                },
+                .keyword_for => {
+                    p.tok_i += 2;
+                    return p.parseForTypeExpr();
+                },
+                .keyword_while => {
+                    p.tok_i += 2;
+                    return p.parseWhileTypeExpr();
+                },
+                .l_brace => {
+                    p.tok_i += 2;
+                    return p.parseBlock();
+                },
+                else => return p.addNode(.{
+                    .tag = .identifier,
+                    .main_token = p.nextToken(),
+                    .data = .{
+                        .lhs = undefined,
+                        .rhs = undefined,
+                    },
+                }),
+            },
+            else => return p.addNode(.{
+                .tag = .identifier,
+                .main_token = p.nextToken(),
+                .data = .{
+                    .lhs = undefined,
+                    .rhs = undefined,
+                },
+            }),
+        },
+        .keyword_inline => {
+            p.tok_i += 1;
+            switch (p.token_tags[p.tok_i]) {
+                .keyword_for => return p.parseForTypeExpr(),
+                .keyword_while => return p.parseWhileTypeExpr(),
+                else => return p.fail(.expected_inlinable),
+            }
+        },
+        .keyword_for => return p.parseForTypeExpr(),
+        .keyword_while => return p.parseWhileTypeExpr(),
+        // ".name" enum literal, or ".{...}" anonymous initializer list.
+        .period => switch (p.token_tags[p.tok_i + 1]) {
+            .identifier => return p.addNode(.{
+                .tag = .enum_literal,
+                .data = .{
+                    .lhs = p.nextToken(), // dot
+                    .rhs = undefined,
+                },
+                .main_token = p.nextToken(), // identifier
+            }),
+            .l_brace => {
+                const lbrace = p.tok_i + 1;
+                p.tok_i = lbrace + 1;
+
+                // If there are 0, 1, or 2 items, we can use ArrayInitDotTwo/StructInitDotTwo;
+                // otherwise we use the full ArrayInitDot/StructInitDot.
+
+                const scratch_top = p.scratch.items.len;
+                defer p.scratch.shrinkRetainingCapacity(scratch_top);
+                const field_init = try p.parseFieldInit();
+                if (field_init != 0) {
+                    try p.scratch.append(p.gpa, field_init);
+                    while (true) {
+                        switch (p.token_tags[p.tok_i]) {
+                            .comma => p.tok_i += 1,
+                            .r_brace => {
+                                p.tok_i += 1;
+                                break;
+                            },
+                            .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
+                            // Likely just a missing comma; give error but continue parsing.
+                            else => try p.warn(.expected_comma_after_initializer),
+                        }
+                        if (p.eatToken(.r_brace)) |_| break;
+                        const next = try p.expectFieldInit();
+                        try p.scratch.append(p.gpa, next);
+                    }
+                    const comma = (p.token_tags[p.tok_i - 2] == .comma);
+                    const inits = p.scratch.items[scratch_top..];
+                    switch (inits.len) {
+                        0 => unreachable,
+                        1 => return p.addNode(.{
+                            .tag = if (comma) .struct_init_dot_two_comma else .struct_init_dot_two,
+                            .main_token = lbrace,
+                            .data = .{
+                                .lhs = inits[0],
+                                .rhs = 0,
+                            },
+                        }),
+                        2 => return p.addNode(.{
+                            .tag = if (comma) .struct_init_dot_two_comma else .struct_init_dot_two,
+                            .main_token = lbrace,
+                            .data = .{
+                                .lhs = inits[0],
+                                .rhs = inits[1],
+                            },
+                        }),
+                        else => {
+                            const span = try p.listToSpan(inits);
+                            return p.addNode(.{
+                                .tag = if (comma) .struct_init_dot_comma else .struct_init_dot,
+                                .main_token = lbrace,
+                                .data = .{
+                                    .lhs = span.start,
+                                    .rhs = span.end,
+                                },
+                            });
+                        },
+                    }
+                }
+
+                while (true) {
+                    if (p.eatToken(.r_brace)) |_| break;
+                    const elem_init = try p.expectExpr();
+                    try p.scratch.append(p.gpa, elem_init);
+                    switch (p.token_tags[p.tok_i]) {
+                        .comma => p.tok_i += 1,
+                        .r_brace => {
+                            p.tok_i += 1;
+                            break;
+                        },
+                        .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
+                        // Likely just a missing comma; give error but continue parsing.
+                        else => try p.warn(.expected_comma_after_initializer),
+                    }
+                }
+                const comma = (p.token_tags[p.tok_i - 2] == .comma);
+                const inits = p.scratch.items[scratch_top..];
+                switch (inits.len) {
+                    0 => return p.addNode(.{
+                        .tag = .struct_init_dot_two,
+                        .main_token = lbrace,
+                        .data = .{
+                            .lhs = 0,
+                            .rhs = 0,
+                        },
+                    }),
+                    1 => return p.addNode(.{
+                        .tag = if (comma) .array_init_dot_two_comma else .array_init_dot_two,
+                        .main_token = lbrace,
+                        .data = .{
+                            .lhs = inits[0],
+                            .rhs = 0,
+                        },
+                    }),
+                    2 => return p.addNode(.{
+                        .tag = if (comma) .array_init_dot_two_comma else .array_init_dot_two,
+                        .main_token = lbrace,
+                        .data = .{
+                            .lhs = inits[0],
+                            .rhs = inits[1],
+                        },
+                    }),
+                    else => {
+                        const span = try p.listToSpan(inits);
+                        return p.addNode(.{
+                            .tag = if (comma) .array_init_dot_comma else .array_init_dot,
+                            .main_token = lbrace,
+                            .data = .{
+                                .lhs = span.start,
+                                .rhs = span.end,
+                            },
+                        });
+                    },
+                }
+            },
+            else => return null_node,
+        },
+        // "error{...}" set declaration, or "error.Name" error value.
+        .keyword_error => switch (p.token_tags[p.tok_i + 1]) {
+            .l_brace => {
+                const error_token = p.tok_i;
+                p.tok_i += 2;
+                while (true) {
+                    if (p.eatToken(.r_brace)) |_| break;
+                    _ = try p.eatDocComments();
+                    _ = try p.expectToken(.identifier);
+                    switch (p.token_tags[p.tok_i]) {
+                        .comma => p.tok_i += 1,
+                        .r_brace => {
+                            p.tok_i += 1;
+                            break;
+                        },
+                        .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
+                        // Likely just a missing comma; give error but continue parsing.
+                        else => try p.warn(.expected_comma_after_field),
+                    }
+                }
+                return p.addNode(.{
+                    .tag = .error_set_decl,
+                    .main_token = error_token,
+                    .data = .{
+                        .lhs = undefined,
+                        .rhs = p.tok_i - 1, // rbrace
+                    },
+                });
+            },
+            else => {
+                const main_token = p.nextToken();
+                const period = p.eatToken(.period);
+                if (period == null) try p.warnExpected(.period);
+                const identifier = p.eatToken(.identifier);
+                if (identifier == null) try p.warnExpected(.identifier);
+                return p.addNode(.{
+                    .tag = .error_value,
+                    .main_token = main_token,
+                    .data = .{
+                        .lhs = period orelse 0,
+                        .rhs = identifier orelse 0,
+                    },
+                });
+            },
+        },
+        .l_paren => return p.addNode(.{
+            .tag = .grouped_expression,
+            .main_token = p.nextToken(),
+            .data = .{
+                .lhs = try p.expectExpr(),
+                .rhs = try p.expectToken(.r_paren),
+            },
+        }),
+        else => return null_node,
+    }
+}
+
+/// Same as parsePrimaryTypeExpr, except a missing primary type
+/// expression is a fatal parse error.
+fn expectPrimaryTypeExpr(p: *Parse) !Node.Index {
+    const node = try p.parsePrimaryTypeExpr();
+    if (node == 0) return p.fail(.expected_primary_type_expr);
+    return node;
+}
+
+/// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload
+///
+/// ForTypeExpr <- ForPrefix TypeExpr (KEYWORD_else TypeExpr)?
+fn parseForTypeExpr(p: *Parse) !Node.Index {
+    const for_token = p.eatToken(.keyword_for) orelse return null_node;
+    _ = try p.expectToken(.l_paren);
+    const array_expr = try p.expectExpr();
+    _ = try p.expectToken(.r_paren);
+    // A for requires a |payload|; warn (recoverably) if absent.
+    const found_payload = try p.parsePtrIndexPayload();
+    if (found_payload == 0) try p.warn(.expected_loop_payload);
+
+    // Unlike parseForExpr, the branch bodies here are type expressions.
+    const then_expr = try p.expectTypeExpr();
+    _ = p.eatToken(.keyword_else) orelse {
+        return p.addNode(.{
+            .tag = .for_simple,
+            .main_token = for_token,
+            .data = .{
+                .lhs = array_expr,
+                .rhs = then_expr,
+            },
+        });
+    };
+    const else_expr = try p.expectTypeExpr();
+    return p.addNode(.{
+        .tag = .@"for",
+        .main_token = for_token,
+        .data = .{
+            .lhs = array_expr,
+            .rhs = try p.addExtra(Node.If{
+                .then_expr = then_expr,
+                .else_expr = else_expr,
+            }),
+        },
+    });
+}
+
+/// WhilePrefix <- KEYWORD_while LPAREN Expr RPAREN PtrPayload? WhileContinueExpr?
+///
+/// WhileTypeExpr <- WhilePrefix TypeExpr (KEYWORD_else Payload? TypeExpr)?
+///
+/// Produces one of three node shapes depending on which optional parts are
+/// present: `while_simple` (no continue expr, no else), `while_cont`
+/// (continue expr, no else), or `while` (else present).
+fn parseWhileTypeExpr(p: *Parse) !Node.Index {
+    const while_token = p.eatToken(.keyword_while) orelse return null_node;
+    _ = try p.expectToken(.l_paren);
+    const condition = try p.expectExpr();
+    _ = try p.expectToken(.r_paren);
+    _ = try p.parsePtrPayload();
+    const cont_expr = try p.parseWhileContinueExpr();
+
+    const then_expr = try p.expectTypeExpr();
+    _ = p.eatToken(.keyword_else) orelse {
+        if (cont_expr == 0) {
+            return p.addNode(.{
+                .tag = .while_simple,
+                .main_token = while_token,
+                .data = .{
+                    .lhs = condition,
+                    .rhs = then_expr,
+                },
+            });
+        } else {
+            return p.addNode(.{
+                .tag = .while_cont,
+                .main_token = while_token,
+                .data = .{
+                    .lhs = condition,
+                    .rhs = try p.addExtra(Node.WhileCont{
+                        .cont_expr = cont_expr,
+                        .then_expr = then_expr,
+                    }),
+                },
+            });
+        }
+    };
+    _ = try p.parsePayload();
+    const else_expr = try p.expectTypeExpr();
+    return p.addNode(.{
+        .tag = .@"while",
+        .main_token = while_token,
+        .data = .{
+            .lhs = condition,
+            .rhs = try p.addExtra(Node.While{
+                .cont_expr = cont_expr,
+                .then_expr = then_expr,
+                .else_expr = else_expr,
+            }),
+        },
+    });
+}
+
+/// SwitchExpr <- KEYWORD_switch LPAREN Expr RPAREN LBRACE SwitchProngList RBRACE
+///
+/// Caller must have already verified that the current token is `switch`.
+fn expectSwitchExpr(p: *Parse) !Node.Index {
+    const switch_token = p.assertToken(.keyword_switch);
+    _ = try p.expectToken(.l_paren);
+    const expr_node = try p.expectExpr();
+    _ = try p.expectToken(.r_paren);
+    _ = try p.expectToken(.l_brace);
+    const cases = try p.parseSwitchProngList();
+    // A trailing comma before the closing brace selects the `_comma` node
+    // variant (relevant to the formatter).
+    const trailing_comma = p.token_tags[p.tok_i - 1] == .comma;
+    _ = try p.expectToken(.r_brace);
+
+    return p.addNode(.{
+        .tag = if (trailing_comma) .switch_comma else .@"switch",
+        .main_token = switch_token,
+        .data = .{
+            .lhs = expr_node,
+            .rhs = try p.addExtra(Node.SubRange{
+                .start = cases.start,
+                .end = cases.end,
+            }),
+        },
+    });
+}
+
+/// AsmExpr <- KEYWORD_asm KEYWORD_volatile? LPAREN Expr AsmOutput? RPAREN
+///
+/// AsmOutput <- COLON AsmOutputList AsmInput?
+///
+/// AsmInput <- COLON AsmInputList AsmClobbers?
+///
+/// AsmClobbers <- COLON StringList
+///
+/// StringList <- (STRINGLITERAL COMMA)* STRINGLITERAL?
+///
+/// AsmOutputList <- (AsmOutputItem COMMA)* AsmOutputItem?
+///
+/// AsmInputList <- (AsmInputItem COMMA)* AsmInputItem?
+fn expectAsmExpr(p: *Parse) !Node.Index {
+    const asm_token = p.assertToken(.keyword_asm);
+    _ = p.eatToken(.keyword_volatile);
+    _ = try p.expectToken(.l_paren);
+    const template = try p.expectExpr();
+
+    // `asm (template)` with no output/input/clobber sections gets the
+    // compact node form.
+    if (p.eatToken(.r_paren)) |rparen| {
+        return p.addNode(.{
+            .tag = .asm_simple,
+            .main_token = asm_token,
+            .data = .{
+                .lhs = template,
+                .rhs = rparen,
+            },
+        });
+    }
+
+    _ = try p.expectToken(.colon);
+
+    const scratch_top = p.scratch.items.len;
+    defer p.scratch.shrinkRetainingCapacity(scratch_top);
+
+    // First section: output operands.
+    while (true) {
+        const output_item = try p.parseAsmOutputItem();
+        if (output_item == 0) break;
+        try p.scratch.append(p.gpa, output_item);
+        switch (p.token_tags[p.tok_i]) {
+            .comma => p.tok_i += 1,
+            // All possible delimiters.
+            .colon, .r_paren, .r_brace, .r_bracket => break,
+            // Likely just a missing comma; give error but continue parsing.
+            else => try p.warnExpected(.comma),
+        }
+    }
+    // Second section: input operands.
+    if (p.eatToken(.colon)) |_| {
+        while (true) {
+            const input_item = try p.parseAsmInputItem();
+            if (input_item == 0) break;
+            try p.scratch.append(p.gpa, input_item);
+            switch (p.token_tags[p.tok_i]) {
+                .comma => p.tok_i += 1,
+                // All possible delimiters.
+                .colon, .r_paren, .r_brace, .r_bracket => break,
+                // Likely just a missing comma; give error but continue parsing.
+                else => try p.warnExpected(.comma),
+            }
+        }
+        // Third section: clobbers, a plain string list that produces no nodes.
+        if (p.eatToken(.colon)) |_| {
+            while (p.eatToken(.string_literal)) |_| {
+                switch (p.token_tags[p.tok_i]) {
+                    .comma => p.tok_i += 1,
+                    .colon, .r_paren, .r_brace, .r_bracket => break,
+                    // Likely just a missing comma; give error but continue parsing.
+                    else => try p.warnExpected(.comma),
+                }
+            }
+        }
+    }
+    const rparen = try p.expectToken(.r_paren);
+    const span = try p.listToSpan(p.scratch.items[scratch_top..]);
+    return p.addNode(.{
+        .tag = .@"asm",
+        .main_token = asm_token,
+        .data = .{
+            .lhs = template,
+            .rhs = try p.addExtra(Node.Asm{
+                .items_start = span.start,
+                .items_end = span.end,
+                .rparen = rparen,
+            }),
+        },
+    });
+}
+
+/// AsmOutputItem <- LBRACKET IDENTIFIER RBRACKET STRINGLITERAL LPAREN (MINUSRARROW TypeExpr / IDENTIFIER) RPAREN
+///
+/// Returns null_node when the next token is not `[`.
+fn parseAsmOutputItem(p: *Parse) !Node.Index {
+    _ = p.eatToken(.l_bracket) orelse return null_node;
+    const identifier = try p.expectToken(.identifier);
+    _ = try p.expectToken(.r_bracket);
+    _ = try p.expectToken(.string_literal);
+    _ = try p.expectToken(.l_paren);
+    // `->` introduces a return-type output; otherwise the output is bound to
+    // a variable identifier and lhs stays null_node.
+    const type_expr: Node.Index = blk: {
+        if (p.eatToken(.arrow)) |_| {
+            break :blk try p.expectTypeExpr();
+        } else {
+            _ = try p.expectToken(.identifier);
+            break :blk null_node;
+        }
+    };
+    const rparen = try p.expectToken(.r_paren);
+    return p.addNode(.{
+        .tag = .asm_output,
+        .main_token = identifier,
+        .data = .{
+            .lhs = type_expr,
+            .rhs = rparen,
+        },
+    });
+}
+
+/// AsmInputItem <- LBRACKET IDENTIFIER RBRACKET STRINGLITERAL LPAREN Expr RPAREN
+///
+/// Returns null_node when the next token is not `[`.
+fn parseAsmInputItem(p: *Parse) !Node.Index {
+    _ = p.eatToken(.l_bracket) orelse return null_node;
+    const identifier = try p.expectToken(.identifier);
+    _ = try p.expectToken(.r_bracket);
+    _ = try p.expectToken(.string_literal);
+    _ = try p.expectToken(.l_paren);
+    const expr = try p.expectExpr();
+    const rparen = try p.expectToken(.r_paren);
+    return p.addNode(.{
+        .tag = .asm_input,
+        .main_token = identifier,
+        .data = .{
+            .lhs = expr,
+            .rhs = rparen,
+        },
+    });
+}
+
+/// BreakLabel <- COLON IDENTIFIER
+///
+/// Returns the label's identifier token, or 0 when no `:` follows.
+fn parseBreakLabel(p: *Parse) !TokenIndex {
+    if (p.eatToken(.colon) == null) return @as(TokenIndex, 0);
+    return p.expectToken(.identifier);
+}
+
+/// BlockLabel <- IDENTIFIER COLON
+///
+/// Returns the label's identifier token, or 0 when no label is present.
+/// Consumes nothing unless both tokens match.
+fn parseBlockLabel(p: *Parse) TokenIndex {
+    const tags = p.token_tags;
+    if (tags[p.tok_i] != .identifier or tags[p.tok_i + 1] != .colon) {
+        return null_node;
+    }
+    const label_token = p.tok_i;
+    p.tok_i += 2;
+    return label_token;
+}
+
+/// FieldInit <- DOT IDENTIFIER EQUAL Expr
+///
+/// Returns null_node (consuming nothing) unless the next three tokens are
+/// exactly `.name =`.
+fn parseFieldInit(p: *Parse) !Node.Index {
+    const tags = p.token_tags;
+    const is_field_init = tags[p.tok_i] == .period and
+        tags[p.tok_i + 1] == .identifier and
+        tags[p.tok_i + 2] == .equal;
+    if (!is_field_init) return null_node;
+    p.tok_i += 3;
+    return p.expectExpr();
+}
+
+/// Parses `.name = expr`, aborting the parse with `expected_initializer`
+/// when the next tokens do not form a field initializer.
+fn expectFieldInit(p: *Parse) !Node.Index {
+    const tags = p.token_tags;
+    const is_field_init = tags[p.tok_i] == .period and
+        tags[p.tok_i + 1] == .identifier and
+        tags[p.tok_i + 2] == .equal;
+    if (!is_field_init) return p.fail(.expected_initializer);
+    p.tok_i += 3;
+    return p.expectExpr();
+}
+
+/// WhileContinueExpr <- COLON LPAREN AssignExpr RPAREN
+///
+/// Returns null_node when no `:` follows, except that a `(` on the same
+/// line is treated as a continue expression missing its `:` and fails.
+fn parseWhileContinueExpr(p: *Parse) !Node.Index {
+    _ = p.eatToken(.colon) orelse {
+        if (p.token_tags[p.tok_i] == .l_paren and
+            p.tokensOnSameLine(p.tok_i - 1, p.tok_i))
+            return p.fail(.expected_continue_expr);
+        return null_node;
+    };
+    _ = try p.expectToken(.l_paren);
+    const node = try p.parseAssignExpr();
+    if (node == 0) return p.fail(.expected_expr_or_assignment);
+    _ = try p.expectToken(.r_paren);
+    return node;
+}
+
+/// LinkSection <- KEYWORD_linksection LPAREN Expr RPAREN
+///
+/// Returns the section expression's node, or null_node when the
+/// `linksection` keyword is absent.
+fn parseLinkSection(p: *Parse) !Node.Index {
+    if (p.eatToken(.keyword_linksection) == null) return null_node;
+    _ = try p.expectToken(.l_paren);
+    const section_expr = try p.expectExpr();
+    _ = try p.expectToken(.r_paren);
+    return section_expr;
+}
+
+/// CallConv <- KEYWORD_callconv LPAREN Expr RPAREN
+///
+/// Returns the calling-convention expression's node, or null_node when the
+/// `callconv` keyword is absent.
+fn parseCallconv(p: *Parse) !Node.Index {
+    if (p.eatToken(.keyword_callconv) == null) return null_node;
+    _ = try p.expectToken(.l_paren);
+    const callconv_expr = try p.expectExpr();
+    _ = try p.expectToken(.r_paren);
+    return callconv_expr;
+}
+
+/// AddrSpace <- KEYWORD_addrspace LPAREN Expr RPAREN
+///
+/// Returns the address-space expression's node, or null_node when the
+/// `addrspace` keyword is absent.
+fn parseAddrSpace(p: *Parse) !Node.Index {
+    if (p.eatToken(.keyword_addrspace) == null) return null_node;
+    _ = try p.expectToken(.l_paren);
+    const addrspace_expr = try p.expectExpr();
+    _ = try p.expectToken(.r_paren);
+    return addrspace_expr;
+}
+
+/// This function can return null nodes and then still return nodes afterwards,
+/// such as in the case of anytype and `...`. Caller must look for rparen to find
+/// out when there are no more param decls left.
+///
+/// ParamDecl
+///     <- doc_comment? (KEYWORD_noalias / KEYWORD_comptime)? (IDENTIFIER COLON)? ParamType
+///      / DOT3
+///
+/// ParamType
+///     <- KEYWORD_anytype
+///      / TypeExpr
+fn expectParamDecl(p: *Parse) !Node.Index {
+    _ = try p.eatDocComments();
+    switch (p.token_tags[p.tok_i]) {
+        .keyword_noalias, .keyword_comptime => p.tok_i += 1,
+        // `...` (varargs) produces no node.
+        .ellipsis3 => {
+            p.tok_i += 1;
+            return null_node;
+        },
+        else => {},
+    }
+    // Skip an optional `name:` prefix.
+    if (p.token_tags[p.tok_i] == .identifier and
+        p.token_tags[p.tok_i + 1] == .colon)
+    {
+        p.tok_i += 2;
+    }
+    switch (p.token_tags[p.tok_i]) {
+        // `anytype` also produces no node.
+        .keyword_anytype => {
+            p.tok_i += 1;
+            return null_node;
+        },
+        else => return p.expectTypeExpr(),
+    }
+}
+
+/// Payload <- PIPE IDENTIFIER PIPE
+///
+/// Returns the payload's identifier token, or 0 when no `|` follows.
+fn parsePayload(p: *Parse) !TokenIndex {
+    if (p.eatToken(.pipe) == null) return @as(TokenIndex, 0);
+    const name_token = try p.expectToken(.identifier);
+    _ = try p.expectToken(.pipe);
+    return name_token;
+}
+
+/// PtrPayload <- PIPE ASTERISK? IDENTIFIER PIPE
+///
+/// Returns the identifier token, or 0 when no payload is present.
+fn parsePtrPayload(p: *Parse) !TokenIndex {
+    _ = p.eatToken(.pipe) orelse return @as(TokenIndex, 0);
+    _ = p.eatToken(.asterisk);
+    const identifier = try p.expectToken(.identifier);
+    _ = try p.expectToken(.pipe);
+    return identifier;
+}
+
+/// Returns the first identifier token, if any; 0 when no payload is present.
+///
+/// PtrIndexPayload <- PIPE ASTERISK? IDENTIFIER (COMMA IDENTIFIER)? PIPE
+fn parsePtrIndexPayload(p: *Parse) !TokenIndex {
+    _ = p.eatToken(.pipe) orelse return @as(TokenIndex, 0);
+    _ = p.eatToken(.asterisk);
+    const identifier = try p.expectToken(.identifier);
+    // Optional second (index) capture.
+    if (p.eatToken(.comma) != null) {
+        _ = try p.expectToken(.identifier);
+    }
+    _ = try p.expectToken(.pipe);
+    return identifier;
+}
+
+/// SwitchProng <- KEYWORD_inline? SwitchCase EQUALRARROW PtrIndexPayload? AssignExpr
+///
+/// SwitchCase
+///     <- SwitchItem (COMMA SwitchItem)* COMMA?
+///      / KEYWORD_else
+///
+/// Returns null_node when no prong is present, retreating past any consumed
+/// `inline` keyword so the caller sees an unconsumed token stream.
+fn parseSwitchProng(p: *Parse) !Node.Index {
+    const scratch_top = p.scratch.items.len;
+    defer p.scratch.shrinkRetainingCapacity(scratch_top);
+
+    const is_inline = p.eatToken(.keyword_inline) != null;
+
+    // `else` prongs carry no items; otherwise collect the case items.
+    if (p.eatToken(.keyword_else) == null) {
+        while (true) {
+            const item = try p.parseSwitchItem();
+            if (item == 0) break;
+            try p.scratch.append(p.gpa, item);
+            if (p.eatToken(.comma) == null) break;
+        }
+        if (scratch_top == p.scratch.items.len) {
+            // No items and no `else`: not a prong. Un-consume `inline`.
+            if (is_inline) p.tok_i -= 1;
+            return null_node;
+        }
+    }
+    const arrow_token = try p.expectToken(.equal_angle_bracket_right);
+    _ = try p.parsePtrIndexPayload();
+
+    // 0 or 1 items fit inline in the node; more go through extra_data.
+    const items = p.scratch.items[scratch_top..];
+    switch (items.len) {
+        0 => return p.addNode(.{
+            .tag = if (is_inline) .switch_case_inline_one else .switch_case_one,
+            .main_token = arrow_token,
+            .data = .{
+                .lhs = 0,
+                .rhs = try p.expectAssignExpr(),
+            },
+        }),
+        1 => return p.addNode(.{
+            .tag = if (is_inline) .switch_case_inline_one else .switch_case_one,
+            .main_token = arrow_token,
+            .data = .{
+                .lhs = items[0],
+                .rhs = try p.expectAssignExpr(),
+            },
+        }),
+        else => return p.addNode(.{
+            .tag = if (is_inline) .switch_case_inline else .switch_case,
+            .main_token = arrow_token,
+            .data = .{
+                .lhs = try p.addExtra(try p.listToSpan(items)),
+                .rhs = try p.expectAssignExpr(),
+            },
+        }),
+    }
+}
+
+/// SwitchItem <- Expr (DOT3 Expr)?
+///
+/// Returns null_node when no expression is present; a `...` after the first
+/// expression produces a `switch_range` node.
+fn parseSwitchItem(p: *Parse) !Node.Index {
+    const first = try p.parseExpr();
+    if (first == 0) return null_node;
+
+    const ellipsis = p.eatToken(.ellipsis3) orelse return first;
+    return p.addNode(.{
+        .tag = .switch_range,
+        .main_token = ellipsis,
+        .data = .{
+            .lhs = first,
+            .rhs = try p.expectExpr(),
+        },
+    });
+}
+
+/// Qualifier nodes collected by parsePtrModifiers; 0 means "not present".
+const PtrModifiers = struct {
+    align_node: Node.Index,
+    addrspace_node: Node.Index,
+    bit_range_start: Node.Index,
+    bit_range_end: Node.Index,
+};
+
+/// Parses the qualifiers that may follow a pointer's size prefix:
+/// `align(...)` (optionally with a `:start:end` bit range), `const`,
+/// `volatile`, `allowzero`, and `addrspace(...)`. Duplicate qualifiers
+/// produce a warning but parsing continues.
+fn parsePtrModifiers(p: *Parse) !PtrModifiers {
+    var result: PtrModifiers = .{
+        .align_node = 0,
+        .addrspace_node = 0,
+        .bit_range_start = 0,
+        .bit_range_end = 0,
+    };
+    var saw_const = false;
+    var saw_volatile = false;
+    var saw_allowzero = false;
+    var saw_addrspace = false;
+    while (true) {
+        switch (p.token_tags[p.tok_i]) {
+            .keyword_align => {
+                if (result.align_node != 0) {
+                    try p.warn(.extra_align_qualifier);
+                }
+                p.tok_i += 1;
+                _ = try p.expectToken(.l_paren);
+                result.align_node = try p.expectExpr();
+
+                // Optional bit range for packed-struct pointers:
+                // `align(a:start:end)`.
+                if (p.eatToken(.colon)) |_| {
+                    result.bit_range_start = try p.expectExpr();
+                    _ = try p.expectToken(.colon);
+                    result.bit_range_end = try p.expectExpr();
+                }
+
+                _ = try p.expectToken(.r_paren);
+            },
+            .keyword_const => {
+                if (saw_const) {
+                    try p.warn(.extra_const_qualifier);
+                }
+                p.tok_i += 1;
+                saw_const = true;
+            },
+            .keyword_volatile => {
+                if (saw_volatile) {
+                    try p.warn(.extra_volatile_qualifier);
+                }
+                p.tok_i += 1;
+                saw_volatile = true;
+            },
+            .keyword_allowzero => {
+                if (saw_allowzero) {
+                    try p.warn(.extra_allowzero_qualifier);
+                }
+                p.tok_i += 1;
+                saw_allowzero = true;
+            },
+            .keyword_addrspace => {
+                if (saw_addrspace) {
+                    try p.warn(.extra_addrspace_qualifier);
+                }
+                result.addrspace_node = try p.parseAddrSpace();
+                // Fix: previously `saw_addrspace` was never set, so the
+                // duplicate-qualifier warning above could never fire.
+                saw_addrspace = true;
+            },
+            else => return result,
+        }
+    }
+}
+
+/// SuffixOp
+///     <- LBRACKET Expr (DOT2 (Expr? (COLON Expr)?)?)? RBRACKET
+///      / DOT IDENTIFIER
+///      / DOTASTERISK
+///      / DOTQUESTIONMARK
+///
+/// Parses one suffix operation applied to `lhs` (indexing, slicing, field
+/// access, dereference, optional unwrap). Returns null_node when the next
+/// token does not begin a suffix op.
+fn parseSuffixOp(p: *Parse, lhs: Node.Index) !Node.Index {
+    switch (p.token_tags[p.tok_i]) {
+        .l_bracket => {
+            const lbracket = p.nextToken();
+            const index_expr = try p.expectExpr();
+
+            // `..` distinguishes a slice from an array access.
+            if (p.eatToken(.ellipsis2)) |_| {
+                const end_expr = try p.parseExpr();
+                // `[a..b :s]` — slice with sentinel.
+                if (p.eatToken(.colon)) |_| {
+                    const sentinel = try p.expectExpr();
+                    _ = try p.expectToken(.r_bracket);
+                    return p.addNode(.{
+                        .tag = .slice_sentinel,
+                        .main_token = lbracket,
+                        .data = .{
+                            .lhs = lhs,
+                            .rhs = try p.addExtra(Node.SliceSentinel{
+                                .start = index_expr,
+                                .end = end_expr,
+                                .sentinel = sentinel,
+                            }),
+                        },
+                    });
+                }
+                _ = try p.expectToken(.r_bracket);
+                // `[a..]` — open-ended slice.
+                if (end_expr == 0) {
+                    return p.addNode(.{
+                        .tag = .slice_open,
+                        .main_token = lbracket,
+                        .data = .{
+                            .lhs = lhs,
+                            .rhs = index_expr,
+                        },
+                    });
+                }
+                // `[a..b]` — bounded slice.
+                return p.addNode(.{
+                    .tag = .slice,
+                    .main_token = lbracket,
+                    .data = .{
+                        .lhs = lhs,
+                        .rhs = try p.addExtra(Node.Slice{
+                            .start = index_expr,
+                            .end = end_expr,
+                        }),
+                    },
+                });
+            }
+            // `[i]` — array access.
+            _ = try p.expectToken(.r_bracket);
+            return p.addNode(.{
+                .tag = .array_access,
+                .main_token = lbracket,
+                .data = .{
+                    .lhs = lhs,
+                    .rhs = index_expr,
+                },
+            });
+        },
+        .period_asterisk => return p.addNode(.{
+            .tag = .deref,
+            .main_token = p.nextToken(),
+            .data = .{
+                .lhs = lhs,
+                .rhs = undefined,
+            },
+        }),
+        // `.**` and friends: warn, but still build the deref node.
+        .invalid_periodasterisks => {
+            try p.warn(.asterisk_after_ptr_deref);
+            return p.addNode(.{
+                .tag = .deref,
+                .main_token = p.nextToken(),
+                .data = .{
+                    .lhs = lhs,
+                    .rhs = undefined,
+                },
+            });
+        },
+        .period => switch (p.token_tags[p.tok_i + 1]) {
+            .identifier => return p.addNode(.{
+                .tag = .field_access,
+                .main_token = p.nextToken(),
+                .data = .{
+                    .lhs = lhs,
+                    .rhs = p.nextToken(),
+                },
+            }),
+            .question_mark => return p.addNode(.{
+                .tag = .unwrap_optional,
+                .main_token = p.nextToken(),
+                .data = .{
+                    .lhs = lhs,
+                    .rhs = p.nextToken(),
+                },
+            }),
+            .l_brace => {
+                // this a misplaced `.{`, handle the error somewhere else
+                return null_node;
+            },
+            else => {
+                p.tok_i += 1;
+                try p.warn(.expected_suffix_op);
+                return null_node;
+            },
+        },
+        else => return null_node,
+    }
+}
+
+/// Caller must have already verified the first token.
+///
+/// ContainerDeclAuto <- ContainerDeclType LBRACE container_doc_comment? ContainerMembers RBRACE
+///
+/// ContainerDeclType
+///     <- KEYWORD_struct (LPAREN Expr RPAREN)?
+///      / KEYWORD_opaque
+///      / KEYWORD_enum (LPAREN Expr RPAREN)?
+///      / KEYWORD_union (LPAREN (KEYWORD_enum (LPAREN Expr RPAREN)? / Expr) RPAREN)?
+fn parseContainerDeclAuto(p: *Parse) !Node.Index {
+    const main_token = p.nextToken();
+    const arg_expr = switch (p.token_tags[main_token]) {
+        .keyword_opaque => null_node,
+        .keyword_struct, .keyword_enum => blk: {
+            // Optional argument, e.g. `enum(u8)` / `struct(u32)`.
+            if (p.eatToken(.l_paren)) |_| {
+                const expr = try p.expectExpr();
+                _ = try p.expectToken(.r_paren);
+                break :blk expr;
+            } else {
+                break :blk null_node;
+            }
+        },
+        .keyword_union => blk: {
+            if (p.eatToken(.l_paren)) |_| {
+                if (p.eatToken(.keyword_enum)) |_| {
+                    if (p.eatToken(.l_paren)) |_| {
+                        // `union(enum(TagType))` — parsed and returned here
+                        // because it has its own node tags.
+                        const enum_tag_expr = try p.expectExpr();
+                        _ = try p.expectToken(.r_paren);
+                        _ = try p.expectToken(.r_paren);
+
+                        _ = try p.expectToken(.l_brace);
+                        const members = try p.parseContainerMembers();
+                        const members_span = try members.toSpan(p);
+                        _ = try p.expectToken(.r_brace);
+                        return p.addNode(.{
+                            .tag = switch (members.trailing) {
+                                true => .tagged_union_enum_tag_trailing,
+                                false => .tagged_union_enum_tag,
+                            },
+                            .main_token = main_token,
+                            .data = .{
+                                .lhs = enum_tag_expr,
+                                .rhs = try p.addExtra(members_span),
+                            },
+                        });
+                    } else {
+                        // `union(enum)` — tagged union with inferred tag type.
+                        _ = try p.expectToken(.r_paren);
+
+                        _ = try p.expectToken(.l_brace);
+                        const members = try p.parseContainerMembers();
+                        _ = try p.expectToken(.r_brace);
+                        // Up to two members fit inline in the node data.
+                        if (members.len <= 2) {
+                            return p.addNode(.{
+                                .tag = switch (members.trailing) {
+                                    true => .tagged_union_two_trailing,
+                                    false => .tagged_union_two,
+                                },
+                                .main_token = main_token,
+                                .data = .{
+                                    .lhs = members.lhs,
+                                    .rhs = members.rhs,
+                                },
+                            });
+                        } else {
+                            const span = try members.toSpan(p);
+                            return p.addNode(.{
+                                .tag = switch (members.trailing) {
+                                    true => .tagged_union_trailing,
+                                    false => .tagged_union,
+                                },
+                                .main_token = main_token,
+                                .data = .{
+                                    .lhs = span.start,
+                                    .rhs = span.end,
+                                },
+                            });
+                        }
+                    }
+                } else {
+                    // `union(Expr)` — explicit tag type expression.
+                    const expr = try p.expectExpr();
+                    _ = try p.expectToken(.r_paren);
+                    break :blk expr;
+                }
+            } else {
+                break :blk null_node;
+            }
+        },
+        else => {
+            p.tok_i -= 1;
+            return p.fail(.expected_container);
+        },
+    };
+    _ = try p.expectToken(.l_brace);
+    const members = try p.parseContainerMembers();
+    _ = try p.expectToken(.r_brace);
+    if (arg_expr == 0) {
+        // Up to two members fit inline in the node data.
+        if (members.len <= 2) {
+            return p.addNode(.{
+                .tag = switch (members.trailing) {
+                    true => .container_decl_two_trailing,
+                    false => .container_decl_two,
+                },
+                .main_token = main_token,
+                .data = .{
+                    .lhs = members.lhs,
+                    .rhs = members.rhs,
+                },
+            });
+        } else {
+            const span = try members.toSpan(p);
+            return p.addNode(.{
+                .tag = switch (members.trailing) {
+                    true => .container_decl_trailing,
+                    false => .container_decl,
+                },
+                .main_token = main_token,
+                .data = .{
+                    .lhs = span.start,
+                    .rhs = span.end,
+                },
+            });
+        }
+    } else {
+        const span = try members.toSpan(p);
+        return p.addNode(.{
+            .tag = switch (members.trailing) {
+                true => .container_decl_arg_trailing,
+                false => .container_decl_arg,
+            },
+            .main_token = main_token,
+            .data = .{
+                .lhs = arg_expr,
+                .rhs = try p.addExtra(Node.SubRange{
+                    .start = span.start,
+                    .end = span.end,
+                }),
+            },
+        });
+    }
+}
+
+/// Give a helpful error message for those transitioning from
+/// C's 'struct Foo {};' to Zig's 'const Foo = struct {};'.
+///
+/// Returns true (having consumed the whole declaration) when a C-style
+/// container was found, so the caller can continue with the next member.
+fn parseCStyleContainer(p: *Parse) Error!bool {
+    const main_token = p.tok_i;
+    switch (p.token_tags[p.tok_i]) {
+        .keyword_enum, .keyword_union, .keyword_struct => {},
+        else => return false,
+    }
+    const identifier = p.tok_i + 1;
+    if (p.token_tags[identifier] != .identifier) return false;
+    p.tok_i += 2;
+
+    try p.warnMsg(.{
+        .tag = .c_style_container,
+        .token = identifier,
+        .extra = .{ .expected_tag = p.token_tags[main_token] },
+    });
+    try p.warnMsg(.{
+        .tag = .zig_style_container,
+        .is_note = true,
+        .token = identifier,
+        .extra = .{ .expected_tag = p.token_tags[main_token] },
+    });
+
+    // Consume the body and trailing semicolon so parsing can recover.
+    _ = try p.expectToken(.l_brace);
+    _ = try p.parseContainerMembers();
+    _ = try p.expectToken(.r_brace);
+    try p.expectSemicolon(.expected_semi_after_decl, true);
+    return true;
+}
+
+/// ByteAlign <- KEYWORD_align LPAREN Expr RPAREN
+///
+/// Returns the alignment expression's node, or null_node when the `align`
+/// keyword is absent.
+fn parseByteAlign(p: *Parse) !Node.Index {
+    _ = p.eatToken(.keyword_align) orelse return null_node;
+    _ = try p.expectToken(.l_paren);
+    const expr = try p.expectExpr();
+    _ = try p.expectToken(.r_paren);
+    return expr;
+}
+
+/// SwitchProngList <- (SwitchProng COMMA)* SwitchProng?
+///
+/// Collects prongs into scratch, then returns them as a span in extra_data.
+fn parseSwitchProngList(p: *Parse) !Node.SubRange {
+    const scratch_top = p.scratch.items.len;
+    defer p.scratch.shrinkRetainingCapacity(scratch_top);
+
+    while (true) {
+        const item = try parseSwitchProng(p);
+        if (item == 0) break;
+
+        try p.scratch.append(p.gpa, item);
+
+        switch (p.token_tags[p.tok_i]) {
+            .comma => p.tok_i += 1,
+            // All possible delimiters.
+            .colon, .r_paren, .r_brace, .r_bracket => break,
+            // Likely just a missing comma; give error but continue parsing.
+            else => try p.warn(.expected_comma_after_switch_prong),
+        }
+    }
+    return p.listToSpan(p.scratch.items[scratch_top..]);
+}
+
+/// ParamDeclList <- (ParamDecl COMMA)* ParamDecl?
+///
+/// Consumes the surrounding parentheses. A `...` anywhere but last position
+/// is reported via `varargs_nonfinal`.
+fn parseParamDeclList(p: *Parse) !SmallSpan {
+    _ = try p.expectToken(.l_paren);
+    const scratch_top = p.scratch.items.len;
+    defer p.scratch.shrinkRetainingCapacity(scratch_top);
+    var varargs: union(enum) { none, seen, nonfinal: TokenIndex } = .none;
+    while (true) {
+        if (p.eatToken(.r_paren)) |_| break;
+        // Anything after a `...` makes the varargs non-final.
+        if (varargs == .seen) varargs = .{ .nonfinal = p.tok_i };
+        const param = try p.expectParamDecl();
+        if (param != 0) {
+            try p.scratch.append(p.gpa, param);
+        } else if (p.token_tags[p.tok_i - 1] == .ellipsis3) {
+            // expectParamDecl returned null_node because it consumed `...`.
+            if (varargs == .none) varargs = .seen;
+        }
+        switch (p.token_tags[p.tok_i]) {
+            .comma => p.tok_i += 1,
+            .r_paren => {
+                p.tok_i += 1;
+                break;
+            },
+            .colon, .r_brace, .r_bracket => return p.failExpected(.r_paren),
+            // Likely just a missing comma; give error but continue parsing.
+            else => try p.warn(.expected_comma_after_param),
+        }
+    }
+    if (varargs == .nonfinal) {
+        try p.warnMsg(.{ .tag = .varargs_nonfinal, .token = varargs.nonfinal });
+    }
+    const params = p.scratch.items[scratch_top..];
+    return switch (params.len) {
+        0 => SmallSpan{ .zero_or_one = 0 },
+        1 => SmallSpan{ .zero_or_one = params[0] },
+        else => SmallSpan{ .multi = try p.listToSpan(params) },
+    };
+}
+
+/// FnCallArguments <- LPAREN ExprList RPAREN
+///
+/// ExprList <- (Expr COMMA)* Expr?
+///
+/// Parses a `@builtin(...)` call. Caller must have verified the current
+/// token is `.builtin`.
+fn parseBuiltinCall(p: *Parse) !Node.Index {
+    const builtin_token = p.assertToken(.builtin);
+    if (p.token_tags[p.nextToken()] != .l_paren) {
+        p.tok_i -= 1;
+        try p.warn(.expected_param_list);
+        // Pretend this was an identifier so we can continue parsing.
+        return p.addNode(.{
+            .tag = .identifier,
+            .main_token = builtin_token,
+            .data = .{
+                .lhs = undefined,
+                .rhs = undefined,
+            },
+        });
+    }
+    const scratch_top = p.scratch.items.len;
+    defer p.scratch.shrinkRetainingCapacity(scratch_top);
+    while (true) {
+        if (p.eatToken(.r_paren)) |_| break;
+        const param = try p.expectExpr();
+        try p.scratch.append(p.gpa, param);
+        switch (p.token_tags[p.tok_i]) {
+            .comma => p.tok_i += 1,
+            .r_paren => {
+                p.tok_i += 1;
+                break;
+            },
+            // Likely just a missing comma; give error but continue parsing.
+            else => try p.warn(.expected_comma_after_arg),
+        }
+    }
+    // tok_i - 1 is the consumed r_paren, so tok_i - 2 is the token before it;
+    // a comma there means a trailing comma was written.
+    const comma = (p.token_tags[p.tok_i - 2] == .comma);
+    const params = p.scratch.items[scratch_top..];
+    // Up to two arguments fit inline in the node; more go via extra_data.
+    switch (params.len) {
+        0 => return p.addNode(.{
+            .tag = .builtin_call_two,
+            .main_token = builtin_token,
+            .data = .{
+                .lhs = 0,
+                .rhs = 0,
+            },
+        }),
+        1 => return p.addNode(.{
+            .tag = if (comma) .builtin_call_two_comma else .builtin_call_two,
+            .main_token = builtin_token,
+            .data = .{
+                .lhs = params[0],
+                .rhs = 0,
+            },
+        }),
+        2 => return p.addNode(.{
+            .tag = if (comma) .builtin_call_two_comma else .builtin_call_two,
+            .main_token = builtin_token,
+            .data = .{
+                .lhs = params[0],
+                .rhs = params[1],
+            },
+        }),
+        else => {
+            const span = try p.listToSpan(params);
+            return p.addNode(.{
+                .tag = if (comma) .builtin_call_comma else .builtin_call,
+                .main_token = builtin_token,
+                .data = .{
+                    .lhs = span.start,
+                    .rhs = span.end,
+                },
+            });
+        },
+    }
+}
+
+/// IfPrefix <- KEYWORD_if LPAREN Expr RPAREN PtrPayload?
+///
+/// Parses an if expression/statement, delegating the branch bodies to
+/// `bodyParseFn`. Returns null_node when the next token is not `if`.
+fn parseIf(p: *Parse, comptime bodyParseFn: fn (p: *Parse) Error!Node.Index) !Node.Index {
+    const if_token = p.eatToken(.keyword_if) orelse return null_node;
+    _ = try p.expectToken(.l_paren);
+    const condition = try p.expectExpr();
+    _ = try p.expectToken(.r_paren);
+    _ = try p.parsePtrPayload();
+
+    const then_expr = try bodyParseFn(p);
+    assert(then_expr != 0);
+
+    _ = p.eatToken(.keyword_else) orelse return p.addNode(.{
+        .tag = .if_simple,
+        .main_token = if_token,
+        .data = .{
+            .lhs = condition,
+            .rhs = then_expr,
+        },
+    });
+    _ = try p.parsePayload();
+    const else_expr = try bodyParseFn(p);
+    // Fix: this assert previously re-checked `then_expr` (copy-paste); the
+    // value produced by the else branch is the one that must be non-null.
+    assert(else_expr != 0);
+
+    return p.addNode(.{
+        .tag = .@"if",
+        .main_token = if_token,
+        .data = .{
+            .lhs = condition,
+            .rhs = try p.addExtra(Node.If{
+                .then_expr = then_expr,
+                .else_expr = else_expr,
+            }),
+        },
+    });
+}
+
+/// Skips over doc comment tokens. Returns the first one, if any.
+///
+/// A doc comment on the same line as the previous token is warned about and
+/// excluded from the returned run.
+fn eatDocComments(p: *Parse) !?TokenIndex {
+    if (p.eatToken(.doc_comment)) |tok| {
+        var first_line = tok;
+        if (tok > 0 and tokensOnSameLine(p, tok - 1, tok)) {
+            try p.warnMsg(.{
+                .tag = .same_line_doc_comment,
+                .token = tok,
+            });
+            // Restart the run on the next line, if it continues there.
+            first_line = p.eatToken(.doc_comment) orelse return null;
+        }
+        while (p.eatToken(.doc_comment)) |_| {}
+        return first_line;
+    }
+    return null;
+}
+
+/// True when no newline occurs between the starts of token1 and token2.
+fn tokensOnSameLine(p: *Parse, token1: TokenIndex, token2: TokenIndex) bool {
+    const between = p.source[p.token_starts[token1]..p.token_starts[token2]];
+    return std.mem.indexOfScalar(u8, between, '\n') == null;
+}
+
+/// Consumes and returns the current token if it has the given tag;
+/// otherwise consumes nothing and returns null.
+fn eatToken(p: *Parse, tag: Token.Tag) ?TokenIndex {
+    if (p.token_tags[p.tok_i] != tag) return null;
+    return p.nextToken();
+}
+
+/// Consumes the current token, asserting that it has the given tag.
+fn assertToken(p: *Parse, tag: Token.Tag) TokenIndex {
+    const consumed = p.nextToken();
+    assert(p.token_tags[consumed] == tag);
+    return consumed;
+}
+
+/// Consumes and returns the current token if it has the given tag;
+/// otherwise emits an `expected_token` error and aborts the parse.
+fn expectToken(p: *Parse, tag: Token.Tag) Error!TokenIndex {
+    if (p.token_tags[p.tok_i] == tag) return p.nextToken();
+    return p.failMsg(.{
+        .tag = .expected_token,
+        .token = p.tok_i,
+        .extra = .{ .expected_tag = tag },
+    });
+}
+
+/// Consumes a semicolon, or reports `error_tag`. When `recoverable` is
+/// false, a missing semicolon aborts the parse after being reported.
+fn expectSemicolon(p: *Parse, error_tag: AstError.Tag, recoverable: bool) Error!void {
+    if (p.token_tags[p.tok_i] != .semicolon) {
+        try p.warn(error_tag);
+        if (!recoverable) return error.ParseError;
+        return;
+    }
+    _ = p.nextToken();
+}
+
+/// Returns the current token index and advances past it.
+fn nextToken(p: *Parse) TokenIndex {
+    defer p.tok_i += 1;
+    return p.tok_i;
+}
+
+/// Sentinel node index meaning "no node".
+const null_node: Node.Index = 0;
+
+const Parse = @This();
+const std = @import("../std.zig");
+const assert = std.debug.assert;
+const Allocator = std.mem.Allocator;
+const Ast = std.zig.Ast;
+const Node = Ast.Node;
+const AstError = Ast.Error;
+const TokenIndex = Ast.TokenIndex;
+const Token = std.zig.Token;
+
+test {
+ _ = @import("parser_test.zig");
+}
diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig
deleted file mode 100644
index fdb122b19d..0000000000
--- a/lib/std/zig/parse.zig
+++ /dev/null
@@ -1,3852 +0,0 @@
-const std = @import("../std.zig");
-const assert = std.debug.assert;
-const Allocator = std.mem.Allocator;
-const Ast = std.zig.Ast;
-const Node = Ast.Node;
-const AstError = Ast.Error;
-const TokenIndex = Ast.TokenIndex;
-const Token = std.zig.Token;
-
-pub const Error = error{ParseError} || Allocator.Error;
-
-/// Result should be freed with tree.deinit() when there are
-/// no more references to any of the tokens or nodes.
-pub fn parse(gpa: Allocator, source: [:0]const u8) Allocator.Error!Ast {
- var tokens = Ast.TokenList{};
- defer tokens.deinit(gpa);
-
- // Empirically, the zig std lib has an 8:1 ratio of source bytes to token count.
- const estimated_token_count = source.len / 8;
- try tokens.ensureTotalCapacity(gpa, estimated_token_count);
-
- var tokenizer = std.zig.Tokenizer.init(source);
- while (true) {
- const token = tokenizer.next();
- try tokens.append(gpa, .{
- .tag = token.tag,
- .start = @intCast(u32, token.loc.start),
- });
- if (token.tag == .eof) break;
- }
-
- var parser: Parser = .{
- .source = source,
- .gpa = gpa,
- .token_tags = tokens.items(.tag),
- .token_starts = tokens.items(.start),
- .errors = .{},
- .nodes = .{},
- .extra_data = .{},
- .scratch = .{},
- .tok_i = 0,
- };
- defer parser.errors.deinit(gpa);
- defer parser.nodes.deinit(gpa);
- defer parser.extra_data.deinit(gpa);
- defer parser.scratch.deinit(gpa);
-
- // Empirically, Zig source code has a 2:1 ratio of tokens to AST nodes.
- // Make sure at least 1 so we can use appendAssumeCapacity on the root node below.
- const estimated_node_count = (tokens.len + 2) / 2;
- try parser.nodes.ensureTotalCapacity(gpa, estimated_node_count);
-
- try parser.parseRoot();
-
- // TODO experiment with compacting the MultiArrayList slices here
- return Ast{
- .source = source,
- .tokens = tokens.toOwnedSlice(),
- .nodes = parser.nodes.toOwnedSlice(),
- .extra_data = try parser.extra_data.toOwnedSlice(gpa),
- .errors = try parser.errors.toOwnedSlice(gpa),
- };
-}
-
-const null_node: Node.Index = 0;
-
-/// Represents in-progress parsing, will be converted to an Ast after completion.
-const Parser = struct {
- gpa: Allocator,
- source: []const u8,
- token_tags: []const Token.Tag,
- token_starts: []const Ast.ByteOffset,
- tok_i: TokenIndex,
- errors: std.ArrayListUnmanaged(AstError),
- nodes: Ast.NodeList,
- extra_data: std.ArrayListUnmanaged(Node.Index),
- scratch: std.ArrayListUnmanaged(Node.Index),
-
- const SmallSpan = union(enum) {
- zero_or_one: Node.Index,
- multi: Node.SubRange,
- };
-
- const Members = struct {
- len: usize,
- lhs: Node.Index,
- rhs: Node.Index,
- trailing: bool,
-
- fn toSpan(self: Members, p: *Parser) !Node.SubRange {
- if (self.len <= 2) {
- const nodes = [2]Node.Index{ self.lhs, self.rhs };
- return p.listToSpan(nodes[0..self.len]);
- } else {
- return Node.SubRange{ .start = self.lhs, .end = self.rhs };
- }
- }
- };
-
- fn listToSpan(p: *Parser, list: []const Node.Index) !Node.SubRange {
- try p.extra_data.appendSlice(p.gpa, list);
- return Node.SubRange{
- .start = @intCast(Node.Index, p.extra_data.items.len - list.len),
- .end = @intCast(Node.Index, p.extra_data.items.len),
- };
- }
-
- fn addNode(p: *Parser, elem: Ast.NodeList.Elem) Allocator.Error!Node.Index {
- const result = @intCast(Node.Index, p.nodes.len);
- try p.nodes.append(p.gpa, elem);
- return result;
- }
-
- fn setNode(p: *Parser, i: usize, elem: Ast.NodeList.Elem) Node.Index {
- p.nodes.set(i, elem);
- return @intCast(Node.Index, i);
- }
-
- fn reserveNode(p: *Parser, tag: Ast.Node.Tag) !usize {
- try p.nodes.resize(p.gpa, p.nodes.len + 1);
- p.nodes.items(.tag)[p.nodes.len - 1] = tag;
- return p.nodes.len - 1;
- }
-
- fn unreserveNode(p: *Parser, node_index: usize) void {
- if (p.nodes.len == node_index) {
- p.nodes.resize(p.gpa, p.nodes.len - 1) catch unreachable;
- } else {
- // There is zombie node left in the tree, let's make it as inoffensive as possible
- // (sadly there's no no-op node)
- p.nodes.items(.tag)[node_index] = .unreachable_literal;
- p.nodes.items(.main_token)[node_index] = p.tok_i;
- }
- }
-
- fn addExtra(p: *Parser, extra: anytype) Allocator.Error!Node.Index {
- const fields = std.meta.fields(@TypeOf(extra));
- try p.extra_data.ensureUnusedCapacity(p.gpa, fields.len);
- const result = @intCast(u32, p.extra_data.items.len);
- inline for (fields) |field| {
- comptime assert(field.type == Node.Index);
- p.extra_data.appendAssumeCapacity(@field(extra, field.name));
- }
- return result;
- }
-
- fn warnExpected(p: *Parser, expected_token: Token.Tag) error{OutOfMemory}!void {
- @setCold(true);
- try p.warnMsg(.{
- .tag = .expected_token,
- .token = p.tok_i,
- .extra = .{ .expected_tag = expected_token },
- });
- }
-
- fn warn(p: *Parser, error_tag: AstError.Tag) error{OutOfMemory}!void {
- @setCold(true);
- try p.warnMsg(.{ .tag = error_tag, .token = p.tok_i });
- }
-
- fn warnMsg(p: *Parser, msg: Ast.Error) error{OutOfMemory}!void {
- @setCold(true);
- switch (msg.tag) {
- .expected_semi_after_decl,
- .expected_semi_after_stmt,
- .expected_comma_after_field,
- .expected_comma_after_arg,
- .expected_comma_after_param,
- .expected_comma_after_initializer,
- .expected_comma_after_switch_prong,
- .expected_semi_or_else,
- .expected_semi_or_lbrace,
- .expected_token,
- .expected_block,
- .expected_block_or_assignment,
- .expected_block_or_expr,
- .expected_block_or_field,
- .expected_expr,
- .expected_expr_or_assignment,
- .expected_fn,
- .expected_inlinable,
- .expected_labelable,
- .expected_param_list,
- .expected_prefix_expr,
- .expected_primary_type_expr,
- .expected_pub_item,
- .expected_return_type,
- .expected_suffix_op,
- .expected_type_expr,
- .expected_var_decl,
- .expected_var_decl_or_fn,
- .expected_loop_payload,
- .expected_container,
- => if (msg.token != 0 and !p.tokensOnSameLine(msg.token - 1, msg.token)) {
- var copy = msg;
- copy.token_is_prev = true;
- copy.token -= 1;
- return p.errors.append(p.gpa, copy);
- },
- else => {},
- }
- try p.errors.append(p.gpa, msg);
- }
-
- fn fail(p: *Parser, tag: Ast.Error.Tag) error{ ParseError, OutOfMemory } {
- @setCold(true);
- return p.failMsg(.{ .tag = tag, .token = p.tok_i });
- }
-
- fn failExpected(p: *Parser, expected_token: Token.Tag) error{ ParseError, OutOfMemory } {
- @setCold(true);
- return p.failMsg(.{
- .tag = .expected_token,
- .token = p.tok_i,
- .extra = .{ .expected_tag = expected_token },
- });
- }
-
- fn failMsg(p: *Parser, msg: Ast.Error) error{ ParseError, OutOfMemory } {
- @setCold(true);
- try p.warnMsg(msg);
- return error.ParseError;
- }
-
- /// Root <- skip container_doc_comment? ContainerMembers eof
- fn parseRoot(p: *Parser) !void {
- // Root node must be index 0.
- p.nodes.appendAssumeCapacity(.{
- .tag = .root,
- .main_token = 0,
- .data = undefined,
- });
- const root_members = try p.parseContainerMembers();
- const root_decls = try root_members.toSpan(p);
- if (p.token_tags[p.tok_i] != .eof) {
- try p.warnExpected(.eof);
- }
- p.nodes.items(.data)[0] = .{
- .lhs = root_decls.start,
- .rhs = root_decls.end,
- };
- }
-
- /// ContainerMembers <- ContainerDeclarations (ContainerField COMMA)* (ContainerField / ContainerDeclarations)
- ///
- /// ContainerDeclarations
- /// <- TestDecl ContainerDeclarations
- /// / ComptimeDecl ContainerDeclarations
- /// / doc_comment? KEYWORD_pub? Decl ContainerDeclarations
- /// /
- ///
- /// ComptimeDecl <- KEYWORD_comptime Block
- fn parseContainerMembers(p: *Parser) !Members {
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
-
- var field_state: union(enum) {
- /// No fields have been seen.
- none,
- /// Currently parsing fields.
- seen,
- /// Saw fields and then a declaration after them.
- /// Payload is first token of previous declaration.
- end: Node.Index,
- /// There was a declaration between fields, don't report more errors.
- err,
- } = .none;
-
- var last_field: TokenIndex = undefined;
-
- // Skip container doc comments.
- while (p.eatToken(.container_doc_comment)) |_| {}
-
- var trailing = false;
- while (true) {
- const doc_comment = try p.eatDocComments();
-
- switch (p.token_tags[p.tok_i]) {
- .keyword_test => {
- if (doc_comment) |some| {
- try p.warnMsg(.{ .tag = .test_doc_comment, .token = some });
- }
- const test_decl_node = try p.expectTestDeclRecoverable();
- if (test_decl_node != 0) {
- if (field_state == .seen) {
- field_state = .{ .end = test_decl_node };
- }
- try p.scratch.append(p.gpa, test_decl_node);
- }
- trailing = false;
- },
- .keyword_comptime => switch (p.token_tags[p.tok_i + 1]) {
- .l_brace => {
- if (doc_comment) |some| {
- try p.warnMsg(.{ .tag = .comptime_doc_comment, .token = some });
- }
- const comptime_token = p.nextToken();
- const block = p.parseBlock() catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.ParseError => blk: {
- p.findNextContainerMember();
- break :blk null_node;
- },
- };
- if (block != 0) {
- const comptime_node = try p.addNode(.{
- .tag = .@"comptime",
- .main_token = comptime_token,
- .data = .{
- .lhs = block,
- .rhs = undefined,
- },
- });
- if (field_state == .seen) {
- field_state = .{ .end = comptime_node };
- }
- try p.scratch.append(p.gpa, comptime_node);
- }
- trailing = false;
- },
- else => {
- const identifier = p.tok_i;
- defer last_field = identifier;
- const container_field = p.expectContainerField() catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.ParseError => {
- p.findNextContainerMember();
- continue;
- },
- };
- switch (field_state) {
- .none => field_state = .seen,
- .err, .seen => {},
- .end => |node| {
- try p.warnMsg(.{
- .tag = .decl_between_fields,
- .token = p.nodes.items(.main_token)[node],
- });
- try p.warnMsg(.{
- .tag = .previous_field,
- .is_note = true,
- .token = last_field,
- });
- try p.warnMsg(.{
- .tag = .next_field,
- .is_note = true,
- .token = identifier,
- });
- // Continue parsing; error will be reported later.
- field_state = .err;
- },
- }
- try p.scratch.append(p.gpa, container_field);
- switch (p.token_tags[p.tok_i]) {
- .comma => {
- p.tok_i += 1;
- trailing = true;
- continue;
- },
- .r_brace, .eof => {
- trailing = false;
- break;
- },
- else => {},
- }
- // There is not allowed to be a decl after a field with no comma.
- // Report error but recover parser.
- try p.warn(.expected_comma_after_field);
- p.findNextContainerMember();
- },
- },
- .keyword_pub => {
- p.tok_i += 1;
- const top_level_decl = try p.expectTopLevelDeclRecoverable();
- if (top_level_decl != 0) {
- if (field_state == .seen) {
- field_state = .{ .end = top_level_decl };
- }
- try p.scratch.append(p.gpa, top_level_decl);
- }
- trailing = p.token_tags[p.tok_i - 1] == .semicolon;
- },
- .keyword_usingnamespace => {
- const node = try p.expectUsingNamespaceRecoverable();
- if (node != 0) {
- if (field_state == .seen) {
- field_state = .{ .end = node };
- }
- try p.scratch.append(p.gpa, node);
- }
- trailing = p.token_tags[p.tok_i - 1] == .semicolon;
- },
- .keyword_const,
- .keyword_var,
- .keyword_threadlocal,
- .keyword_export,
- .keyword_extern,
- .keyword_inline,
- .keyword_noinline,
- .keyword_fn,
- => {
- const top_level_decl = try p.expectTopLevelDeclRecoverable();
- if (top_level_decl != 0) {
- if (field_state == .seen) {
- field_state = .{ .end = top_level_decl };
- }
- try p.scratch.append(p.gpa, top_level_decl);
- }
- trailing = p.token_tags[p.tok_i - 1] == .semicolon;
- },
- .eof, .r_brace => {
- if (doc_comment) |tok| {
- try p.warnMsg(.{
- .tag = .unattached_doc_comment,
- .token = tok,
- });
- }
- break;
- },
- else => {
- const c_container = p.parseCStyleContainer() catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.ParseError => false,
- };
- if (c_container) continue;
-
- const identifier = p.tok_i;
- defer last_field = identifier;
- const container_field = p.expectContainerField() catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.ParseError => {
- p.findNextContainerMember();
- continue;
- },
- };
- switch (field_state) {
- .none => field_state = .seen,
- .err, .seen => {},
- .end => |node| {
- try p.warnMsg(.{
- .tag = .decl_between_fields,
- .token = p.nodes.items(.main_token)[node],
- });
- try p.warnMsg(.{
- .tag = .previous_field,
- .is_note = true,
- .token = last_field,
- });
- try p.warnMsg(.{
- .tag = .next_field,
- .is_note = true,
- .token = identifier,
- });
- // Continue parsing; error will be reported later.
- field_state = .err;
- },
- }
- try p.scratch.append(p.gpa, container_field);
- switch (p.token_tags[p.tok_i]) {
- .comma => {
- p.tok_i += 1;
- trailing = true;
- continue;
- },
- .r_brace, .eof => {
- trailing = false;
- break;
- },
- else => {},
- }
- // There is not allowed to be a decl after a field with no comma.
- // Report error but recover parser.
- try p.warn(.expected_comma_after_field);
- if (p.token_tags[p.tok_i] == .semicolon and p.token_tags[identifier] == .identifier) {
- try p.warnMsg(.{
- .tag = .var_const_decl,
- .is_note = true,
- .token = identifier,
- });
- }
- p.findNextContainerMember();
- continue;
- },
- }
- }
-
- const items = p.scratch.items[scratch_top..];
- switch (items.len) {
- 0 => return Members{
- .len = 0,
- .lhs = 0,
- .rhs = 0,
- .trailing = trailing,
- },
- 1 => return Members{
- .len = 1,
- .lhs = items[0],
- .rhs = 0,
- .trailing = trailing,
- },
- 2 => return Members{
- .len = 2,
- .lhs = items[0],
- .rhs = items[1],
- .trailing = trailing,
- },
- else => {
- const span = try p.listToSpan(items);
- return Members{
- .len = items.len,
- .lhs = span.start,
- .rhs = span.end,
- .trailing = trailing,
- };
- },
- }
- }
-
- /// Attempts to find next container member by searching for certain tokens
- fn findNextContainerMember(p: *Parser) void {
- var level: u32 = 0;
- while (true) {
- const tok = p.nextToken();
- switch (p.token_tags[tok]) {
- // Any of these can start a new top level declaration.
- .keyword_test,
- .keyword_comptime,
- .keyword_pub,
- .keyword_export,
- .keyword_extern,
- .keyword_inline,
- .keyword_noinline,
- .keyword_usingnamespace,
- .keyword_threadlocal,
- .keyword_const,
- .keyword_var,
- .keyword_fn,
- => {
- if (level == 0) {
- p.tok_i -= 1;
- return;
- }
- },
- .identifier => {
- if (p.token_tags[tok + 1] == .comma and level == 0) {
- p.tok_i -= 1;
- return;
- }
- },
- .comma, .semicolon => {
- // this decl was likely meant to end here
- if (level == 0) {
- return;
- }
- },
- .l_paren, .l_bracket, .l_brace => level += 1,
- .r_paren, .r_bracket => {
- if (level != 0) level -= 1;
- },
- .r_brace => {
- if (level == 0) {
- // end of container, exit
- p.tok_i -= 1;
- return;
- }
- level -= 1;
- },
- .eof => {
- p.tok_i -= 1;
- return;
- },
- else => {},
- }
- }
- }
-
- /// Attempts to find the next statement by searching for a semicolon
- fn findNextStmt(p: *Parser) void {
- var level: u32 = 0;
- while (true) {
- const tok = p.nextToken();
- switch (p.token_tags[tok]) {
- .l_brace => level += 1,
- .r_brace => {
- if (level == 0) {
- p.tok_i -= 1;
- return;
- }
- level -= 1;
- },
- .semicolon => {
- if (level == 0) {
- return;
- }
- },
- .eof => {
- p.tok_i -= 1;
- return;
- },
- else => {},
- }
- }
- }
-
- /// TestDecl <- KEYWORD_test (STRINGLITERALSINGLE / IDENTIFIER)? Block
- fn expectTestDecl(p: *Parser) !Node.Index {
- const test_token = p.assertToken(.keyword_test);
- const name_token = switch (p.token_tags[p.nextToken()]) {
- .string_literal, .identifier => p.tok_i - 1,
- else => blk: {
- p.tok_i -= 1;
- break :blk null;
- },
- };
- const block_node = try p.parseBlock();
- if (block_node == 0) return p.fail(.expected_block);
- return p.addNode(.{
- .tag = .test_decl,
- .main_token = test_token,
- .data = .{
- .lhs = name_token orelse 0,
- .rhs = block_node,
- },
- });
- }
-
- fn expectTestDeclRecoverable(p: *Parser) error{OutOfMemory}!Node.Index {
- return p.expectTestDecl() catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.ParseError => {
- p.findNextContainerMember();
- return null_node;
- },
- };
- }
-
- /// Decl
- /// <- (KEYWORD_export / KEYWORD_extern STRINGLITERALSINGLE? / (KEYWORD_inline / KEYWORD_noinline))? FnProto (SEMICOLON / Block)
- /// / (KEYWORD_export / KEYWORD_extern STRINGLITERALSINGLE?)? KEYWORD_threadlocal? VarDecl
- /// / KEYWORD_usingnamespace Expr SEMICOLON
- fn expectTopLevelDecl(p: *Parser) !Node.Index {
- const extern_export_inline_token = p.nextToken();
- var is_extern: bool = false;
- var expect_fn: bool = false;
- var expect_var_or_fn: bool = false;
- switch (p.token_tags[extern_export_inline_token]) {
- .keyword_extern => {
- _ = p.eatToken(.string_literal);
- is_extern = true;
- expect_var_or_fn = true;
- },
- .keyword_export => expect_var_or_fn = true,
- .keyword_inline, .keyword_noinline => expect_fn = true,
- else => p.tok_i -= 1,
- }
- const fn_proto = try p.parseFnProto();
- if (fn_proto != 0) {
- switch (p.token_tags[p.tok_i]) {
- .semicolon => {
- p.tok_i += 1;
- return fn_proto;
- },
- .l_brace => {
- if (is_extern) {
- try p.warnMsg(.{ .tag = .extern_fn_body, .token = extern_export_inline_token });
- return null_node;
- }
- const fn_decl_index = try p.reserveNode(.fn_decl);
- errdefer p.unreserveNode(fn_decl_index);
-
- const body_block = try p.parseBlock();
- assert(body_block != 0);
- return p.setNode(fn_decl_index, .{
- .tag = .fn_decl,
- .main_token = p.nodes.items(.main_token)[fn_proto],
- .data = .{
- .lhs = fn_proto,
- .rhs = body_block,
- },
- });
- },
- else => {
- // Since parseBlock only return error.ParseError on
- // a missing '}' we can assume this function was
- // supposed to end here.
- try p.warn(.expected_semi_or_lbrace);
- return null_node;
- },
- }
- }
- if (expect_fn) {
- try p.warn(.expected_fn);
- return error.ParseError;
- }
-
- const thread_local_token = p.eatToken(.keyword_threadlocal);
- const var_decl = try p.parseVarDecl();
- if (var_decl != 0) {
- try p.expectSemicolon(.expected_semi_after_decl, false);
- return var_decl;
- }
- if (thread_local_token != null) {
- return p.fail(.expected_var_decl);
- }
- if (expect_var_or_fn) {
- return p.fail(.expected_var_decl_or_fn);
- }
- if (p.token_tags[p.tok_i] != .keyword_usingnamespace) {
- return p.fail(.expected_pub_item);
- }
- return p.expectUsingNamespace();
- }
-
- fn expectTopLevelDeclRecoverable(p: *Parser) error{OutOfMemory}!Node.Index {
- return p.expectTopLevelDecl() catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.ParseError => {
- p.findNextContainerMember();
- return null_node;
- },
- };
- }
-
- fn expectUsingNamespace(p: *Parser) !Node.Index {
- const usingnamespace_token = p.assertToken(.keyword_usingnamespace);
- const expr = try p.expectExpr();
- try p.expectSemicolon(.expected_semi_after_decl, false);
- return p.addNode(.{
- .tag = .@"usingnamespace",
- .main_token = usingnamespace_token,
- .data = .{
- .lhs = expr,
- .rhs = undefined,
- },
- });
- }
-
- fn expectUsingNamespaceRecoverable(p: *Parser) error{OutOfMemory}!Node.Index {
- return p.expectUsingNamespace() catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.ParseError => {
- p.findNextContainerMember();
- return null_node;
- },
- };
- }
-
- /// FnProto <- KEYWORD_fn IDENTIFIER? LPAREN ParamDeclList RPAREN ByteAlign? AddrSpace? LinkSection? CallConv? EXCLAMATIONMARK? TypeExpr
- fn parseFnProto(p: *Parser) !Node.Index {
- const fn_token = p.eatToken(.keyword_fn) orelse return null_node;
-
- // We want the fn proto node to be before its children in the array.
- const fn_proto_index = try p.reserveNode(.fn_proto);
- errdefer p.unreserveNode(fn_proto_index);
-
- _ = p.eatToken(.identifier);
- const params = try p.parseParamDeclList();
- const align_expr = try p.parseByteAlign();
- const addrspace_expr = try p.parseAddrSpace();
- const section_expr = try p.parseLinkSection();
- const callconv_expr = try p.parseCallconv();
- _ = p.eatToken(.bang);
-
- const return_type_expr = try p.parseTypeExpr();
- if (return_type_expr == 0) {
- // most likely the user forgot to specify the return type.
- // Mark return type as invalid and try to continue.
- try p.warn(.expected_return_type);
- }
-
- if (align_expr == 0 and section_expr == 0 and callconv_expr == 0 and addrspace_expr == 0) {
- switch (params) {
- .zero_or_one => |param| return p.setNode(fn_proto_index, .{
- .tag = .fn_proto_simple,
- .main_token = fn_token,
- .data = .{
- .lhs = param,
- .rhs = return_type_expr,
- },
- }),
- .multi => |span| {
- return p.setNode(fn_proto_index, .{
- .tag = .fn_proto_multi,
- .main_token = fn_token,
- .data = .{
- .lhs = try p.addExtra(Node.SubRange{
- .start = span.start,
- .end = span.end,
- }),
- .rhs = return_type_expr,
- },
- });
- },
- }
- }
- switch (params) {
- .zero_or_one => |param| return p.setNode(fn_proto_index, .{
- .tag = .fn_proto_one,
- .main_token = fn_token,
- .data = .{
- .lhs = try p.addExtra(Node.FnProtoOne{
- .param = param,
- .align_expr = align_expr,
- .addrspace_expr = addrspace_expr,
- .section_expr = section_expr,
- .callconv_expr = callconv_expr,
- }),
- .rhs = return_type_expr,
- },
- }),
- .multi => |span| {
- return p.setNode(fn_proto_index, .{
- .tag = .fn_proto,
- .main_token = fn_token,
- .data = .{
- .lhs = try p.addExtra(Node.FnProto{
- .params_start = span.start,
- .params_end = span.end,
- .align_expr = align_expr,
- .addrspace_expr = addrspace_expr,
- .section_expr = section_expr,
- .callconv_expr = callconv_expr,
- }),
- .rhs = return_type_expr,
- },
- });
- },
- }
- }
-
- /// VarDecl <- (KEYWORD_const / KEYWORD_var) IDENTIFIER (COLON TypeExpr)? ByteAlign? AddrSpace? LinkSection? (EQUAL Expr)? SEMICOLON
- fn parseVarDecl(p: *Parser) !Node.Index {
- const mut_token = p.eatToken(.keyword_const) orelse
- p.eatToken(.keyword_var) orelse
- return null_node;
-
- _ = try p.expectToken(.identifier);
- const type_node: Node.Index = if (p.eatToken(.colon) == null) 0 else try p.expectTypeExpr();
- const align_node = try p.parseByteAlign();
- const addrspace_node = try p.parseAddrSpace();
- const section_node = try p.parseLinkSection();
- const init_node: Node.Index = switch (p.token_tags[p.tok_i]) {
- .equal_equal => blk: {
- try p.warn(.wrong_equal_var_decl);
- p.tok_i += 1;
- break :blk try p.expectExpr();
- },
- .equal => blk: {
- p.tok_i += 1;
- break :blk try p.expectExpr();
- },
- else => 0,
- };
- if (section_node == 0 and addrspace_node == 0) {
- if (align_node == 0) {
- return p.addNode(.{
- .tag = .simple_var_decl,
- .main_token = mut_token,
- .data = .{
- .lhs = type_node,
- .rhs = init_node,
- },
- });
- } else if (type_node == 0) {
- return p.addNode(.{
- .tag = .aligned_var_decl,
- .main_token = mut_token,
- .data = .{
- .lhs = align_node,
- .rhs = init_node,
- },
- });
- } else {
- return p.addNode(.{
- .tag = .local_var_decl,
- .main_token = mut_token,
- .data = .{
- .lhs = try p.addExtra(Node.LocalVarDecl{
- .type_node = type_node,
- .align_node = align_node,
- }),
- .rhs = init_node,
- },
- });
- }
- } else {
- return p.addNode(.{
- .tag = .global_var_decl,
- .main_token = mut_token,
- .data = .{
- .lhs = try p.addExtra(Node.GlobalVarDecl{
- .type_node = type_node,
- .align_node = align_node,
- .addrspace_node = addrspace_node,
- .section_node = section_node,
- }),
- .rhs = init_node,
- },
- });
- }
- }
-
- /// ContainerField
- /// <- doc_comment? KEYWORD_comptime? IDENTIFIER (COLON TypeExpr)? ByteAlign? (EQUAL Expr)?
- /// / doc_comment? KEYWORD_comptime? (IDENTIFIER COLON)? !KEYWORD_fn TypeExpr ByteAlign? (EQUAL Expr)?
- fn expectContainerField(p: *Parser) !Node.Index {
- var main_token = p.tok_i;
- _ = p.eatToken(.keyword_comptime);
- const tuple_like = p.token_tags[p.tok_i] != .identifier or p.token_tags[p.tok_i + 1] != .colon;
- if (!tuple_like) {
- main_token = p.assertToken(.identifier);
- }
-
- var align_expr: Node.Index = 0;
- var type_expr: Node.Index = 0;
- if (p.eatToken(.colon) != null or tuple_like) {
- type_expr = try p.expectTypeExpr();
- align_expr = try p.parseByteAlign();
- }
-
- const value_expr: Node.Index = if (p.eatToken(.equal) == null) 0 else try p.expectExpr();
-
- if (align_expr == 0) {
- return p.addNode(.{
- .tag = .container_field_init,
- .main_token = main_token,
- .data = .{
- .lhs = type_expr,
- .rhs = value_expr,
- },
- });
- } else if (value_expr == 0) {
- return p.addNode(.{
- .tag = .container_field_align,
- .main_token = main_token,
- .data = .{
- .lhs = type_expr,
- .rhs = align_expr,
- },
- });
- } else {
- return p.addNode(.{
- .tag = .container_field,
- .main_token = main_token,
- .data = .{
- .lhs = type_expr,
- .rhs = try p.addExtra(Node.ContainerField{
- .value_expr = value_expr,
- .align_expr = align_expr,
- }),
- },
- });
- }
- }
-
- /// Statement
- /// <- KEYWORD_comptime? VarDecl
- /// / KEYWORD_comptime BlockExprStatement
- /// / KEYWORD_nosuspend BlockExprStatement
- /// / KEYWORD_suspend BlockExprStatement
- /// / KEYWORD_defer BlockExprStatement
- /// / KEYWORD_errdefer Payload? BlockExprStatement
- /// / IfStatement
- /// / LabeledStatement
- /// / SwitchExpr
- /// / AssignExpr SEMICOLON
- fn parseStatement(p: *Parser, allow_defer_var: bool) Error!Node.Index {
- const comptime_token = p.eatToken(.keyword_comptime);
-
- if (allow_defer_var) {
- const var_decl = try p.parseVarDecl();
- if (var_decl != 0) {
- try p.expectSemicolon(.expected_semi_after_decl, true);
- return var_decl;
- }
- }
-
- if (comptime_token) |token| {
- return p.addNode(.{
- .tag = .@"comptime",
- .main_token = token,
- .data = .{
- .lhs = try p.expectBlockExprStatement(),
- .rhs = undefined,
- },
- });
- }
-
- switch (p.token_tags[p.tok_i]) {
- .keyword_nosuspend => {
- return p.addNode(.{
- .tag = .@"nosuspend",
- .main_token = p.nextToken(),
- .data = .{
- .lhs = try p.expectBlockExprStatement(),
- .rhs = undefined,
- },
- });
- },
- .keyword_suspend => {
- const token = p.nextToken();
- const block_expr = try p.expectBlockExprStatement();
- return p.addNode(.{
- .tag = .@"suspend",
- .main_token = token,
- .data = .{
- .lhs = block_expr,
- .rhs = undefined,
- },
- });
- },
- .keyword_defer => if (allow_defer_var) return p.addNode(.{
- .tag = .@"defer",
- .main_token = p.nextToken(),
- .data = .{
- .lhs = undefined,
- .rhs = try p.expectBlockExprStatement(),
- },
- }),
- .keyword_errdefer => if (allow_defer_var) return p.addNode(.{
- .tag = .@"errdefer",
- .main_token = p.nextToken(),
- .data = .{
- .lhs = try p.parsePayload(),
- .rhs = try p.expectBlockExprStatement(),
- },
- }),
- .keyword_switch => return p.expectSwitchExpr(),
- .keyword_if => return p.expectIfStatement(),
- .keyword_enum, .keyword_struct, .keyword_union => {
- const identifier = p.tok_i + 1;
- if (try p.parseCStyleContainer()) {
- // Return something so that `expectStatement` is happy.
- return p.addNode(.{
- .tag = .identifier,
- .main_token = identifier,
- .data = .{
- .lhs = undefined,
- .rhs = undefined,
- },
- });
- }
- },
- else => {},
- }
-
- const labeled_statement = try p.parseLabeledStatement();
- if (labeled_statement != 0) return labeled_statement;
-
- const assign_expr = try p.parseAssignExpr();
- if (assign_expr != 0) {
- try p.expectSemicolon(.expected_semi_after_stmt, true);
- return assign_expr;
- }
-
- return null_node;
- }
-
- fn expectStatement(p: *Parser, allow_defer_var: bool) !Node.Index {
- const statement = try p.parseStatement(allow_defer_var);
- if (statement == 0) {
- return p.fail(.expected_statement);
- }
- return statement;
- }
-
- /// If a parse error occurs, reports an error, but then finds the next statement
- /// and returns that one instead. If a parse error occurs but there is no following
- /// statement, returns 0.
- fn expectStatementRecoverable(p: *Parser) Error!Node.Index {
- while (true) {
- return p.expectStatement(true) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.ParseError => {
- p.findNextStmt(); // Try to skip to the next statement.
- switch (p.token_tags[p.tok_i]) {
- .r_brace => return null_node,
- .eof => return error.ParseError,
- else => continue,
- }
- },
- };
- }
- }
-
- /// IfStatement
- /// <- IfPrefix BlockExpr ( KEYWORD_else Payload? Statement )?
- /// / IfPrefix AssignExpr ( SEMICOLON / KEYWORD_else Payload? Statement )
- fn expectIfStatement(p: *Parser) !Node.Index {
- const if_token = p.assertToken(.keyword_if);
- _ = try p.expectToken(.l_paren);
- const condition = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- _ = try p.parsePtrPayload();
-
- // TODO propose to change the syntax so that semicolons are always required
- // inside if statements, even if there is an `else`.
- var else_required = false;
- const then_expr = blk: {
- const block_expr = try p.parseBlockExpr();
- if (block_expr != 0) break :blk block_expr;
- const assign_expr = try p.parseAssignExpr();
- if (assign_expr == 0) {
- return p.fail(.expected_block_or_assignment);
- }
- if (p.eatToken(.semicolon)) |_| {
- return p.addNode(.{
- .tag = .if_simple,
- .main_token = if_token,
- .data = .{
- .lhs = condition,
- .rhs = assign_expr,
- },
- });
- }
- else_required = true;
- break :blk assign_expr;
- };
- _ = p.eatToken(.keyword_else) orelse {
- if (else_required) {
- try p.warn(.expected_semi_or_else);
- }
- return p.addNode(.{
- .tag = .if_simple,
- .main_token = if_token,
- .data = .{
- .lhs = condition,
- .rhs = then_expr,
- },
- });
- };
- _ = try p.parsePayload();
- const else_expr = try p.expectStatement(false);
- return p.addNode(.{
- .tag = .@"if",
- .main_token = if_token,
- .data = .{
- .lhs = condition,
- .rhs = try p.addExtra(Node.If{
- .then_expr = then_expr,
- .else_expr = else_expr,
- }),
- },
- });
- }
-
- /// LabeledStatement <- BlockLabel? (Block / LoopStatement)
- fn parseLabeledStatement(p: *Parser) !Node.Index {
- const label_token = p.parseBlockLabel();
- const block = try p.parseBlock();
- if (block != 0) return block;
-
- const loop_stmt = try p.parseLoopStatement();
- if (loop_stmt != 0) return loop_stmt;
-
- if (label_token != 0) {
- const after_colon = p.tok_i;
- const node = try p.parseTypeExpr();
- if (node != 0) {
- const a = try p.parseByteAlign();
- const b = try p.parseAddrSpace();
- const c = try p.parseLinkSection();
- const d = if (p.eatToken(.equal) == null) 0 else try p.expectExpr();
- if (a != 0 or b != 0 or c != 0 or d != 0) {
- return p.failMsg(.{ .tag = .expected_var_const, .token = label_token });
- }
- }
- return p.failMsg(.{ .tag = .expected_labelable, .token = after_colon });
- }
-
- return null_node;
- }
-
- /// LoopStatement <- KEYWORD_inline? (ForStatement / WhileStatement)
- fn parseLoopStatement(p: *Parser) !Node.Index {
- const inline_token = p.eatToken(.keyword_inline);
-
- const for_statement = try p.parseForStatement();
- if (for_statement != 0) return for_statement;
-
- const while_statement = try p.parseWhileStatement();
- if (while_statement != 0) return while_statement;
-
- if (inline_token == null) return null_node;
-
- // If we've seen "inline", there should have been a "for" or "while"
- return p.fail(.expected_inlinable);
- }
-
- /// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload
- ///
- /// ForStatement
- /// <- ForPrefix BlockExpr ( KEYWORD_else Statement )?
- /// / ForPrefix AssignExpr ( SEMICOLON / KEYWORD_else Statement )
- fn parseForStatement(p: *Parser) !Node.Index {
- const for_token = p.eatToken(.keyword_for) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const array_expr = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- const found_payload = try p.parsePtrIndexPayload();
- if (found_payload == 0) try p.warn(.expected_loop_payload);
-
- // TODO propose to change the syntax so that semicolons are always required
- // inside while statements, even if there is an `else`.
- var else_required = false;
- const then_expr = blk: {
- const block_expr = try p.parseBlockExpr();
- if (block_expr != 0) break :blk block_expr;
- const assign_expr = try p.parseAssignExpr();
- if (assign_expr == 0) {
- return p.fail(.expected_block_or_assignment);
- }
- if (p.eatToken(.semicolon)) |_| {
- return p.addNode(.{
- .tag = .for_simple,
- .main_token = for_token,
- .data = .{
- .lhs = array_expr,
- .rhs = assign_expr,
- },
- });
- }
- else_required = true;
- break :blk assign_expr;
- };
- _ = p.eatToken(.keyword_else) orelse {
- if (else_required) {
- try p.warn(.expected_semi_or_else);
- }
- return p.addNode(.{
- .tag = .for_simple,
- .main_token = for_token,
- .data = .{
- .lhs = array_expr,
- .rhs = then_expr,
- },
- });
- };
- return p.addNode(.{
- .tag = .@"for",
- .main_token = for_token,
- .data = .{
- .lhs = array_expr,
- .rhs = try p.addExtra(Node.If{
- .then_expr = then_expr,
- .else_expr = try p.expectStatement(false),
- }),
- },
- });
- }
-
- /// WhilePrefix <- KEYWORD_while LPAREN Expr RPAREN PtrPayload? WhileContinueExpr?
- ///
- /// WhileStatement
- /// <- WhilePrefix BlockExpr ( KEYWORD_else Payload? Statement )?
- /// / WhilePrefix AssignExpr ( SEMICOLON / KEYWORD_else Payload? Statement )
- fn parseWhileStatement(p: *Parser) !Node.Index {
- const while_token = p.eatToken(.keyword_while) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const condition = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- _ = try p.parsePtrPayload();
- const cont_expr = try p.parseWhileContinueExpr();
-
- // TODO propose to change the syntax so that semicolons are always required
- // inside while statements, even if there is an `else`.
- var else_required = false;
- const then_expr = blk: {
- const block_expr = try p.parseBlockExpr();
- if (block_expr != 0) break :blk block_expr;
- const assign_expr = try p.parseAssignExpr();
- if (assign_expr == 0) {
- return p.fail(.expected_block_or_assignment);
- }
- if (p.eatToken(.semicolon)) |_| {
- if (cont_expr == 0) {
- return p.addNode(.{
- .tag = .while_simple,
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = assign_expr,
- },
- });
- } else {
- return p.addNode(.{
- .tag = .while_cont,
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = try p.addExtra(Node.WhileCont{
- .cont_expr = cont_expr,
- .then_expr = assign_expr,
- }),
- },
- });
- }
- }
- else_required = true;
- break :blk assign_expr;
- };
- _ = p.eatToken(.keyword_else) orelse {
- if (else_required) {
- try p.warn(.expected_semi_or_else);
- }
- if (cont_expr == 0) {
- return p.addNode(.{
- .tag = .while_simple,
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = then_expr,
- },
- });
- } else {
- return p.addNode(.{
- .tag = .while_cont,
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = try p.addExtra(Node.WhileCont{
- .cont_expr = cont_expr,
- .then_expr = then_expr,
- }),
- },
- });
- }
- };
- _ = try p.parsePayload();
- const else_expr = try p.expectStatement(false);
- return p.addNode(.{
- .tag = .@"while",
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = try p.addExtra(Node.While{
- .cont_expr = cont_expr,
- .then_expr = then_expr,
- .else_expr = else_expr,
- }),
- },
- });
- }
-
- /// BlockExprStatement
- /// <- BlockExpr
- /// / AssignExpr SEMICOLON
- fn parseBlockExprStatement(p: *Parser) !Node.Index {
- const block_expr = try p.parseBlockExpr();
- if (block_expr != 0) {
- return block_expr;
- }
- const assign_expr = try p.parseAssignExpr();
- if (assign_expr != 0) {
- try p.expectSemicolon(.expected_semi_after_stmt, true);
- return assign_expr;
- }
- return null_node;
- }
-
- fn expectBlockExprStatement(p: *Parser) !Node.Index {
- const node = try p.parseBlockExprStatement();
- if (node == 0) {
- return p.fail(.expected_block_or_expr);
- }
- return node;
- }
-
- /// BlockExpr <- BlockLabel? Block
- fn parseBlockExpr(p: *Parser) Error!Node.Index {
- switch (p.token_tags[p.tok_i]) {
- .identifier => {
- if (p.token_tags[p.tok_i + 1] == .colon and
- p.token_tags[p.tok_i + 2] == .l_brace)
- {
- p.tok_i += 2;
- return p.parseBlock();
- } else {
- return null_node;
- }
- },
- .l_brace => return p.parseBlock(),
- else => return null_node,
- }
- }
-
- /// AssignExpr <- Expr (AssignOp Expr)?
- ///
- /// AssignOp
- /// <- ASTERISKEQUAL
- /// / ASTERISKPIPEEQUAL
- /// / SLASHEQUAL
- /// / PERCENTEQUAL
- /// / PLUSEQUAL
- /// / PLUSPIPEEQUAL
- /// / MINUSEQUAL
- /// / MINUSPIPEEQUAL
- /// / LARROW2EQUAL
- /// / LARROW2PIPEEQUAL
- /// / RARROW2EQUAL
- /// / AMPERSANDEQUAL
- /// / CARETEQUAL
- /// / PIPEEQUAL
- /// / ASTERISKPERCENTEQUAL
- /// / PLUSPERCENTEQUAL
- /// / MINUSPERCENTEQUAL
- /// / EQUAL
- fn parseAssignExpr(p: *Parser) !Node.Index {
- const expr = try p.parseExpr();
- if (expr == 0) return null_node;
-
- const tag: Node.Tag = switch (p.token_tags[p.tok_i]) {
- .asterisk_equal => .assign_mul,
- .slash_equal => .assign_div,
- .percent_equal => .assign_mod,
- .plus_equal => .assign_add,
- .minus_equal => .assign_sub,
- .angle_bracket_angle_bracket_left_equal => .assign_shl,
- .angle_bracket_angle_bracket_left_pipe_equal => .assign_shl_sat,
- .angle_bracket_angle_bracket_right_equal => .assign_shr,
- .ampersand_equal => .assign_bit_and,
- .caret_equal => .assign_bit_xor,
- .pipe_equal => .assign_bit_or,
- .asterisk_percent_equal => .assign_mul_wrap,
- .plus_percent_equal => .assign_add_wrap,
- .minus_percent_equal => .assign_sub_wrap,
- .asterisk_pipe_equal => .assign_mul_sat,
- .plus_pipe_equal => .assign_add_sat,
- .minus_pipe_equal => .assign_sub_sat,
- .equal => .assign,
- else => return expr,
- };
- return p.addNode(.{
- .tag = tag,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = expr,
- .rhs = try p.expectExpr(),
- },
- });
- }
-
- fn expectAssignExpr(p: *Parser) !Node.Index {
- const expr = try p.parseAssignExpr();
- if (expr == 0) {
- return p.fail(.expected_expr_or_assignment);
- }
- return expr;
- }
-
- fn parseExpr(p: *Parser) Error!Node.Index {
- return p.parseExprPrecedence(0);
- }
-
- fn expectExpr(p: *Parser) Error!Node.Index {
- const node = try p.parseExpr();
- if (node == 0) {
- return p.fail(.expected_expr);
- } else {
- return node;
- }
- }
-
- const Assoc = enum {
- left,
- none,
- };
-
- const OperInfo = struct {
- prec: i8,
- tag: Node.Tag,
- assoc: Assoc = Assoc.left,
- };
-
- // A table of binary operator information. Higher precedence numbers are
- // stickier. All operators at the same precedence level should have the same
- // associativity.
- const operTable = std.enums.directEnumArrayDefault(Token.Tag, OperInfo, .{ .prec = -1, .tag = Node.Tag.root }, 0, .{
- .keyword_or = .{ .prec = 10, .tag = .bool_or },
-
- .keyword_and = .{ .prec = 20, .tag = .bool_and },
-
- .equal_equal = .{ .prec = 30, .tag = .equal_equal, .assoc = Assoc.none },
- .bang_equal = .{ .prec = 30, .tag = .bang_equal, .assoc = Assoc.none },
- .angle_bracket_left = .{ .prec = 30, .tag = .less_than, .assoc = Assoc.none },
- .angle_bracket_right = .{ .prec = 30, .tag = .greater_than, .assoc = Assoc.none },
- .angle_bracket_left_equal = .{ .prec = 30, .tag = .less_or_equal, .assoc = Assoc.none },
- .angle_bracket_right_equal = .{ .prec = 30, .tag = .greater_or_equal, .assoc = Assoc.none },
-
- .ampersand = .{ .prec = 40, .tag = .bit_and },
- .caret = .{ .prec = 40, .tag = .bit_xor },
- .pipe = .{ .prec = 40, .tag = .bit_or },
- .keyword_orelse = .{ .prec = 40, .tag = .@"orelse" },
- .keyword_catch = .{ .prec = 40, .tag = .@"catch" },
-
- .angle_bracket_angle_bracket_left = .{ .prec = 50, .tag = .shl },
- .angle_bracket_angle_bracket_left_pipe = .{ .prec = 50, .tag = .shl_sat },
- .angle_bracket_angle_bracket_right = .{ .prec = 50, .tag = .shr },
-
- .plus = .{ .prec = 60, .tag = .add },
- .minus = .{ .prec = 60, .tag = .sub },
- .plus_plus = .{ .prec = 60, .tag = .array_cat },
- .plus_percent = .{ .prec = 60, .tag = .add_wrap },
- .minus_percent = .{ .prec = 60, .tag = .sub_wrap },
- .plus_pipe = .{ .prec = 60, .tag = .add_sat },
- .minus_pipe = .{ .prec = 60, .tag = .sub_sat },
-
- .pipe_pipe = .{ .prec = 70, .tag = .merge_error_sets },
- .asterisk = .{ .prec = 70, .tag = .mul },
- .slash = .{ .prec = 70, .tag = .div },
- .percent = .{ .prec = 70, .tag = .mod },
- .asterisk_asterisk = .{ .prec = 70, .tag = .array_mult },
- .asterisk_percent = .{ .prec = 70, .tag = .mul_wrap },
- .asterisk_pipe = .{ .prec = 70, .tag = .mul_sat },
- });
-
- fn parseExprPrecedence(p: *Parser, min_prec: i32) Error!Node.Index {
- assert(min_prec >= 0);
- var node = try p.parsePrefixExpr();
- if (node == 0) {
- return null_node;
- }
-
- var banned_prec: i8 = -1;
-
- while (true) {
- const tok_tag = p.token_tags[p.tok_i];
- const info = operTable[@intCast(usize, @enumToInt(tok_tag))];
- if (info.prec < min_prec) {
- break;
- }
- if (info.prec == banned_prec) {
- return p.fail(.chained_comparison_operators);
- }
-
- const oper_token = p.nextToken();
- // Special-case handling for "catch"
- if (tok_tag == .keyword_catch) {
- _ = try p.parsePayload();
- }
- const rhs = try p.parseExprPrecedence(info.prec + 1);
- if (rhs == 0) {
- try p.warn(.expected_expr);
- return node;
- }
-
- {
- const tok_len = tok_tag.lexeme().?.len;
- const char_before = p.source[p.token_starts[oper_token] - 1];
- const char_after = p.source[p.token_starts[oper_token] + tok_len];
- if (tok_tag == .ampersand and char_after == '&') {
- // without types we don't know if '&&' was intended as 'bitwise_and address_of', or a c-style logical_and
- // The best the parser can do is recommend changing it to 'and' or ' & &'
- try p.warnMsg(.{ .tag = .invalid_ampersand_ampersand, .token = oper_token });
- } else if (std.ascii.isWhitespace(char_before) != std.ascii.isWhitespace(char_after)) {
- try p.warnMsg(.{ .tag = .mismatched_binary_op_whitespace, .token = oper_token });
- }
- }
-
- node = try p.addNode(.{
- .tag = info.tag,
- .main_token = oper_token,
- .data = .{
- .lhs = node,
- .rhs = rhs,
- },
- });
-
- if (info.assoc == Assoc.none) {
- banned_prec = info.prec;
- }
- }
-
- return node;
- }
-
- /// PrefixExpr <- PrefixOp* PrimaryExpr
- ///
- /// PrefixOp
- /// <- EXCLAMATIONMARK
- /// / MINUS
- /// / TILDE
- /// / MINUSPERCENT
- /// / AMPERSAND
- /// / KEYWORD_try
- /// / KEYWORD_await
- fn parsePrefixExpr(p: *Parser) Error!Node.Index {
- const tag: Node.Tag = switch (p.token_tags[p.tok_i]) {
- .bang => .bool_not,
- .minus => .negation,
- .tilde => .bit_not,
- .minus_percent => .negation_wrap,
- .ampersand => .address_of,
- .keyword_try => .@"try",
- .keyword_await => .@"await",
- else => return p.parsePrimaryExpr(),
- };
- return p.addNode(.{
- .tag = tag,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = try p.expectPrefixExpr(),
- .rhs = undefined,
- },
- });
- }
-
- fn expectPrefixExpr(p: *Parser) Error!Node.Index {
- const node = try p.parsePrefixExpr();
- if (node == 0) {
- return p.fail(.expected_prefix_expr);
- }
- return node;
- }
-
- /// TypeExpr <- PrefixTypeOp* ErrorUnionExpr
- ///
- /// PrefixTypeOp
- /// <- QUESTIONMARK
- /// / KEYWORD_anyframe MINUSRARROW
- /// / SliceTypeStart (ByteAlign / AddrSpace / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
- /// / PtrTypeStart (AddrSpace / KEYWORD_align LPAREN Expr (COLON Expr COLON Expr)? RPAREN / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
- /// / ArrayTypeStart
- ///
- /// SliceTypeStart <- LBRACKET (COLON Expr)? RBRACKET
- ///
- /// PtrTypeStart
- /// <- ASTERISK
- /// / ASTERISK2
- /// / LBRACKET ASTERISK (LETTERC / COLON Expr)? RBRACKET
- ///
- /// ArrayTypeStart <- LBRACKET Expr (COLON Expr)? RBRACKET
- fn parseTypeExpr(p: *Parser) Error!Node.Index {
- switch (p.token_tags[p.tok_i]) {
- .question_mark => return p.addNode(.{
- .tag = .optional_type,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = try p.expectTypeExpr(),
- .rhs = undefined,
- },
- }),
- .keyword_anyframe => switch (p.token_tags[p.tok_i + 1]) {
- .arrow => return p.addNode(.{
- .tag = .anyframe_type,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = p.nextToken(),
- .rhs = try p.expectTypeExpr(),
- },
- }),
- else => return p.parseErrorUnionExpr(),
- },
- .asterisk => {
- const asterisk = p.nextToken();
- const mods = try p.parsePtrModifiers();
- const elem_type = try p.expectTypeExpr();
- if (mods.bit_range_start != 0) {
- return p.addNode(.{
- .tag = .ptr_type_bit_range,
- .main_token = asterisk,
- .data = .{
- .lhs = try p.addExtra(Node.PtrTypeBitRange{
- .sentinel = 0,
- .align_node = mods.align_node,
- .addrspace_node = mods.addrspace_node,
- .bit_range_start = mods.bit_range_start,
- .bit_range_end = mods.bit_range_end,
- }),
- .rhs = elem_type,
- },
- });
- } else if (mods.addrspace_node != 0) {
- return p.addNode(.{
- .tag = .ptr_type,
- .main_token = asterisk,
- .data = .{
- .lhs = try p.addExtra(Node.PtrType{
- .sentinel = 0,
- .align_node = mods.align_node,
- .addrspace_node = mods.addrspace_node,
- }),
- .rhs = elem_type,
- },
- });
- } else {
- return p.addNode(.{
- .tag = .ptr_type_aligned,
- .main_token = asterisk,
- .data = .{
- .lhs = mods.align_node,
- .rhs = elem_type,
- },
- });
- }
- },
- .asterisk_asterisk => {
- const asterisk = p.nextToken();
- const mods = try p.parsePtrModifiers();
- const elem_type = try p.expectTypeExpr();
- const inner: Node.Index = inner: {
- if (mods.bit_range_start != 0) {
- break :inner try p.addNode(.{
- .tag = .ptr_type_bit_range,
- .main_token = asterisk,
- .data = .{
- .lhs = try p.addExtra(Node.PtrTypeBitRange{
- .sentinel = 0,
- .align_node = mods.align_node,
- .addrspace_node = mods.addrspace_node,
- .bit_range_start = mods.bit_range_start,
- .bit_range_end = mods.bit_range_end,
- }),
- .rhs = elem_type,
- },
- });
- } else if (mods.addrspace_node != 0) {
- break :inner try p.addNode(.{
- .tag = .ptr_type,
- .main_token = asterisk,
- .data = .{
- .lhs = try p.addExtra(Node.PtrType{
- .sentinel = 0,
- .align_node = mods.align_node,
- .addrspace_node = mods.addrspace_node,
- }),
- .rhs = elem_type,
- },
- });
- } else {
- break :inner try p.addNode(.{
- .tag = .ptr_type_aligned,
- .main_token = asterisk,
- .data = .{
- .lhs = mods.align_node,
- .rhs = elem_type,
- },
- });
- }
- };
- return p.addNode(.{
- .tag = .ptr_type_aligned,
- .main_token = asterisk,
- .data = .{
- .lhs = 0,
- .rhs = inner,
- },
- });
- },
- .l_bracket => switch (p.token_tags[p.tok_i + 1]) {
- .asterisk => {
- _ = p.nextToken();
- const asterisk = p.nextToken();
- var sentinel: Node.Index = 0;
- if (p.eatToken(.identifier)) |ident| {
- const ident_slice = p.source[p.token_starts[ident]..p.token_starts[ident + 1]];
- if (!std.mem.eql(u8, std.mem.trimRight(u8, ident_slice, &std.ascii.whitespace), "c")) {
- p.tok_i -= 1;
- }
- } else if (p.eatToken(.colon)) |_| {
- sentinel = try p.expectExpr();
- }
- _ = try p.expectToken(.r_bracket);
- const mods = try p.parsePtrModifiers();
- const elem_type = try p.expectTypeExpr();
- if (mods.bit_range_start == 0) {
- if (sentinel == 0 and mods.addrspace_node == 0) {
- return p.addNode(.{
- .tag = .ptr_type_aligned,
- .main_token = asterisk,
- .data = .{
- .lhs = mods.align_node,
- .rhs = elem_type,
- },
- });
- } else if (mods.align_node == 0 and mods.addrspace_node == 0) {
- return p.addNode(.{
- .tag = .ptr_type_sentinel,
- .main_token = asterisk,
- .data = .{
- .lhs = sentinel,
- .rhs = elem_type,
- },
- });
- } else {
- return p.addNode(.{
- .tag = .ptr_type,
- .main_token = asterisk,
- .data = .{
- .lhs = try p.addExtra(Node.PtrType{
- .sentinel = sentinel,
- .align_node = mods.align_node,
- .addrspace_node = mods.addrspace_node,
- }),
- .rhs = elem_type,
- },
- });
- }
- } else {
- return p.addNode(.{
- .tag = .ptr_type_bit_range,
- .main_token = asterisk,
- .data = .{
- .lhs = try p.addExtra(Node.PtrTypeBitRange{
- .sentinel = sentinel,
- .align_node = mods.align_node,
- .addrspace_node = mods.addrspace_node,
- .bit_range_start = mods.bit_range_start,
- .bit_range_end = mods.bit_range_end,
- }),
- .rhs = elem_type,
- },
- });
- }
- },
- else => {
- const lbracket = p.nextToken();
- const len_expr = try p.parseExpr();
- const sentinel: Node.Index = if (p.eatToken(.colon)) |_|
- try p.expectExpr()
- else
- 0;
- _ = try p.expectToken(.r_bracket);
- if (len_expr == 0) {
- const mods = try p.parsePtrModifiers();
- const elem_type = try p.expectTypeExpr();
- if (mods.bit_range_start != 0) {
- try p.warnMsg(.{
- .tag = .invalid_bit_range,
- .token = p.nodes.items(.main_token)[mods.bit_range_start],
- });
- }
- if (sentinel == 0 and mods.addrspace_node == 0) {
- return p.addNode(.{
- .tag = .ptr_type_aligned,
- .main_token = lbracket,
- .data = .{
- .lhs = mods.align_node,
- .rhs = elem_type,
- },
- });
- } else if (mods.align_node == 0 and mods.addrspace_node == 0) {
- return p.addNode(.{
- .tag = .ptr_type_sentinel,
- .main_token = lbracket,
- .data = .{
- .lhs = sentinel,
- .rhs = elem_type,
- },
- });
- } else {
- return p.addNode(.{
- .tag = .ptr_type,
- .main_token = lbracket,
- .data = .{
- .lhs = try p.addExtra(Node.PtrType{
- .sentinel = sentinel,
- .align_node = mods.align_node,
- .addrspace_node = mods.addrspace_node,
- }),
- .rhs = elem_type,
- },
- });
- }
- } else {
- switch (p.token_tags[p.tok_i]) {
- .keyword_align,
- .keyword_const,
- .keyword_volatile,
- .keyword_allowzero,
- .keyword_addrspace,
- => return p.fail(.ptr_mod_on_array_child_type),
- else => {},
- }
- const elem_type = try p.expectTypeExpr();
- if (sentinel == 0) {
- return p.addNode(.{
- .tag = .array_type,
- .main_token = lbracket,
- .data = .{
- .lhs = len_expr,
- .rhs = elem_type,
- },
- });
- } else {
- return p.addNode(.{
- .tag = .array_type_sentinel,
- .main_token = lbracket,
- .data = .{
- .lhs = len_expr,
- .rhs = try p.addExtra(.{
- .elem_type = elem_type,
- .sentinel = sentinel,
- }),
- },
- });
- }
- }
- },
- },
- else => return p.parseErrorUnionExpr(),
- }
- }
-
- fn expectTypeExpr(p: *Parser) Error!Node.Index {
- const node = try p.parseTypeExpr();
- if (node == 0) {
- return p.fail(.expected_type_expr);
- }
- return node;
- }
-
- /// PrimaryExpr
- /// <- AsmExpr
- /// / IfExpr
- /// / KEYWORD_break BreakLabel? Expr?
- /// / KEYWORD_comptime Expr
- /// / KEYWORD_nosuspend Expr
- /// / KEYWORD_continue BreakLabel?
- /// / KEYWORD_resume Expr
- /// / KEYWORD_return Expr?
- /// / BlockLabel? LoopExpr
- /// / Block
- /// / CurlySuffixExpr
- fn parsePrimaryExpr(p: *Parser) !Node.Index {
- switch (p.token_tags[p.tok_i]) {
- .keyword_asm => return p.expectAsmExpr(),
- .keyword_if => return p.parseIfExpr(),
- .keyword_break => {
- p.tok_i += 1;
- return p.addNode(.{
- .tag = .@"break",
- .main_token = p.tok_i - 1,
- .data = .{
- .lhs = try p.parseBreakLabel(),
- .rhs = try p.parseExpr(),
- },
- });
- },
- .keyword_continue => {
- p.tok_i += 1;
- return p.addNode(.{
- .tag = .@"continue",
- .main_token = p.tok_i - 1,
- .data = .{
- .lhs = try p.parseBreakLabel(),
- .rhs = undefined,
- },
- });
- },
- .keyword_comptime => {
- p.tok_i += 1;
- return p.addNode(.{
- .tag = .@"comptime",
- .main_token = p.tok_i - 1,
- .data = .{
- .lhs = try p.expectExpr(),
- .rhs = undefined,
- },
- });
- },
- .keyword_nosuspend => {
- p.tok_i += 1;
- return p.addNode(.{
- .tag = .@"nosuspend",
- .main_token = p.tok_i - 1,
- .data = .{
- .lhs = try p.expectExpr(),
- .rhs = undefined,
- },
- });
- },
- .keyword_resume => {
- p.tok_i += 1;
- return p.addNode(.{
- .tag = .@"resume",
- .main_token = p.tok_i - 1,
- .data = .{
- .lhs = try p.expectExpr(),
- .rhs = undefined,
- },
- });
- },
- .keyword_return => {
- p.tok_i += 1;
- return p.addNode(.{
- .tag = .@"return",
- .main_token = p.tok_i - 1,
- .data = .{
- .lhs = try p.parseExpr(),
- .rhs = undefined,
- },
- });
- },
- .identifier => {
- if (p.token_tags[p.tok_i + 1] == .colon) {
- switch (p.token_tags[p.tok_i + 2]) {
- .keyword_inline => {
- p.tok_i += 3;
- switch (p.token_tags[p.tok_i]) {
- .keyword_for => return p.parseForExpr(),
- .keyword_while => return p.parseWhileExpr(),
- else => return p.fail(.expected_inlinable),
- }
- },
- .keyword_for => {
- p.tok_i += 2;
- return p.parseForExpr();
- },
- .keyword_while => {
- p.tok_i += 2;
- return p.parseWhileExpr();
- },
- .l_brace => {
- p.tok_i += 2;
- return p.parseBlock();
- },
- else => return p.parseCurlySuffixExpr(),
- }
- } else {
- return p.parseCurlySuffixExpr();
- }
- },
- .keyword_inline => {
- p.tok_i += 1;
- switch (p.token_tags[p.tok_i]) {
- .keyword_for => return p.parseForExpr(),
- .keyword_while => return p.parseWhileExpr(),
- else => return p.fail(.expected_inlinable),
- }
- },
- .keyword_for => return p.parseForExpr(),
- .keyword_while => return p.parseWhileExpr(),
- .l_brace => return p.parseBlock(),
- else => return p.parseCurlySuffixExpr(),
- }
- }
-
- /// IfExpr <- IfPrefix Expr (KEYWORD_else Payload? Expr)?
- fn parseIfExpr(p: *Parser) !Node.Index {
- return p.parseIf(expectExpr);
- }
-
- /// Block <- LBRACE Statement* RBRACE
- fn parseBlock(p: *Parser) !Node.Index {
- const lbrace = p.eatToken(.l_brace) orelse return null_node;
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
- while (true) {
- if (p.token_tags[p.tok_i] == .r_brace) break;
- const statement = try p.expectStatementRecoverable();
- if (statement == 0) break;
- try p.scratch.append(p.gpa, statement);
- }
- _ = try p.expectToken(.r_brace);
- const semicolon = (p.token_tags[p.tok_i - 2] == .semicolon);
- const statements = p.scratch.items[scratch_top..];
- switch (statements.len) {
- 0 => return p.addNode(.{
- .tag = .block_two,
- .main_token = lbrace,
- .data = .{
- .lhs = 0,
- .rhs = 0,
- },
- }),
- 1 => return p.addNode(.{
- .tag = if (semicolon) .block_two_semicolon else .block_two,
- .main_token = lbrace,
- .data = .{
- .lhs = statements[0],
- .rhs = 0,
- },
- }),
- 2 => return p.addNode(.{
- .tag = if (semicolon) .block_two_semicolon else .block_two,
- .main_token = lbrace,
- .data = .{
- .lhs = statements[0],
- .rhs = statements[1],
- },
- }),
- else => {
- const span = try p.listToSpan(statements);
- return p.addNode(.{
- .tag = if (semicolon) .block_semicolon else .block,
- .main_token = lbrace,
- .data = .{
- .lhs = span.start,
- .rhs = span.end,
- },
- });
- },
- }
- }
-
- /// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload
- ///
- /// ForExpr <- ForPrefix Expr (KEYWORD_else Expr)?
- fn parseForExpr(p: *Parser) !Node.Index {
- const for_token = p.eatToken(.keyword_for) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const array_expr = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- const found_payload = try p.parsePtrIndexPayload();
- if (found_payload == 0) try p.warn(.expected_loop_payload);
-
- const then_expr = try p.expectExpr();
- _ = p.eatToken(.keyword_else) orelse {
- return p.addNode(.{
- .tag = .for_simple,
- .main_token = for_token,
- .data = .{
- .lhs = array_expr,
- .rhs = then_expr,
- },
- });
- };
- const else_expr = try p.expectExpr();
- return p.addNode(.{
- .tag = .@"for",
- .main_token = for_token,
- .data = .{
- .lhs = array_expr,
- .rhs = try p.addExtra(Node.If{
- .then_expr = then_expr,
- .else_expr = else_expr,
- }),
- },
- });
- }
-
- /// WhilePrefix <- KEYWORD_while LPAREN Expr RPAREN PtrPayload? WhileContinueExpr?
- ///
- /// WhileExpr <- WhilePrefix Expr (KEYWORD_else Payload? Expr)?
- fn parseWhileExpr(p: *Parser) !Node.Index {
- const while_token = p.eatToken(.keyword_while) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const condition = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- _ = try p.parsePtrPayload();
- const cont_expr = try p.parseWhileContinueExpr();
-
- const then_expr = try p.expectExpr();
- _ = p.eatToken(.keyword_else) orelse {
- if (cont_expr == 0) {
- return p.addNode(.{
- .tag = .while_simple,
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = then_expr,
- },
- });
- } else {
- return p.addNode(.{
- .tag = .while_cont,
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = try p.addExtra(Node.WhileCont{
- .cont_expr = cont_expr,
- .then_expr = then_expr,
- }),
- },
- });
- }
- };
- _ = try p.parsePayload();
- const else_expr = try p.expectExpr();
- return p.addNode(.{
- .tag = .@"while",
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = try p.addExtra(Node.While{
- .cont_expr = cont_expr,
- .then_expr = then_expr,
- .else_expr = else_expr,
- }),
- },
- });
- }
-
- /// CurlySuffixExpr <- TypeExpr InitList?
- ///
- /// InitList
- /// <- LBRACE FieldInit (COMMA FieldInit)* COMMA? RBRACE
- /// / LBRACE Expr (COMMA Expr)* COMMA? RBRACE
- /// / LBRACE RBRACE
- fn parseCurlySuffixExpr(p: *Parser) !Node.Index {
- const lhs = try p.parseTypeExpr();
- if (lhs == 0) return null_node;
- const lbrace = p.eatToken(.l_brace) orelse return lhs;
-
- // If there are 0 or 1 items, we can use ArrayInitOne/StructInitOne;
- // otherwise we use the full ArrayInit/StructInit.
-
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
- const field_init = try p.parseFieldInit();
- if (field_init != 0) {
- try p.scratch.append(p.gpa, field_init);
- while (true) {
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- .r_brace => {
- p.tok_i += 1;
- break;
- },
- .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warn(.expected_comma_after_initializer),
- }
- if (p.eatToken(.r_brace)) |_| break;
- const next = try p.expectFieldInit();
- try p.scratch.append(p.gpa, next);
- }
- const comma = (p.token_tags[p.tok_i - 2] == .comma);
- const inits = p.scratch.items[scratch_top..];
- switch (inits.len) {
- 0 => unreachable,
- 1 => return p.addNode(.{
- .tag = if (comma) .struct_init_one_comma else .struct_init_one,
- .main_token = lbrace,
- .data = .{
- .lhs = lhs,
- .rhs = inits[0],
- },
- }),
- else => return p.addNode(.{
- .tag = if (comma) .struct_init_comma else .struct_init,
- .main_token = lbrace,
- .data = .{
- .lhs = lhs,
- .rhs = try p.addExtra(try p.listToSpan(inits)),
- },
- }),
- }
- }
-
- while (true) {
- if (p.eatToken(.r_brace)) |_| break;
- const elem_init = try p.expectExpr();
- try p.scratch.append(p.gpa, elem_init);
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- .r_brace => {
- p.tok_i += 1;
- break;
- },
- .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warn(.expected_comma_after_initializer),
- }
- }
- const comma = (p.token_tags[p.tok_i - 2] == .comma);
- const inits = p.scratch.items[scratch_top..];
- switch (inits.len) {
- 0 => return p.addNode(.{
- .tag = .struct_init_one,
- .main_token = lbrace,
- .data = .{
- .lhs = lhs,
- .rhs = 0,
- },
- }),
- 1 => return p.addNode(.{
- .tag = if (comma) .array_init_one_comma else .array_init_one,
- .main_token = lbrace,
- .data = .{
- .lhs = lhs,
- .rhs = inits[0],
- },
- }),
- else => return p.addNode(.{
- .tag = if (comma) .array_init_comma else .array_init,
- .main_token = lbrace,
- .data = .{
- .lhs = lhs,
- .rhs = try p.addExtra(try p.listToSpan(inits)),
- },
- }),
- }
- }
-
- /// ErrorUnionExpr <- SuffixExpr (EXCLAMATIONMARK TypeExpr)?
- fn parseErrorUnionExpr(p: *Parser) !Node.Index {
- const suffix_expr = try p.parseSuffixExpr();
- if (suffix_expr == 0) return null_node;
- const bang = p.eatToken(.bang) orelse return suffix_expr;
- return p.addNode(.{
- .tag = .error_union,
- .main_token = bang,
- .data = .{
- .lhs = suffix_expr,
- .rhs = try p.expectTypeExpr(),
- },
- });
- }
-
- /// SuffixExpr
- /// <- KEYWORD_async PrimaryTypeExpr SuffixOp* FnCallArguments
- /// / PrimaryTypeExpr (SuffixOp / FnCallArguments)*
- ///
- /// FnCallArguments <- LPAREN ExprList RPAREN
- ///
- /// ExprList <- (Expr COMMA)* Expr?
- fn parseSuffixExpr(p: *Parser) !Node.Index {
- if (p.eatToken(.keyword_async)) |_| {
- var res = try p.expectPrimaryTypeExpr();
- while (true) {
- const node = try p.parseSuffixOp(res);
- if (node == 0) break;
- res = node;
- }
- const lparen = p.eatToken(.l_paren) orelse {
- try p.warn(.expected_param_list);
- return res;
- };
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
- while (true) {
- if (p.eatToken(.r_paren)) |_| break;
- const param = try p.expectExpr();
- try p.scratch.append(p.gpa, param);
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- .r_paren => {
- p.tok_i += 1;
- break;
- },
- .colon, .r_brace, .r_bracket => return p.failExpected(.r_paren),
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warn(.expected_comma_after_arg),
- }
- }
- const comma = (p.token_tags[p.tok_i - 2] == .comma);
- const params = p.scratch.items[scratch_top..];
- switch (params.len) {
- 0 => return p.addNode(.{
- .tag = if (comma) .async_call_one_comma else .async_call_one,
- .main_token = lparen,
- .data = .{
- .lhs = res,
- .rhs = 0,
- },
- }),
- 1 => return p.addNode(.{
- .tag = if (comma) .async_call_one_comma else .async_call_one,
- .main_token = lparen,
- .data = .{
- .lhs = res,
- .rhs = params[0],
- },
- }),
- else => return p.addNode(.{
- .tag = if (comma) .async_call_comma else .async_call,
- .main_token = lparen,
- .data = .{
- .lhs = res,
- .rhs = try p.addExtra(try p.listToSpan(params)),
- },
- }),
- }
- }
-
- var res = try p.parsePrimaryTypeExpr();
- if (res == 0) return res;
- while (true) {
- const suffix_op = try p.parseSuffixOp(res);
- if (suffix_op != 0) {
- res = suffix_op;
- continue;
- }
- const lparen = p.eatToken(.l_paren) orelse return res;
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
- while (true) {
- if (p.eatToken(.r_paren)) |_| break;
- const param = try p.expectExpr();
- try p.scratch.append(p.gpa, param);
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- .r_paren => {
- p.tok_i += 1;
- break;
- },
- .colon, .r_brace, .r_bracket => return p.failExpected(.r_paren),
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warn(.expected_comma_after_arg),
- }
- }
- const comma = (p.token_tags[p.tok_i - 2] == .comma);
- const params = p.scratch.items[scratch_top..];
- res = switch (params.len) {
- 0 => try p.addNode(.{
- .tag = if (comma) .call_one_comma else .call_one,
- .main_token = lparen,
- .data = .{
- .lhs = res,
- .rhs = 0,
- },
- }),
- 1 => try p.addNode(.{
- .tag = if (comma) .call_one_comma else .call_one,
- .main_token = lparen,
- .data = .{
- .lhs = res,
- .rhs = params[0],
- },
- }),
- else => try p.addNode(.{
- .tag = if (comma) .call_comma else .call,
- .main_token = lparen,
- .data = .{
- .lhs = res,
- .rhs = try p.addExtra(try p.listToSpan(params)),
- },
- }),
- };
- }
- }
-
- /// PrimaryTypeExpr
- /// <- BUILTINIDENTIFIER FnCallArguments
- /// / CHAR_LITERAL
- /// / ContainerDecl
- /// / DOT IDENTIFIER
- /// / DOT InitList
- /// / ErrorSetDecl
- /// / FLOAT
- /// / FnProto
- /// / GroupedExpr
- /// / LabeledTypeExpr
- /// / IDENTIFIER
- /// / IfTypeExpr
- /// / INTEGER
- /// / KEYWORD_comptime TypeExpr
- /// / KEYWORD_error DOT IDENTIFIER
- /// / KEYWORD_anyframe
- /// / KEYWORD_unreachable
- /// / STRINGLITERAL
- /// / SwitchExpr
- ///
- /// ContainerDecl <- (KEYWORD_extern / KEYWORD_packed)? ContainerDeclAuto
- ///
- /// ContainerDeclAuto <- ContainerDeclType LBRACE container_doc_comment? ContainerMembers RBRACE
- ///
- /// InitList
- /// <- LBRACE FieldInit (COMMA FieldInit)* COMMA? RBRACE
- /// / LBRACE Expr (COMMA Expr)* COMMA? RBRACE
- /// / LBRACE RBRACE
- ///
- /// ErrorSetDecl <- KEYWORD_error LBRACE IdentifierList RBRACE
- ///
- /// GroupedExpr <- LPAREN Expr RPAREN
- ///
- /// IfTypeExpr <- IfPrefix TypeExpr (KEYWORD_else Payload? TypeExpr)?
- ///
- /// LabeledTypeExpr
- /// <- BlockLabel Block
- /// / BlockLabel? LoopTypeExpr
- ///
- /// LoopTypeExpr <- KEYWORD_inline? (ForTypeExpr / WhileTypeExpr)
- fn parsePrimaryTypeExpr(p: *Parser) !Node.Index {
- switch (p.token_tags[p.tok_i]) {
- .char_literal => return p.addNode(.{
- .tag = .char_literal,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = undefined,
- .rhs = undefined,
- },
- }),
- .number_literal => return p.addNode(.{
- .tag = .number_literal,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = undefined,
- .rhs = undefined,
- },
- }),
- .keyword_unreachable => return p.addNode(.{
- .tag = .unreachable_literal,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = undefined,
- .rhs = undefined,
- },
- }),
- .keyword_anyframe => return p.addNode(.{
- .tag = .anyframe_literal,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = undefined,
- .rhs = undefined,
- },
- }),
- .string_literal => {
- const main_token = p.nextToken();
- return p.addNode(.{
- .tag = .string_literal,
- .main_token = main_token,
- .data = .{
- .lhs = undefined,
- .rhs = undefined,
- },
- });
- },
-
- .builtin => return p.parseBuiltinCall(),
- .keyword_fn => return p.parseFnProto(),
- .keyword_if => return p.parseIf(expectTypeExpr),
- .keyword_switch => return p.expectSwitchExpr(),
-
- .keyword_extern,
- .keyword_packed,
- => {
- p.tok_i += 1;
- return p.parseContainerDeclAuto();
- },
-
- .keyword_struct,
- .keyword_opaque,
- .keyword_enum,
- .keyword_union,
- => return p.parseContainerDeclAuto(),
-
- .keyword_comptime => return p.addNode(.{
- .tag = .@"comptime",
- .main_token = p.nextToken(),
- .data = .{
- .lhs = try p.expectTypeExpr(),
- .rhs = undefined,
- },
- }),
- .multiline_string_literal_line => {
- const first_line = p.nextToken();
- while (p.token_tags[p.tok_i] == .multiline_string_literal_line) {
- p.tok_i += 1;
- }
- return p.addNode(.{
- .tag = .multiline_string_literal,
- .main_token = first_line,
- .data = .{
- .lhs = first_line,
- .rhs = p.tok_i - 1,
- },
- });
- },
- .identifier => switch (p.token_tags[p.tok_i + 1]) {
- .colon => switch (p.token_tags[p.tok_i + 2]) {
- .keyword_inline => {
- p.tok_i += 3;
- switch (p.token_tags[p.tok_i]) {
- .keyword_for => return p.parseForTypeExpr(),
- .keyword_while => return p.parseWhileTypeExpr(),
- else => return p.fail(.expected_inlinable),
- }
- },
- .keyword_for => {
- p.tok_i += 2;
- return p.parseForTypeExpr();
- },
- .keyword_while => {
- p.tok_i += 2;
- return p.parseWhileTypeExpr();
- },
- .l_brace => {
- p.tok_i += 2;
- return p.parseBlock();
- },
- else => return p.addNode(.{
- .tag = .identifier,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = undefined,
- .rhs = undefined,
- },
- }),
- },
- else => return p.addNode(.{
- .tag = .identifier,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = undefined,
- .rhs = undefined,
- },
- }),
- },
- .keyword_inline => {
- p.tok_i += 1;
- switch (p.token_tags[p.tok_i]) {
- .keyword_for => return p.parseForTypeExpr(),
- .keyword_while => return p.parseWhileTypeExpr(),
- else => return p.fail(.expected_inlinable),
- }
- },
- .keyword_for => return p.parseForTypeExpr(),
- .keyword_while => return p.parseWhileTypeExpr(),
- .period => switch (p.token_tags[p.tok_i + 1]) {
- .identifier => return p.addNode(.{
- .tag = .enum_literal,
- .data = .{
- .lhs = p.nextToken(), // dot
- .rhs = undefined,
- },
- .main_token = p.nextToken(), // identifier
- }),
- .l_brace => {
- const lbrace = p.tok_i + 1;
- p.tok_i = lbrace + 1;
-
- // If there are 0, 1, or 2 items, we can use ArrayInitDotTwo/StructInitDotTwo;
- // otherwise we use the full ArrayInitDot/StructInitDot.
-
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
- const field_init = try p.parseFieldInit();
- if (field_init != 0) {
- try p.scratch.append(p.gpa, field_init);
- while (true) {
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- .r_brace => {
- p.tok_i += 1;
- break;
- },
- .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warn(.expected_comma_after_initializer),
- }
- if (p.eatToken(.r_brace)) |_| break;
- const next = try p.expectFieldInit();
- try p.scratch.append(p.gpa, next);
- }
- const comma = (p.token_tags[p.tok_i - 2] == .comma);
- const inits = p.scratch.items[scratch_top..];
- switch (inits.len) {
- 0 => unreachable,
- 1 => return p.addNode(.{
- .tag = if (comma) .struct_init_dot_two_comma else .struct_init_dot_two,
- .main_token = lbrace,
- .data = .{
- .lhs = inits[0],
- .rhs = 0,
- },
- }),
- 2 => return p.addNode(.{
- .tag = if (comma) .struct_init_dot_two_comma else .struct_init_dot_two,
- .main_token = lbrace,
- .data = .{
- .lhs = inits[0],
- .rhs = inits[1],
- },
- }),
- else => {
- const span = try p.listToSpan(inits);
- return p.addNode(.{
- .tag = if (comma) .struct_init_dot_comma else .struct_init_dot,
- .main_token = lbrace,
- .data = .{
- .lhs = span.start,
- .rhs = span.end,
- },
- });
- },
- }
- }
-
- while (true) {
- if (p.eatToken(.r_brace)) |_| break;
- const elem_init = try p.expectExpr();
- try p.scratch.append(p.gpa, elem_init);
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- .r_brace => {
- p.tok_i += 1;
- break;
- },
- .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warn(.expected_comma_after_initializer),
- }
- }
- const comma = (p.token_tags[p.tok_i - 2] == .comma);
- const inits = p.scratch.items[scratch_top..];
- switch (inits.len) {
- 0 => return p.addNode(.{
- .tag = .struct_init_dot_two,
- .main_token = lbrace,
- .data = .{
- .lhs = 0,
- .rhs = 0,
- },
- }),
- 1 => return p.addNode(.{
- .tag = if (comma) .array_init_dot_two_comma else .array_init_dot_two,
- .main_token = lbrace,
- .data = .{
- .lhs = inits[0],
- .rhs = 0,
- },
- }),
- 2 => return p.addNode(.{
- .tag = if (comma) .array_init_dot_two_comma else .array_init_dot_two,
- .main_token = lbrace,
- .data = .{
- .lhs = inits[0],
- .rhs = inits[1],
- },
- }),
- else => {
- const span = try p.listToSpan(inits);
- return p.addNode(.{
- .tag = if (comma) .array_init_dot_comma else .array_init_dot,
- .main_token = lbrace,
- .data = .{
- .lhs = span.start,
- .rhs = span.end,
- },
- });
- },
- }
- },
- else => return null_node,
- },
- .keyword_error => switch (p.token_tags[p.tok_i + 1]) {
- .l_brace => {
- const error_token = p.tok_i;
- p.tok_i += 2;
- while (true) {
- if (p.eatToken(.r_brace)) |_| break;
- _ = try p.eatDocComments();
- _ = try p.expectToken(.identifier);
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- .r_brace => {
- p.tok_i += 1;
- break;
- },
- .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warn(.expected_comma_after_field),
- }
- }
- return p.addNode(.{
- .tag = .error_set_decl,
- .main_token = error_token,
- .data = .{
- .lhs = undefined,
- .rhs = p.tok_i - 1, // rbrace
- },
- });
- },
- else => {
- const main_token = p.nextToken();
- const period = p.eatToken(.period);
- if (period == null) try p.warnExpected(.period);
- const identifier = p.eatToken(.identifier);
- if (identifier == null) try p.warnExpected(.identifier);
- return p.addNode(.{
- .tag = .error_value,
- .main_token = main_token,
- .data = .{
- .lhs = period orelse 0,
- .rhs = identifier orelse 0,
- },
- });
- },
- },
- .l_paren => return p.addNode(.{
- .tag = .grouped_expression,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = try p.expectExpr(),
- .rhs = try p.expectToken(.r_paren),
- },
- }),
- else => return null_node,
- }
- }
-
- fn expectPrimaryTypeExpr(p: *Parser) !Node.Index {
- const node = try p.parsePrimaryTypeExpr();
- if (node == 0) {
- return p.fail(.expected_primary_type_expr);
- }
- return node;
- }
-
- /// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload
- ///
- /// ForTypeExpr <- ForPrefix TypeExpr (KEYWORD_else TypeExpr)?
- fn parseForTypeExpr(p: *Parser) !Node.Index {
- const for_token = p.eatToken(.keyword_for) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const array_expr = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- const found_payload = try p.parsePtrIndexPayload();
- if (found_payload == 0) try p.warn(.expected_loop_payload);
-
- const then_expr = try p.expectTypeExpr();
- _ = p.eatToken(.keyword_else) orelse {
- return p.addNode(.{
- .tag = .for_simple,
- .main_token = for_token,
- .data = .{
- .lhs = array_expr,
- .rhs = then_expr,
- },
- });
- };
- const else_expr = try p.expectTypeExpr();
- return p.addNode(.{
- .tag = .@"for",
- .main_token = for_token,
- .data = .{
- .lhs = array_expr,
- .rhs = try p.addExtra(Node.If{
- .then_expr = then_expr,
- .else_expr = else_expr,
- }),
- },
- });
- }
-
- /// WhilePrefix <- KEYWORD_while LPAREN Expr RPAREN PtrPayload? WhileContinueExpr?
- ///
- /// WhileTypeExpr <- WhilePrefix TypeExpr (KEYWORD_else Payload? TypeExpr)?
- fn parseWhileTypeExpr(p: *Parser) !Node.Index {
- const while_token = p.eatToken(.keyword_while) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const condition = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- _ = try p.parsePtrPayload();
- const cont_expr = try p.parseWhileContinueExpr();
-
- const then_expr = try p.expectTypeExpr();
- _ = p.eatToken(.keyword_else) orelse {
- if (cont_expr == 0) {
- return p.addNode(.{
- .tag = .while_simple,
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = then_expr,
- },
- });
- } else {
- return p.addNode(.{
- .tag = .while_cont,
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = try p.addExtra(Node.WhileCont{
- .cont_expr = cont_expr,
- .then_expr = then_expr,
- }),
- },
- });
- }
- };
- _ = try p.parsePayload();
- const else_expr = try p.expectTypeExpr();
- return p.addNode(.{
- .tag = .@"while",
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = try p.addExtra(Node.While{
- .cont_expr = cont_expr,
- .then_expr = then_expr,
- .else_expr = else_expr,
- }),
- },
- });
- }
-
- /// SwitchExpr <- KEYWORD_switch LPAREN Expr RPAREN LBRACE SwitchProngList RBRACE
- fn expectSwitchExpr(p: *Parser) !Node.Index {
- const switch_token = p.assertToken(.keyword_switch);
- _ = try p.expectToken(.l_paren);
- const expr_node = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- _ = try p.expectToken(.l_brace);
- const cases = try p.parseSwitchProngList();
- const trailing_comma = p.token_tags[p.tok_i - 1] == .comma;
- _ = try p.expectToken(.r_brace);
-
- return p.addNode(.{
- .tag = if (trailing_comma) .switch_comma else .@"switch",
- .main_token = switch_token,
- .data = .{
- .lhs = expr_node,
- .rhs = try p.addExtra(Node.SubRange{
- .start = cases.start,
- .end = cases.end,
- }),
- },
- });
- }
-
- /// AsmExpr <- KEYWORD_asm KEYWORD_volatile? LPAREN Expr AsmOutput? RPAREN
- ///
- /// AsmOutput <- COLON AsmOutputList AsmInput?
- ///
- /// AsmInput <- COLON AsmInputList AsmClobbers?
- ///
- /// AsmClobbers <- COLON StringList
- ///
- /// StringList <- (STRINGLITERAL COMMA)* STRINGLITERAL?
- ///
- /// AsmOutputList <- (AsmOutputItem COMMA)* AsmOutputItem?
- ///
- /// AsmInputList <- (AsmInputItem COMMA)* AsmInputItem?
- fn expectAsmExpr(p: *Parser) !Node.Index {
- const asm_token = p.assertToken(.keyword_asm);
- _ = p.eatToken(.keyword_volatile);
- _ = try p.expectToken(.l_paren);
- const template = try p.expectExpr();
-
- if (p.eatToken(.r_paren)) |rparen| {
- return p.addNode(.{
- .tag = .asm_simple,
- .main_token = asm_token,
- .data = .{
- .lhs = template,
- .rhs = rparen,
- },
- });
- }
-
- _ = try p.expectToken(.colon);
-
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
-
- while (true) {
- const output_item = try p.parseAsmOutputItem();
- if (output_item == 0) break;
- try p.scratch.append(p.gpa, output_item);
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- // All possible delimiters.
- .colon, .r_paren, .r_brace, .r_bracket => break,
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warnExpected(.comma),
- }
- }
- if (p.eatToken(.colon)) |_| {
- while (true) {
- const input_item = try p.parseAsmInputItem();
- if (input_item == 0) break;
- try p.scratch.append(p.gpa, input_item);
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- // All possible delimiters.
- .colon, .r_paren, .r_brace, .r_bracket => break,
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warnExpected(.comma),
- }
- }
- if (p.eatToken(.colon)) |_| {
- while (p.eatToken(.string_literal)) |_| {
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- .colon, .r_paren, .r_brace, .r_bracket => break,
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warnExpected(.comma),
- }
- }
- }
- }
- const rparen = try p.expectToken(.r_paren);
- const span = try p.listToSpan(p.scratch.items[scratch_top..]);
- return p.addNode(.{
- .tag = .@"asm",
- .main_token = asm_token,
- .data = .{
- .lhs = template,
- .rhs = try p.addExtra(Node.Asm{
- .items_start = span.start,
- .items_end = span.end,
- .rparen = rparen,
- }),
- },
- });
- }
-
- /// AsmOutputItem <- LBRACKET IDENTIFIER RBRACKET STRINGLITERAL LPAREN (MINUSRARROW TypeExpr / IDENTIFIER) RPAREN
- fn parseAsmOutputItem(p: *Parser) !Node.Index {
- _ = p.eatToken(.l_bracket) orelse return null_node;
- const identifier = try p.expectToken(.identifier);
- _ = try p.expectToken(.r_bracket);
- _ = try p.expectToken(.string_literal);
- _ = try p.expectToken(.l_paren);
- const type_expr: Node.Index = blk: {
- if (p.eatToken(.arrow)) |_| {
- break :blk try p.expectTypeExpr();
- } else {
- _ = try p.expectToken(.identifier);
- break :blk null_node;
- }
- };
- const rparen = try p.expectToken(.r_paren);
- return p.addNode(.{
- .tag = .asm_output,
- .main_token = identifier,
- .data = .{
- .lhs = type_expr,
- .rhs = rparen,
- },
- });
- }
-
- /// AsmInputItem <- LBRACKET IDENTIFIER RBRACKET STRINGLITERAL LPAREN Expr RPAREN
- fn parseAsmInputItem(p: *Parser) !Node.Index {
- _ = p.eatToken(.l_bracket) orelse return null_node;
- const identifier = try p.expectToken(.identifier);
- _ = try p.expectToken(.r_bracket);
- _ = try p.expectToken(.string_literal);
- _ = try p.expectToken(.l_paren);
- const expr = try p.expectExpr();
- const rparen = try p.expectToken(.r_paren);
- return p.addNode(.{
- .tag = .asm_input,
- .main_token = identifier,
- .data = .{
- .lhs = expr,
- .rhs = rparen,
- },
- });
- }
-
- /// BreakLabel <- COLON IDENTIFIER
- fn parseBreakLabel(p: *Parser) !TokenIndex {
- _ = p.eatToken(.colon) orelse return @as(TokenIndex, 0);
- return p.expectToken(.identifier);
- }
-
- /// BlockLabel <- IDENTIFIER COLON
- fn parseBlockLabel(p: *Parser) TokenIndex {
- if (p.token_tags[p.tok_i] == .identifier and
- p.token_tags[p.tok_i + 1] == .colon)
- {
- const identifier = p.tok_i;
- p.tok_i += 2;
- return identifier;
- }
- return null_node;
- }
-
- /// FieldInit <- DOT IDENTIFIER EQUAL Expr
- fn parseFieldInit(p: *Parser) !Node.Index {
- if (p.token_tags[p.tok_i + 0] == .period and
- p.token_tags[p.tok_i + 1] == .identifier and
- p.token_tags[p.tok_i + 2] == .equal)
- {
- p.tok_i += 3;
- return p.expectExpr();
- } else {
- return null_node;
- }
- }
-
- fn expectFieldInit(p: *Parser) !Node.Index {
- if (p.token_tags[p.tok_i] != .period or
- p.token_tags[p.tok_i + 1] != .identifier or
- p.token_tags[p.tok_i + 2] != .equal)
- return p.fail(.expected_initializer);
-
- p.tok_i += 3;
- return p.expectExpr();
- }
-
- /// WhileContinueExpr <- COLON LPAREN AssignExpr RPAREN
- fn parseWhileContinueExpr(p: *Parser) !Node.Index {
- _ = p.eatToken(.colon) orelse {
- if (p.token_tags[p.tok_i] == .l_paren and
- p.tokensOnSameLine(p.tok_i - 1, p.tok_i))
- return p.fail(.expected_continue_expr);
- return null_node;
- };
- _ = try p.expectToken(.l_paren);
- const node = try p.parseAssignExpr();
- if (node == 0) return p.fail(.expected_expr_or_assignment);
- _ = try p.expectToken(.r_paren);
- return node;
- }
-
- /// LinkSection <- KEYWORD_linksection LPAREN Expr RPAREN
- fn parseLinkSection(p: *Parser) !Node.Index {
- _ = p.eatToken(.keyword_linksection) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const expr_node = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- return expr_node;
- }
-
- /// CallConv <- KEYWORD_callconv LPAREN Expr RPAREN
- fn parseCallconv(p: *Parser) !Node.Index {
- _ = p.eatToken(.keyword_callconv) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const expr_node = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- return expr_node;
- }
-
- /// AddrSpace <- KEYWORD_addrspace LPAREN Expr RPAREN
- fn parseAddrSpace(p: *Parser) !Node.Index {
- _ = p.eatToken(.keyword_addrspace) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const expr_node = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- return expr_node;
- }
-
- /// This function can return null nodes and then still return nodes afterwards,
- /// such as in the case of anytype and `...`. Caller must look for rparen to find
- /// out when there are no more param decls left.
- ///
- /// ParamDecl
- /// <- doc_comment? (KEYWORD_noalias / KEYWORD_comptime)? (IDENTIFIER COLON)? ParamType
- /// / DOT3
- ///
- /// ParamType
- /// <- KEYWORD_anytype
- /// / TypeExpr
- fn expectParamDecl(p: *Parser) !Node.Index {
- _ = try p.eatDocComments();
- switch (p.token_tags[p.tok_i]) {
- .keyword_noalias, .keyword_comptime => p.tok_i += 1,
- .ellipsis3 => {
- p.tok_i += 1;
- return null_node;
- },
- else => {},
- }
- if (p.token_tags[p.tok_i] == .identifier and
- p.token_tags[p.tok_i + 1] == .colon)
- {
- p.tok_i += 2;
- }
- switch (p.token_tags[p.tok_i]) {
- .keyword_anytype => {
- p.tok_i += 1;
- return null_node;
- },
- else => return p.expectTypeExpr(),
- }
- }
-
- /// Payload <- PIPE IDENTIFIER PIPE
- fn parsePayload(p: *Parser) !TokenIndex {
- _ = p.eatToken(.pipe) orelse return @as(TokenIndex, 0);
- const identifier = try p.expectToken(.identifier);
- _ = try p.expectToken(.pipe);
- return identifier;
- }
-
- /// PtrPayload <- PIPE ASTERISK? IDENTIFIER PIPE
- fn parsePtrPayload(p: *Parser) !TokenIndex {
- _ = p.eatToken(.pipe) orelse return @as(TokenIndex, 0);
- _ = p.eatToken(.asterisk);
- const identifier = try p.expectToken(.identifier);
- _ = try p.expectToken(.pipe);
- return identifier;
- }
-
- /// Returns the first identifier token, if any.
- ///
- /// PtrIndexPayload <- PIPE ASTERISK? IDENTIFIER (COMMA IDENTIFIER)? PIPE
- fn parsePtrIndexPayload(p: *Parser) !TokenIndex {
- _ = p.eatToken(.pipe) orelse return @as(TokenIndex, 0);
- _ = p.eatToken(.asterisk);
- const identifier = try p.expectToken(.identifier);
- if (p.eatToken(.comma) != null) {
- _ = try p.expectToken(.identifier);
- }
- _ = try p.expectToken(.pipe);
- return identifier;
- }
-
- /// SwitchProng <- KEYWORD_inline? SwitchCase EQUALRARROW PtrIndexPayload? AssignExpr
- ///
- /// SwitchCase
- /// <- SwitchItem (COMMA SwitchItem)* COMMA?
- /// / KEYWORD_else
- fn parseSwitchProng(p: *Parser) !Node.Index {
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
-
- const is_inline = p.eatToken(.keyword_inline) != null;
-
- if (p.eatToken(.keyword_else) == null) {
- while (true) {
- const item = try p.parseSwitchItem();
- if (item == 0) break;
- try p.scratch.append(p.gpa, item);
- if (p.eatToken(.comma) == null) break;
- }
- if (scratch_top == p.scratch.items.len) {
- if (is_inline) p.tok_i -= 1;
- return null_node;
- }
- }
- const arrow_token = try p.expectToken(.equal_angle_bracket_right);
- _ = try p.parsePtrIndexPayload();
-
- const items = p.scratch.items[scratch_top..];
- switch (items.len) {
- 0 => return p.addNode(.{
- .tag = if (is_inline) .switch_case_inline_one else .switch_case_one,
- .main_token = arrow_token,
- .data = .{
- .lhs = 0,
- .rhs = try p.expectAssignExpr(),
- },
- }),
- 1 => return p.addNode(.{
- .tag = if (is_inline) .switch_case_inline_one else .switch_case_one,
- .main_token = arrow_token,
- .data = .{
- .lhs = items[0],
- .rhs = try p.expectAssignExpr(),
- },
- }),
- else => return p.addNode(.{
- .tag = if (is_inline) .switch_case_inline else .switch_case,
- .main_token = arrow_token,
- .data = .{
- .lhs = try p.addExtra(try p.listToSpan(items)),
- .rhs = try p.expectAssignExpr(),
- },
- }),
- }
- }
-
- /// SwitchItem <- Expr (DOT3 Expr)?
- fn parseSwitchItem(p: *Parser) !Node.Index {
- const expr = try p.parseExpr();
- if (expr == 0) return null_node;
-
- if (p.eatToken(.ellipsis3)) |token| {
- return p.addNode(.{
- .tag = .switch_range,
- .main_token = token,
- .data = .{
- .lhs = expr,
- .rhs = try p.expectExpr(),
- },
- });
- }
- return expr;
- }
-
- const PtrModifiers = struct {
- align_node: Node.Index,
- addrspace_node: Node.Index,
- bit_range_start: Node.Index,
- bit_range_end: Node.Index,
- };
-
- fn parsePtrModifiers(p: *Parser) !PtrModifiers {
- var result: PtrModifiers = .{
- .align_node = 0,
- .addrspace_node = 0,
- .bit_range_start = 0,
- .bit_range_end = 0,
- };
- var saw_const = false;
- var saw_volatile = false;
- var saw_allowzero = false;
- var saw_addrspace = false;
- while (true) {
- switch (p.token_tags[p.tok_i]) {
- .keyword_align => {
- if (result.align_node != 0) {
- try p.warn(.extra_align_qualifier);
- }
- p.tok_i += 1;
- _ = try p.expectToken(.l_paren);
- result.align_node = try p.expectExpr();
-
- if (p.eatToken(.colon)) |_| {
- result.bit_range_start = try p.expectExpr();
- _ = try p.expectToken(.colon);
- result.bit_range_end = try p.expectExpr();
- }
-
- _ = try p.expectToken(.r_paren);
- },
- .keyword_const => {
- if (saw_const) {
- try p.warn(.extra_const_qualifier);
- }
- p.tok_i += 1;
- saw_const = true;
- },
- .keyword_volatile => {
- if (saw_volatile) {
- try p.warn(.extra_volatile_qualifier);
- }
- p.tok_i += 1;
- saw_volatile = true;
- },
- .keyword_allowzero => {
- if (saw_allowzero) {
- try p.warn(.extra_allowzero_qualifier);
- }
- p.tok_i += 1;
- saw_allowzero = true;
- },
- .keyword_addrspace => {
- if (saw_addrspace) {
- try p.warn(.extra_addrspace_qualifier);
- }
- result.addrspace_node = try p.parseAddrSpace();
- },
- else => return result,
- }
- }
- }
-
- /// SuffixOp
- /// <- LBRACKET Expr (DOT2 (Expr? (COLON Expr)?)?)? RBRACKET
- /// / DOT IDENTIFIER
- /// / DOTASTERISK
- /// / DOTQUESTIONMARK
- fn parseSuffixOp(p: *Parser, lhs: Node.Index) !Node.Index {
- switch (p.token_tags[p.tok_i]) {
- .l_bracket => {
- const lbracket = p.nextToken();
- const index_expr = try p.expectExpr();
-
- if (p.eatToken(.ellipsis2)) |_| {
- const end_expr = try p.parseExpr();
- if (p.eatToken(.colon)) |_| {
- const sentinel = try p.expectExpr();
- _ = try p.expectToken(.r_bracket);
- return p.addNode(.{
- .tag = .slice_sentinel,
- .main_token = lbracket,
- .data = .{
- .lhs = lhs,
- .rhs = try p.addExtra(Node.SliceSentinel{
- .start = index_expr,
- .end = end_expr,
- .sentinel = sentinel,
- }),
- },
- });
- }
- _ = try p.expectToken(.r_bracket);
- if (end_expr == 0) {
- return p.addNode(.{
- .tag = .slice_open,
- .main_token = lbracket,
- .data = .{
- .lhs = lhs,
- .rhs = index_expr,
- },
- });
- }
- return p.addNode(.{
- .tag = .slice,
- .main_token = lbracket,
- .data = .{
- .lhs = lhs,
- .rhs = try p.addExtra(Node.Slice{
- .start = index_expr,
- .end = end_expr,
- }),
- },
- });
- }
- _ = try p.expectToken(.r_bracket);
- return p.addNode(.{
- .tag = .array_access,
- .main_token = lbracket,
- .data = .{
- .lhs = lhs,
- .rhs = index_expr,
- },
- });
- },
- .period_asterisk => return p.addNode(.{
- .tag = .deref,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = lhs,
- .rhs = undefined,
- },
- }),
- .invalid_periodasterisks => {
- try p.warn(.asterisk_after_ptr_deref);
- return p.addNode(.{
- .tag = .deref,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = lhs,
- .rhs = undefined,
- },
- });
- },
- .period => switch (p.token_tags[p.tok_i + 1]) {
- .identifier => return p.addNode(.{
- .tag = .field_access,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = lhs,
- .rhs = p.nextToken(),
- },
- }),
- .question_mark => return p.addNode(.{
- .tag = .unwrap_optional,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = lhs,
- .rhs = p.nextToken(),
- },
- }),
- .l_brace => {
- // this a misplaced `.{`, handle the error somewhere else
- return null_node;
- },
- else => {
- p.tok_i += 1;
- try p.warn(.expected_suffix_op);
- return null_node;
- },
- },
- else => return null_node,
- }
- }
-
- /// Caller must have already verified the first token.
- ///
- /// ContainerDeclAuto <- ContainerDeclType LBRACE container_doc_comment? ContainerMembers RBRACE
- ///
- /// ContainerDeclType
- /// <- KEYWORD_struct (LPAREN Expr RPAREN)?
- /// / KEYWORD_opaque
- /// / KEYWORD_enum (LPAREN Expr RPAREN)?
- /// / KEYWORD_union (LPAREN (KEYWORD_enum (LPAREN Expr RPAREN)? / Expr) RPAREN)?
- fn parseContainerDeclAuto(p: *Parser) !Node.Index {
- const main_token = p.nextToken();
- const arg_expr = switch (p.token_tags[main_token]) {
- .keyword_opaque => null_node,
- .keyword_struct, .keyword_enum => blk: {
- if (p.eatToken(.l_paren)) |_| {
- const expr = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- break :blk expr;
- } else {
- break :blk null_node;
- }
- },
- .keyword_union => blk: {
- if (p.eatToken(.l_paren)) |_| {
- if (p.eatToken(.keyword_enum)) |_| {
- if (p.eatToken(.l_paren)) |_| {
- const enum_tag_expr = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- _ = try p.expectToken(.r_paren);
-
- _ = try p.expectToken(.l_brace);
- const members = try p.parseContainerMembers();
- const members_span = try members.toSpan(p);
- _ = try p.expectToken(.r_brace);
- return p.addNode(.{
- .tag = switch (members.trailing) {
- true => .tagged_union_enum_tag_trailing,
- false => .tagged_union_enum_tag,
- },
- .main_token = main_token,
- .data = .{
- .lhs = enum_tag_expr,
- .rhs = try p.addExtra(members_span),
- },
- });
- } else {
- _ = try p.expectToken(.r_paren);
-
- _ = try p.expectToken(.l_brace);
- const members = try p.parseContainerMembers();
- _ = try p.expectToken(.r_brace);
- if (members.len <= 2) {
- return p.addNode(.{
- .tag = switch (members.trailing) {
- true => .tagged_union_two_trailing,
- false => .tagged_union_two,
- },
- .main_token = main_token,
- .data = .{
- .lhs = members.lhs,
- .rhs = members.rhs,
- },
- });
- } else {
- const span = try members.toSpan(p);
- return p.addNode(.{
- .tag = switch (members.trailing) {
- true => .tagged_union_trailing,
- false => .tagged_union,
- },
- .main_token = main_token,
- .data = .{
- .lhs = span.start,
- .rhs = span.end,
- },
- });
- }
- }
- } else {
- const expr = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- break :blk expr;
- }
- } else {
- break :blk null_node;
- }
- },
- else => {
- p.tok_i -= 1;
- return p.fail(.expected_container);
- },
- };
- _ = try p.expectToken(.l_brace);
- const members = try p.parseContainerMembers();
- _ = try p.expectToken(.r_brace);
- if (arg_expr == 0) {
- if (members.len <= 2) {
- return p.addNode(.{
- .tag = switch (members.trailing) {
- true => .container_decl_two_trailing,
- false => .container_decl_two,
- },
- .main_token = main_token,
- .data = .{
- .lhs = members.lhs,
- .rhs = members.rhs,
- },
- });
- } else {
- const span = try members.toSpan(p);
- return p.addNode(.{
- .tag = switch (members.trailing) {
- true => .container_decl_trailing,
- false => .container_decl,
- },
- .main_token = main_token,
- .data = .{
- .lhs = span.start,
- .rhs = span.end,
- },
- });
- }
- } else {
- const span = try members.toSpan(p);
- return p.addNode(.{
- .tag = switch (members.trailing) {
- true => .container_decl_arg_trailing,
- false => .container_decl_arg,
- },
- .main_token = main_token,
- .data = .{
- .lhs = arg_expr,
- .rhs = try p.addExtra(Node.SubRange{
- .start = span.start,
- .end = span.end,
- }),
- },
- });
- }
- }
-
- /// Give a helpful error message for those transitioning from
- /// C's 'struct Foo {};' to Zig's 'const Foo = struct {};'.
- fn parseCStyleContainer(p: *Parser) Error!bool {
- const main_token = p.tok_i;
- switch (p.token_tags[p.tok_i]) {
- .keyword_enum, .keyword_union, .keyword_struct => {},
- else => return false,
- }
- const identifier = p.tok_i + 1;
- if (p.token_tags[identifier] != .identifier) return false;
- p.tok_i += 2;
-
- try p.warnMsg(.{
- .tag = .c_style_container,
- .token = identifier,
- .extra = .{ .expected_tag = p.token_tags[main_token] },
- });
- try p.warnMsg(.{
- .tag = .zig_style_container,
- .is_note = true,
- .token = identifier,
- .extra = .{ .expected_tag = p.token_tags[main_token] },
- });
-
- _ = try p.expectToken(.l_brace);
- _ = try p.parseContainerMembers();
- _ = try p.expectToken(.r_brace);
- try p.expectSemicolon(.expected_semi_after_decl, true);
- return true;
- }
-
- /// Holds temporary data until we are ready to construct the full ContainerDecl AST node.
- ///
- /// ByteAlign <- KEYWORD_align LPAREN Expr RPAREN
- fn parseByteAlign(p: *Parser) !Node.Index {
- _ = p.eatToken(.keyword_align) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const expr = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- return expr;
- }
-
- /// SwitchProngList <- (SwitchProng COMMA)* SwitchProng?
- fn parseSwitchProngList(p: *Parser) !Node.SubRange {
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
-
- while (true) {
- const item = try parseSwitchProng(p);
- if (item == 0) break;
-
- try p.scratch.append(p.gpa, item);
-
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- // All possible delimiters.
- .colon, .r_paren, .r_brace, .r_bracket => break,
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warn(.expected_comma_after_switch_prong),
- }
- }
- return p.listToSpan(p.scratch.items[scratch_top..]);
- }
-
- /// ParamDeclList <- (ParamDecl COMMA)* ParamDecl?
- fn parseParamDeclList(p: *Parser) !SmallSpan {
- _ = try p.expectToken(.l_paren);
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
- var varargs: union(enum) { none, seen, nonfinal: TokenIndex } = .none;
- while (true) {
- if (p.eatToken(.r_paren)) |_| break;
- if (varargs == .seen) varargs = .{ .nonfinal = p.tok_i };
- const param = try p.expectParamDecl();
- if (param != 0) {
- try p.scratch.append(p.gpa, param);
- } else if (p.token_tags[p.tok_i - 1] == .ellipsis3) {
- if (varargs == .none) varargs = .seen;
- }
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- .r_paren => {
- p.tok_i += 1;
- break;
- },
- .colon, .r_brace, .r_bracket => return p.failExpected(.r_paren),
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warn(.expected_comma_after_param),
- }
- }
- if (varargs == .nonfinal) {
- try p.warnMsg(.{ .tag = .varargs_nonfinal, .token = varargs.nonfinal });
- }
- const params = p.scratch.items[scratch_top..];
- return switch (params.len) {
- 0 => SmallSpan{ .zero_or_one = 0 },
- 1 => SmallSpan{ .zero_or_one = params[0] },
- else => SmallSpan{ .multi = try p.listToSpan(params) },
- };
- }
-
- /// FnCallArguments <- LPAREN ExprList RPAREN
- ///
- /// ExprList <- (Expr COMMA)* Expr?
- fn parseBuiltinCall(p: *Parser) !Node.Index {
- const builtin_token = p.assertToken(.builtin);
- if (p.token_tags[p.nextToken()] != .l_paren) {
- p.tok_i -= 1;
- try p.warn(.expected_param_list);
- // Pretend this was an identifier so we can continue parsing.
- return p.addNode(.{
- .tag = .identifier,
- .main_token = builtin_token,
- .data = .{
- .lhs = undefined,
- .rhs = undefined,
- },
- });
- }
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
- while (true) {
- if (p.eatToken(.r_paren)) |_| break;
- const param = try p.expectExpr();
- try p.scratch.append(p.gpa, param);
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- .r_paren => {
- p.tok_i += 1;
- break;
- },
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warn(.expected_comma_after_arg),
- }
- }
- const comma = (p.token_tags[p.tok_i - 2] == .comma);
- const params = p.scratch.items[scratch_top..];
- switch (params.len) {
- 0 => return p.addNode(.{
- .tag = .builtin_call_two,
- .main_token = builtin_token,
- .data = .{
- .lhs = 0,
- .rhs = 0,
- },
- }),
- 1 => return p.addNode(.{
- .tag = if (comma) .builtin_call_two_comma else .builtin_call_two,
- .main_token = builtin_token,
- .data = .{
- .lhs = params[0],
- .rhs = 0,
- },
- }),
- 2 => return p.addNode(.{
- .tag = if (comma) .builtin_call_two_comma else .builtin_call_two,
- .main_token = builtin_token,
- .data = .{
- .lhs = params[0],
- .rhs = params[1],
- },
- }),
- else => {
- const span = try p.listToSpan(params);
- return p.addNode(.{
- .tag = if (comma) .builtin_call_comma else .builtin_call,
- .main_token = builtin_token,
- .data = .{
- .lhs = span.start,
- .rhs = span.end,
- },
- });
- },
- }
- }
-
- /// IfPrefix <- KEYWORD_if LPAREN Expr RPAREN PtrPayload?
- fn parseIf(p: *Parser, comptime bodyParseFn: fn (p: *Parser) Error!Node.Index) !Node.Index {
- const if_token = p.eatToken(.keyword_if) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const condition = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- _ = try p.parsePtrPayload();
-
- const then_expr = try bodyParseFn(p);
- assert(then_expr != 0);
-
- _ = p.eatToken(.keyword_else) orelse return p.addNode(.{
- .tag = .if_simple,
- .main_token = if_token,
- .data = .{
- .lhs = condition,
- .rhs = then_expr,
- },
- });
- _ = try p.parsePayload();
- const else_expr = try bodyParseFn(p);
- assert(then_expr != 0);
-
- return p.addNode(.{
- .tag = .@"if",
- .main_token = if_token,
- .data = .{
- .lhs = condition,
- .rhs = try p.addExtra(Node.If{
- .then_expr = then_expr,
- .else_expr = else_expr,
- }),
- },
- });
- }
-
- /// Skips over doc comment tokens. Returns the first one, if any.
- fn eatDocComments(p: *Parser) !?TokenIndex {
- if (p.eatToken(.doc_comment)) |tok| {
- var first_line = tok;
- if (tok > 0 and tokensOnSameLine(p, tok - 1, tok)) {
- try p.warnMsg(.{
- .tag = .same_line_doc_comment,
- .token = tok,
- });
- first_line = p.eatToken(.doc_comment) orelse return null;
- }
- while (p.eatToken(.doc_comment)) |_| {}
- return first_line;
- }
- return null;
- }
-
- fn tokensOnSameLine(p: *Parser, token1: TokenIndex, token2: TokenIndex) bool {
- return std.mem.indexOfScalar(u8, p.source[p.token_starts[token1]..p.token_starts[token2]], '\n') == null;
- }
-
- fn eatToken(p: *Parser, tag: Token.Tag) ?TokenIndex {
- return if (p.token_tags[p.tok_i] == tag) p.nextToken() else null;
- }
-
- fn assertToken(p: *Parser, tag: Token.Tag) TokenIndex {
- const token = p.nextToken();
- assert(p.token_tags[token] == tag);
- return token;
- }
-
- fn expectToken(p: *Parser, tag: Token.Tag) Error!TokenIndex {
- if (p.token_tags[p.tok_i] != tag) {
- return p.failMsg(.{
- .tag = .expected_token,
- .token = p.tok_i,
- .extra = .{ .expected_tag = tag },
- });
- }
- return p.nextToken();
- }
-
- fn expectSemicolon(p: *Parser, error_tag: AstError.Tag, recoverable: bool) Error!void {
- if (p.token_tags[p.tok_i] == .semicolon) {
- _ = p.nextToken();
- return;
- }
- try p.warn(error_tag);
- if (!recoverable) return error.ParseError;
- }
-
- fn nextToken(p: *Parser) TokenIndex {
- const result = p.tok_i;
- p.tok_i += 1;
- return result;
- }
-};
-
-test {
- _ = @import("parser_test.zig");
-}
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index 49b0715695..3c44322ccc 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -6073,7 +6073,7 @@ var fixed_buffer_mem: [100 * 1024]u8 = undefined;
fn testParse(source: [:0]const u8, allocator: mem.Allocator, anything_changed: *bool) ![]u8 {
const stderr = io.getStdErr().writer();
- var tree = try std.zig.parse(allocator, source);
+ var tree = try std.zig.Ast.parse(allocator, source, .zig);
defer tree.deinit(allocator);
for (tree.errors) |parse_error| {
@@ -6124,7 +6124,7 @@ fn testCanonical(source: [:0]const u8) !void {
const Error = std.zig.Ast.Error.Tag;
fn testError(source: [:0]const u8, expected_errors: []const Error) !void {
- var tree = try std.zig.parse(std.testing.allocator, source);
+ var tree = try std.zig.Ast.parse(std.testing.allocator, source, .zig);
defer tree.deinit(std.testing.allocator);
std.testing.expectEqual(expected_errors.len, tree.errors.len) catch |err| {
diff --git a/lib/std/zig/perf_test.zig b/lib/std/zig/perf_test.zig
index d3fc90eaea..58f7a67694 100644
--- a/lib/std/zig/perf_test.zig
+++ b/lib/std/zig/perf_test.zig
@@ -1,7 +1,6 @@
const std = @import("std");
const mem = std.mem;
const Tokenizer = std.zig.Tokenizer;
-const Parser = std.zig.Parser;
const io = std.io;
const fmtIntSizeBin = std.fmt.fmtIntSizeBin;
@@ -34,6 +33,6 @@ pub fn main() !void {
fn testOnce() usize {
var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var allocator = fixed_buf_alloc.allocator();
- _ = std.zig.parse(allocator, source) catch @panic("parse failure");
+ _ = std.zig.Ast.parse(allocator, source, .zig) catch @panic("parse failure");
return fixed_buf_alloc.end_index;
}
diff --git a/src/Module.zig b/src/Module.zig
index b395c0a950..3bb15e78c3 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -2057,7 +2057,7 @@ pub const File = struct {
if (file.tree_loaded) return &file.tree;
const source = try file.getSource(gpa);
- file.tree = try std.zig.parse(gpa, source.bytes);
+ file.tree = try Ast.parse(gpa, source.bytes, .zig);
file.tree_loaded = true;
return &file.tree;
}
@@ -3662,7 +3662,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
file.source = source;
file.source_loaded = true;
- file.tree = try std.zig.parse(gpa, source);
+ file.tree = try Ast.parse(gpa, source, .zig);
defer if (!file.tree_loaded) file.tree.deinit(gpa);
if (file.tree.errors.len != 0) {
@@ -3977,7 +3977,7 @@ pub fn populateBuiltinFile(mod: *Module) !void {
else => |e| return e,
}
- file.tree = try std.zig.parse(gpa, file.source);
+ file.tree = try Ast.parse(gpa, file.source, .zig);
file.tree_loaded = true;
assert(file.tree.errors.len == 0); // builtin.zig must parse
diff --git a/src/main.zig b/src/main.zig
index 72e7e094e6..06c36bad87 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -4361,7 +4361,7 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
};
defer gpa.free(source_code);
- var tree = std.zig.parse(gpa, source_code) catch |err| {
+ var tree = Ast.parse(gpa, source_code, .zig) catch |err| {
fatal("error parsing stdin: {}", .{err});
};
defer tree.deinit(gpa);
@@ -4566,7 +4566,7 @@ fn fmtPathFile(
// Add to set after no longer possible to get error.IsDir.
if (try fmt.seen.fetchPut(stat.inode, {})) |_| return;
- var tree = try std.zig.parse(fmt.gpa, source_code);
+ var tree = try Ast.parse(fmt.gpa, source_code, .zig);
defer tree.deinit(fmt.gpa);
try printErrsMsgToStdErr(fmt.gpa, fmt.arena, tree.errors, tree, file_path, fmt.color);
@@ -5312,7 +5312,7 @@ pub fn cmdAstCheck(
file.pkg = try Package.create(gpa, "root", null, file.sub_file_path);
defer file.pkg.destroy(gpa);
- file.tree = try std.zig.parse(gpa, file.source);
+ file.tree = try Ast.parse(gpa, file.source, .zig);
file.tree_loaded = true;
defer file.tree.deinit(gpa);
@@ -5438,7 +5438,7 @@ pub fn cmdChangelist(
file.source = source;
file.source_loaded = true;
- file.tree = try std.zig.parse(gpa, file.source);
+ file.tree = try Ast.parse(gpa, file.source, .zig);
file.tree_loaded = true;
defer file.tree.deinit(gpa);
@@ -5476,7 +5476,7 @@ pub fn cmdChangelist(
if (new_amt != new_stat.size)
return error.UnexpectedEndOfFile;
- var new_tree = try std.zig.parse(gpa, new_source);
+ var new_tree = try Ast.parse(gpa, new_source, .zig);
defer new_tree.deinit(gpa);
try printErrsMsgToStdErr(gpa, arena, new_tree.errors, new_tree, new_source_file, .auto);
--
cgit v1.2.3
From 81c27c74bc8ccc8087b75c5d4eb1b350ad907cd0 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 2 Feb 2023 23:45:23 -0700
Subject: use build.zig.zon instead of build.zig.ini for the manifest file
* improve error message when build manifest file is missing
* update std.zig.Ast to support ZON
* Compilation.AllErrors.Message: make the notes field a const slice
* move build manifest parsing logic into src/Manifest.zig and add more
checks, and make the checks integrate into the standard error
reporting code so that reported errors look sexy
closes #14290
---
lib/std/Build.zig | 4 +-
lib/std/array_hash_map.zig | 3 +-
lib/std/zig/Ast.zig | 4 +
lib/std/zig/Parse.zig | 13 +-
src/Compilation.zig | 2 +-
src/Manifest.zig | 499 +++++++++++++++++++++++++++++++++++++++++++++
src/Package.zig | 347 ++++++++++++-------------------
src/main.zig | 17 +-
8 files changed, 665 insertions(+), 224 deletions(-)
create mode 100644 src/Manifest.zig
(limited to 'src')
diff --git a/lib/std/Build.zig b/lib/std/Build.zig
index d695637fc3..6846007443 100644
--- a/lib/std/Build.zig
+++ b/lib/std/Build.zig
@@ -1496,8 +1496,8 @@ pub fn dependency(b: *Build, name: []const u8, args: anytype) *Dependency {
}
}
- const full_path = b.pathFromRoot("build.zig.ini");
- std.debug.print("no dependency named '{s}' in '{s}'\n", .{ name, full_path });
+ const full_path = b.pathFromRoot("build.zig.zon");
+ std.debug.print("no dependency named '{s}' in '{s}'. All packages used in build.zig must be declared in this file.\n", .{ name, full_path });
std.process.exit(1);
}
diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig
index cf04a54116..57821d1b51 100644
--- a/lib/std/array_hash_map.zig
+++ b/lib/std/array_hash_map.zig
@@ -1145,7 +1145,8 @@ pub fn ArrayHashMapUnmanaged(
}
/// Create a copy of the hash map which can be modified separately.
- /// The copy uses the same context and allocator as this instance.
+ /// The copy uses the same context as this instance, but is allocated
+ /// with the provided allocator.
pub fn clone(self: Self, allocator: Allocator) !Self {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead.");
diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig
index a9a02606eb..80dda052ab 100644
--- a/lib/std/zig/Ast.zig
+++ b/lib/std/zig/Ast.zig
@@ -1,4 +1,8 @@
//! Abstract Syntax Tree for Zig source code.
+//! For Zig syntax, the root node is at nodes[0] and contains the list of
+//! sub-nodes.
+//! For Zon syntax, the root node is at nodes[0] and contains lhs as the node
+//! index of the main expression.
/// Reference to externally-owned data.
source: [:0]const u8,
diff --git a/lib/std/zig/Parse.zig b/lib/std/zig/Parse.zig
index f599a08f55..d498366b34 100644
--- a/lib/std/zig/Parse.zig
+++ b/lib/std/zig/Parse.zig
@@ -181,17 +181,26 @@ pub fn parseRoot(p: *Parse) !void {
/// TODO: set a flag in Parse struct, and honor that flag
/// by emitting compilation errors when non-zon nodes are encountered.
pub fn parseZon(p: *Parse) !void {
- const node_index = p.parseExpr() catch |err| switch (err) {
+ // We must use index 0 so that 0 can be used as null elsewhere.
+ p.nodes.appendAssumeCapacity(.{
+ .tag = .root,
+ .main_token = 0,
+ .data = undefined,
+ });
+ const node_index = p.expectExpr() catch |err| switch (err) {
error.ParseError => {
assert(p.errors.items.len > 0);
return;
},
else => |e| return e,
};
- assert(node_index == 0);
if (p.token_tags[p.tok_i] != .eof) {
try p.warnExpected(.eof);
}
+ p.nodes.items(.data)[0] = .{
+ .lhs = node_index,
+ .rhs = undefined,
+ };
}
/// ContainerMembers <- ContainerDeclarations (ContainerField COMMA)* (ContainerField / ContainerDeclarations)
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 7d42d3b610..e09b8f18ab 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -385,7 +385,7 @@ pub const AllErrors = struct {
count: u32 = 1,
/// Does not include the trailing newline.
source_line: ?[]const u8,
- notes: []Message = &.{},
+ notes: []const Message = &.{},
reference_trace: []Message = &.{},
/// Splits the error message up into lines to properly indent them
diff --git a/src/Manifest.zig b/src/Manifest.zig
new file mode 100644
index 0000000000..c3f77aec98
--- /dev/null
+++ b/src/Manifest.zig
@@ -0,0 +1,499 @@
+pub const basename = "build.zig.zon";
+pub const Hash = std.crypto.hash.sha2.Sha256;
+
+pub const Dependency = struct {
+ url: []const u8,
+ url_tok: Ast.TokenIndex,
+ hash: ?[]const u8,
+ hash_tok: Ast.TokenIndex,
+};
+
+pub const ErrorMessage = struct {
+ msg: []const u8,
+ tok: Ast.TokenIndex,
+ off: u32,
+};
+
+pub const MultihashFunction = enum(u16) {
+ identity = 0x00,
+ sha1 = 0x11,
+ @"sha2-256" = 0x12,
+ @"sha2-512" = 0x13,
+ @"sha3-512" = 0x14,
+ @"sha3-384" = 0x15,
+ @"sha3-256" = 0x16,
+ @"sha3-224" = 0x17,
+ @"sha2-384" = 0x20,
+ @"sha2-256-trunc254-padded" = 0x1012,
+ @"sha2-224" = 0x1013,
+ @"sha2-512-224" = 0x1014,
+ @"sha2-512-256" = 0x1015,
+ @"blake2b-256" = 0xb220,
+ _,
+};
+
+pub const multihash_function: MultihashFunction = switch (Hash) {
+ std.crypto.hash.sha2.Sha256 => .@"sha2-256",
+ else => @compileError("unreachable"),
+};
+comptime {
+ // We avoid unnecessary uleb128 code in hexDigest by asserting here the
+ // values are small enough to be contained in the one-byte encoding.
+ assert(@enumToInt(multihash_function) < 127);
+ assert(Hash.digest_length < 127);
+}
+pub const multihash_len = 1 + 1 + Hash.digest_length;
+
+name: []const u8,
+version: std.SemanticVersion,
+dependencies: std.StringArrayHashMapUnmanaged(Dependency),
+
+errors: []ErrorMessage,
+arena_state: std.heap.ArenaAllocator.State,
+
+pub const Error = Allocator.Error;
+
+pub fn parse(gpa: Allocator, ast: std.zig.Ast) Error!Manifest {
+ const node_tags = ast.nodes.items(.tag);
+ const node_datas = ast.nodes.items(.data);
+ assert(node_tags[0] == .root);
+ const main_node_index = node_datas[0].lhs;
+
+ var arena_instance = std.heap.ArenaAllocator.init(gpa);
+ errdefer arena_instance.deinit();
+
+ var p: Parse = .{
+ .gpa = gpa,
+ .ast = ast,
+ .arena = arena_instance.allocator(),
+ .errors = .{},
+
+ .name = undefined,
+ .version = undefined,
+ .dependencies = .{},
+ .buf = .{},
+ };
+ defer p.buf.deinit(gpa);
+ defer p.errors.deinit(gpa);
+ defer p.dependencies.deinit(gpa);
+
+ p.parseRoot(main_node_index) catch |err| switch (err) {
+ error.ParseFailure => assert(p.errors.items.len > 0),
+ else => |e| return e,
+ };
+
+ return .{
+ .name = p.name,
+ .version = p.version,
+ .dependencies = try p.dependencies.clone(p.arena),
+ .errors = try p.arena.dupe(ErrorMessage, p.errors.items),
+ .arena_state = arena_instance.state,
+ };
+}
+
+pub fn deinit(man: *Manifest, gpa: Allocator) void {
+ man.arena_state.promote(gpa).deinit();
+ man.* = undefined;
+}
+
+const hex_charset = "0123456789abcdef";
+
+pub fn hex64(x: u64) [16]u8 {
+ var result: [16]u8 = undefined;
+ var i: usize = 0;
+ while (i < 8) : (i += 1) {
+ const byte = @truncate(u8, x >> @intCast(u6, 8 * i));
+ result[i * 2 + 0] = hex_charset[byte >> 4];
+ result[i * 2 + 1] = hex_charset[byte & 15];
+ }
+ return result;
+}
+
+test hex64 {
+ const s = "[" ++ hex64(0x12345678_abcdef00) ++ "]";
+ try std.testing.expectEqualStrings("[00efcdab78563412]", s);
+}
+
+pub fn hexDigest(digest: [Hash.digest_length]u8) [multihash_len * 2]u8 {
+ var result: [multihash_len * 2]u8 = undefined;
+
+ result[0] = hex_charset[@enumToInt(multihash_function) >> 4];
+ result[1] = hex_charset[@enumToInt(multihash_function) & 15];
+
+ result[2] = hex_charset[Hash.digest_length >> 4];
+ result[3] = hex_charset[Hash.digest_length & 15];
+
+ for (digest) |byte, i| {
+ result[4 + i * 2] = hex_charset[byte >> 4];
+ result[5 + i * 2] = hex_charset[byte & 15];
+ }
+ return result;
+}
+
+const Parse = struct {
+ gpa: Allocator,
+ ast: std.zig.Ast,
+ arena: Allocator,
+ buf: std.ArrayListUnmanaged(u8),
+ errors: std.ArrayListUnmanaged(ErrorMessage),
+
+ name: []const u8,
+ version: std.SemanticVersion,
+ dependencies: std.StringArrayHashMapUnmanaged(Dependency),
+
+ const InnerError = error{ ParseFailure, OutOfMemory };
+
+ fn parseRoot(p: *Parse, node: Ast.Node.Index) !void {
+ const ast = p.ast;
+ const main_tokens = ast.nodes.items(.main_token);
+ const main_token = main_tokens[node];
+
+ var buf: [2]Ast.Node.Index = undefined;
+ const struct_init = ast.fullStructInit(&buf, node) orelse {
+ return fail(p, main_token, "expected top level expression to be a struct", .{});
+ };
+
+ var have_name = false;
+ var have_version = false;
+
+ for (struct_init.ast.fields) |field_init| {
+ const name_token = ast.firstToken(field_init) - 2;
+ const field_name = try identifierTokenString(p, name_token);
+ // We could get fancy with reflection and comptime logic here but doing
+ // things manually provides an opportunity to do any additional verification
+ // that is desirable on a per-field basis.
+ if (mem.eql(u8, field_name, "dependencies")) {
+ try parseDependencies(p, field_init);
+ } else if (mem.eql(u8, field_name, "name")) {
+ p.name = try parseString(p, field_init);
+ have_name = true;
+ } else if (mem.eql(u8, field_name, "version")) {
+ const version_text = try parseString(p, field_init);
+ p.version = std.SemanticVersion.parse(version_text) catch |err| v: {
+ try appendError(p, main_tokens[field_init], "unable to parse semantic version: {s}", .{@errorName(err)});
+ break :v undefined;
+ };
+ have_version = true;
+ } else {
+ // Ignore unknown fields so that we can add fields in future zig
+ // versions without breaking older zig versions.
+ }
+ }
+
+ if (!have_name) {
+ try appendError(p, main_token, "missing top-level 'name' field", .{});
+ }
+
+ if (!have_version) {
+ try appendError(p, main_token, "missing top-level 'version' field", .{});
+ }
+ }
+
+ fn parseDependencies(p: *Parse, node: Ast.Node.Index) !void {
+ const ast = p.ast;
+ const main_tokens = ast.nodes.items(.main_token);
+
+ var buf: [2]Ast.Node.Index = undefined;
+ const struct_init = ast.fullStructInit(&buf, node) orelse {
+ const tok = main_tokens[node];
+ return fail(p, tok, "expected dependencies expression to be a struct", .{});
+ };
+
+ for (struct_init.ast.fields) |field_init| {
+ const name_token = ast.firstToken(field_init) - 2;
+ const dep_name = try identifierTokenString(p, name_token);
+ const dep = try parseDependency(p, field_init);
+ try p.dependencies.put(p.gpa, dep_name, dep);
+ }
+ }
+
+ fn parseDependency(p: *Parse, node: Ast.Node.Index) !Dependency {
+ const ast = p.ast;
+ const main_tokens = ast.nodes.items(.main_token);
+
+ var buf: [2]Ast.Node.Index = undefined;
+ const struct_init = ast.fullStructInit(&buf, node) orelse {
+ const tok = main_tokens[node];
+ return fail(p, tok, "expected dependency expression to be a struct", .{});
+ };
+
+ var dep: Dependency = .{
+ .url = undefined,
+ .url_tok = undefined,
+ .hash = null,
+ .hash_tok = undefined,
+ };
+ var have_url = false;
+
+ for (struct_init.ast.fields) |field_init| {
+ const name_token = ast.firstToken(field_init) - 2;
+ const field_name = try identifierTokenString(p, name_token);
+ // We could get fancy with reflection and comptime logic here but doing
+ // things manually provides an opportunity to do any additional verification
+ // that is desirable on a per-field basis.
+ if (mem.eql(u8, field_name, "url")) {
+ dep.url = parseString(p, field_init) catch |err| switch (err) {
+ error.ParseFailure => continue,
+ else => |e| return e,
+ };
+ dep.url_tok = main_tokens[field_init];
+ have_url = true;
+ } else if (mem.eql(u8, field_name, "hash")) {
+ dep.hash = parseHash(p, field_init) catch |err| switch (err) {
+ error.ParseFailure => continue,
+ else => |e| return e,
+ };
+ dep.hash_tok = main_tokens[field_init];
+ } else {
+ // Ignore unknown fields so that we can add fields in future zig
+ // versions without breaking older zig versions.
+ }
+ }
+
+ if (!have_url) {
+ try appendError(p, main_tokens[node], "dependency is missing 'url' field", .{});
+ }
+
+ return dep;
+ }
+
+ fn parseString(p: *Parse, node: Ast.Node.Index) ![]const u8 {
+ const ast = p.ast;
+ const node_tags = ast.nodes.items(.tag);
+ const main_tokens = ast.nodes.items(.main_token);
+ if (node_tags[node] != .string_literal) {
+ return fail(p, main_tokens[node], "expected string literal", .{});
+ }
+ const str_lit_token = main_tokens[node];
+ const token_bytes = ast.tokenSlice(str_lit_token);
+ p.buf.clearRetainingCapacity();
+ try parseStrLit(p, str_lit_token, &p.buf, token_bytes, 0);
+ const duped = try p.arena.dupe(u8, p.buf.items);
+ return duped;
+ }
+
+ fn parseHash(p: *Parse, node: Ast.Node.Index) ![]const u8 {
+ const ast = p.ast;
+ const main_tokens = ast.nodes.items(.main_token);
+ const tok = main_tokens[node];
+ const h = try parseString(p, node);
+
+ if (h.len >= 2) {
+ const their_multihash_func = std.fmt.parseInt(u8, h[0..2], 16) catch |err| {
+ return fail(p, tok, "invalid multihash value: unable to parse hash function: {s}", .{
+ @errorName(err),
+ });
+ };
+ if (@intToEnum(MultihashFunction, their_multihash_func) != multihash_function) {
+ return fail(p, tok, "unsupported hash function: only sha2-256 is supported", .{});
+ }
+ }
+
+ const hex_multihash_len = 2 * Manifest.multihash_len;
+ if (h.len != hex_multihash_len) {
+ return fail(p, tok, "wrong hash size. expected: {d}, found: {d}", .{
+ hex_multihash_len, h.len,
+ });
+ }
+
+ return h;
+ }
+
+ /// TODO: try to DRY this with AstGen.identifierTokenString
+ fn identifierTokenString(p: *Parse, token: Ast.TokenIndex) InnerError![]const u8 {
+ const ast = p.ast;
+ const token_tags = ast.tokens.items(.tag);
+ assert(token_tags[token] == .identifier);
+ const ident_name = ast.tokenSlice(token);
+ if (!mem.startsWith(u8, ident_name, "@")) {
+ return ident_name;
+ }
+ p.buf.clearRetainingCapacity();
+ try parseStrLit(p, token, &p.buf, ident_name, 1);
+ const duped = try p.arena.dupe(u8, p.buf.items);
+ return duped;
+ }
+
+ /// TODO: try to DRY this with AstGen.parseStrLit
+ fn parseStrLit(
+ p: *Parse,
+ token: Ast.TokenIndex,
+ buf: *std.ArrayListUnmanaged(u8),
+ bytes: []const u8,
+ offset: u32,
+ ) InnerError!void {
+ const raw_string = bytes[offset..];
+ var buf_managed = buf.toManaged(p.gpa);
+ const result = std.zig.string_literal.parseWrite(buf_managed.writer(), raw_string);
+ buf.* = buf_managed.moveToUnmanaged();
+ switch (try result) {
+ .success => {},
+ .failure => |err| try p.appendStrLitError(err, token, bytes, offset),
+ }
+ }
+
+ /// TODO: try to DRY this with AstGen.failWithStrLitError
+ fn appendStrLitError(
+ p: *Parse,
+ err: std.zig.string_literal.Error,
+ token: Ast.TokenIndex,
+ bytes: []const u8,
+ offset: u32,
+ ) Allocator.Error!void {
+ const raw_string = bytes[offset..];
+ switch (err) {
+ .invalid_escape_character => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "invalid escape character: '{c}'",
+ .{raw_string[bad_index]},
+ );
+ },
+ .expected_hex_digit => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "expected hex digit, found '{c}'",
+ .{raw_string[bad_index]},
+ );
+ },
+ .empty_unicode_escape_sequence => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "empty unicode escape sequence",
+ .{},
+ );
+ },
+ .expected_hex_digit_or_rbrace => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "expected hex digit or '}}', found '{c}'",
+ .{raw_string[bad_index]},
+ );
+ },
+ .invalid_unicode_codepoint => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "unicode escape does not correspond to a valid codepoint",
+ .{},
+ );
+ },
+ .expected_lbrace => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "expected '{{', found '{c}",
+ .{raw_string[bad_index]},
+ );
+ },
+ .expected_rbrace => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "expected '}}', found '{c}",
+ .{raw_string[bad_index]},
+ );
+ },
+ .expected_single_quote => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "expected single quote ('), found '{c}",
+ .{raw_string[bad_index]},
+ );
+ },
+ .invalid_character => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "invalid byte in string or character literal: '{c}'",
+ .{raw_string[bad_index]},
+ );
+ },
+ }
+ }
+
+ fn fail(
+ p: *Parse,
+ tok: Ast.TokenIndex,
+ comptime fmt: []const u8,
+ args: anytype,
+ ) InnerError {
+ try appendError(p, tok, fmt, args);
+ return error.ParseFailure;
+ }
+
+ fn appendError(p: *Parse, tok: Ast.TokenIndex, comptime fmt: []const u8, args: anytype) !void {
+ return appendErrorOff(p, tok, 0, fmt, args);
+ }
+
+ fn appendErrorOff(
+ p: *Parse,
+ tok: Ast.TokenIndex,
+ byte_offset: u32,
+ comptime fmt: []const u8,
+ args: anytype,
+ ) Allocator.Error!void {
+ try p.errors.append(p.gpa, .{
+ .msg = try std.fmt.allocPrint(p.arena, fmt, args),
+ .tok = tok,
+ .off = byte_offset,
+ });
+ }
+};
+
+const Manifest = @This();
+const std = @import("std");
+const mem = std.mem;
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+const Ast = std.zig.Ast;
+const testing = std.testing;
+
+test "basic" {
+ const gpa = testing.allocator;
+
+ const example =
+ \\.{
+ \\ .name = "foo",
+ \\ .version = "3.2.1",
+ \\ .dependencies = .{
+ \\ .bar = .{
+ \\ .url = "https://example.com/baz.tar.gz",
+ \\ .hash = "1220f1b680b6065fcfc94fe777f22e73bcb7e2767e5f4d99d4255fe76ded69c7a35f",
+ \\ },
+ \\ },
+ \\}
+ ;
+
+ var ast = try std.zig.Ast.parse(gpa, example, .zon);
+ defer ast.deinit(gpa);
+
+ try testing.expect(ast.errors.len == 0);
+
+ var manifest = try Manifest.parse(gpa, ast);
+ defer manifest.deinit(gpa);
+
+ try testing.expectEqualStrings("foo", manifest.name);
+
+ try testing.expectEqual(@as(std.SemanticVersion, .{
+ .major = 3,
+ .minor = 2,
+ .patch = 1,
+ }), manifest.version);
+
+ try testing.expect(manifest.dependencies.count() == 1);
+ try testing.expectEqualStrings("bar", manifest.dependencies.keys()[0]);
+ try testing.expectEqualStrings(
+ "https://example.com/baz.tar.gz",
+ manifest.dependencies.values()[0].url,
+ );
+ try testing.expectEqualStrings(
+ "1220f1b680b6065fcfc94fe777f22e73bcb7e2767e5f4d99d4255fe76ded69c7a35f",
+ manifest.dependencies.values()[0].hash orelse return error.TestFailed,
+ );
+}
diff --git a/src/Package.zig b/src/Package.zig
index 35b1ff5056..401eef2121 100644
--- a/src/Package.zig
+++ b/src/Package.zig
@@ -6,8 +6,8 @@ const fs = std.fs;
const mem = std.mem;
const Allocator = mem.Allocator;
const assert = std.debug.assert;
-const Hash = std.crypto.hash.sha2.Sha256;
const log = std.log.scoped(.package);
+const main = @import("main.zig");
const Compilation = @import("Compilation.zig");
const Module = @import("Module.zig");
@@ -15,6 +15,7 @@ const ThreadPool = @import("ThreadPool.zig");
const WaitGroup = @import("WaitGroup.zig");
const Cache = @import("Cache.zig");
const build_options = @import("build_options");
+const Manifest = @import("Manifest.zig");
pub const Table = std.StringHashMapUnmanaged(*Package);
@@ -141,10 +142,10 @@ pub fn addAndAdopt(parent: *Package, gpa: Allocator, child: *Package) !void {
}
pub const build_zig_basename = "build.zig";
-pub const ini_basename = build_zig_basename ++ ".ini";
pub fn fetchAndAddDependencies(
pkg: *Package,
+ arena: Allocator,
thread_pool: *ThreadPool,
http_client: *std.http.Client,
directory: Compilation.Directory,
@@ -153,89 +154,77 @@ pub fn fetchAndAddDependencies(
dependencies_source: *std.ArrayList(u8),
build_roots_source: *std.ArrayList(u8),
name_prefix: []const u8,
+ color: main.Color,
) !void {
const max_bytes = 10 * 1024 * 1024;
const gpa = thread_pool.allocator;
- const build_zig_ini = directory.handle.readFileAlloc(gpa, ini_basename, max_bytes) catch |err| switch (err) {
+ const build_zig_zon_bytes = directory.handle.readFileAllocOptions(
+ arena,
+ Manifest.basename,
+ max_bytes,
+ null,
+ 1,
+ 0,
+ ) catch |err| switch (err) {
error.FileNotFound => {
// Handle the same as no dependencies.
return;
},
else => |e| return e,
};
- defer gpa.free(build_zig_ini);
- const ini: std.Ini = .{ .bytes = build_zig_ini };
- var any_error = false;
- var it = ini.iterateSection("\n[dependency]\n");
- while (it.next()) |dep| {
- var line_it = mem.split(u8, dep, "\n");
- var opt_name: ?[]const u8 = null;
- var opt_url: ?[]const u8 = null;
- var expected_hash: ?[]const u8 = null;
- while (line_it.next()) |kv| {
- const eq_pos = mem.indexOfScalar(u8, kv, '=') orelse continue;
- const key = kv[0..eq_pos];
- const value = kv[eq_pos + 1 ..];
- if (mem.eql(u8, key, "name")) {
- opt_name = value;
- } else if (mem.eql(u8, key, "url")) {
- opt_url = value;
- } else if (mem.eql(u8, key, "hash")) {
- expected_hash = value;
- } else {
- const loc = std.zig.findLineColumn(ini.bytes, @ptrToInt(key.ptr) - @ptrToInt(ini.bytes.ptr));
- std.log.warn("{s}/{s}:{d}:{d} unrecognized key: '{s}'", .{
- directory.path orelse ".",
- "build.zig.ini",
- loc.line,
- loc.column,
- key,
- });
- }
- }
+ var ast = try std.zig.Ast.parse(gpa, build_zig_zon_bytes, .zon);
+ defer ast.deinit(gpa);
- const name = opt_name orelse {
- const loc = std.zig.findLineColumn(ini.bytes, @ptrToInt(dep.ptr) - @ptrToInt(ini.bytes.ptr));
- std.log.err("{s}/{s}:{d}:{d} missing key: 'name'", .{
- directory.path orelse ".",
- "build.zig.ini",
- loc.line,
- loc.column,
- });
- any_error = true;
- continue;
- };
+ if (ast.errors.len > 0) {
+ const file_path = try directory.join(arena, &.{Manifest.basename});
+ try main.printErrsMsgToStdErr(gpa, arena, ast, file_path, color);
+ return error.PackageFetchFailed;
+ }
- const url = opt_url orelse {
- const loc = std.zig.findLineColumn(ini.bytes, @ptrToInt(dep.ptr) - @ptrToInt(ini.bytes.ptr));
- std.log.err("{s}/{s}:{d}:{d} missing key: 'name'", .{
- directory.path orelse ".",
- "build.zig.ini",
- loc.line,
- loc.column,
- });
- any_error = true;
- continue;
+ var manifest = try Manifest.parse(gpa, ast);
+ defer manifest.deinit(gpa);
+
+ if (manifest.errors.len > 0) {
+ const ttyconf: std.debug.TTY.Config = switch (color) {
+ .auto => std.debug.detectTTYConfig(std.io.getStdErr()),
+ .on => .escape_codes,
+ .off => .no_color,
};
+ const file_path = try directory.join(arena, &.{Manifest.basename});
+ for (manifest.errors) |msg| {
+ Report.renderErrorMessage(ast, file_path, ttyconf, msg, &.{});
+ }
+ return error.PackageFetchFailed;
+ }
+
+ const report: Report = .{
+ .ast = &ast,
+ .directory = directory,
+ .color = color,
+ .arena = arena,
+ };
+
+ var any_error = false;
+ const deps_list = manifest.dependencies.values();
+ for (manifest.dependencies.keys()) |name, i| {
+ const dep = deps_list[i];
- const sub_prefix = try std.fmt.allocPrint(gpa, "{s}{s}.", .{ name_prefix, name });
- defer gpa.free(sub_prefix);
+ const sub_prefix = try std.fmt.allocPrint(arena, "{s}{s}.", .{ name_prefix, name });
const fqn = sub_prefix[0 .. sub_prefix.len - 1];
const sub_pkg = try fetchAndUnpack(
thread_pool,
http_client,
global_cache_directory,
- url,
- expected_hash,
- ini,
- directory,
+ dep,
+ report,
build_roots_source,
fqn,
);
try pkg.fetchAndAddDependencies(
+ arena,
thread_pool,
http_client,
sub_pkg.root_src_directory,
@@ -244,6 +233,7 @@ pub fn fetchAndAddDependencies(
dependencies_source,
build_roots_source,
sub_prefix,
+ color,
);
try addAndAdopt(pkg, gpa, sub_pkg);
@@ -253,7 +243,7 @@ pub fn fetchAndAddDependencies(
});
}
- if (any_error) return error.InvalidBuildZigIniFile;
+ if (any_error) return error.InvalidBuildManifestFile;
}
pub fn createFilePkg(
@@ -264,7 +254,7 @@ pub fn createFilePkg(
contents: []const u8,
) !*Package {
const rand_int = std.crypto.random.int(u64);
- const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ hex64(rand_int);
+ const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ Manifest.hex64(rand_int);
{
var tmp_dir = try cache_directory.handle.makeOpenPath(tmp_dir_sub_path, .{});
defer tmp_dir.close();
@@ -282,14 +272,73 @@ pub fn createFilePkg(
return createWithDir(gpa, name, cache_directory, o_dir_sub_path, basename);
}
+const Report = struct {
+ ast: *const std.zig.Ast,
+ directory: Compilation.Directory,
+ color: main.Color,
+ arena: Allocator,
+
+ fn fail(
+ report: Report,
+ tok: std.zig.Ast.TokenIndex,
+ comptime fmt_string: []const u8,
+ fmt_args: anytype,
+ ) error{ PackageFetchFailed, OutOfMemory } {
+ return failWithNotes(report, &.{}, tok, fmt_string, fmt_args);
+ }
+
+ fn failWithNotes(
+ report: Report,
+ notes: []const Compilation.AllErrors.Message,
+ tok: std.zig.Ast.TokenIndex,
+ comptime fmt_string: []const u8,
+ fmt_args: anytype,
+ ) error{ PackageFetchFailed, OutOfMemory } {
+ const ttyconf: std.debug.TTY.Config = switch (report.color) {
+ .auto => std.debug.detectTTYConfig(std.io.getStdErr()),
+ .on => .escape_codes,
+ .off => .no_color,
+ };
+ const file_path = try report.directory.join(report.arena, &.{Manifest.basename});
+ renderErrorMessage(report.ast.*, file_path, ttyconf, .{
+ .tok = tok,
+ .off = 0,
+ .msg = try std.fmt.allocPrint(report.arena, fmt_string, fmt_args),
+ }, notes);
+ return error.PackageFetchFailed;
+ }
+
+ fn renderErrorMessage(
+ ast: std.zig.Ast,
+ file_path: []const u8,
+ ttyconf: std.debug.TTY.Config,
+ msg: Manifest.ErrorMessage,
+ notes: []const Compilation.AllErrors.Message,
+ ) void {
+ const token_starts = ast.tokens.items(.start);
+ const start_loc = ast.tokenLocation(0, msg.tok);
+ Compilation.AllErrors.Message.renderToStdErr(.{ .src = .{
+ .msg = msg.msg,
+ .src_path = file_path,
+ .line = @intCast(u32, start_loc.line),
+ .column = @intCast(u32, start_loc.column),
+ .span = .{
+ .start = token_starts[msg.tok],
+ .end = @intCast(u32, token_starts[msg.tok] + ast.tokenSlice(msg.tok).len),
+ .main = token_starts[msg.tok] + msg.off,
+ },
+ .source_line = ast.source[start_loc.line_start..start_loc.line_end],
+ .notes = notes,
+ } }, ttyconf);
+ }
+};
+
fn fetchAndUnpack(
thread_pool: *ThreadPool,
http_client: *std.http.Client,
global_cache_directory: Compilation.Directory,
- url: []const u8,
- expected_hash: ?[]const u8,
- ini: std.Ini,
- comp_directory: Compilation.Directory,
+ dep: Manifest.Dependency,
+ report: Report,
build_roots_source: *std.ArrayList(u8),
fqn: []const u8,
) !*Package {
@@ -298,37 +347,8 @@ fn fetchAndUnpack(
// Check if the expected_hash is already present in the global package
// cache, and thereby avoid both fetching and unpacking.
- if (expected_hash) |h| cached: {
- const hex_multihash_len = 2 * multihash_len;
- if (h.len >= 2) {
- const their_multihash_func = std.fmt.parseInt(u8, h[0..2], 16) catch |err| {
- return reportError(
- ini,
- comp_directory,
- h.ptr,
- "invalid multihash value: unable to parse hash function: {s}",
- .{@errorName(err)},
- );
- };
- if (@intToEnum(MultihashFunction, their_multihash_func) != multihash_function) {
- return reportError(
- ini,
- comp_directory,
- h.ptr,
- "unsupported hash function: only sha2-256 is supported",
- .{},
- );
- }
- }
- if (h.len != hex_multihash_len) {
- return reportError(
- ini,
- comp_directory,
- h.ptr,
- "wrong hash size. expected: {d}, found: {d}",
- .{ hex_multihash_len, h.len },
- );
- }
+ if (dep.hash) |h| cached: {
+ const hex_multihash_len = 2 * Manifest.multihash_len;
const hex_digest = h[0..hex_multihash_len];
const pkg_dir_sub_path = "p" ++ s ++ hex_digest;
var pkg_dir = global_cache_directory.handle.openDir(pkg_dir_sub_path, .{}) catch |err| switch (err) {
@@ -366,10 +386,10 @@ fn fetchAndUnpack(
return ptr;
}
- const uri = try std.Uri.parse(url);
+ const uri = try std.Uri.parse(dep.url);
const rand_int = std.crypto.random.int(u64);
- const tmp_dir_sub_path = "tmp" ++ s ++ hex64(rand_int);
+ const tmp_dir_sub_path = "tmp" ++ s ++ Manifest.hex64(rand_int);
const actual_hash = a: {
var tmp_directory: Compilation.Directory = d: {
@@ -398,13 +418,9 @@ fn fetchAndUnpack(
// by default, so the same logic applies for buffering the reader as for gzip.
try unpackTarball(gpa, &req, tmp_directory.handle, std.compress.xz);
} else {
- return reportError(
- ini,
- comp_directory,
- uri.path.ptr,
- "unknown file extension for path '{s}'",
- .{uri.path},
- );
+ return report.fail(dep.url_tok, "unknown file extension for path '{s}'", .{
+ uri.path,
+ });
}
// TODO: delete files not included in the package prior to computing the package hash.
@@ -415,28 +431,21 @@ fn fetchAndUnpack(
break :a try computePackageHash(thread_pool, .{ .dir = tmp_directory.handle });
};
- const pkg_dir_sub_path = "p" ++ s ++ hexDigest(actual_hash);
+ const pkg_dir_sub_path = "p" ++ s ++ Manifest.hexDigest(actual_hash);
try renameTmpIntoCache(global_cache_directory.handle, tmp_dir_sub_path, pkg_dir_sub_path);
- const actual_hex = hexDigest(actual_hash);
- if (expected_hash) |h| {
+ const actual_hex = Manifest.hexDigest(actual_hash);
+ if (dep.hash) |h| {
if (!mem.eql(u8, h, &actual_hex)) {
- return reportError(
- ini,
- comp_directory,
- h.ptr,
- "hash mismatch: expected: {s}, found: {s}",
- .{ h, actual_hex },
- );
+ return report.fail(dep.hash_tok, "hash mismatch: expected: {s}, found: {s}", .{
+ h, actual_hex,
+ });
}
} else {
- return reportError(
- ini,
- comp_directory,
- url.ptr,
- "url field is missing corresponding hash field: hash={s}",
- .{&actual_hex},
- );
+ const notes: [1]Compilation.AllErrors.Message = .{.{ .plain = .{
+ .msg = try std.fmt.allocPrint(report.arena, "expected .hash = \"{s}\",", .{&actual_hex}),
+ } }};
+ return report.failWithNotes(¬es, dep.url_tok, "url field is missing corresponding hash field", .{});
}
const build_root = try global_cache_directory.join(gpa, &.{pkg_dir_sub_path});
@@ -471,29 +480,9 @@ fn unpackTarball(
});
}
-fn reportError(
- ini: std.Ini,
- comp_directory: Compilation.Directory,
- src_ptr: [*]const u8,
- comptime fmt_string: []const u8,
- fmt_args: anytype,
-) error{PackageFetchFailed} {
- const loc = std.zig.findLineColumn(ini.bytes, @ptrToInt(src_ptr) - @ptrToInt(ini.bytes.ptr));
- if (comp_directory.path) |p| {
- std.debug.print("{s}{c}{s}:{d}:{d}: error: " ++ fmt_string ++ "\n", .{
- p, fs.path.sep, ini_basename, loc.line + 1, loc.column + 1,
- } ++ fmt_args);
- } else {
- std.debug.print("{s}:{d}:{d}: error: " ++ fmt_string ++ "\n", .{
- ini_basename, loc.line + 1, loc.column + 1,
- } ++ fmt_args);
- }
- return error.PackageFetchFailed;
-}
-
const HashedFile = struct {
path: []const u8,
- hash: [Hash.digest_length]u8,
+ hash: [Manifest.Hash.digest_length]u8,
failure: Error!void,
const Error = fs.File.OpenError || fs.File.ReadError || fs.File.StatError;
@@ -507,7 +496,7 @@ const HashedFile = struct {
fn computePackageHash(
thread_pool: *ThreadPool,
pkg_dir: fs.IterableDir,
-) ![Hash.digest_length]u8 {
+) ![Manifest.Hash.digest_length]u8 {
const gpa = thread_pool.allocator;
// We'll use an arena allocator for the path name strings since they all
@@ -550,7 +539,7 @@ fn computePackageHash(
std.sort.sort(*HashedFile, all_files.items, {}, HashedFile.lessThan);
- var hasher = Hash.init(.{});
+ var hasher = Manifest.Hash.init(.{});
var any_failures = false;
for (all_files.items) |hashed_file| {
hashed_file.failure catch |err| {
@@ -571,7 +560,7 @@ fn workerHashFile(dir: fs.Dir, hashed_file: *HashedFile, wg: *WaitGroup) void {
fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
var buf: [8000]u8 = undefined;
var file = try dir.openFile(hashed_file.path, .{});
- var hasher = Hash.init(.{});
+ var hasher = Manifest.Hash.init(.{});
hasher.update(hashed_file.path);
hasher.update(&.{ 0, @boolToInt(try isExecutable(file)) });
while (true) {
@@ -595,52 +584,6 @@ fn isExecutable(file: fs.File) !bool {
}
}
-const hex_charset = "0123456789abcdef";
-
-fn hex64(x: u64) [16]u8 {
- var result: [16]u8 = undefined;
- var i: usize = 0;
- while (i < 8) : (i += 1) {
- const byte = @truncate(u8, x >> @intCast(u6, 8 * i));
- result[i * 2 + 0] = hex_charset[byte >> 4];
- result[i * 2 + 1] = hex_charset[byte & 15];
- }
- return result;
-}
-
-test hex64 {
- const s = "[" ++ hex64(0x12345678_abcdef00) ++ "]";
- try std.testing.expectEqualStrings("[00efcdab78563412]", s);
-}
-
-const multihash_function: MultihashFunction = switch (Hash) {
- std.crypto.hash.sha2.Sha256 => .@"sha2-256",
- else => @compileError("unreachable"),
-};
-comptime {
- // We avoid unnecessary uleb128 code in hexDigest by asserting here the
- // values are small enough to be contained in the one-byte encoding.
- assert(@enumToInt(multihash_function) < 127);
- assert(Hash.digest_length < 127);
-}
-const multihash_len = 1 + 1 + Hash.digest_length;
-
-fn hexDigest(digest: [Hash.digest_length]u8) [multihash_len * 2]u8 {
- var result: [multihash_len * 2]u8 = undefined;
-
- result[0] = hex_charset[@enumToInt(multihash_function) >> 4];
- result[1] = hex_charset[@enumToInt(multihash_function) & 15];
-
- result[2] = hex_charset[Hash.digest_length >> 4];
- result[3] = hex_charset[Hash.digest_length & 15];
-
- for (digest) |byte, i| {
- result[4 + i * 2] = hex_charset[byte >> 4];
- result[5 + i * 2] = hex_charset[byte & 15];
- }
- return result;
-}
-
fn renameTmpIntoCache(
cache_dir: fs.Dir,
tmp_dir_sub_path: []const u8,
@@ -669,21 +612,3 @@ fn renameTmpIntoCache(
break;
}
}
-
-const MultihashFunction = enum(u16) {
- identity = 0x00,
- sha1 = 0x11,
- @"sha2-256" = 0x12,
- @"sha2-512" = 0x13,
- @"sha3-512" = 0x14,
- @"sha3-384" = 0x15,
- @"sha3-256" = 0x16,
- @"sha3-224" = 0x17,
- @"sha2-384" = 0x20,
- @"sha2-256-trunc254-padded" = 0x1012,
- @"sha2-224" = 0x1013,
- @"sha2-512-224" = 0x1014,
- @"sha2-512-256" = 0x1015,
- @"blake2b-256" = 0xb220,
- _,
-};
diff --git a/src/main.zig b/src/main.zig
index 06c36bad87..f634c259ff 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -3915,6 +3915,7 @@ pub const usage_build =
;
pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
+ var color: Color = .auto;
var prominent_compile_errors: bool = false;
// We want to release all the locks before executing the child process, so we make a nice
@@ -4117,6 +4118,7 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
// Here we borrow main package's table and will replace it with a fresh
// one after this process completes.
main_pkg.fetchAndAddDependencies(
+ arena,
&thread_pool,
&http_client,
build_directory,
@@ -4125,6 +4127,7 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
&dependencies_source,
&build_roots_source,
"",
+ color,
) catch |err| switch (err) {
error.PackageFetchFailed => process.exit(1),
else => |e| return e,
@@ -4366,7 +4369,7 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
};
defer tree.deinit(gpa);
- try printErrsMsgToStdErr(gpa, arena, tree.errors, tree, "", color);
+ try printErrsMsgToStdErr(gpa, arena, tree, "", color);
var has_ast_error = false;
if (check_ast_flag) {
const Module = @import("Module.zig");
@@ -4569,7 +4572,7 @@ fn fmtPathFile(
var tree = try Ast.parse(fmt.gpa, source_code, .zig);
defer tree.deinit(fmt.gpa);
- try printErrsMsgToStdErr(fmt.gpa, fmt.arena, tree.errors, tree, file_path, fmt.color);
+ try printErrsMsgToStdErr(fmt.gpa, fmt.arena, tree, file_path, fmt.color);
if (tree.errors.len != 0) {
fmt.any_error = true;
return;
@@ -4649,14 +4652,14 @@ fn fmtPathFile(
}
}
-fn printErrsMsgToStdErr(
+pub fn printErrsMsgToStdErr(
gpa: mem.Allocator,
arena: mem.Allocator,
- parse_errors: []const Ast.Error,
tree: Ast,
path: []const u8,
color: Color,
) !void {
+ const parse_errors: []const Ast.Error = tree.errors;
var i: usize = 0;
while (i < parse_errors.len) : (i += 1) {
const parse_error = parse_errors[i];
@@ -5316,7 +5319,7 @@ pub fn cmdAstCheck(
file.tree_loaded = true;
defer file.tree.deinit(gpa);
- try printErrsMsgToStdErr(gpa, arena, file.tree.errors, file.tree, file.sub_file_path, color);
+ try printErrsMsgToStdErr(gpa, arena, file.tree, file.sub_file_path, color);
if (file.tree.errors.len != 0) {
process.exit(1);
}
@@ -5442,7 +5445,7 @@ pub fn cmdChangelist(
file.tree_loaded = true;
defer file.tree.deinit(gpa);
- try printErrsMsgToStdErr(gpa, arena, file.tree.errors, file.tree, old_source_file, .auto);
+ try printErrsMsgToStdErr(gpa, arena, file.tree, old_source_file, .auto);
if (file.tree.errors.len != 0) {
process.exit(1);
}
@@ -5479,7 +5482,7 @@ pub fn cmdChangelist(
var new_tree = try Ast.parse(gpa, new_source, .zig);
defer new_tree.deinit(gpa);
- try printErrsMsgToStdErr(gpa, arena, new_tree.errors, new_tree, new_source_file, .auto);
+ try printErrsMsgToStdErr(gpa, arena, new_tree, new_source_file, .auto);
if (new_tree.errors.len != 0) {
process.exit(1);
}
--
cgit v1.2.3