Diffstat (limited to 'src')
-rw-r--r--  src/Compilation.zig              |    2
-rw-r--r--  src/Module.zig                   |   85
-rw-r--r--  src/Sema.zig                     |   10
-rw-r--r--  src/arch/aarch64/CodeGen.zig     |  121
-rw-r--r--  src/arch/aarch64/Emit.zig        |   16
-rw-r--r--  src/arch/arm/CodeGen.zig         |   37
-rw-r--r--  src/arch/riscv64/CodeGen.zig     |   33
-rw-r--r--  src/arch/sparc64/CodeGen.zig     |   22
-rw-r--r--  src/arch/wasm/CodeGen.zig        |   40
-rw-r--r--  src/arch/wasm/Emit.zig           |   29
-rw-r--r--  src/arch/x86_64/CodeGen.zig      |   71
-rw-r--r--  src/arch/x86_64/Emit.zig         |   16
-rw-r--r--  src/codegen/spirv.zig            |   30
-rw-r--r--  src/link.zig                     |   50
-rw-r--r--  src/link/C.zig                   |    4
-rw-r--r--  src/link/Coff.zig                |  475
-rw-r--r--  src/link/Coff/Atom.zig           |   48
-rw-r--r--  src/link/Coff/Relocation.zig     |   18
-rw-r--r--  src/link/Dwarf.zig               |  579
-rw-r--r--  src/link/Elf.zig                 | 1206
-rw-r--r--  src/link/Elf/Atom.zig            |   62
-rw-r--r--  src/link/MachO.zig               |  624
-rw-r--r--  src/link/MachO/Atom.zig          |   82
-rw-r--r--  src/link/MachO/DebugSymbols.zig  |   12
-rw-r--r--  src/link/MachO/Relocation.zig    |   16
-rw-r--r--  src/link/Plan9.zig               |  232
-rw-r--r--  src/link/SpirV.zig               |   18
-rw-r--r--  src/link/Wasm.zig                |  539
-rw-r--r--  src/link/Wasm/Atom.zig           |   47
-rw-r--r--  src/link/Wasm/Object.zig         |   15
30 files changed, 2374 insertions, 2165 deletions
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 09c6e1c686..7d42d3b610 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -3299,7 +3299,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
const gpa = comp.gpa;
const module = comp.bin_file.options.module.?;
const decl = module.declPtr(decl_index);
- comp.bin_file.updateDeclLineNumber(module, decl) catch |err| {
+ comp.bin_file.updateDeclLineNumber(module, decl_index) catch |err| {
try module.failed_decls.ensureUnusedCapacity(gpa, 1);
module.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create(
gpa,
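
This one-line fix sets up the theme of the whole diff: linker-specific state no longer lives on `*Module.Decl`, so call sites pass a `Decl.Index` and each backend resolves its own metadata. The backends converge on a get-or-create lookup; below is a minimal self-contained sketch of that pattern, with stand-in types rather than the real `link.File` API:

```zig
const std = @import("std");

// Stand-ins for Module.Decl.Index and Atom.Index.
const DeclIndex = u32;
const AtomIndex = u32;

const Linker = struct {
    gpa: std.mem.Allocator,
    // Each backend owns its decl -> atom mapping instead of Decl carrying
    // a per-backend `link` union.
    decls: std.AutoHashMapUnmanaged(DeclIndex, AtomIndex) = .{},
    next_atom: AtomIndex = 0,

    // Mirrors the shape of getOrCreateAtomForDecl in the hunks below:
    // the first reference to a decl lazily allocates its atom.
    fn getOrCreateAtomForDecl(self: *Linker, decl: DeclIndex) !AtomIndex {
        const gop = try self.decls.getOrPut(self.gpa, decl);
        if (!gop.found_existing) {
            gop.value_ptr.* = self.next_atom;
            self.next_atom += 1;
        }
        return gop.value_ptr.*;
    }
};

test "an atom is created once per decl" {
    var l = Linker{ .gpa = std.testing.allocator };
    defer l.decls.deinit(std.testing.allocator);
    const a = try l.getOrCreateAtomForDecl(42);
    const b = try l.getOrCreateAtomForDecl(42);
    try std.testing.expectEqual(a, b);
}
```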
diff --git a/src/Module.zig b/src/Module.zig
index dcdbeec322..b395c0a950 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -328,8 +328,6 @@ pub const ErrorInt = u32;
pub const Export = struct {
options: std.builtin.ExportOptions,
src: LazySrcLoc,
- /// Represents the position of the export, if any, in the output file.
- link: link.File.Export,
/// The Decl that performs the export. Note that this is *not* the Decl being exported.
owner_decl: Decl.Index,
/// The Decl containing the export statement. Inline function calls
@@ -533,16 +531,8 @@ pub const Decl = struct {
/// What kind of a declaration is this.
kind: Kind,
- /// Represents the position of the code in the output file.
- /// This is populated regardless of semantic analysis and code generation.
- link: link.File.LinkBlock,
-
- /// Represents the function in the linked output file, if the `Decl` is a function.
- /// This is stored here and not in `Fn` because `Decl` survives across updates but
- /// `Fn` does not.
- /// TODO Look into making `Fn` a longer lived structure and moving this field there
- /// to save on memory usage.
- fn_link: link.File.LinkFn,
+ /// TODO remove this once Wasm backend catches up
+ fn_link: ?link.File.Wasm.FnData = null,
/// The shallow set of other decls whose typed_value could possibly change if this Decl's
/// typed_value is modified.
@@ -4098,7 +4088,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
// The exports this Decl performs will be re-discovered, so we remove them here
// prior to re-analysis.
- mod.deleteDeclExports(decl_index);
+ try mod.deleteDeclExports(decl_index);
// Similarly, `@setAlignStack` invocations will be re-discovered.
if (decl.getFunction()) |func| {
@@ -5183,20 +5173,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
decl.zir_decl_index = @intCast(u32, decl_sub_index);
if (decl.getFunction()) |_| {
switch (comp.bin_file.tag) {
- .coff => {
- // TODO Implement for COFF
- },
- .elf => if (decl.fn_link.elf.len != 0) {
- // TODO Look into detecting when this would be unnecessary by storing enough state
- // in `Decl` to notice that the line number did not change.
- comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
- },
- .macho => if (decl.fn_link.macho.len != 0) {
- // TODO Look into detecting when this would be unnecessary by storing enough state
- // in `Decl` to notice that the line number did not change.
- comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
- },
- .plan9 => {
+ .coff, .elf, .macho, .plan9 => {
// TODO Look into detecting when this would be unnecessary by storing enough state
// in `Decl` to notice that the line number did not change.
comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
@@ -5265,33 +5242,15 @@ pub fn clearDecl(
assert(emit_h.decl_table.swapRemove(decl_index));
}
_ = mod.compile_log_decls.swapRemove(decl_index);
- mod.deleteDeclExports(decl_index);
+ try mod.deleteDeclExports(decl_index);
if (decl.has_tv) {
if (decl.ty.isFnOrHasRuntimeBits()) {
mod.comp.bin_file.freeDecl(decl_index);
- // TODO instead of a union, put this memory trailing Decl objects,
- // and allow it to be variably sized.
- decl.link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = link.File.Coff.Atom.empty },
- .elf => .{ .elf = link.File.Elf.TextBlock.empty },
- .macho => .{ .macho = link.File.MachO.Atom.empty },
- .plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
- .spirv => .{ .spirv = {} },
- .nvptx => .{ .nvptx = {} },
- };
decl.fn_link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = {} },
- .elf => .{ .elf = link.File.Dwarf.SrcFn.empty },
- .macho => .{ .macho = link.File.Dwarf.SrcFn.empty },
- .plan9 => .{ .plan9 = {} },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = link.File.Wasm.FnData.empty },
- .spirv => .{ .spirv = .{} },
- .nvptx => .{ .nvptx = {} },
+ .wasm => link.File.Wasm.FnData.empty,
+ else => null,
};
}
if (decl.getInnerNamespace()) |namespace| {
@@ -5358,7 +5317,7 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void {
/// Delete all the Export objects that are caused by this Decl. Re-analysis of
/// this Decl will cause them to be re-created (or not).
-fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) void {
+fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void {
var export_owners = (mod.export_owners.fetchSwapRemove(decl_index) orelse return).value;
for (export_owners.items) |exp| {
@@ -5381,16 +5340,16 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) void {
}
}
if (mod.comp.bin_file.cast(link.File.Elf)) |elf| {
- elf.deleteExport(exp.link.elf);
+ elf.deleteDeclExport(decl_index, exp.options.name);
}
if (mod.comp.bin_file.cast(link.File.MachO)) |macho| {
- macho.deleteExport(exp.link.macho);
+ try macho.deleteDeclExport(decl_index, exp.options.name);
}
if (mod.comp.bin_file.cast(link.File.Wasm)) |wasm| {
- wasm.deleteExport(exp.link.wasm);
+ wasm.deleteDeclExport(decl_index);
}
if (mod.comp.bin_file.cast(link.File.Coff)) |coff| {
- coff.deleteExport(exp.link.coff);
+ coff.deleteDeclExport(decl_index, exp.options.name);
}
if (mod.failed_exports.fetchSwapRemove(exp)) |failed_kv| {
failed_kv.value.destroy(mod.gpa);
@@ -5693,25 +5652,9 @@ pub fn allocateNewDecl(
.deletion_flag = false,
.zir_decl_index = 0,
.src_scope = src_scope,
- .link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = link.File.Coff.Atom.empty },
- .elf => .{ .elf = link.File.Elf.TextBlock.empty },
- .macho => .{ .macho = link.File.MachO.Atom.empty },
- .plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
- .spirv => .{ .spirv = {} },
- .nvptx => .{ .nvptx = {} },
- },
.fn_link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = {} },
- .elf => .{ .elf = link.File.Dwarf.SrcFn.empty },
- .macho => .{ .macho = link.File.Dwarf.SrcFn.empty },
- .plan9 => .{ .plan9 = {} },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = link.File.Wasm.FnData.empty },
- .spirv => .{ .spirv = .{} },
- .nvptx => .{ .nvptx = {} },
+ .wasm => link.File.Wasm.FnData.empty,
+ else => null,
},
.generation = 0,
.is_pub = false,
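
With the `link` union removed and `fn_link` reduced to `?link.File.Wasm.FnData`, the invariant is that only the Wasm backend still stores per-function link data on the `Decl`; every other backend sees `null`. A small sketch of what the Wasm code paths later in this diff rely on (illustrative names):

```zig
const FnData = struct { type_index: u32 };
const Decl = struct { fn_link: ?FnData = null };

fn wasmTypeIndex(decl: *const Decl) u32 {
    // Safe only on the Wasm path, where fn_link was populated when the
    // decl was allocated; mirrors `decl.fn_link.?.type_index` below.
    return decl.fn_link.?.type_index;
}
```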
diff --git a/src/Sema.zig b/src/Sema.zig
index 87be3de7be..610f1bf5af 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -5564,16 +5564,6 @@ pub fn analyzeExport(
.visibility = borrowed_options.visibility,
},
.src = src,
- .link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = .{} },
- .elf => .{ .elf = .{} },
- .macho => .{ .macho = .{} },
- .plan9 => .{ .plan9 = null },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = .{} },
- .spirv => .{ .spirv = {} },
- .nvptx => .{ .nvptx = {} },
- },
.owner_decl = sema.owner_decl_index,
.src_decl = block.src_decl,
.exported_decl = exported_decl_index,
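
Because `Export` no longer carries a per-backend `link` union, deleting an export is now keyed by the owning decl, plus the symbol name on formats where one decl can export several aliases. A self-contained sketch of that bookkeeping, assuming the shapes introduced for COFF later in this diff:

```zig
const std = @import("std");

const DeclIndex = u32;
const SymIndex = u32;

const Linker = struct {
    gpa: std.mem.Allocator,
    // decl -> export symbol indices, like the COFF DeclMetadata.exports
    // list added below.
    exports: std.AutoHashMapUnmanaged(DeclIndex, std.ArrayListUnmanaged(SymIndex)) = .{},

    fn deleteDeclExport(self: *Linker, decl: DeclIndex, sym: SymIndex) void {
        const list = self.exports.getPtr(decl) orelse return;
        for (list.items) |s, i| {
            if (s == sym) {
                _ = list.swapRemove(i);
                return;
            }
        }
    }
};
```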
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 0efd34937a..473a62fd83 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -203,13 +203,7 @@ const DbgInfoReloc = struct {
else => unreachable, // not a possible argument
};
- try dw.genArgDbgInfo(
- reloc.name,
- reloc.ty,
- function.bin_file.tag,
- function.mod_fn.owner_decl,
- loc,
- );
+ try dw.genArgDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, loc);
},
.plan9 => {},
.none => {},
@@ -255,14 +249,7 @@ const DbgInfoReloc = struct {
break :blk .nop;
},
};
- try dw.genVarDbgInfo(
- reloc.name,
- reloc.ty,
- function.bin_file.tag,
- function.mod_fn.owner_decl,
- is_ptr,
- loc,
- );
+ try dw.genVarDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, is_ptr, loc);
},
.plan9 => {},
.none => {},
@@ -4019,11 +4006,17 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.direct => .load_memory_ptr_direct,
.import => unreachable,
};
- const mod = self.bin_file.options.module.?;
- const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
- .macho => owner_decl.link.macho.getSymbolIndex().?,
- .coff => owner_decl.link.coff.getSymbolIndex().?,
+ .macho => blk: {
+ const macho_file = self.bin_file.cast(link.File.MachO).?;
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk macho_file.getAtom(atom).getSymbolIndex().?;
+ },
+ .coff => blk: {
+ const coff_file = self.bin_file.cast(link.File.Coff).?;
+ const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk coff_file.getAtom(atom).getSymbolIndex().?;
+ },
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@@ -4301,34 +4294,37 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
- const fn_owner_decl = mod.declPtr(func.owner_decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try fn_owner_decl.link.elf.ensureInitialized(elf_file);
- const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr });
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- try fn_owner_decl.link.macho.ensureInitialized(macho_file);
+ const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
+ const sym_index = macho_file.getAtom(atom).getSymbolIndex().?;
try self.genSetReg(Type.initTag(.u64), .x30, .{
.linker_load = .{
.type = .got,
- .sym_index = fn_owner_decl.link.macho.getSymbolIndex().?,
+ .sym_index = sym_index,
},
});
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- try fn_owner_decl.link.coff.ensureInitialized(coff_file);
+ const atom = try coff_file.getOrCreateAtomForDecl(func.owner_decl);
+ const sym_index = coff_file.getAtom(atom).getSymbolIndex().?;
try self.genSetReg(Type.initTag(.u64), .x30, .{
.linker_load = .{
.type = .got,
- .sym_index = fn_owner_decl.link.coff.getSymbolIndex().?,
+ .sym_index = sym_index,
},
});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(func.owner_decl);
+ const decl_block_index = try p9.seeDecl(func.owner_decl);
+ const decl_block = p9.getDeclBlock(decl_block_index);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
- const got_index = fn_owner_decl.link.plan9.got_index.?;
+ const got_index = decl_block.got_index.?;
const fn_got_addr = got_addr + got_index * ptr_bytes;
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr });
} else unreachable;
@@ -4349,11 +4345,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ const atom_index = macho_file.getAtom(atom).getSymbolIndex().?;
_ = try self.addInst(.{
.tag = .call_extern,
.data = .{
.relocation = .{
- .atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.getSymbolIndex().?,
+ .atom_index = atom_index,
.sym_index = sym_index,
},
},
@@ -5488,11 +5486,17 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
.direct => .load_memory_ptr_direct,
.import => unreachable,
};
- const mod = self.bin_file.options.module.?;
- const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
- .macho => owner_decl.link.macho.getSymbolIndex().?,
- .coff => owner_decl.link.coff.getSymbolIndex().?,
+ .macho => blk: {
+ const macho_file = self.bin_file.cast(link.File.MachO).?;
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk macho_file.getAtom(atom).getSymbolIndex().?;
+ },
+ .coff => blk: {
+ const coff_file = self.bin_file.cast(link.File.Coff).?;
+ const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk coff_file.getAtom(atom).getSymbolIndex().?;
+ },
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@@ -5602,11 +5606,17 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.direct => .load_memory_direct,
.import => .load_memory_import,
};
- const mod = self.bin_file.options.module.?;
- const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
- .macho => owner_decl.link.macho.getSymbolIndex().?,
- .coff => owner_decl.link.coff.getSymbolIndex().?,
+ .macho => blk: {
+ const macho_file = self.bin_file.cast(link.File.MachO).?;
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk macho_file.getAtom(atom).getSymbolIndex().?;
+ },
+ .coff => blk: {
+ const coff_file = self.bin_file.cast(link.File.Coff).?;
+ const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk coff_file.getAtom(atom).getSymbolIndex().?;
+ },
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@@ -5796,11 +5806,17 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
.direct => .load_memory_ptr_direct,
.import => unreachable,
};
- const mod = self.bin_file.options.module.?;
- const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
- .macho => owner_decl.link.macho.getSymbolIndex().?,
- .coff => owner_decl.link.coff.getSymbolIndex().?,
+ .macho => blk: {
+ const macho_file = self.bin_file.cast(link.File.MachO).?;
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk macho_file.getAtom(atom).getSymbolIndex().?;
+ },
+ .coff => blk: {
+ const coff_file = self.bin_file.cast(link.File.Coff).?;
+ const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk coff_file.getAtom(atom).getSymbolIndex().?;
+ },
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@@ -6119,23 +6135,27 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try decl.link.elf.ensureInitialized(elf_file);
- return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- try decl.link.macho.ensureInitialized(macho_file);
+ const atom = try macho_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = macho_file.getAtom(atom).getSymbolIndex().?;
return MCValue{ .linker_load = .{
.type = .got,
- .sym_index = decl.link.macho.getSymbolIndex().?,
+ .sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- try decl.link.coff.ensureInitialized(coff_file);
+ const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
return MCValue{ .linker_load = .{
.type = .got,
- .sym_index = decl.link.coff.getSymbolIndex().?,
+ .sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(decl_index);
- const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
+ const decl_block_index = try p9.seeDecl(decl_index);
+ const decl_block = p9.getDeclBlock(decl_block_index);
+ const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
@@ -6148,8 +6168,7 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
};
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
- return MCValue{ .memory = vaddr };
+ return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
return MCValue{ .linker_load = .{
.type = .direct,
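
The Plan 9 branches above compute the callee's GOT slot by hand: base of the GOT data section plus slot index times pointer size. Spelled out with illustrative numbers (the base address is made up, not a real layout):

```zig
const ptr_bits = 64; // self.target.cpu.arch.ptrBitWidth() on aarch64
const ptr_bytes: u64 = @divExact(ptr_bits, 8); // 8
const got_addr: u64 = 0x200000; // p9.bases.data, illustrative value only
const got_index: u64 = 3; // decl_block.got_index.?
const fn_got_addr = got_addr + got_index * ptr_bytes; // 0x200018
```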
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index 3812597789..3c2a81d5d1 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -670,9 +670,9 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl.
- const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+ const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = macho_file.getGlobalByIndex(relocation.sym_index);
- try atom.addRelocation(macho_file, .{
+ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = @enumToInt(std.macho.reloc_type_arm64.ARM64_RELOC_BRANCH26),
.target = target,
.offset = offset,
@@ -883,10 +883,10 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
}
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
- const atom = macho_file.getAtomForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
+ const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
// TODO this causes segfault in stage1
// try atom.addRelocations(macho_file, 2, .{
- try atom.addRelocation(macho_file, .{
+ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.target = .{ .sym_index = data.sym_index, .file = null },
.offset = offset,
.addend = 0,
@@ -902,7 +902,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
else => unreachable,
},
});
- try atom.addRelocation(macho_file, .{
+ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.target = .{ .sym_index = data.sym_index, .file = null },
.offset = offset + 4,
.addend = 0,
@@ -919,7 +919,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
},
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom = coff_file.getAtomForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
+ const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
const target = switch (tag) {
.load_memory_got,
.load_memory_ptr_got,
@@ -929,7 +929,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
.load_memory_import => coff_file.getGlobalByIndex(data.sym_index),
else => unreachable,
};
- try atom.addRelocation(coff_file, .{
+ try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.target = target,
.offset = offset,
.addend = 0,
@@ -946,7 +946,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
else => unreachable,
},
});
- try atom.addRelocation(coff_file, .{
+ try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.target = target,
.offset = offset + 4,
.addend = 0,
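
`addRelocation` changes from a method on `*Atom` to a namespaced function taking the linker and an atom index, so no atom pointer is held across an append that might grow the backing storage. A self-contained sketch of the idiom (stand-in types; the real one lives in src/link/MachO/Atom.zig and src/link/Coff/Atom.zig):

```zig
const std = @import("std");

const Reloc = struct { offset: u32, addend: i64 };

const Linker = struct {
    gpa: std.mem.Allocator,
    atoms: std.ArrayListUnmanaged(Atom) = .{},

    const Atom = struct {
        pub const Index = u32;
        relocs: std.ArrayListUnmanaged(Reloc) = .{},
    };

    // The pointer is resolved inside and dies before the caller resumes.
    fn addRelocation(self: *Linker, atom_index: Atom.Index, reloc: Reloc) !void {
        const atom = &self.atoms.items[atom_index];
        try atom.relocs.append(self.gpa, reloc);
    }
};
```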
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 49f979624d..57a8aed699 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -282,13 +282,7 @@ const DbgInfoReloc = struct {
else => unreachable, // not a possible argument
};
- try dw.genArgDbgInfo(
- reloc.name,
- reloc.ty,
- function.bin_file.tag,
- function.mod_fn.owner_decl,
- loc,
- );
+ try dw.genArgDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, loc);
},
.plan9 => {},
.none => {},
@@ -331,14 +325,7 @@ const DbgInfoReloc = struct {
break :blk .nop;
},
};
- try dw.genVarDbgInfo(
- reloc.name,
- reloc.ty,
- function.bin_file.tag,
- function.mod_fn.owner_decl,
- is_ptr,
- loc,
- );
+ try dw.genVarDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, is_ptr, loc);
},
.plan9 => {},
.none => {},
@@ -4256,12 +4243,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
- const mod = self.bin_file.options.module.?;
- const fn_owner_decl = mod.declPtr(func.owner_decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try fn_owner_decl.link.elf.ensureInitialized(elf_file);
- const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr });
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable; // unsupported architecture for MachO
@@ -6084,15 +6070,17 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try decl.link.elf.ensureInitialized(elf_file);
- return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable; // unsupported architecture for MachO
} else if (self.bin_file.cast(link.File.Coff)) |_| {
return self.fail("TODO codegen COFF const Decl pointer", .{});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(decl_index);
- const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
+ const decl_block_index = try p9.seeDecl(decl_index);
+ const decl_block = p9.getDeclBlock(decl_block_index);
+ const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
@@ -6106,8 +6094,7 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
};
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
- return MCValue{ .memory = vaddr };
+ return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable;
} else if (self.bin_file.cast(link.File.Coff)) |_| {
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index d50a614206..8b8fca4859 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -1615,13 +1615,9 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
switch (self.debug_output) {
.dwarf => |dw| switch (mcv) {
- .register => |reg| try dw.genArgDbgInfo(
- name,
- ty,
- self.bin_file.tag,
- self.mod_fn.owner_decl,
- .{ .register = reg.dwarfLocOp() },
- ),
+ .register => |reg| try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, .{
+ .register = reg.dwarfLocOp(),
+ }),
.stack_offset => {},
else => {},
},
@@ -1721,12 +1717,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
-
- const mod = self.bin_file.options.module.?;
- const fn_owner_decl = mod.declPtr(func.owner_decl);
- try fn_owner_decl.link.elf.ensureInitialized(elf_file);
- const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
-
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr });
_ = try self.addInst(.{
.tag = .jalr,
@@ -2553,17 +2546,17 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
const decl = mod.declPtr(decl_index);
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try decl.link.elf.ensureInitialized(elf_file);
- return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
- // TODO I'm hacking my way through here by repurposing .memory for storing
- // index to the GOT target symbol index.
- return MCValue{ .memory = decl.link.macho.sym_index };
+ unreachable;
} else if (self.bin_file.cast(link.File.Coff)) |_| {
return self.fail("TODO codegen COFF const Decl pointer", .{});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(decl_index);
- const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
+ const decl_block_index = try p9.seeDecl(decl_index);
+ const decl_block = p9.getDeclBlock(decl_block_index);
+ const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 5e9326d23b..418c67c580 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -1216,11 +1216,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.bin_file.tag == link.File.Elf.base_tag) {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
- const mod = self.bin_file.options.module.?;
- const fn_owner_decl = mod.declPtr(func.owner_decl);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
- try fn_owner_decl.link.elf.ensureInitialized(elf_file);
- break :blk @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ break :blk @intCast(u32, atom.getOffsetTableAddress(elf_file));
} else unreachable;
try self.genSetReg(Type.initTag(.usize), .o7, .{ .memory = got_addr });
@@ -3413,13 +3412,9 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
switch (self.debug_output) {
.dwarf => |dw| switch (mcv) {
- .register => |reg| try dw.genArgDbgInfo(
- name,
- ty,
- self.bin_file.tag,
- self.mod_fn.owner_decl,
- .{ .register = reg.dwarfLocOp() },
- ),
+ .register => |reg| try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, .{
+ .register = reg.dwarfLocOp(),
+ }),
else => {},
},
else => {},
@@ -4205,8 +4200,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try decl.link.elf.ensureInitialized(elf_file);
- return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index c0d0c11b56..ec494b1a57 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1194,7 +1194,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
const fn_info = func.decl.ty.fnInfo();
var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target);
defer func_type.deinit(func.gpa);
- func.decl.fn_link.wasm.type_index = try func.bin_file.putOrGetFuncType(func_type);
+ func.decl.fn_link.?.type_index = try func.bin_file.putOrGetFuncType(func_type);
var cc_result = try func.resolveCallingConventionValues(func.decl.ty);
defer cc_result.deinit(func.gpa);
@@ -1269,10 +1269,10 @@ fn genFunc(func: *CodeGen) InnerError!void {
var emit: Emit = .{
.mir = mir,
- .bin_file = &func.bin_file.base,
+ .bin_file = func.bin_file,
.code = func.code,
.locals = func.locals.items,
- .decl = func.decl,
+ .decl_index = func.decl_index,
.dbg_output = func.debug_output,
.prev_di_line = 0,
.prev_di_column = 0,
@@ -2117,33 +2117,31 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const fn_info = fn_ty.fnInfo();
const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, func.target);
- const callee: ?*Decl = blk: {
+ const callee: ?Decl.Index = blk: {
const func_val = func.air.value(pl_op.operand) orelse break :blk null;
const module = func.bin_file.base.options.module.?;
if (func_val.castTag(.function)) |function| {
- const decl = module.declPtr(function.data.owner_decl);
- try decl.link.wasm.ensureInitialized(func.bin_file);
- break :blk decl;
+ _ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl);
+ break :blk function.data.owner_decl;
} else if (func_val.castTag(.extern_fn)) |extern_fn| {
const ext_decl = module.declPtr(extern_fn.data.owner_decl);
const ext_info = ext_decl.ty.fnInfo();
var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, func.target);
defer func_type.deinit(func.gpa);
- const atom = &ext_decl.link.wasm;
- try atom.ensureInitialized(func.bin_file);
- ext_decl.fn_link.wasm.type_index = try func.bin_file.putOrGetFuncType(func_type);
+ const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl);
+ const atom = func.bin_file.getAtomPtr(atom_index);
+ ext_decl.fn_link.?.type_index = try func.bin_file.putOrGetFuncType(func_type);
try func.bin_file.addOrUpdateImport(
mem.sliceTo(ext_decl.name, 0),
atom.getSymbolIndex().?,
ext_decl.getExternFn().?.lib_name,
- ext_decl.fn_link.wasm.type_index,
+ ext_decl.fn_link.?.type_index,
);
- break :blk ext_decl;
+ break :blk extern_fn.data.owner_decl;
} else if (func_val.castTag(.decl_ref)) |decl_ref| {
- const decl = module.declPtr(decl_ref.data);
- try decl.link.wasm.ensureInitialized(func.bin_file);
- break :blk decl;
+ _ = try func.bin_file.getOrCreateAtomForDecl(decl_ref.data);
+ break :blk decl_ref.data;
}
return func.fail("Expected a function, but instead found type '{}'", .{func_val.tag()});
};
@@ -2164,7 +2162,8 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
}
if (callee) |direct| {
- try func.addLabel(.call, direct.link.wasm.sym_index);
+ const atom_index = func.bin_file.decls.get(direct).?;
+ try func.addLabel(.call, func.bin_file.getAtom(atom_index).sym_index);
} else {
// in this case we call a function pointer
// so load its value onto the stack
@@ -2477,7 +2476,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.dwarf => |dwarf| {
const src_index = func.air.instructions.items(.data)[inst].arg.src_index;
const name = func.mod_fn.getParamName(func.bin_file.base.options.module.?, src_index);
- try dwarf.genArgDbgInfo(name, arg_ty, .wasm, func.mod_fn.owner_decl, .{
+ try dwarf.genArgDbgInfo(name, arg_ty, func.mod_fn.owner_decl, .{
.wasm_local = arg.local.value,
});
},
@@ -2760,9 +2759,10 @@ fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Ind
}
module.markDeclAlive(decl);
- try decl.link.wasm.ensureInitialized(func.bin_file);
+ const atom_index = try func.bin_file.getOrCreateAtomForDecl(decl_index);
+ const atom = func.bin_file.getAtom(atom_index);
- const target_sym_index = decl.link.wasm.sym_index;
+ const target_sym_index = atom.sym_index;
if (decl.ty.zigTypeTag() == .Fn) {
try func.bin_file.addTableFunction(target_sym_index);
return WValue{ .function_index = target_sym_index };
@@ -5547,7 +5547,7 @@ fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) !void {
break :blk .nop;
},
};
- try func.debug_output.dwarf.genVarDbgInfo(name, ty, .wasm, func.mod_fn.owner_decl, is_ptr, loc);
+ try func.debug_output.dwarf.genVarDbgInfo(name, ty, func.mod_fn.owner_decl, is_ptr, loc);
func.finishAir(inst, .none, &.{});
}
diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig
index 71d21d2797..a340ac5da8 100644
--- a/src/arch/wasm/Emit.zig
+++ b/src/arch/wasm/Emit.zig
@@ -11,8 +11,8 @@ const leb128 = std.leb;
/// Contains our list of instructions
mir: Mir,
-/// Reference to the file handler
-bin_file: *link.File,
+/// Reference to the Wasm module linker
+bin_file: *link.File.Wasm,
/// Possible error message. When set, the value is allocated and
/// must be freed manually.
error_msg: ?*Module.ErrorMsg = null,
@@ -21,7 +21,7 @@ code: *std.ArrayList(u8),
/// List of allocated locals.
locals: []const u8,
/// The declaration that code is being generated for.
-decl: *Module.Decl,
+decl_index: Module.Decl.Index,
// Debug information
/// Holds the debug information for this emission
@@ -252,8 +252,8 @@ fn offset(self: Emit) u32 {
fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
@setCold(true);
std.debug.assert(emit.error_msg == null);
- // TODO: Determine the source location.
- emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.allocator, emit.decl.srcLoc(), format, args);
+ const mod = emit.bin_file.base.options.module.?;
+ emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.base.allocator, mod.declPtr(emit.decl_index).srcLoc(), format, args);
return error.EmitFail;
}
@@ -304,8 +304,9 @@ fn emitGlobal(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void {
const global_offset = emit.offset();
try emit.code.appendSlice(&buf);
- // globals can have index 0 as it represents the stack pointer
- try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
+ const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
+ const atom = emit.bin_file.getAtomPtr(atom_index);
+ try atom.relocs.append(emit.bin_file.base.allocator, .{
.index = label,
.offset = global_offset,
.relocation_type = .R_WASM_GLOBAL_INDEX_LEB,
@@ -361,7 +362,9 @@ fn emitCall(emit: *Emit, inst: Mir.Inst.Index) !void {
try emit.code.appendSlice(&buf);
if (label != 0) {
- try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
+ const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
+ const atom = emit.bin_file.getAtomPtr(atom_index);
+ try atom.relocs.append(emit.bin_file.base.allocator, .{
.offset = call_offset,
.index = label,
.relocation_type = .R_WASM_FUNCTION_INDEX_LEB,
@@ -387,7 +390,9 @@ fn emitFunctionIndex(emit: *Emit, inst: Mir.Inst.Index) !void {
try emit.code.appendSlice(&buf);
if (symbol_index != 0) {
- try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
+ const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
+ const atom = emit.bin_file.getAtomPtr(atom_index);
+ try atom.relocs.append(emit.bin_file.base.allocator, .{
.offset = index_offset,
.index = symbol_index,
.relocation_type = .R_WASM_TABLE_INDEX_SLEB,
@@ -399,7 +404,7 @@ fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
const extra_index = emit.mir.instructions.items(.data)[inst].payload;
const mem = emit.mir.extraData(Mir.Memory, extra_index).data;
const mem_offset = emit.offset() + 1;
- const is_wasm32 = emit.bin_file.options.target.cpu.arch == .wasm32;
+ const is_wasm32 = emit.bin_file.base.options.target.cpu.arch == .wasm32;
if (is_wasm32) {
try emit.code.append(std.wasm.opcode(.i32_const));
var buf: [5]u8 = undefined;
@@ -413,7 +418,9 @@ fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
}
if (mem.pointer != 0) {
- try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
+ const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
+ const atom = emit.bin_file.getAtomPtr(atom_index);
+ try atom.relocs.append(emit.bin_file.base.allocator, .{
.offset = mem_offset,
.index = mem.pointer,
.relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_LEB else .R_WASM_MEMORY_ADDR_LEB64,
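
Each emit helper above repeats the same two-step lookup: `Emit` now holds only `decl_index`, and the atom is re-resolved through the linker's `decls` map for every relocation. A reduced sketch of that chain (names are stand-ins for the real `link.File.Wasm` types):

```zig
const std = @import("std");

const Reloc = struct { offset: u32, index: u32 };
const Atom = struct { relocs: std.ArrayListUnmanaged(Reloc) = .{} };

const Wasm = struct {
    gpa: std.mem.Allocator,
    decls: std.AutoHashMapUnmanaged(u32, u32) = .{}, // Decl.Index -> atom index
    atoms: std.ArrayListUnmanaged(Atom) = .{},

    fn getAtomPtr(self: *Wasm, atom_index: u32) *Atom {
        return &self.atoms.items[atom_index];
    }
};

fn appendReloc(wasm: *Wasm, decl_index: u32, reloc: Reloc) !void {
    const atom_index = wasm.decls.get(decl_index).?; // must already exist
    const atom = wasm.getAtomPtr(atom_index);
    try atom.relocs.append(wasm.gpa, reloc);
}
```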
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index df24fe5e7d..c11ea4e63e 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -2668,12 +2668,13 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue
switch (ptr) {
.linker_load => |load_struct| {
const abi_size = @intCast(u32, ptr_ty.abiSize(self.target.*));
- const mod = self.bin_file.options.module.?;
- const fn_owner_decl = mod.declPtr(self.mod_fn.owner_decl);
- const atom_index = if (self.bin_file.tag == link.File.MachO.base_tag)
- fn_owner_decl.link.macho.getSymbolIndex().?
- else
- fn_owner_decl.link.coff.getSymbolIndex().?;
+ const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: {
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk macho_file.getAtom(atom).getSymbolIndex().?;
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: {
+ const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk coff_file.getAtom(atom).getSymbolIndex().?;
+ } else unreachable;
const flags: u2 = switch (load_struct.type) {
.got => 0b00,
.direct => 0b01,
@@ -3835,7 +3836,7 @@ fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void {
},
else => unreachable, // not a valid function parameter
};
- try dw.genArgDbgInfo(name, ty, self.bin_file.tag, self.mod_fn.owner_decl, loc);
+ try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, loc);
},
.plan9 => {},
.none => {},
@@ -3875,7 +3876,7 @@ fn genVarDbgInfo(
break :blk .nop;
},
};
- try dw.genVarDbgInfo(name, ty, self.bin_file.tag, self.mod_fn.owner_decl, is_ptr, loc);
+ try dw.genVarDbgInfo(name, ty, self.mod_fn.owner_decl, is_ptr, loc);
},
.plan9 => {},
.none => {},
@@ -3995,19 +3996,19 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
- const fn_owner_decl = mod.declPtr(func.owner_decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try fn_owner_decl.link.elf.ensureInitialized(elf_file);
- const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
.data = .{ .imm = got_addr },
});
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- try fn_owner_decl.link.coff.ensureInitialized(coff_file);
- const sym_index = fn_owner_decl.link.coff.getSymbolIndex().?;
+ const atom_index = try coff_file.getOrCreateAtomForDecl(func.owner_decl);
+ const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
try self.genSetReg(Type.initTag(.usize), .rax, .{
.linker_load = .{
.type = .got,
@@ -4023,8 +4024,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.data = undefined,
});
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- try fn_owner_decl.link.macho.ensureInitialized(macho_file);
- const sym_index = fn_owner_decl.link.macho.getSymbolIndex().?;
+ const atom_index = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
+ const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
try self.genSetReg(Type.initTag(.usize), .rax, .{
.linker_load = .{
.type = .got,
@@ -4040,11 +4041,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.data = undefined,
});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(func.owner_decl);
+ const decl_block_index = try p9.seeDecl(func.owner_decl);
+ const decl_block = p9.getDeclBlock(decl_block_index);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
- const got_index = fn_owner_decl.link.plan9.got_index.?;
+ const got_index = decl_block.got_index.?;
const fn_got_addr = got_addr + got_index * ptr_bytes;
_ = try self.addInst(.{
.tag = .call,
@@ -4080,15 +4082,15 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
});
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ const atom_index = macho_file.getAtom(atom).getSymbolIndex().?;
_ = try self.addInst(.{
.tag = .call_extern,
.ops = undefined,
- .data = .{
- .relocation = .{
- .atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.getSymbolIndex().?,
- .sym_index = sym_index,
- },
- },
+ .data = .{ .relocation = .{
+ .atom_index = atom_index,
+ .sym_index = sym_index,
+ } },
});
} else {
return self.fail("TODO implement calling extern functions", .{});
@@ -6719,23 +6721,27 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
module.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try decl.link.elf.ensureInitialized(elf_file);
- return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- try decl.link.macho.ensureInitialized(macho_file);
+ const atom_index = try macho_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
return MCValue{ .linker_load = .{
.type = .got,
- .sym_index = decl.link.macho.getSymbolIndex().?,
+ .sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- try decl.link.coff.ensureInitialized(coff_file);
+ const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
return MCValue{ .linker_load = .{
.type = .got,
- .sym_index = decl.link.coff.getSymbolIndex().?,
+ .sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(decl_index);
- const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
+ const decl_block_index = try p9.seeDecl(decl_index);
+ const decl_block = p9.getDeclBlock(decl_block_index);
+ const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
@@ -6748,8 +6754,7 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
};
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
- return MCValue{ .memory = vaddr };
+ return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
return MCValue{ .linker_load = .{
.type = .direct,
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index af3ed5e053..c4f9b4eb42 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -1001,8 +1001,8 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
0b01 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
else => unreachable,
};
- const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
- try atom.addRelocation(macho_file, .{
+ const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = reloc_type,
.target = .{ .sym_index = relocation.sym_index, .file = null },
.offset = @intCast(u32, end_offset - 4),
@@ -1011,8 +1011,8 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
.length = 2,
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom = coff_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
- try atom.addRelocation(coff_file, .{
+ const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+ try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.type = switch (ops.flags) {
0b00 => .got,
0b01 => .direct,
@@ -1140,9 +1140,9 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl.
- const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+ const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = macho_file.getGlobalByIndex(relocation.sym_index);
- try atom.addRelocation(macho_file, .{
+ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
.target = target,
.offset = offset,
@@ -1152,9 +1152,9 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
// Add relocation to the decl.
- const atom = coff_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+ const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = coff_file.getGlobalByIndex(relocation.sym_index);
- try atom.addRelocation(coff_file, .{
+ try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.type = .direct,
.target = target,
.offset = offset,
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index e1af8c847f..c5a3d57d07 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -49,7 +49,7 @@ pub const DeclGen = struct {
spv: *SpvModule,
/// The decl we are currently generating code for.
- decl: *Decl,
+ decl_index: Decl.Index,
/// The intermediate code of the declaration we are currently generating. Note: If
/// the declaration is not a function, this value will be undefined!
@@ -59,6 +59,8 @@ pub const DeclGen = struct {
/// Note: If the declaration is not a function, this value will be undefined!
liveness: Liveness,
+ ids: *const std.AutoHashMap(Decl.Index, IdResult),
+
/// An array of function argument result-ids. Each index corresponds with the
/// function argument of the same index.
args: std.ArrayListUnmanaged(IdRef) = .{},
@@ -133,14 +135,20 @@ pub const DeclGen = struct {
/// Initialize the common resources of a DeclGen. Some fields are left uninitialized,
/// only set when `gen` is called.
- pub fn init(allocator: Allocator, module: *Module, spv: *SpvModule) DeclGen {
+ pub fn init(
+ allocator: Allocator,
+ module: *Module,
+ spv: *SpvModule,
+ ids: *const std.AutoHashMap(Decl.Index, IdResult),
+ ) DeclGen {
return .{
.gpa = allocator,
.module = module,
.spv = spv,
- .decl = undefined,
+ .decl_index = undefined,
.air = undefined,
.liveness = undefined,
+ .ids = ids,
.next_arg_index = undefined,
.current_block_label_id = undefined,
.error_msg = undefined,
@@ -150,9 +158,9 @@ pub const DeclGen = struct {
/// Generate the code for `decl`. If a reportable error occurred during code generation,
/// a message is returned by this function. Callee owns the memory. If this function
/// returns such a reportable error, it is valid to be called again for a different decl.
- pub fn gen(self: *DeclGen, decl: *Decl, air: Air, liveness: Liveness) !?*Module.ErrorMsg {
+ pub fn gen(self: *DeclGen, decl_index: Decl.Index, air: Air, liveness: Liveness) !?*Module.ErrorMsg {
// Reset internal resources, we don't want to re-allocate these.
- self.decl = decl;
+ self.decl_index = decl_index;
self.air = air;
self.liveness = liveness;
self.args.items.len = 0;
@@ -194,7 +202,7 @@ pub const DeclGen = struct {
pub fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
@setCold(true);
const src = LazySrcLoc.nodeOffset(0);
- const src_loc = src.toSrcLoc(self.decl);
+ const src_loc = src.toSrcLoc(self.module.declPtr(self.decl_index));
assert(self.error_msg == null);
self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args);
return error.CodegenFail;
@@ -332,7 +340,7 @@ pub const DeclGen = struct {
};
const decl = self.module.declPtr(fn_decl_index);
self.module.markDeclAlive(decl);
- return decl.fn_link.spirv.id.toRef();
+ return self.ids.get(fn_decl_index).?.toRef();
}
const target = self.getTarget();
@@ -553,8 +561,8 @@ pub const DeclGen = struct {
}
fn genDecl(self: *DeclGen) !void {
- const decl = self.decl;
- const result_id = decl.fn_link.spirv.id;
+ const result_id = self.ids.get(self.decl_index).?;
+ const decl = self.module.declPtr(self.decl_index);
if (decl.val.castTag(.function)) |_| {
assert(decl.ty.zigTypeTag() == .Fn);
@@ -945,7 +953,7 @@ pub const DeclGen = struct {
fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void {
const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
- const src_fname_id = try self.spv.resolveSourceFileName(self.decl);
+ const src_fname_id = try self.spv.resolveSourceFileName(self.module.declPtr(self.decl_index));
try self.func.body.emit(self.spv.gpa, .OpLine, .{
.file = src_fname_id,
.line = dbg_stmt.line,
@@ -1106,7 +1114,7 @@ pub const DeclGen = struct {
assert(as.errors.items.len != 0);
assert(self.error_msg == null);
const loc = LazySrcLoc.nodeOffset(0);
- const src_loc = loc.toSrcLoc(self.decl);
+ const src_loc = loc.toSrcLoc(self.module.declPtr(self.decl_index));
self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{});
const notes = try self.module.gpa.alloc(Module.ErrorMsg, as.errors.items.len);
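
Instead of reading `decl.fn_link.spirv.id`, the SPIR-V `DeclGen` is now handed a read-only map from `Decl.Index` to result-id that the `SpirV` linker owns and populates. A reduced sketch of that borrow (`IdResult` here is a stand-in for the real SPIR-V type):

```zig
const std = @import("std");

const DeclIndex = u32;
const IdResult = struct { id: u32 };

const DeclGen = struct {
    // Borrowed const: only the SpirV linker inserts into this map.
    ids: *const std.AutoHashMap(DeclIndex, IdResult),

    fn resolveDecl(self: DeclGen, decl: DeclIndex) IdResult {
        // The linker registers every decl before codegen runs, so a
        // missing entry is a bug (hence `.?`, as in the hunks above).
        return self.ids.get(decl).?;
    }
};
```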
diff --git a/src/link.zig b/src/link.zig
index 668c5b72e3..2b3ce51667 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -261,39 +261,6 @@ pub const File = struct {
/// of this linking operation.
lock: ?Cache.Lock = null,
- pub const LinkBlock = union {
- elf: Elf.TextBlock,
- coff: Coff.Atom,
- macho: MachO.Atom,
- plan9: Plan9.DeclBlock,
- c: void,
- wasm: Wasm.DeclBlock,
- spirv: void,
- nvptx: void,
- };
-
- pub const LinkFn = union {
- elf: Dwarf.SrcFn,
- coff: Coff.SrcFn,
- macho: Dwarf.SrcFn,
- plan9: void,
- c: void,
- wasm: Wasm.FnData,
- spirv: SpirV.FnData,
- nvptx: void,
- };
-
- pub const Export = union {
- elf: Elf.Export,
- coff: Coff.Export,
- macho: MachO.Export,
- plan9: Plan9.Export,
- c: void,
- wasm: Wasm.Export,
- spirv: void,
- nvptx: void,
- };
-
/// Attempts incremental linking, if the file already exists. If
/// incremental linking fails, falls back to truncating the file and
/// rewriting it. A malicious file is detected as incremental link failure
@@ -580,22 +547,23 @@ pub const File = struct {
}
}
- pub fn updateDeclLineNumber(base: *File, module: *Module, decl: *Module.Decl) UpdateDeclError!void {
+ pub fn updateDeclLineNumber(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void {
+ const decl = module.declPtr(decl_index);
log.debug("updateDeclLineNumber {*} ({s}), line={}", .{
decl, decl.name, decl.src_line + 1,
});
assert(decl.has_tv);
if (build_options.only_c) {
assert(base.tag == .c);
- return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl);
+ return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl_index);
}
switch (base.tag) {
- .coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl),
- .elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl),
- .macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl),
- .c => return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl),
- .wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclLineNumber(module, decl),
- .plan9 => return @fieldParentPtr(Plan9, "base", base).updateDeclLineNumber(module, decl),
+ .coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl_index),
+ .elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl_index),
+ .macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl_index),
+ .c => return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl_index),
+ .wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclLineNumber(module, decl_index),
+ .plan9 => return @fieldParentPtr(Plan9, "base", base).updateDeclLineNumber(module, decl_index),
.spirv, .nvptx => {},
}
}
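
The dispatch above leans on the `@fieldParentPtr` idiom: every backend embeds a `base: link.File`, and the tag switch recovers the concrete type from the base pointer. A minimal self-contained version of the idiom, with two stand-in backends:

```zig
const File = struct {
    tag: Tag,
    pub const Tag = enum { elf, coff };
};

const Elf = struct {
    base: File,
    line: u32 = 0,

    fn updateDeclLineNumber(self: *Elf, line: u32) void {
        self.line = line;
    }
};

fn updateDeclLineNumber(base: *File, line: u32) void {
    switch (base.tag) {
        .elf => @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(line),
        .coff => {}, // each backend gets its own branch, as above
    }
}
```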
diff --git a/src/link/C.zig b/src/link/C.zig
index 8b05b8b22d..02e5cadfbc 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -219,12 +219,12 @@ pub fn updateDecl(self: *C, module: *Module, decl_index: Module.Decl.Index) !voi
code.shrinkAndFree(module.gpa, code.items.len);
}
-pub fn updateDeclLineNumber(self: *C, module: *Module, decl: *Module.Decl) !void {
+pub fn updateDeclLineNumber(self: *C, module: *Module, decl_index: Module.Decl.Index) !void {
// The C backend does not have the ability to fix line numbers without re-generating
// the entire Decl.
_ = self;
_ = module;
- _ = decl;
+ _ = decl_index;
}
pub fn flush(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) !void {
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index dee3c7c381..2922e783e1 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -79,13 +79,13 @@ entry_addr: ?u32 = null,
/// We store them here so that we can properly dispose of any allocated
/// memory within the atom in the incremental linker.
/// TODO consolidate this.
-decls: std.AutoHashMapUnmanaged(Module.Decl.Index, ?u16) = .{},
+decls: std.AutoHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
/// List of atoms that are either synthetic or map directly to the Zig source program.
-managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
+atoms: std.ArrayListUnmanaged(Atom) = .{},
/// Table of atoms indexed by the symbol index.
-atom_by_index_table: std.AutoHashMapUnmanaged(u32, *Atom) = .{},
+atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},
/// Table of unnamed constants associated with a parent `Decl`.
/// We store them here so that we can free the constants whenever the `Decl`
@@ -124,9 +124,9 @@ const Entry = struct {
sym_index: u32,
};
-const RelocTable = std.AutoHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(Relocation));
-const BaseRelocationTable = std.AutoHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(u32));
-const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(*Atom));
+const RelocTable = std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Relocation));
+const BaseRelocationTable = std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(u32));
+const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Atom.Index));
const default_file_alignment: u16 = 0x200;
const default_size_of_stack_reserve: u32 = 0x1000000;
@@ -137,7 +137,7 @@ const default_size_of_heap_commit: u32 = 0x1000;
const Section = struct {
header: coff.SectionHeader,
- last_atom: ?*Atom = null,
+ last_atom_index: ?Atom.Index = null,
/// A list of atoms that have surplus capacity. This list can have false
/// positives, as functions grow and shrink over time, only sometimes being added
@@ -154,7 +154,34 @@ const Section = struct {
/// overcapacity can be negative. A simple way to have negative overcapacity is to
/// allocate a fresh atom, which will have ideal capacity, and then grow it
/// by 1 byte. It will then have -1 overcapacity.
- free_list: std.ArrayListUnmanaged(*Atom) = .{},
+ free_list: std.ArrayListUnmanaged(Atom.Index) = .{},
+};
+
+const DeclMetadata = struct {
+ atom: Atom.Index,
+ section: u16,
+ /// A list of all export aliases of this Decl.
+ exports: std.ArrayListUnmanaged(u32) = .{},
+
+ fn getExport(m: DeclMetadata, coff_file: *const Coff, name: []const u8) ?u32 {
+ for (m.exports.items) |exp| {
+ if (mem.eql(u8, name, coff_file.getSymbolName(.{
+ .sym_index = exp,
+ .file = null,
+ }))) return exp;
+ }
+ return null;
+ }
+
+ fn getExportPtr(m: *DeclMetadata, coff_file: *Coff, name: []const u8) ?*u32 {
+ for (m.exports.items) |*exp| {
+ if (mem.eql(u8, name, coff_file.getSymbolName(.{
+ .sym_index = exp.*,
+ .file = null,
+ }))) return exp;
+ }
+ return null;
+ }
};
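
`DeclMetadata.getExport` above resolves an export by a linear scan over the Decl's export symbol indices, comparing names through the symbol table. A self-contained analog of that lookup (the `names` slice plays the role of the symbol table here, an assumption for illustration):

    const std = @import("std");

    fn getExport(exports: []const u32, names: []const []const u8, name: []const u8) ?u32 {
        for (exports) |sym_index| {
            if (std.mem.eql(u8, name, names[sym_index])) return sym_index;
        }
        return null;
    }

    test "linear export lookup by name" {
        const names = [_][]const u8{ "main", "foo", "bar" };
        const exports = [_]u32{ 1, 2 }; // this Decl exports "foo" and "bar"
        try std.testing.expectEqual(@as(?u32, 1), getExport(&exports, &names, "foo"));
        try std.testing.expectEqual(@as(?u32, null), getExport(&exports, &names, "main"));
    }
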
pub const PtrWidth = enum {
@@ -168,11 +195,6 @@ pub const PtrWidth = enum {
};
}
};
-pub const SrcFn = void;
-
-pub const Export = struct {
- sym_index: ?u32 = null,
-};
pub const SymbolWithLoc = struct {
// Index into the respective symbol table.
@@ -271,11 +293,7 @@ pub fn deinit(self: *Coff) void {
}
self.sections.deinit(gpa);
- for (self.managed_atoms.items) |atom| {
- gpa.destroy(atom);
- }
- self.managed_atoms.deinit(gpa);
-
+ self.atoms.deinit(gpa);
self.locals.deinit(gpa);
self.globals.deinit(gpa);
@@ -297,7 +315,15 @@ pub fn deinit(self: *Coff) void {
self.imports.deinit(gpa);
self.imports_free_list.deinit(gpa);
self.imports_table.deinit(gpa);
- self.decls.deinit(gpa);
+
+ {
+ var it = self.decls.iterator();
+ while (it.next()) |entry| {
+ entry.value_ptr.exports.deinit(gpa);
+ }
+ self.decls.deinit(gpa);
+ }
+
self.atom_by_index_table.deinit(gpa);
{
@@ -461,17 +487,18 @@ fn growSectionVM(self: *Coff, sect_id: u32, needed_size: u32) !void {
// TODO: enforce order by increasing VM addresses in self.sections container.
// This is required by the loader anyhow as far as I can tell.
for (self.sections.items(.header)[sect_id + 1 ..]) |*next_header, next_sect_id| {
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id + 1 + next_sect_id];
+ const maybe_last_atom_index = self.sections.items(.last_atom_index)[sect_id + 1 + next_sect_id];
next_header.virtual_address += diff;
- if (maybe_last_atom.*) |last_atom| {
- var atom = last_atom;
+ if (maybe_last_atom_index) |last_atom_index| {
+ var atom_index = last_atom_index;
while (true) {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbolPtr(self);
sym.value += diff;
- if (atom.prev) |prev| {
- atom = prev;
+ if (atom.prev_index) |prev_index| {
+ atom_index = prev_index;
} else break;
}
}
@@ -480,14 +507,15 @@ fn growSectionVM(self: *Coff, sect_id: u32, needed_size: u32) !void {
header.virtual_size = increased_size;
}
-fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u32 {
+fn allocateAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 {
const tracy = trace(@src());
defer tracy.end();
+ const atom = self.getAtom(atom_index);
const sect_id = @enumToInt(atom.getSymbol(self).section_number) - 1;
const header = &self.sections.items(.header)[sect_id];
const free_list = &self.sections.items(.free_list)[sect_id];
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
const new_atom_ideal_capacity = if (header.isCode()) padToIdeal(new_atom_size) else new_atom_size;
// We use these to indicate our intention to update metadata, placing the new atom,
@@ -495,7 +523,7 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
- var atom_placement: ?*Atom = null;
+ var atom_placement: ?Atom.Index = null;
var free_list_removal: ?usize = null;
// First we look for an appropriately sized free list node.
@@ -503,7 +531,8 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
var vaddr = blk: {
var i: usize = 0;
while (i < free_list.items.len) {
- const big_atom = free_list.items[i];
+ const big_atom_index = free_list.items[i];
+ const big_atom = self.getAtom(big_atom_index);
// We now have an atom that has too much capacity.
// Is it enough that we could fit this new atom?
const sym = big_atom.getSymbol(self);
@@ -531,34 +560,43 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
const keep_free_list_node = remaining_capacity >= min_text_capacity;
// Set up the metadata to be updated, after errors are no longer possible.
- atom_placement = big_atom;
+ atom_placement = big_atom_index;
if (!keep_free_list_node) {
free_list_removal = i;
}
break :blk new_start_vaddr;
- } else if (maybe_last_atom.*) |last| {
+ } else if (maybe_last_atom_index.*) |last_index| {
+ const last = self.getAtom(last_index);
const last_symbol = last.getSymbol(self);
const ideal_capacity = if (header.isCode()) padToIdeal(last.size) else last.size;
const ideal_capacity_end_vaddr = last_symbol.value + ideal_capacity;
const new_start_vaddr = mem.alignForwardGeneric(u32, ideal_capacity_end_vaddr, alignment);
- atom_placement = last;
+ atom_placement = last_index;
break :blk new_start_vaddr;
} else {
break :blk mem.alignForwardGeneric(u32, header.virtual_address, alignment);
}
};
- const expand_section = atom_placement == null or atom_placement.?.next == null;
+ const expand_section = if (atom_placement) |placement_index|
+ self.getAtom(placement_index).next_index == null
+ else
+ true;
if (expand_section) {
const sect_capacity = self.allocatedSize(header.pointer_to_raw_data);
const needed_size: u32 = (vaddr + new_atom_size) - header.virtual_address;
if (needed_size > sect_capacity) {
const new_offset = self.findFreeSpace(needed_size, default_file_alignment);
- const current_size = if (maybe_last_atom.*) |last_atom| blk: {
+ const current_size = if (maybe_last_atom_index.*) |last_atom_index| blk: {
+ const last_atom = self.getAtom(last_atom_index);
const sym = last_atom.getSymbol(self);
break :blk (sym.value + last_atom.size) - header.virtual_address;
} else 0;
- log.debug("moving {s} from 0x{x} to 0x{x}", .{ self.getSectionName(header), header.pointer_to_raw_data, new_offset });
+ log.debug("moving {s} from 0x{x} to 0x{x}", .{
+ self.getSectionName(header),
+ header.pointer_to_raw_data,
+ new_offset,
+ });
const amt = try self.base.file.?.copyRangeAll(
header.pointer_to_raw_data,
self.base.file.?,
@@ -577,26 +615,34 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
header.virtual_size = @max(header.virtual_size, needed_size);
header.size_of_raw_data = needed_size;
- maybe_last_atom.* = atom;
+ maybe_last_atom_index.* = atom_index;
}
- atom.size = new_atom_size;
- atom.alignment = alignment;
+ {
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.size = new_atom_size;
+ atom_ptr.alignment = alignment;
+ }
- if (atom.prev) |prev| {
- prev.next = atom.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
}
- if (atom.next) |next| {
- next.prev = atom.prev;
+ if (atom.next_index) |next_index| {
+ const next = self.getAtomPtr(next_index);
+ next.prev_index = atom.prev_index;
}
- if (atom_placement) |big_atom| {
- atom.prev = big_atom;
- atom.next = big_atom.next;
- big_atom.next = atom;
+ if (atom_placement) |big_atom_index| {
+ const big_atom = self.getAtomPtr(big_atom_index);
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = big_atom_index;
+ atom_ptr.next_index = big_atom.next_index;
+ big_atom.next_index = atom_index;
} else {
- atom.prev = null;
- atom.next = null;
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = null;
+ atom_ptr.next_index = null;
}
if (free_list_removal) |i| {
_ = free_list.swapRemove(i);
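
The `atom_placement`/`free_list_removal` locals above stage metadata updates so they are committed only after all fallible operations have succeeded. A minimal sketch of the same discipline, assuming nothing beyond std:

    const std = @import("std");

    fn appendStaged(gpa: std.mem.Allocator, list: *std.ArrayListUnmanaged(u32), v: u32) !void {
        try list.ensureUnusedCapacity(gpa, 1); // all errors happen here
        list.appendAssumeCapacity(v); // commit; infallible by construction
    }

    test "stage then commit" {
        const gpa = std.testing.allocator;
        var list = std.ArrayListUnmanaged(u32){};
        defer list.deinit(gpa);
        try appendStaged(gpa, &list, 7);
        try std.testing.expectEqual(@as(u32, 7), list.items[0]);
    }
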
@@ -701,24 +747,37 @@ pub fn allocateImportEntry(self: *Coff, target: SymbolWithLoc) !u32 {
return index;
}
-fn createGotAtom(self: *Coff, target: SymbolWithLoc) !*Atom {
+pub fn createAtom(self: *Coff) !Atom.Index {
const gpa = self.base.allocator;
- const atom = try gpa.create(Atom);
- errdefer gpa.destroy(atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+ const atom_index = @intCast(Atom.Index, self.atoms.items.len);
+ const atom = try self.atoms.addOne(gpa);
+ const sym_index = try self.allocateSymbol();
+ try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
+ atom.* = .{
+ .sym_index = sym_index,
+ .file = null,
+ .size = 0,
+ .alignment = 0,
+ .prev_index = null,
+ .next_index = null,
+ };
+ log.debug("creating ATOM(%{d}) at index {d}", .{ sym_index, atom_index });
+ return atom_index;
+}
+
+fn createGotAtom(self: *Coff, target: SymbolWithLoc) !Atom.Index {
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
- try self.managed_atoms.append(gpa, atom);
-
const sym = atom.getSymbolPtr(self);
sym.section_number = @intToEnum(coff.SectionNumber, self.got_section_index.? + 1);
- sym.value = try self.allocateAtom(atom, atom.size, atom.alignment);
+ sym.value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
log.debug("allocated GOT atom at 0x{x}", .{sym.value});
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = .direct,
.target = target,
.offset = 0,
@@ -732,49 +791,46 @@ fn createGotAtom(self: *Coff, target: SymbolWithLoc) !*Atom {
.UNDEFINED => @panic("TODO generate a binding for undefined GOT target"),
.ABSOLUTE => {},
.DEBUG => unreachable, // not possible
- else => try atom.addBaseRelocation(self, 0),
+ else => try Atom.addBaseRelocation(self, atom_index, 0),
}
- return atom;
+ return atom_index;
}
-fn createImportAtom(self: *Coff) !*Atom {
- const gpa = self.base.allocator;
- const atom = try gpa.create(Atom);
- errdefer gpa.destroy(atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+fn createImportAtom(self: *Coff) !Atom.Index {
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
- try self.managed_atoms.append(gpa, atom);
-
const sym = atom.getSymbolPtr(self);
sym.section_number = @intToEnum(coff.SectionNumber, self.idata_section_index.? + 1);
- sym.value = try self.allocateAtom(atom, atom.size, atom.alignment);
+ sym.value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
log.debug("allocated import atom at 0x{x}", .{sym.value});
- return atom;
+ return atom_index;
}
-fn growAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u32 {
+fn growAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const align_ok = mem.alignBackwardGeneric(u32, sym.value, alignment) == sym.value;
const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
if (!need_realloc) return sym.value;
- return self.allocateAtom(atom, new_atom_size, alignment);
+ return self.allocateAtom(atom_index, new_atom_size, alignment);
}
-fn shrinkAtom(self: *Coff, atom: *Atom, new_block_size: u32) void {
+fn shrinkAtom(self: *Coff, atom_index: Atom.Index, new_block_size: u32) void {
_ = self;
- _ = atom;
+ _ = atom_index;
_ = new_block_size;
// TODO check the new capacity, and if it crosses the size threshold into a big enough
// capacity, insert a free list node for it.
}
-fn writeAtom(self: *Coff, atom: *Atom, code: []const u8) !void {
+fn writeAtom(self: *Coff, atom_index: Atom.Index, code: []const u8) !void {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const section = self.sections.get(@enumToInt(sym.section_number) - 1);
const file_offset = section.header.pointer_to_raw_data + sym.value - section.header.virtual_address;
@@ -784,18 +840,18 @@ fn writeAtom(self: *Coff, atom: *Atom, code: []const u8) !void {
file_offset + code.len,
});
try self.base.file.?.pwriteAll(code, file_offset);
- try self.resolveRelocs(atom);
+ try self.resolveRelocs(atom_index);
}
-fn writePtrWidthAtom(self: *Coff, atom: *Atom) !void {
+fn writePtrWidthAtom(self: *Coff, atom_index: Atom.Index) !void {
switch (self.ptr_width) {
.p32 => {
var buffer: [@sizeOf(u32)]u8 = [_]u8{0} ** @sizeOf(u32);
- try self.writeAtom(atom, &buffer);
+ try self.writeAtom(atom_index, &buffer);
},
.p64 => {
var buffer: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64);
- try self.writeAtom(atom, &buffer);
+ try self.writeAtom(atom_index, &buffer);
},
}
}
@@ -815,7 +871,8 @@ fn markRelocsDirtyByAddress(self: *Coff, addr: u32) void {
var it = self.relocs.valueIterator();
while (it.next()) |relocs| {
for (relocs.items) |*reloc| {
- const target_atom = reloc.getTargetAtom(self) orelse continue;
+ const target_atom_index = reloc.getTargetAtomIndex(self) orelse continue;
+ const target_atom = self.getAtom(target_atom_index);
const target_sym = target_atom.getSymbol(self);
if (target_sym.value < addr) continue;
reloc.dirty = true;
@@ -823,24 +880,26 @@ fn markRelocsDirtyByAddress(self: *Coff, addr: u32) void {
}
}
-fn resolveRelocs(self: *Coff, atom: *Atom) !void {
- const relocs = self.relocs.get(atom) orelse return;
+fn resolveRelocs(self: *Coff, atom_index: Atom.Index) !void {
+ const relocs = self.relocs.get(atom_index) orelse return;
- log.debug("relocating '{s}'", .{atom.getName(self)});
+ log.debug("relocating '{s}'", .{self.getAtom(atom_index).getName(self)});
for (relocs.items) |*reloc| {
if (!reloc.dirty) continue;
- try reloc.resolve(atom, self);
+ try reloc.resolve(atom_index, self);
}
}
-fn freeAtom(self: *Coff, atom: *Atom) void {
- log.debug("freeAtom {*}", .{atom});
+fn freeAtom(self: *Coff, atom_index: Atom.Index) void {
+ log.debug("freeAtom {d}", .{atom_index});
+
+ const gpa = self.base.allocator;
// Remove any relocs and base relocs associated with this Atom
- self.freeRelocationsForAtom(atom);
+ Atom.freeRelocations(self, atom_index);
- const gpa = self.base.allocator;
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const sect_id = @enumToInt(sym.section_number) - 1;
const free_list = &self.sections.items(.free_list)[sect_id];
@@ -849,45 +908,46 @@ fn freeAtom(self: *Coff, atom: *Atom) void {
var i: usize = 0;
// TODO turn free_list into a hash map
while (i < free_list.items.len) {
- if (free_list.items[i] == atom) {
+ if (free_list.items[i] == atom_index) {
_ = free_list.swapRemove(i);
continue;
}
- if (free_list.items[i] == atom.prev) {
+ if (free_list.items[i] == atom.prev_index) {
already_have_free_list_node = true;
}
i += 1;
}
}
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
- if (maybe_last_atom.*) |last_atom| {
- if (last_atom == atom) {
- if (atom.prev) |prev| {
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
+ if (maybe_last_atom_index.*) |last_atom_index| {
+ if (last_atom_index == atom_index) {
+ if (atom.prev_index) |prev_index| {
// TODO shrink the section size here
- maybe_last_atom.* = prev;
+ maybe_last_atom_index.* = prev_index;
} else {
- maybe_last_atom.* = null;
+ maybe_last_atom_index.* = null;
}
}
}
- if (atom.prev) |prev| {
- prev.next = atom.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
- if (!already_have_free_list_node and prev.freeListEligible(self)) {
+ if (!already_have_free_list_node and prev.*.freeListEligible(self)) {
// The free list is heuristics, it doesn't have to be perfect, so we can
// ignore the OOM here.
- free_list.append(gpa, prev) catch {};
+ free_list.append(gpa, prev_index) catch {};
}
} else {
- atom.prev = null;
+ self.getAtomPtr(atom_index).prev_index = null;
}
- if (atom.next) |next| {
- next.prev = atom.prev;
+ if (atom.next_index) |next_index| {
+ self.getAtomPtr(next_index).prev_index = atom.prev_index;
} else {
- atom.next = null;
+ self.getAtomPtr(atom_index).next_index = null;
}
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
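
The unlink logic above re-fetches neighbors through `getAtomPtr` so that links are rewritten in the backing array rather than on a stale copy. A self-contained sketch of index-based unlinking (simplified field names, not the linker's real layout):

    const std = @import("std");

    const Node = struct { prev: ?u32, next: ?u32 };

    fn unlink(nodes: []Node, index: u32) void {
        const node = nodes[index];
        if (node.prev) |p| nodes[p].next = node.next;
        if (node.next) |n| nodes[n].prev = node.prev;
        nodes[index].prev = null;
        nodes[index].next = null;
    }

    test "unlink the middle node" {
        var nodes = [_]Node{
            .{ .prev = null, .next = 1 },
            .{ .prev = 0, .next = 2 },
            .{ .prev = 1, .next = null },
        };
        unlink(&nodes, 1);
        try std.testing.expectEqual(@as(?u32, 2), nodes[0].next);
        try std.testing.expectEqual(@as(?u32, 0), nodes[2].prev);
    }
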
@@ -910,7 +970,7 @@ fn freeAtom(self: *Coff, atom: *Atom) void {
self.locals.items[sym_index].section_number = .UNDEFINED;
_ = self.atom_by_index_table.remove(sym_index);
log.debug(" adding local symbol index {d} to free list", .{sym_index});
- atom.sym_index = 0;
+ self.getAtomPtr(atom_index).sym_index = 0;
}
pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
@@ -927,15 +987,10 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
- const atom = &decl.link.coff;
- try atom.ensureInitialized(self);
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (gop.found_existing) {
- self.freeUnnamedConsts(decl_index);
- self.freeRelocationsForAtom(&decl.link.coff);
- } else {
- gop.value_ptr.* = null;
- }
+
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ self.freeUnnamedConsts(decl_index);
+ Atom.freeRelocations(self, atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -979,11 +1034,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
}
const unnamed_consts = gop.value_ptr;
- const atom = try gpa.create(Atom);
- errdefer gpa.destroy(atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
- try self.managed_atoms.append(gpa, atom);
+ const atom_index = try self.createAtom();
const sym_name = blk: {
const decl_name = try decl.getFullyQualifiedName(mod);
@@ -993,11 +1044,15 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
break :blk try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
};
defer gpa.free(sym_name);
- try self.setSymbolName(atom.getSymbolPtr(self), sym_name);
- atom.getSymbolPtr(self).section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? + 1);
+ {
+ const atom = self.getAtom(atom_index);
+ const sym = atom.getSymbolPtr(self);
+ try self.setSymbolName(sym, sym_name);
+ sym.section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? + 1);
+ }
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), tv, &code_buffer, .none, .{
- .parent_atom_index = atom.getSymbolIndex().?,
+ .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
});
const code = switch (res) {
.ok => code_buffer.items,
@@ -1010,17 +1065,18 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
};
const required_alignment = tv.ty.abiAlignment(self.base.options.target);
+ const atom = self.getAtomPtr(atom_index);
atom.alignment = required_alignment;
atom.size = @intCast(u32, code.len);
- atom.getSymbolPtr(self).value = try self.allocateAtom(atom, atom.size, atom.alignment);
- errdefer self.freeAtom(atom);
+ atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
+ errdefer self.freeAtom(atom_index);
- try unnamed_consts.append(gpa, atom);
+ try unnamed_consts.append(gpa, atom_index);
log.debug("allocated atom for {s} at 0x{x}", .{ sym_name, atom.getSymbol(self).value });
log.debug(" (required alignment 0x{x})", .{required_alignment});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
return atom.getSymbolIndex().?;
}
@@ -1047,14 +1103,9 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
}
}
- const atom = &decl.link.coff;
- try atom.ensureInitialized(self);
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (gop.found_existing) {
- self.freeRelocationsForAtom(atom);
- } else {
- gop.value_ptr.* = null;
- }
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ Atom.freeRelocations(self, atom_index);
+ const atom = self.getAtom(atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -1064,7 +1115,7 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none, .{
- .parent_atom_index = decl.link.coff.getSymbolIndex().?,
+ .parent_atom_index = atom.getSymbolIndex().?,
});
const code = switch (res) {
.ok => code_buffer.items,
@@ -1082,7 +1133,20 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}
-fn getDeclOutputSection(self: *Coff, decl: *Module.Decl) u16 {
+pub fn getOrCreateAtomForDecl(self: *Coff, decl_index: Module.Decl.Index) !Atom.Index {
+ const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{
+ .atom = try self.createAtom(),
+ .section = self.getDeclOutputSection(decl_index),
+ .exports = .{},
+ };
+ }
+ return gop.value_ptr.atom;
+}
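
`getOrCreateAtomForDecl` relies on the `getOrPut` idiom: a single hash-map lookup both tests for and reserves the slot, so the atom is created exactly once per Decl. A sketch of the idiom in isolation:

    const std = @import("std");

    test "getOrPut initializes a slot exactly once" {
        const gpa = std.testing.allocator;
        var map = std.AutoHashMapUnmanaged(u32, u32){};
        defer map.deinit(gpa);

        var next_atom: u32 = 0;
        var i: usize = 0;
        while (i < 3) : (i += 1) {
            const gop = try map.getOrPut(gpa, 42);
            if (!gop.found_existing) {
                gop.value_ptr.* = next_atom; // create the atom only on first sight
                next_atom += 1;
            }
        }
        try std.testing.expectEqual(@as(u32, 0), map.get(42).?);
        try std.testing.expectEqual(@as(u32, 1), next_atom);
    }
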
+
+fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 {
+ const decl = self.base.options.module.?.declPtr(decl_index);
const ty = decl.ty;
const zig_ty = ty.zigTypeTag();
const val = decl.val;
@@ -1117,14 +1181,11 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
const required_alignment = decl.getAlignment(self.base.options.target);
- const decl_ptr = self.decls.getPtr(decl_index).?;
- if (decl_ptr.* == null) {
- decl_ptr.* = self.getDeclOutputSection(decl);
- }
- const sect_index = decl_ptr.*.?;
-
+ const decl_metadata = self.decls.get(decl_index).?;
+ const atom_index = decl_metadata.atom;
+ const atom = self.getAtom(atom_index);
+ const sect_index = decl_metadata.section;
const code_len = @intCast(u32, code.len);
- const atom = &decl.link.coff;
if (atom.size != 0) {
const sym = atom.getSymbolPtr(self);
@@ -1135,7 +1196,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
const capacity = atom.capacity(self);
const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, sym.value, required_alignment);
if (need_realloc) {
- const vaddr = try self.growAtom(atom, code_len, required_alignment);
+ const vaddr = try self.growAtom(atom_index, code_len, required_alignment);
log.debug("growing {s} from 0x{x} to 0x{x}", .{ decl_name, sym.value, vaddr });
log.debug(" (required alignment 0x{x}", .{required_alignment});
@@ -1143,49 +1204,43 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
sym.value = vaddr;
log.debug(" (updating GOT entry)", .{});
const got_target = SymbolWithLoc{ .sym_index = atom.getSymbolIndex().?, .file = null };
- const got_atom = self.getGotAtomForSymbol(got_target).?;
+ const got_atom_index = self.getGotAtomIndexForSymbol(got_target).?;
self.markRelocsDirtyByTarget(got_target);
- try self.writePtrWidthAtom(got_atom);
+ try self.writePtrWidthAtom(got_atom_index);
}
} else if (code_len < atom.size) {
- self.shrinkAtom(atom, code_len);
+ self.shrinkAtom(atom_index, code_len);
}
- atom.size = code_len;
+ self.getAtomPtr(atom_index).size = code_len;
} else {
const sym = atom.getSymbolPtr(self);
try self.setSymbolName(sym, decl_name);
sym.section_number = @intToEnum(coff.SectionNumber, sect_index + 1);
sym.type = .{ .complex_type = complex_type, .base_type = .NULL };
- const vaddr = try self.allocateAtom(atom, code_len, required_alignment);
- errdefer self.freeAtom(atom);
+ const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
+ errdefer self.freeAtom(atom_index);
log.debug("allocated atom for {s} at 0x{x}", .{ decl_name, vaddr });
- atom.size = code_len;
+ self.getAtomPtr(atom_index).size = code_len;
sym.value = vaddr;
const got_target = SymbolWithLoc{ .sym_index = atom.getSymbolIndex().?, .file = null };
const got_index = try self.allocateGotEntry(got_target);
- const got_atom = try self.createGotAtom(got_target);
+ const got_atom_index = try self.createGotAtom(got_target);
+ const got_atom = self.getAtom(got_atom_index);
self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
- try self.writePtrWidthAtom(got_atom);
+ try self.writePtrWidthAtom(got_atom_index);
}
self.markRelocsDirtyByTarget(atom.getSymbolWithLoc());
- try self.writeAtom(atom, code);
-}
-
-fn freeRelocationsForAtom(self: *Coff, atom: *Atom) void {
- var removed_relocs = self.relocs.fetchRemove(atom);
- if (removed_relocs) |*relocs| relocs.value.deinit(self.base.allocator);
- var removed_base_relocs = self.base_relocs.fetchRemove(atom);
- if (removed_base_relocs) |*base_relocs| base_relocs.value.deinit(self.base.allocator);
+ try self.writeAtom(atom_index, code);
}
fn freeUnnamedConsts(self: *Coff, decl_index: Module.Decl.Index) void {
const gpa = self.base.allocator;
const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return;
- for (unnamed_consts.items) |atom| {
- self.freeAtom(atom);
+ for (unnamed_consts.items) |atom_index| {
+ self.freeAtom(atom_index);
}
unnamed_consts.clearAndFree(gpa);
}
@@ -1200,11 +1255,11 @@ pub fn freeDecl(self: *Coff, decl_index: Module.Decl.Index) void {
log.debug("freeDecl {*}", .{decl});
- if (self.decls.fetchRemove(decl_index)) |kv| {
- if (kv.value) |_| {
- self.freeAtom(&decl.link.coff);
- self.freeUnnamedConsts(decl_index);
- }
+ if (self.decls.fetchRemove(decl_index)) |const_kv| {
+ var kv = const_kv;
+ self.freeAtom(kv.value.atom);
+ self.freeUnnamedConsts(decl_index);
+ kv.value.exports.deinit(self.base.allocator);
}
}
@@ -1257,16 +1312,10 @@ pub fn updateDeclExports(
const gpa = self.base.allocator;
const decl = module.declPtr(decl_index);
- const atom = &decl.link.coff;
-
- if (atom.getSymbolIndex() == null) return;
-
- const gop = try self.decls.getOrPut(gpa, decl_index);
- if (!gop.found_existing) {
- gop.value_ptr.* = self.getDeclOutputSection(decl);
- }
-
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const atom = self.getAtom(atom_index);
const decl_sym = atom.getSymbol(self);
+ const decl_metadata = self.decls.getPtr(decl_index).?;
for (exports) |exp| {
log.debug("adding new export '{s}'", .{exp.options.name});
@@ -1301,9 +1350,9 @@ pub fn updateDeclExports(
continue;
}
- const sym_index = exp.link.coff.sym_index orelse blk: {
+ const sym_index = decl_metadata.getExport(self, exp.options.name) orelse blk: {
const sym_index = try self.allocateSymbol();
- exp.link.coff.sym_index = sym_index;
+ try decl_metadata.exports.append(gpa, sym_index);
break :blk sym_index;
};
const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
@@ -1326,16 +1375,15 @@ pub fn updateDeclExports(
}
}
-pub fn deleteExport(self: *Coff, exp: Export) void {
+pub fn deleteDeclExport(self: *Coff, decl_index: Module.Decl.Index, name: []const u8) void {
if (self.llvm_object) |_| return;
- const sym_index = exp.sym_index orelse return;
+ const metadata = self.decls.getPtr(decl_index) orelse return;
+ const sym_index = metadata.getExportPtr(self, name) orelse return;
const gpa = self.base.allocator;
-
- const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
+ const sym_loc = SymbolWithLoc{ .sym_index = sym_index.*, .file = null };
const sym = self.getSymbolPtr(sym_loc);
- const sym_name = self.getSymbolName(sym_loc);
- log.debug("deleting export '{s}'", .{sym_name});
+ log.debug("deleting export '{s}'", .{name});
assert(sym.storage_class == .EXTERNAL and sym.section_number != .UNDEFINED);
sym.* = .{
.name = [_]u8{0} ** 8,
@@ -1345,9 +1393,9 @@ pub fn deleteExport(self: *Coff, exp: Export) void {
.storage_class = .NULL,
.number_of_aux_symbols = 0,
};
- self.locals_free_list.append(gpa, sym_index) catch {};
+ self.locals_free_list.append(gpa, sym_index.*) catch {};
- if (self.resolver.fetchRemove(sym_name)) |entry| {
+ if (self.resolver.fetchRemove(name)) |entry| {
defer gpa.free(entry.key);
self.globals_free_list.append(gpa, entry.value) catch {};
self.globals.items[entry.value] = .{
@@ -1355,6 +1403,8 @@ pub fn deleteExport(self: *Coff, exp: Export) void {
.file = null,
};
}
+
+ sym_index.* = 0;
}
fn resolveGlobalSymbol(self: *Coff, current: SymbolWithLoc) !void {
@@ -1419,9 +1469,10 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
if (self.imports_table.contains(global)) continue;
const import_index = try self.allocateImportEntry(global);
- const import_atom = try self.createImportAtom();
+ const import_atom_index = try self.createImportAtom();
+ const import_atom = self.getAtom(import_atom_index);
self.imports.items[import_index].sym_index = import_atom.getSymbolIndex().?;
- try self.writePtrWidthAtom(import_atom);
+ try self.writePtrWidthAtom(import_atom_index);
}
if (build_options.enable_logging) {
@@ -1455,22 +1506,14 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
}
}
-pub fn getDeclVAddr(
- self: *Coff,
- decl_index: Module.Decl.Index,
- reloc_info: link.File.RelocInfo,
-) !u64 {
- const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
-
+pub fn getDeclVAddr(self: *Coff, decl_index: Module.Decl.Index, reloc_info: link.File.RelocInfo) !u64 {
assert(self.llvm_object == null);
- try decl.link.coff.ensureInitialized(self);
- const sym_index = decl.link.coff.getSymbolIndex().?;
-
- const atom = self.getAtomForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
+ const this_atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const sym_index = self.getAtom(this_atom_index).getSymbolIndex().?;
+ const atom_index = self.getAtomIndexForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
const target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = .direct,
.target = target,
.offset = @intCast(u32, reloc_info.offset),
@@ -1478,7 +1521,7 @@ pub fn getDeclVAddr(
.pcrel = false,
.length = 3,
});
- try atom.addBaseRelocation(self, @intCast(u32, reloc_info.offset));
+ try Atom.addBaseRelocation(self, atom_index, @intCast(u32, reloc_info.offset));
return 0;
}
@@ -1505,10 +1548,10 @@ pub fn getGlobalSymbol(self: *Coff, name: []const u8) !u32 {
return global_index;
}
-pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !void {
+pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !void {
_ = self;
_ = module;
- _ = decl;
+ _ = decl_index;
log.debug("TODO implement updateDeclLineNumber", .{});
}
@@ -1529,7 +1572,8 @@ fn writeBaseRelocations(self: *Coff) !void {
var it = self.base_relocs.iterator();
while (it.next()) |entry| {
- const atom = entry.key_ptr.*;
+ const atom_index = entry.key_ptr.*;
+ const atom = self.getAtom(atom_index);
const offsets = entry.value_ptr.*;
for (offsets.items) |offset| {
@@ -1613,7 +1657,8 @@ fn writeImportTable(self: *Coff) !void {
const gpa = self.base.allocator;
const section = self.sections.get(self.idata_section_index.?);
- const last_atom = section.last_atom orelse return;
+ const last_atom_index = section.last_atom_index orelse return;
+ const last_atom = self.getAtom(last_atom_index);
const iat_rva = section.header.virtual_address;
const iat_size = last_atom.getSymbol(self).value + last_atom.size * 2 - iat_rva; // account for sentinel zero pointer
@@ -2051,27 +2096,37 @@ pub fn getOrPutGlobalPtr(self: *Coff, name: []const u8) !GetOrPutGlobalPtrResult
return GetOrPutGlobalPtrResult{ .found_existing = false, .value_ptr = ptr };
}
+pub fn getAtom(self: *const Coff, atom_index: Atom.Index) Atom {
+ assert(atom_index < self.atoms.items.len);
+ return self.atoms.items[atom_index];
+}
+
+pub fn getAtomPtr(self: *Coff, atom_index: Atom.Index) *Atom {
+ assert(atom_index < self.atoms.items.len);
+ return &self.atoms.items[atom_index];
+}
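
These accessors are the heart of the pointer-to-index migration: atoms now live in a flat `std.ArrayListUnmanaged(Atom)`, and an `Atom.Index` handle stays valid across growth while a raw element pointer does not, which is why call sites re-fetch via `getAtomPtr` after any operation that may append. A self-contained demonstration of the hazard being avoided:

    const std = @import("std");

    const Atom = struct { size: u32 };

    test "indices survive ArrayList growth; element pointers may not" {
        const gpa = std.testing.allocator;
        var atoms = std.ArrayListUnmanaged(Atom){};
        defer atoms.deinit(gpa);

        try atoms.append(gpa, .{ .size = 1 });
        const index: u32 = 0; // stable handle, like Atom.Index
        // const ptr = &atoms.items[0]; // would dangle once the list grows

        var i: usize = 0;
        while (i < 1000) : (i += 1) try atoms.append(gpa, .{ .size = 0 });

        // Always re-fetch through the index after a possible reallocation.
        try std.testing.expectEqual(@as(u32, 1), atoms.items[index].size);
    }
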
+
/// Returns the atom index if an atom is referenced by the symbol described by the `sym_loc` descriptor.
/// Returns null otherwise.
-pub fn getAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom {
+pub fn getAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
assert(sym_loc.file == null); // TODO linking with object files
return self.atom_by_index_table.get(sym_loc.sym_index);
}
/// Returns GOT atom that references `sym_loc` if one exists.
/// Returns null otherwise.
-pub fn getGotAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom {
+pub fn getGotAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
const got_index = self.got_entries_table.get(sym_loc) orelse return null;
const got_entry = self.got_entries.items[got_index];
- return self.getAtomForSymbol(.{ .sym_index = got_entry.sym_index, .file = null });
+ return self.getAtomIndexForSymbol(.{ .sym_index = got_entry.sym_index, .file = null });
}
/// Returns import atom that references `sym_loc` if one exists.
/// Returns null otherwise.
-pub fn getImportAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom {
+pub fn getImportAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
const imports_index = self.imports_table.get(sym_loc) orelse return null;
const imports_entry = self.imports.items[imports_index];
- return self.getAtomForSymbol(.{ .sym_index = imports_entry.sym_index, .file = null });
+ return self.getAtomIndexForSymbol(.{ .sym_index = imports_entry.sym_index, .file = null });
}
fn setSectionName(self: *Coff, header: *coff.SectionHeader, name: []const u8) !void {
diff --git a/src/link/Coff/Atom.zig b/src/link/Coff/Atom.zig
index 78824eac1d..80c04a8fa1 100644
--- a/src/link/Coff/Atom.zig
+++ b/src/link/Coff/Atom.zig
@@ -27,23 +27,10 @@ alignment: u32,
/// Indices of the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `Atom`.
-prev: ?*Atom,
-next: ?*Atom,
-
-pub const empty = Atom{
- .sym_index = 0,
- .file = null,
- .size = 0,
- .alignment = 0,
- .prev = null,
- .next = null,
-};
-
-pub fn ensureInitialized(self: *Atom, coff_file: *Coff) !void {
- if (self.getSymbolIndex() != null) return; // Already initialized
- self.sym_index = try coff_file.allocateSymbol();
- try coff_file.atom_by_index_table.putNoClobber(coff_file.base.allocator, self.sym_index, self);
-}
+prev_index: ?Index,
+next_index: ?Index,
+
+pub const Index = u32;
pub fn getSymbolIndex(self: Atom) ?u32 {
if (self.sym_index == 0) return null;
@@ -85,7 +72,8 @@ pub fn getName(self: Atom, coff_file: *const Coff) []const u8 {
/// Returns how much room there is to grow in virtual address space.
pub fn capacity(self: Atom, coff_file: *const Coff) u32 {
const self_sym = self.getSymbol(coff_file);
- if (self.next) |next| {
+ if (self.next_index) |next_index| {
+ const next = coff_file.getAtom(next_index);
const next_sym = next.getSymbol(coff_file);
return next_sym.value - self_sym.value;
} else {
@@ -97,7 +85,8 @@ pub fn capacity(self: Atom, coff_file: *const Coff) u32 {
pub fn freeListEligible(self: Atom, coff_file: *const Coff) bool {
// No need to keep a free list node for the last atom.
- const next = self.next orelse return false;
+ const next_index = self.next_index orelse return false;
+ const next = coff_file.getAtom(next_index);
const self_sym = self.getSymbol(coff_file);
const next_sym = next.getSymbol(coff_file);
const cap = next_sym.value - self_sym.value;
@@ -107,22 +96,33 @@ pub fn freeListEligible(self: Atom, coff_file: *const Coff) bool {
return surplus >= Coff.min_text_capacity;
}
-pub fn addRelocation(self: *Atom, coff_file: *Coff, reloc: Relocation) !void {
+pub fn addRelocation(coff_file: *Coff, atom_index: Index, reloc: Relocation) !void {
const gpa = coff_file.base.allocator;
log.debug(" (adding reloc of type {s} to target %{d})", .{ @tagName(reloc.type), reloc.target.sym_index });
- const gop = try coff_file.relocs.getOrPut(gpa, self);
+ const gop = try coff_file.relocs.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, reloc);
}
-pub fn addBaseRelocation(self: *Atom, coff_file: *Coff, offset: u32) !void {
+pub fn addBaseRelocation(coff_file: *Coff, atom_index: Index, offset: u32) !void {
const gpa = coff_file.base.allocator;
- log.debug(" (adding base relocation at offset 0x{x} in %{d})", .{ offset, self.sym_index });
- const gop = try coff_file.base_relocs.getOrPut(gpa, self);
+ log.debug(" (adding base relocation at offset 0x{x} in %{d})", .{
+ offset,
+ coff_file.getAtom(atom_index).getSymbolIndex().?,
+ });
+ const gop = try coff_file.base_relocs.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, offset);
}
+
+pub fn freeRelocations(coff_file: *Coff, atom_index: Index) void {
+ const gpa = coff_file.base.allocator;
+ var removed_relocs = coff_file.relocs.fetchRemove(atom_index);
+ if (removed_relocs) |*relocs| relocs.value.deinit(gpa);
+ var removed_base_relocs = coff_file.base_relocs.fetchRemove(atom_index);
+ if (removed_base_relocs) |*base_relocs| base_relocs.value.deinit(gpa);
+}
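
`freeRelocations` uses `fetchRemove`, which deletes the entry and hands back the owned value so the per-atom list can be deinitialized in one step. A sketch of the pattern, assuming only std types:

    const std = @import("std");

    test "fetchRemove hands back the value for cleanup" {
        const gpa = std.testing.allocator;
        var relocs = std.AutoHashMapUnmanaged(u32, std.ArrayListUnmanaged(u8)){};
        defer relocs.deinit(gpa);

        var list = std.ArrayListUnmanaged(u8){};
        try list.append(gpa, 0xAA);
        try relocs.putNoClobber(gpa, 7, list);

        if (relocs.fetchRemove(7)) |kv| {
            var owned = kv.value;
            owned.deinit(gpa); // free the list now that the entry is gone
        }
        try std.testing.expect(relocs.get(7) == null);
    }
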
diff --git a/src/link/Coff/Relocation.zig b/src/link/Coff/Relocation.zig
index 12a34b332d..1ba1d7a1c1 100644
--- a/src/link/Coff/Relocation.zig
+++ b/src/link/Coff/Relocation.zig
@@ -46,33 +46,35 @@ length: u2,
dirty: bool = true,
/// Returns the index of the Atom that is the target node of this relocation edge (if any).
-pub fn getTargetAtom(self: Relocation, coff_file: *Coff) ?*Atom {
+pub fn getTargetAtomIndex(self: Relocation, coff_file: *const Coff) ?Atom.Index {
switch (self.type) {
.got,
.got_page,
.got_pageoff,
- => return coff_file.getGotAtomForSymbol(self.target),
+ => return coff_file.getGotAtomIndexForSymbol(self.target),
.direct,
.page,
.pageoff,
- => return coff_file.getAtomForSymbol(self.target),
+ => return coff_file.getAtomIndexForSymbol(self.target),
.import,
.import_page,
.import_pageoff,
- => return coff_file.getImportAtomForSymbol(self.target),
+ => return coff_file.getImportAtomIndexForSymbol(self.target),
}
}
-pub fn resolve(self: *Relocation, atom: *Atom, coff_file: *Coff) !void {
+pub fn resolve(self: *Relocation, atom_index: Atom.Index, coff_file: *Coff) !void {
+ const atom = coff_file.getAtom(atom_index);
const source_sym = atom.getSymbol(coff_file);
const source_section = coff_file.sections.get(@enumToInt(source_sym.section_number) - 1).header;
const source_vaddr = source_sym.value + self.offset;
const file_offset = source_section.pointer_to_raw_data + source_sym.value - source_section.virtual_address;
- const target_atom = self.getTargetAtom(coff_file) orelse return;
+ const target_atom_index = self.getTargetAtomIndex(coff_file) orelse return;
+ const target_atom = coff_file.getAtom(target_atom_index);
const target_vaddr = target_atom.getSymbol(coff_file).value;
const target_vaddr_with_addend = target_vaddr + self.addend;
@@ -107,7 +109,7 @@ const Context = struct {
image_base: u64,
};
-fn resolveAarch64(self: *Relocation, ctx: Context, coff_file: *Coff) !void {
+fn resolveAarch64(self: Relocation, ctx: Context, coff_file: *Coff) !void {
var buffer: [@sizeOf(u64)]u8 = undefined;
switch (self.length) {
2 => {
@@ -197,7 +199,7 @@ fn resolveAarch64(self: *Relocation, ctx: Context, coff_file: *Coff) !void {
}
}
-fn resolveX86(self: *Relocation, ctx: Context, coff_file: *Coff) !void {
+fn resolveX86(self: Relocation, ctx: Context, coff_file: *Coff) !void {
switch (self.type) {
.got_page => unreachable,
.got_pageoff => unreachable,
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 1b65bbb04b..a3d0aa8a53 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -18,31 +18,36 @@ const LinkBlock = File.LinkBlock;
const LinkFn = File.LinkFn;
const LinkerLoad = @import("../codegen.zig").LinkerLoad;
const Module = @import("../Module.zig");
-const Value = @import("../value.zig").Value;
+const StringTable = @import("strtab.zig").StringTable;
const Type = @import("../type.zig").Type;
+const Value = @import("../value.zig").Value;
allocator: Allocator,
bin_file: *File,
ptr_width: PtrWidth,
target: std.Target,
-/// A list of `File.LinkFn` whose Line Number Programs have surplus capacity.
-/// This is the same concept as `text_block_free_list`; see those doc comments.
-dbg_line_fn_free_list: std.AutoHashMapUnmanaged(*SrcFn, void) = .{},
-dbg_line_fn_first: ?*SrcFn = null,
-dbg_line_fn_last: ?*SrcFn = null,
+/// A list of `Atom`s whose Line Number Programs have surplus capacity.
+/// This is the same concept as `Section.free_list` in Elf; see those doc comments.
+src_fn_free_list: std.AutoHashMapUnmanaged(Atom.Index, void) = .{},
+src_fn_first_index: ?Atom.Index = null,
+src_fn_last_index: ?Atom.Index = null,
+src_fns: std.ArrayListUnmanaged(Atom) = .{},
+src_fn_decls: AtomTable = .{},
/// A list of `Atom`s whose corresponding .debug_info tags have surplus capacity.
/// This is the same concept as `text_block_free_list`; see those doc comments.
-atom_free_list: std.AutoHashMapUnmanaged(*Atom, void) = .{},
-atom_first: ?*Atom = null,
-atom_last: ?*Atom = null,
+di_atom_free_list: std.AutoHashMapUnmanaged(Atom.Index, void) = .{},
+di_atom_first_index: ?Atom.Index = null,
+di_atom_last_index: ?Atom.Index = null,
+di_atoms: std.ArrayListUnmanaged(Atom) = .{},
+di_atom_decls: AtomTable = .{},
abbrev_table_offset: ?u64 = null,
/// TODO replace with InternPool
/// Table of debug symbol names.
-strtab: std.ArrayListUnmanaged(u8) = .{},
+strtab: StringTable(.strtab) = .{},
/// Quick lookup array of all defined source files referenced by at least one Decl.
/// They will end up in the DWARF debug_line header as two lists:
@@ -50,22 +55,23 @@ strtab: std.ArrayListUnmanaged(u8) = .{},
/// * []file_names
di_files: std.AutoArrayHashMapUnmanaged(*const Module.File, void) = .{},
-/// List of atoms that are owned directly by the DWARF module.
-/// TODO convert links in DebugInfoAtom into indices and make
-/// sure every atom is owned by this module.
-managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
-
global_abbrev_relocs: std.ArrayListUnmanaged(AbbrevRelocation) = .{},
-pub const Atom = struct {
- /// Previous/next linked list pointers.
- /// This is the linked list node for this Decl's corresponding .debug_info tag.
- prev: ?*Atom,
- next: ?*Atom,
- /// Offset into .debug_info pointing to the tag for this Decl.
+const AtomTable = std.AutoHashMapUnmanaged(Module.Decl.Index, Atom.Index);
+
+const Atom = struct {
+ /// Offset into .debug_info pointing to the tag for this Decl, or
+ /// offset from the beginning of the Debug Line Program header that contains this function.
off: u32,
- /// Size of the .debug_info tag for this Decl, not including padding.
+ /// Size of the .debug_info tag for this Decl, not including padding, or
+ /// size of the line number program component belonging to this function, not
+ /// including padding.
len: u32,
+
+ prev_index: ?Index,
+ next_index: ?Index,
+
+ pub const Index = u32;
};
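
Later hunks call `getAtomPtr(.src_fn, ...)` and `getOrCreateAtomForDecl(.di_atom, ...)`, implying a kind enum that selects between the two parallel atom lists. That accessor is not shown in this diff, so the following is an assumed sketch of its shape, not the file's actual code:

    const std = @import("std");

    const Atom = struct {
        off: u32,
        len: u32,
        prev_index: ?Index,
        next_index: ?Index,

        const Index = u32;
    };

    const Kind = enum { src_fn, di_atom };

    const Tables = struct {
        src_fns: std.ArrayListUnmanaged(Atom) = .{},
        di_atoms: std.ArrayListUnmanaged(Atom) = .{},

        // Hypothetical accessor mirroring Dwarf.getAtomPtr(kind, index).
        fn getAtomPtr(self: *Tables, kind: Kind, index: Atom.Index) *Atom {
            return switch (kind) {
                .src_fn => &self.src_fns.items[index],
                .di_atom => &self.di_atoms.items[index],
            };
        }
    };

    test "the kind selects the backing list" {
        const gpa = std.testing.allocator;
        var t = Tables{};
        defer t.src_fns.deinit(gpa);
        defer t.di_atoms.deinit(gpa);
        try t.src_fns.append(gpa, .{ .off = 0, .len = 8, .prev_index = null, .next_index = null });
        t.getAtomPtr(.src_fn, 0).len = 16;
        try std.testing.expectEqual(@as(u32, 16), t.src_fns.items[0].len);
    }
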
/// Represents state of the analysed Decl.
@@ -75,6 +81,7 @@ pub const Atom = struct {
pub const DeclState = struct {
gpa: Allocator,
mod: *Module,
+ di_atom_decls: *const AtomTable,
dbg_line: std.ArrayList(u8),
dbg_info: std.ArrayList(u8),
abbrev_type_arena: std.heap.ArenaAllocator,
@@ -88,10 +95,11 @@ pub const DeclState = struct {
abbrev_relocs: std.ArrayListUnmanaged(AbbrevRelocation) = .{},
exprloc_relocs: std.ArrayListUnmanaged(ExprlocRelocation) = .{},
- fn init(gpa: Allocator, mod: *Module) DeclState {
+ fn init(gpa: Allocator, mod: *Module, di_atom_decls: *const AtomTable) DeclState {
return .{
.gpa = gpa,
.mod = mod,
+ .di_atom_decls = di_atom_decls,
.dbg_line = std.ArrayList(u8).init(gpa),
.dbg_info = std.ArrayList(u8).init(gpa),
.abbrev_type_arena = std.heap.ArenaAllocator.init(gpa),
@@ -119,11 +127,11 @@ pub const DeclState = struct {
/// Adds local type relocation of the form: @offset => @this + addend
/// @this signifies the offset within the .debug_abbrev section of the containing atom.
- fn addTypeRelocLocal(self: *DeclState, atom: *const Atom, offset: u32, addend: u32) !void {
+ fn addTypeRelocLocal(self: *DeclState, atom_index: Atom.Index, offset: u32, addend: u32) !void {
log.debug("{x}: @this + {x}", .{ offset, addend });
try self.abbrev_relocs.append(self.gpa, .{
.target = null,
- .atom = atom,
+ .atom_index = atom_index,
.offset = offset,
.addend = addend,
});
@@ -132,13 +140,13 @@ pub const DeclState = struct {
/// Adds global type relocation of the form: @offset => @symbol + 0
/// @symbol signifies a type abbreviation positioned somewhere in the .debug_abbrev section
/// which we use as our target of the relocation.
- fn addTypeRelocGlobal(self: *DeclState, atom: *const Atom, ty: Type, offset: u32) !void {
+ fn addTypeRelocGlobal(self: *DeclState, atom_index: Atom.Index, ty: Type, offset: u32) !void {
const resolv = self.abbrev_resolver.getContext(ty, .{
.mod = self.mod,
}) orelse blk: {
const sym_index = @intCast(u32, self.abbrev_table.items.len);
try self.abbrev_table.append(self.gpa, .{
- .atom = atom,
+ .atom_index = atom_index,
.type = ty,
.offset = undefined,
});
@@ -153,7 +161,7 @@ pub const DeclState = struct {
log.debug("{x}: %{d} + 0", .{ offset, resolv });
try self.abbrev_relocs.append(self.gpa, .{
.target = resolv,
- .atom = atom,
+ .atom_index = atom_index,
.offset = offset,
.addend = 0,
});
@@ -162,7 +170,7 @@ pub const DeclState = struct {
fn addDbgInfoType(
self: *DeclState,
module: *Module,
- atom: *Atom,
+ atom_index: Atom.Index,
ty: Type,
) error{OutOfMemory}!void {
const arena = self.abbrev_type_arena.allocator();
@@ -227,7 +235,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, Type.bool, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, Type.bool, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try dbg_info_buffer.ensureUnusedCapacity(6);
dbg_info_buffer.appendAssumeCapacity(0);
@@ -239,7 +247,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, payload_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
const offset = abi_size - payload_ty.abiSize(target);
try leb128.writeULEB128(dbg_info_buffer.writer(), offset);
@@ -270,7 +278,7 @@ pub const DeclState = struct {
try dbg_info_buffer.resize(index + 4);
var buf = try arena.create(Type.SlicePtrFieldTypeBuffer);
const ptr_ty = ty.slicePtrFieldType(buf);
- try self.addTypeRelocGlobal(atom, ptr_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, ptr_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try dbg_info_buffer.ensureUnusedCapacity(6);
dbg_info_buffer.appendAssumeCapacity(0);
@@ -282,7 +290,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, Type.usize, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try dbg_info_buffer.ensureUnusedCapacity(2);
dbg_info_buffer.appendAssumeCapacity(ptr_bytes);
@@ -294,7 +302,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, ty.childType(), @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, ty.childType(), @intCast(u32, index));
}
},
.Array => {
@@ -305,13 +313,13 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, ty.childType(), @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, ty.childType(), @intCast(u32, index));
// DW.AT.subrange_type
try dbg_info_buffer.append(@enumToInt(AbbrevKind.array_dim));
// DW.AT.type, DW.FORM.ref4
index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, Type.usize, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index));
// DW.AT.count, DW.FORM.udata
const len = ty.arrayLenIncludingSentinel();
try leb128.writeULEB128(dbg_info_buffer.writer(), len);
@@ -339,7 +347,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, field, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, field, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
const field_off = ty.structFieldOffset(field_index, target);
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
@@ -371,7 +379,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, field.ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
const field_off = ty.structFieldOffset(field_index, target);
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
@@ -454,7 +462,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const inner_union_index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(inner_union_index + 4);
- try self.addTypeRelocLocal(atom, @intCast(u32, inner_union_index), 5);
+ try self.addTypeRelocLocal(atom_index, @intCast(u32, inner_union_index), 5);
// DW.AT.data_member_location, DW.FORM.sdata
try leb128.writeULEB128(dbg_info_buffer.writer(), payload_offset);
}
@@ -481,7 +489,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, field.ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try dbg_info_buffer.append(0);
}
@@ -498,7 +506,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, union_obj.tag_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, union_obj.tag_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try leb128.writeULEB128(dbg_info_buffer.writer(), tag_offset);
@@ -541,7 +549,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, payload_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try leb128.writeULEB128(dbg_info_buffer.writer(), payload_off);
@@ -554,7 +562,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, error_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, error_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try leb128.writeULEB128(dbg_info_buffer.writer(), error_off);
@@ -587,12 +595,11 @@ pub const DeclState = struct {
self: *DeclState,
name: [:0]const u8,
ty: Type,
- tag: File.Tag,
owner_decl: Module.Decl.Index,
loc: DbgInfoLoc,
) error{OutOfMemory}!void {
const dbg_info = &self.dbg_info;
- const atom = getDbgInfoAtom(tag, self.mod, owner_decl);
+ const atom_index = self.di_atom_decls.get(owner_decl).?;
const name_with_null = name.ptr[0 .. name.len + 1];
switch (loc) {
@@ -637,7 +644,7 @@ pub const DeclState = struct {
try dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
const index = dbg_info.items.len;
try dbg_info.resize(index + 4); // dw.at.type, dw.form.ref4
- try self.addTypeRelocGlobal(atom, ty, @intCast(u32, index)); // DW.AT.type, DW.FORM.ref4
+ try self.addTypeRelocGlobal(atom_index, ty, @intCast(u32, index)); // DW.AT.type, DW.FORM.ref4
dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
}
@@ -645,13 +652,12 @@ pub const DeclState = struct {
self: *DeclState,
name: [:0]const u8,
ty: Type,
- tag: File.Tag,
owner_decl: Module.Decl.Index,
is_ptr: bool,
loc: DbgInfoLoc,
) error{OutOfMemory}!void {
const dbg_info = &self.dbg_info;
- const atom = getDbgInfoAtom(tag, self.mod, owner_decl);
+ const atom_index = self.di_atom_decls.get(owner_decl).?;
const name_with_null = name.ptr[0 .. name.len + 1];
try dbg_info.append(@enumToInt(AbbrevKind.variable));
const target = self.mod.getTarget();
@@ -781,7 +787,7 @@ pub const DeclState = struct {
try dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
const index = dbg_info.items.len;
try dbg_info.resize(index + 4); // dw.at.type, dw.form.ref4
- try self.addTypeRelocGlobal(atom, child_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, child_ty, @intCast(u32, index));
dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
}
@@ -814,7 +820,7 @@ pub const DeclState = struct {
};
pub const AbbrevEntry = struct {
- atom: *const Atom,
+ atom_index: Atom.Index,
type: Type,
offset: u32,
};
@@ -823,7 +829,7 @@ pub const AbbrevRelocation = struct {
/// If `target` is null, this is a local relocation based on a simple offset + addend.
target: ?u32,
- atom: *const Atom,
+ atom_index: Atom.Index,
offset: u32,
addend: u32,
};
@@ -840,26 +846,6 @@ pub const ExprlocRelocation = struct {
offset: u32,
};
-pub const SrcFn = struct {
- /// Offset from the beginning of the Debug Line Program header that contains this function.
- off: u32,
- /// Size of the line number program component belonging to this function, not
- /// including padding.
- len: u32,
-
- /// Points to the previous and next neighbors, based on the offset from .debug_line.
- /// This can be used to find, for example, the capacity of this `SrcFn`.
- prev: ?*SrcFn,
- next: ?*SrcFn,
-
- pub const empty: SrcFn = .{
- .off = 0,
- .len = 0,
- .prev = null,
- .next = null,
- };
-};
-
pub const PtrWidth = enum { p32, p64 };
pub const AbbrevKind = enum(u8) {
@@ -909,16 +895,18 @@ pub fn init(allocator: Allocator, bin_file: *File, target: std.Target) Dwarf {
pub fn deinit(self: *Dwarf) void {
const gpa = self.allocator;
- self.dbg_line_fn_free_list.deinit(gpa);
- self.atom_free_list.deinit(gpa);
+
+ self.src_fn_free_list.deinit(gpa);
+ self.src_fns.deinit(gpa);
+ self.src_fn_decls.deinit(gpa);
+
+ self.di_atom_free_list.deinit(gpa);
+ self.di_atoms.deinit(gpa);
+ self.di_atom_decls.deinit(gpa);
+
self.strtab.deinit(gpa);
self.di_files.deinit(gpa);
self.global_abbrev_relocs.deinit(gpa);
-
- for (self.managed_atoms.items) |atom| {
- gpa.destroy(atom);
- }
- self.managed_atoms.deinit(gpa);
}
/// Initializes Decl's state and its matching output buffers.
@@ -934,15 +922,19 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
log.debug("initDeclState {s}{*}", .{ decl_name, decl });
const gpa = self.allocator;
- var decl_state = DeclState.init(gpa, mod);
+ var decl_state = DeclState.init(gpa, mod, &self.di_atom_decls);
errdefer decl_state.deinit();
const dbg_line_buffer = &decl_state.dbg_line;
const dbg_info_buffer = &decl_state.dbg_info;
+ const di_atom_index = try self.getOrCreateAtomForDecl(.di_atom, decl_index);
+
assert(decl.has_tv);
switch (decl.ty.zigTypeTag()) {
.Fn => {
+ _ = try self.getOrCreateAtomForDecl(.src_fn, decl_index);
+
// For functions we need to add a prologue to the debug line program.
try dbg_line_buffer.ensureTotalCapacity(26);
@@ -1002,8 +994,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
dbg_info_buffer.items.len += 4; // DW.AT.high_pc, DW.FORM.data4
//
if (fn_ret_has_bits) {
- const atom = getDbgInfoAtom(self.bin_file.tag, mod, decl_index);
- try decl_state.addTypeRelocGlobal(atom, fn_ret_type, @intCast(u32, dbg_info_buffer.items.len));
+ try decl_state.addTypeRelocGlobal(di_atom_index, fn_ret_type, @intCast(u32, dbg_info_buffer.items.len));
dbg_info_buffer.items.len += 4; // DW.AT.type, DW.FORM.ref4
}
@@ -1075,31 +1066,28 @@ pub fn commitDeclState(
// This logic is nearly identical to the logic below in `updateDeclDebugInfo` for
// `TextBlock` and the .debug_info. If you are editing this logic, you
// probably need to edit that logic too.
- const src_fn = switch (self.bin_file.tag) {
- .elf => &decl.fn_link.elf,
- .macho => &decl.fn_link.macho,
- .wasm => &decl.fn_link.wasm.src_fn,
- else => unreachable, // TODO
- };
+ const src_fn_index = self.src_fn_decls.get(decl_index).?;
+ const src_fn = self.getAtomPtr(.src_fn, src_fn_index);
src_fn.len = @intCast(u32, dbg_line_buffer.items.len);
- if (self.dbg_line_fn_last) |last| blk: {
- if (src_fn == last) break :blk;
- if (src_fn.next) |next| {
+ if (self.src_fn_last_index) |last_index| blk: {
+ if (src_fn_index == last_index) break :blk;
+ if (src_fn.next_index) |next_index| {
+ const next = self.getAtomPtr(.src_fn, next_index);
// Update existing function - non-last item.
if (src_fn.off + src_fn.len + min_nop_size > next.off) {
// It grew too big, so we move it to a new location.
- if (src_fn.prev) |prev| {
- self.dbg_line_fn_free_list.put(gpa, prev, {}) catch {};
- prev.next = src_fn.next;
+ if (src_fn.prev_index) |prev_index| {
+ self.src_fn_free_list.put(gpa, prev_index, {}) catch {};
+ self.getAtomPtr(.src_fn, prev_index).next_index = src_fn.next_index;
}
- next.prev = src_fn.prev;
- src_fn.next = null;
+ next.prev_index = src_fn.prev_index;
+ src_fn.next_index = null;
// Populate where it used to be with NOPs.
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const debug_line_sect = &elf_file.sections.items[elf_file.debug_line_section_index.?];
+ const debug_line_sect = &elf_file.sections.items(.shdr)[elf_file.debug_line_section_index.?];
const file_pos = debug_line_sect.sh_offset + src_fn.off;
try pwriteDbgLineNops(elf_file.base.file.?, file_pos, 0, &[0]u8{}, src_fn.len);
},
@@ -1111,39 +1099,48 @@ pub fn commitDeclState(
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_line = wasm_file.debug_line_atom.?.code;
+ const debug_line = wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code;
writeDbgLineNopsBuffered(debug_line.items, src_fn.off, 0, &.{}, src_fn.len);
},
else => unreachable,
}
// TODO Look at the free list before appending at the end.
- src_fn.prev = last;
- last.next = src_fn;
- self.dbg_line_fn_last = src_fn;
+ src_fn.prev_index = last_index;
+ const last = self.getAtomPtr(.src_fn, last_index);
+ last.next_index = src_fn_index;
+ self.src_fn_last_index = src_fn_index;
src_fn.off = last.off + padToIdeal(last.len);
}
- } else if (src_fn.prev == null) {
+ } else if (src_fn.prev_index == null) {
// Append new function.
// TODO Look at the free list before appending at the end.
- src_fn.prev = last;
- last.next = src_fn;
- self.dbg_line_fn_last = src_fn;
+ src_fn.prev_index = last_index;
+ const last = self.getAtomPtr(.src_fn, last_index);
+ last.next_index = src_fn_index;
+ self.src_fn_last_index = src_fn_index;
src_fn.off = last.off + padToIdeal(last.len);
}
} else {
// This is the first function of the Line Number Program.
- self.dbg_line_fn_first = src_fn;
- self.dbg_line_fn_last = src_fn;
+ self.src_fn_first_index = src_fn_index;
+ self.src_fn_last_index = src_fn_index;
src_fn.off = padToIdeal(self.dbgLineNeededHeaderBytes(&[0][]u8{}, &[0][]u8{}));
}
- const last_src_fn = self.dbg_line_fn_last.?;
+ const last_src_fn_index = self.src_fn_last_index.?;
+ const last_src_fn = self.getAtom(.src_fn, last_src_fn_index);
const needed_size = last_src_fn.off + last_src_fn.len;
- const prev_padding_size: u32 = if (src_fn.prev) |prev| src_fn.off - (prev.off + prev.len) else 0;
- const next_padding_size: u32 = if (src_fn.next) |next| next.off - (src_fn.off + src_fn.len) else 0;
+ const prev_padding_size: u32 = if (src_fn.prev_index) |prev_index| blk: {
+ const prev = self.getAtom(.src_fn, prev_index);
+ break :blk src_fn.off - (prev.off + prev.len);
+ } else 0;
+ const next_padding_size: u32 = if (src_fn.next_index) |next_index| blk: {
+ const next = self.getAtom(.src_fn, next_index);
+ break :blk next.off - (src_fn.off + src_fn.len);
+ } else 0;
// We only have support for one compilation unit so far, so the offsets are directly
// from the .debug_line section.
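// Condensed shape of the grow-and-relocate logic above (names come from
// this diff; the unlink/append helpers are shorthand for the index fixups
// spelled out in the hunk):
//
//   if (src_fn.off + src_fn.len + min_nop_size > next.off) {
//       // Outgrew the gap before its successor: unlink it, pad the old
//       // bytes with line-program NOPs so .debug_line stays parseable,
//       // and re-append it after the current last atom.
//       unlink(src_fn_index);
//       nopOldRange(src_fn.off, src_fn.len);
//       src_fn.off = last.off + padToIdeal(last.len);
//       append(last_index, src_fn_index);
//   }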
@@ -1152,7 +1149,7 @@ pub fn commitDeclState(
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr_index = elf_file.debug_line_section_index.?;
try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true);
- const debug_line_sect = elf_file.sections.items[shdr_index];
+ const debug_line_sect = elf_file.sections.items(.shdr)[shdr_index];
const file_pos = debug_line_sect.sh_offset + src_fn.off;
try pwriteDbgLineNops(
elf_file.base.file.?,
@@ -1180,7 +1177,7 @@ pub fn commitDeclState(
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const atom = wasm_file.debug_line_atom.?;
+ const atom = wasm_file.getAtomPtr(wasm_file.debug_line_atom.?);
const debug_line = &atom.code;
const segment_size = debug_line.items.len;
if (needed_size != segment_size) {
@@ -1212,7 +1209,7 @@ pub fn commitDeclState(
if (dbg_info_buffer.items.len == 0)
return;
- const atom = getDbgInfoAtom(self.bin_file.tag, module, decl_index);
+ const di_atom_index = self.di_atom_decls.get(decl_index).?;
if (decl_state.abbrev_table.items.len > 0) {
// Now we emit the .debug_info types of the Decl. These will count towards the size of
// the buffer, so we have to do it before computing the offset, and we can't perform the actual
@@ -1234,12 +1231,12 @@ pub fn commitDeclState(
if (deferred) continue;
symbol.offset = @intCast(u32, dbg_info_buffer.items.len);
- try decl_state.addDbgInfoType(module, atom, ty);
+ try decl_state.addDbgInfoType(module, di_atom_index, ty);
}
}
log.debug("updateDeclDebugInfoAllocation for '{s}'", .{decl.name});
- try self.updateDeclDebugInfoAllocation(atom, @intCast(u32, dbg_info_buffer.items.len));
+ try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(u32, dbg_info_buffer.items.len));
while (decl_state.abbrev_relocs.popOrNull()) |reloc| {
if (reloc.target) |target| {
@@ -1260,11 +1257,12 @@ pub fn commitDeclState(
try self.global_abbrev_relocs.append(gpa, .{
.target = null,
.offset = reloc.offset,
- .atom = reloc.atom,
+ .atom_index = reloc.atom_index,
.addend = reloc.addend,
});
} else {
- const value = symbol.atom.off + symbol.offset + reloc.addend;
+ const atom = self.getAtom(.di_atom, symbol.atom_index);
+ const value = atom.off + symbol.offset + reloc.addend;
log.debug("{x}: [() => {x}] (%{d}, '{}')", .{ reloc.offset, value, target, ty.fmtDebug() });
mem.writeInt(
u32,
@@ -1274,10 +1272,11 @@ pub fn commitDeclState(
);
}
} else {
+ const atom = self.getAtom(.di_atom, reloc.atom_index);
mem.writeInt(
u32,
dbg_info_buffer.items[reloc.offset..][0..@sizeOf(u32)],
- reloc.atom.off + reloc.offset + reloc.addend,
+ atom.off + reloc.offset + reloc.addend,
target_endian,
);
}
@@ -1293,7 +1292,7 @@ pub fn commitDeclState(
.got_load => .got_load,
},
.target = reloc.target,
- .offset = reloc.offset + atom.off,
+ .offset = reloc.offset + self.getAtom(.di_atom, di_atom_index).off,
.addend = 0,
.prev_vaddr = 0,
});
@@ -1303,10 +1302,10 @@ pub fn commitDeclState(
}
log.debug("writeDeclDebugInfo for '{s}", .{decl.name});
- try self.writeDeclDebugInfo(atom, dbg_info_buffer.items);
+ try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items);
}
-fn updateDeclDebugInfoAllocation(self: *Dwarf, atom: *Atom, len: u32) !void {
+fn updateDeclDebugInfoAllocation(self: *Dwarf, atom_index: Atom.Index, len: u32) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -1315,24 +1314,26 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, atom: *Atom, len: u32) !void {
// probably need to edit that logic too.
const gpa = self.allocator;
+ const atom = self.getAtomPtr(.di_atom, atom_index);
atom.len = len;
- if (self.atom_last) |last| blk: {
- if (atom == last) break :blk;
- if (atom.next) |next| {
+ if (self.di_atom_last_index) |last_index| blk: {
+ if (atom_index == last_index) break :blk;
+ if (atom.next_index) |next_index| {
+ const next = self.getAtomPtr(.di_atom, next_index);
// Update existing Decl - non-last item.
if (atom.off + atom.len + min_nop_size > next.off) {
// It grew too big, so we move it to a new location.
- if (atom.prev) |prev| {
- self.atom_free_list.put(gpa, prev, {}) catch {};
- prev.next = atom.next;
+ if (atom.prev_index) |prev_index| {
+ self.di_atom_free_list.put(gpa, prev_index, {}) catch {};
+ self.getAtomPtr(.di_atom, prev_index).next_index = atom.next_index;
}
- next.prev = atom.prev;
- atom.next = null;
+ next.prev_index = atom.prev_index;
+ atom.next_index = null;
// Populate where it used to be with NOPs.
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const debug_info_sect = &elf_file.sections.items[elf_file.debug_info_section_index.?];
+ const debug_info_sect = &elf_file.sections.items(.shdr)[elf_file.debug_info_section_index.?];
const file_pos = debug_info_sect.sh_offset + atom.off;
try pwriteDbgInfoNops(elf_file.base.file.?, file_pos, 0, &[0]u8{}, atom.len, false);
},
@@ -1344,37 +1345,40 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, atom: *Atom, len: u32) !void {
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_info = &wasm_file.debug_info_atom.?.code;
+ const debug_info_index = wasm_file.debug_info_atom.?;
+ const debug_info = &wasm_file.getAtomPtr(debug_info_index).code;
try writeDbgInfoNopsToArrayList(gpa, debug_info, atom.off, 0, &.{0}, atom.len, false);
},
else => unreachable,
}
// TODO Look at the free list before appending at the end.
- atom.prev = last;
- last.next = atom;
- self.atom_last = atom;
+ atom.prev_index = last_index;
+ const last = self.getAtomPtr(.di_atom, last_index);
+ last.next_index = atom_index;
+ self.di_atom_last_index = atom_index;
atom.off = last.off + padToIdeal(last.len);
}
- } else if (atom.prev == null) {
+ } else if (atom.prev_index == null) {
// Append new Decl.
// TODO Look at the free list before appending at the end.
- atom.prev = last;
- last.next = atom;
- self.atom_last = atom;
+ atom.prev_index = last_index;
+ const last = self.getAtomPtr(.di_atom, last_index);
+ last.next_index = atom_index;
+ self.di_atom_last_index = atom_index;
atom.off = last.off + padToIdeal(last.len);
}
} else {
// This is the first Decl of the .debug_info
- self.atom_first = atom;
- self.atom_last = atom;
+ self.di_atom_first_index = atom_index;
+ self.di_atom_last_index = atom_index;
atom.off = @intCast(u32, padToIdeal(self.dbgInfoHeaderBytes()));
}
}
-fn writeDeclDebugInfo(self: *Dwarf, atom: *Atom, dbg_info_buf: []const u8) !void {
+fn writeDeclDebugInfo(self: *Dwarf, atom_index: Atom.Index, dbg_info_buf: []const u8) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -1383,14 +1387,22 @@ fn writeDeclDebugInfo(self: *Dwarf, atom: *Atom, dbg_info_buf: []const u8) !void
// probably need to edit that logic too.
const gpa = self.allocator;
- const last_decl = self.atom_last.?;
+ const atom = self.getAtom(.di_atom, atom_index);
+ const last_decl_index = self.di_atom_last_index.?;
+ const last_decl = self.getAtom(.di_atom, last_decl_index);
// +1 for a trailing zero to end the children of the decl tag.
const needed_size = last_decl.off + last_decl.len + 1;
- const prev_padding_size: u32 = if (atom.prev) |prev| atom.off - (prev.off + prev.len) else 0;
- const next_padding_size: u32 = if (atom.next) |next| next.off - (atom.off + atom.len) else 0;
+ const prev_padding_size: u32 = if (atom.prev_index) |prev_index| blk: {
+ const prev = self.getAtom(.di_atom, prev_index);
+ break :blk atom.off - (prev.off + prev.len);
+ } else 0;
+ const next_padding_size: u32 = if (atom.next_index) |next_index| blk: {
+ const next = self.getAtom(.di_atom, next_index);
+ break :blk next.off - (atom.off + atom.len);
+ } else 0;
// To end the children of the decl tag.
- const trailing_zero = atom.next == null;
+ const trailing_zero = atom.next_index == null;
// We only have support for one compilation unit so far, so the offsets are directly
// from the .debug_info section.
@@ -1399,7 +1411,7 @@ fn writeDeclDebugInfo(self: *Dwarf, atom: *Atom, dbg_info_buf: []const u8) !void
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr_index = elf_file.debug_info_section_index.?;
try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true);
- const debug_info_sect = elf_file.sections.items[shdr_index];
+ const debug_info_sect = elf_file.sections.items(.shdr)[shdr_index];
const file_pos = debug_info_sect.sh_offset + atom.off;
try pwriteDbgInfoNops(
elf_file.base.file.?,
@@ -1430,7 +1442,7 @@ fn writeDeclDebugInfo(self: *Dwarf, atom: *Atom, dbg_info_buf: []const u8) !void
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
const info_atom = wasm_file.debug_info_atom.?;
- const debug_info = &info_atom.code;
+ const debug_info = &wasm_file.getAtomPtr(info_atom).code;
const segment_size = debug_info.items.len;
if (needed_size != segment_size) {
log.debug(" needed size does not equal allocated size: {d}", .{needed_size});
@@ -1458,10 +1470,15 @@ fn writeDeclDebugInfo(self: *Dwarf, atom: *Atom, dbg_info_buf: []const u8) !void
}
}
-pub fn updateDeclLineNumber(self: *Dwarf, decl: *const Module.Decl) !void {
+pub fn updateDeclLineNumber(self: *Dwarf, module: *Module, decl_index: Module.Decl.Index) !void {
const tracy = trace(@src());
defer tracy.end();
+ const atom_index = try self.getOrCreateAtomForDecl(.src_fn, decl_index);
+ const atom = self.getAtom(.src_fn, atom_index);
+ if (atom.len == 0) return;
+
+ const decl = module.declPtr(decl_index);
const func = decl.val.castTag(.function).?.data;
log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{
decl.src_line,
@@ -1475,79 +1492,81 @@ pub fn updateDeclLineNumber(self: *Dwarf, decl: *const Module.Decl) !void {
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const shdr = elf_file.sections.items[elf_file.debug_line_section_index.?];
- const file_pos = shdr.sh_offset + decl.fn_link.elf.off + self.getRelocDbgLineOff();
+ const shdr = elf_file.sections.items(.shdr)[elf_file.debug_line_section_index.?];
+ const file_pos = shdr.sh_offset + atom.off + self.getRelocDbgLineOff();
try elf_file.base.file.?.pwriteAll(&data, file_pos);
},
.macho => {
const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
const sect = d_sym.getSection(d_sym.debug_line_section_index.?);
- const file_pos = sect.offset + decl.fn_link.macho.off + self.getRelocDbgLineOff();
+ const file_pos = sect.offset + atom.off + self.getRelocDbgLineOff();
try d_sym.file.pwriteAll(&data, file_pos);
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const offset = decl.fn_link.wasm.src_fn.off + self.getRelocDbgLineOff();
- const atom = wasm_file.debug_line_atom.?;
- mem.copy(u8, atom.code.items[offset..], &data);
+ const offset = atom.off + self.getRelocDbgLineOff();
+ const line_atom_index = wasm_file.debug_line_atom.?;
+ mem.copy(u8, wasm_file.getAtomPtr(line_atom_index).code.items[offset..], &data);
},
else => unreachable,
}
}
-pub fn freeAtom(self: *Dwarf, atom: *Atom) void {
- if (self.atom_first == atom) {
- self.atom_first = atom.next;
- }
- if (self.atom_last == atom) {
- // TODO shrink the .debug_info section size here
- self.atom_last = atom.prev;
- }
-
- if (atom.prev) |prev| {
- prev.next = atom.next;
+pub fn freeDecl(self: *Dwarf, decl_index: Module.Decl.Index) void {
+ const gpa = self.allocator;
- // TODO the free list logic like we do for text blocks above
- } else {
- atom.prev = null;
+ // Free SrcFn atom
+ if (self.src_fn_decls.fetchRemove(decl_index)) |kv| {
+ const src_fn_index = kv.value;
+ const src_fn = self.getAtom(.src_fn, src_fn_index);
+ _ = self.src_fn_free_list.remove(src_fn_index);
+
+ if (src_fn.prev_index) |prev_index| {
+ self.src_fn_free_list.put(gpa, prev_index, {}) catch {};
+ const prev = self.getAtomPtr(.src_fn, prev_index);
+ prev.next_index = src_fn.next_index;
+ if (src_fn.next_index) |next_index| {
+ self.getAtomPtr(.src_fn, next_index).prev_index = prev_index;
+ } else {
+ self.src_fn_last_index = prev_index;
+ }
+ } else if (src_fn.next_index) |next_index| {
+ self.src_fn_first_index = next_index;
+ self.getAtomPtr(.src_fn, next_index).prev_index = null;
+ }
+ if (self.src_fn_first_index == src_fn_index) {
+ self.src_fn_first_index = src_fn.next_index;
+ }
+ if (self.src_fn_last_index == src_fn_index) {
+ self.src_fn_last_index = src_fn.prev_index;
+ }
}
- if (atom.next) |next| {
- next.prev = atom.prev;
- } else {
- atom.next = null;
- }
-}
+ // Free DI atom
+ if (self.di_atom_decls.fetchRemove(decl_index)) |kv| {
+ const di_atom_index = kv.value;
+ const di_atom = self.getAtomPtr(.di_atom, di_atom_index);
-pub fn freeDecl(self: *Dwarf, decl: *Module.Decl) void {
- // TODO make this logic match freeTextBlock. Maybe abstract the logic out since the same thing
- // is desired for both.
- const gpa = self.allocator;
- const fn_link = switch (self.bin_file.tag) {
- .elf => &decl.fn_link.elf,
- .macho => &decl.fn_link.macho,
- .wasm => &decl.fn_link.wasm.src_fn,
- else => unreachable,
- };
- _ = self.dbg_line_fn_free_list.remove(fn_link);
+ if (self.di_atom_first_index == di_atom_index) {
+ self.di_atom_first_index = di_atom.next_index;
+ }
+ if (self.di_atom_last_index == di_atom_index) {
+ // TODO shrink the .debug_info section size here
+ self.di_atom_last_index = di_atom.prev_index;
+ }
- if (fn_link.prev) |prev| {
- self.dbg_line_fn_free_list.put(gpa, prev, {}) catch {};
- prev.next = fn_link.next;
- if (fn_link.next) |next| {
- next.prev = prev;
+ if (di_atom.prev_index) |prev_index| {
+ self.getAtomPtr(.di_atom, prev_index).next_index = di_atom.next_index;
+ // TODO the free list logic like we do for SrcFn above
} else {
- self.dbg_line_fn_last = prev;
+ di_atom.prev_index = null;
+ }
+
+ if (di_atom.next_index) |next_index| {
+ self.getAtomPtr(.di_atom, next_index).prev_index = di_atom.prev_index;
+ } else {
+ di_atom.next_index = null;
}
- } else if (fn_link.next) |next| {
- self.dbg_line_fn_first = next;
- next.prev = null;
- }
- if (self.dbg_line_fn_first == fn_link) {
- self.dbg_line_fn_first = fn_link.next;
- }
- if (self.dbg_line_fn_last == fn_link) {
- self.dbg_line_fn_last = fn_link.prev;
}
}
@@ -1690,7 +1709,7 @@ pub fn writeDbgAbbrev(self: *Dwarf) !void {
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr_index = elf_file.debug_abbrev_section_index.?;
try elf_file.growNonAllocSection(shdr_index, needed_size, 1, false);
- const debug_abbrev_sect = elf_file.sections.items[shdr_index];
+ const debug_abbrev_sect = elf_file.sections.items(.shdr)[shdr_index];
const file_pos = debug_abbrev_sect.sh_offset + abbrev_offset;
try elf_file.base.file.?.pwriteAll(&abbrev_buf, file_pos);
},
@@ -1704,7 +1723,7 @@ pub fn writeDbgAbbrev(self: *Dwarf) !void {
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_abbrev = &wasm_file.debug_abbrev_atom.?.code;
+ const debug_abbrev = &wasm_file.getAtomPtr(wasm_file.debug_abbrev_atom.?).code;
try debug_abbrev.resize(wasm_file.base.allocator, needed_size);
mem.copy(u8, debug_abbrev.items, &abbrev_buf);
},
@@ -1770,11 +1789,11 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u
},
}
// Write the form for the compile unit, which must match the abbrev table above.
- const name_strp = try self.makeString(module.root_pkg.root_src_path);
+ const name_strp = try self.strtab.insert(self.allocator, module.root_pkg.root_src_path);
var compile_unit_dir_buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
const compile_unit_dir = resolveCompilationDir(module, &compile_unit_dir_buffer);
- const comp_dir_strp = try self.makeString(compile_unit_dir);
- const producer_strp = try self.makeString(link.producer_string);
+ const comp_dir_strp = try self.strtab.insert(self.allocator, compile_unit_dir);
+ const producer_strp = try self.strtab.insert(self.allocator, link.producer_string);
di_buf.appendAssumeCapacity(@enumToInt(AbbrevKind.compile_unit));
if (self.bin_file.tag == .macho) {
@@ -1805,7 +1824,7 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const debug_info_sect = elf_file.sections.items[elf_file.debug_info_section_index.?];
+ const debug_info_sect = elf_file.sections.items(.shdr)[elf_file.debug_info_section_index.?];
const file_pos = debug_info_sect.sh_offset;
try pwriteDbgInfoNops(elf_file.base.file.?, file_pos, 0, di_buf.items, jmp_amt, false);
},
@@ -1817,7 +1836,7 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_info = &wasm_file.debug_info_atom.?.code;
+ const debug_info = &wasm_file.getAtomPtr(wasm_file.debug_info_atom.?).code;
try writeDbgInfoNopsToArrayList(self.allocator, debug_info, 0, 0, di_buf.items, jmp_amt, false);
},
else => unreachable,
@@ -2124,7 +2143,7 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void {
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr_index = elf_file.debug_aranges_section_index.?;
try elf_file.growNonAllocSection(shdr_index, needed_size, 16, false);
- const debug_aranges_sect = elf_file.sections.items[shdr_index];
+ const debug_aranges_sect = elf_file.sections.items(.shdr)[shdr_index];
const file_pos = debug_aranges_sect.sh_offset;
try elf_file.base.file.?.pwriteAll(di_buf.items, file_pos);
},
@@ -2138,7 +2157,7 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void {
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_ranges = &wasm_file.debug_ranges_atom.?.code;
+ const debug_ranges = &wasm_file.getAtomPtr(wasm_file.debug_ranges_atom.?).code;
try debug_ranges.resize(wasm_file.base.allocator, needed_size);
mem.copy(u8, debug_ranges.items, di_buf.items);
},
@@ -2275,19 +2294,23 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
const needed_with_padding = padToIdeal(needed_bytes);
const delta = needed_with_padding - dbg_line_prg_off;
- var src_fn = self.dbg_line_fn_first.?;
- const last_fn = self.dbg_line_fn_last.?;
+ const first_fn_index = self.src_fn_first_index.?;
+ const first_fn = self.getAtom(.src_fn, first_fn_index);
+ const last_fn_index = self.src_fn_last_index.?;
+ const last_fn = self.getAtom(.src_fn, last_fn_index);
+
+ var src_fn_index = first_fn_index;
- var buffer = try gpa.alloc(u8, last_fn.off + last_fn.len - src_fn.off);
+ var buffer = try gpa.alloc(u8, last_fn.off + last_fn.len - first_fn.off);
defer gpa.free(buffer);
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr_index = elf_file.debug_line_section_index.?;
- const needed_size = elf_file.sections.items[shdr_index].sh_size + delta;
+ const needed_size = elf_file.sections.items(.shdr)[shdr_index].sh_size + delta;
try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true);
- const file_pos = elf_file.sections.items[shdr_index].sh_offset + src_fn.off;
+ const file_pos = elf_file.sections.items(.shdr)[shdr_index].sh_offset + first_fn.off;
const amt = try elf_file.base.file.?.preadAll(buffer, file_pos);
if (amt != buffer.len) return error.InputOutput;
@@ -2299,7 +2322,7 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
const sect_index = d_sym.debug_line_section_index.?;
const needed_size = @intCast(u32, d_sym.getSection(sect_index).size + delta);
try d_sym.growSection(sect_index, needed_size, true);
- const file_pos = d_sym.getSection(sect_index).offset + src_fn.off;
+ const file_pos = d_sym.getSection(sect_index).offset + first_fn.off;
const amt = try d_sym.file.preadAll(buffer, file_pos);
if (amt != buffer.len) return error.InputOutput;
@@ -2308,19 +2331,20 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_line = &wasm_file.debug_line_atom.?.code;
- mem.copy(u8, buffer, debug_line.items[src_fn.off..]);
+ const debug_line = &wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code;
+ mem.copy(u8, buffer, debug_line.items[first_fn.off..]);
try debug_line.resize(self.allocator, debug_line.items.len + delta);
- mem.copy(u8, debug_line.items[src_fn.off + delta ..], buffer);
+ mem.copy(u8, debug_line.items[first_fn.off + delta ..], buffer);
},
else => unreachable,
}
while (true) {
+ const src_fn = self.getAtomPtr(.src_fn, src_fn_index);
src_fn.off += delta;
- if (src_fn.next) |next| {
- src_fn = next;
+ if (src_fn.next_index) |next_index| {
+ src_fn_index = next_index;
} else break;
}
}
@@ -2346,7 +2370,7 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const debug_line_sect = elf_file.sections.items[elf_file.debug_line_section_index.?];
+ const debug_line_sect = elf_file.sections.items(.shdr)[elf_file.debug_line_section_index.?];
const file_pos = debug_line_sect.sh_offset;
try pwriteDbgLineNops(elf_file.base.file.?, file_pos, 0, di_buf.items, jmp_amt);
},
@@ -2358,7 +2382,7 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_line = wasm_file.debug_line_atom.?.code;
+ const debug_line = &wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code;
writeDbgLineNopsBuffered(debug_line.items, 0, 0, di_buf.items, jmp_amt);
},
else => unreachable,
@@ -2366,22 +2390,26 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
}
fn getDebugInfoOff(self: Dwarf) ?u32 {
- const first = self.atom_first orelse return null;
+ const first_index = self.di_atom_first_index orelse return null;
+ const first = self.getAtom(.di_atom, first_index);
return first.off;
}
fn getDebugInfoEnd(self: Dwarf) ?u32 {
- const last = self.atom_last orelse return null;
+ const last_index = self.di_atom_last_index orelse return null;
+ const last = self.getAtom(.di_atom, last_index);
return last.off + last.len;
}
fn getDebugLineProgramOff(self: Dwarf) ?u32 {
- const first = self.dbg_line_fn_first orelse return null;
+ const first_index = self.src_fn_first_index orelse return null;
+ const first = self.getAtom(.src_fn, first_index);
return first.off;
}
fn getDebugLineProgramEnd(self: Dwarf) ?u32 {
- const last = self.dbg_line_fn_last orelse return null;
+ const last_index = self.src_fn_last_index orelse return null;
+ const last = self.getAtom(.src_fn, last_index);
return last.off + last.len;
}
@@ -2435,15 +2463,6 @@ fn getRelocDbgInfoSubprogramHighPC(self: Dwarf) u32 {
return dbg_info_low_pc_reloc_index + self.ptrWidthBytes();
}
-/// TODO Improve this to use a table.
-fn makeString(self: *Dwarf, bytes: []const u8) !u32 {
- try self.strtab.ensureUnusedCapacity(self.allocator, bytes.len + 1);
- const result = self.strtab.items.len;
- self.strtab.appendSliceAssumeCapacity(bytes);
- self.strtab.appendAssumeCapacity(0);
- return @intCast(u32, result);
-}
-
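// makeString above appended a fresh copy on every call. Its replacement,
// StringTable.insert, can intern instead. A minimal sketch, assuming a
// layout along these lines — the real type in link/strtab.zig is not part
// of this diff, and `lookup` is a hypothetical field name:
const StringTable = struct {
    buffer: std.ArrayListUnmanaged(u8) = .{},
    lookup: std.StringHashMapUnmanaged(u32) = .{},

    fn insert(self: *StringTable, gpa: Allocator, bytes: []const u8) !u32 {
        if (self.lookup.get(bytes)) |off| return off; // already interned
        const off = @intCast(u32, self.buffer.items.len);
        try self.buffer.ensureUnusedCapacity(gpa, bytes.len + 1);
        self.buffer.appendSliceAssumeCapacity(bytes);
        self.buffer.appendAssumeCapacity(0); // NUL terminator
        // Key must outlive buffer reallocation, hence the dupe.
        try self.lookup.put(gpa, try gpa.dupe(u8, bytes), off);
        return off;
    }
};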
fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
return actual_size +| (actual_size / ideal_factor);
}
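// With ideal_factor = 3 (the value Elf.zig declares below; Dwarf.zig's own
// constant sits outside this hunk), padToIdeal grows a size by roughly a
// third, and the saturating add `+|` clamps at maxInt instead of
// overflowing. A quick sketch of the arithmetic as an in-file test:
test "padToIdeal arithmetic" {
    try std.testing.expectEqual(@as(u32, 85), padToIdeal(@as(u32, 64)));
    const max = std.math.maxInt(u32);
    try std.testing.expectEqual(@as(u32, max), padToIdeal(@as(u32, max)));
}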
@@ -2465,29 +2484,20 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
}
error_set.names = names;
- const atom = try gpa.create(Atom);
- errdefer gpa.destroy(atom);
- atom.* = .{
- .prev = null,
- .next = null,
- .off = 0,
- .len = 0,
- };
-
var dbg_info_buffer = std.ArrayList(u8).init(arena);
try addDbgInfoErrorSet(arena, module, error_ty, self.target, &dbg_info_buffer);
- try self.managed_atoms.append(gpa, atom);
+ const di_atom_index = try self.createAtom(.di_atom);
log.debug("updateDeclDebugInfoAllocation in flushModule", .{});
- try self.updateDeclDebugInfoAllocation(atom, @intCast(u32, dbg_info_buffer.items.len));
+ try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(u32, dbg_info_buffer.items.len));
log.debug("writeDeclDebugInfo in flushModule", .{});
- try self.writeDeclDebugInfo(atom, dbg_info_buffer.items);
+ try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items);
const file_pos = blk: {
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const debug_info_sect = &elf_file.sections.items[elf_file.debug_info_section_index.?];
+ const debug_info_sect = &elf_file.sections.items(.shdr)[elf_file.debug_info_section_index.?];
break :blk debug_info_sect.sh_offset;
},
.macho => {
@@ -2502,22 +2512,23 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
};
var buf: [@sizeOf(u32)]u8 = undefined;
- mem.writeInt(u32, &buf, atom.off, self.target.cpu.arch.endian());
+ mem.writeInt(u32, &buf, self.getAtom(.di_atom, di_atom_index).off, self.target.cpu.arch.endian());
while (self.global_abbrev_relocs.popOrNull()) |reloc| {
+ const atom = self.getAtom(.di_atom, reloc.atom_index);
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- try elf_file.base.file.?.pwriteAll(&buf, file_pos + reloc.atom.off + reloc.offset);
+ try elf_file.base.file.?.pwriteAll(&buf, file_pos + atom.off + reloc.offset);
},
.macho => {
const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
- try d_sym.file.pwriteAll(&buf, file_pos + reloc.atom.off + reloc.offset);
+ try d_sym.file.pwriteAll(&buf, file_pos + atom.off + reloc.offset);
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_info = wasm_file.debug_info_atom.?.code;
- mem.copy(u8, debug_info.items[reloc.atom.off + reloc.offset ..], &buf);
+ const debug_info = wasm_file.getAtomPtr(wasm_file.debug_info_atom.?).code;
+ mem.copy(u8, debug_info.items[atom.off + reloc.offset ..], &buf);
},
else => unreachable,
}
@@ -2635,12 +2646,62 @@ fn addDbgInfoErrorSet(
try dbg_info_buffer.append(0);
}
-fn getDbgInfoAtom(tag: File.Tag, mod: *Module, decl_index: Module.Decl.Index) *Atom {
- const decl = mod.declPtr(decl_index);
- return switch (tag) {
- .elf => &decl.link.elf.dbg_info_atom,
- .macho => &decl.link.macho.dbg_info_atom,
- .wasm => &decl.link.wasm.dbg_info_atom,
- else => unreachable,
+const Kind = enum { src_fn, di_atom };
+
+fn createAtom(self: *Dwarf, comptime kind: Kind) !Atom.Index {
+ const index = blk: {
+ switch (kind) {
+ .src_fn => {
+ const index = @intCast(Atom.Index, self.src_fns.items.len);
+ _ = try self.src_fns.addOne(self.allocator);
+ break :blk index;
+ },
+ .di_atom => {
+ const index = @intCast(Atom.Index, self.di_atoms.items.len);
+ _ = try self.di_atoms.addOne(self.allocator);
+ break :blk index;
+ },
+ }
+ };
+ const atom = self.getAtomPtr(kind, index);
+ atom.* = .{
+ .off = 0,
+ .len = 0,
+ .prev_index = null,
+ .next_index = null,
+ };
+ return index;
+}
+
+fn getOrCreateAtomForDecl(self: *Dwarf, comptime kind: Kind, decl_index: Module.Decl.Index) !Atom.Index {
+ switch (kind) {
+ .src_fn => {
+ const gop = try self.src_fn_decls.getOrPut(self.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = try self.createAtom(kind);
+ }
+ return gop.value_ptr.*;
+ },
+ .di_atom => {
+ const gop = try self.di_atom_decls.getOrPut(self.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = try self.createAtom(kind);
+ }
+ return gop.value_ptr.*;
+ },
+ }
+}
+
+fn getAtom(self: *const Dwarf, comptime kind: Kind, index: Atom.Index) Atom {
+ return switch (kind) {
+ .src_fn => self.src_fns.items[index],
+ .di_atom => self.di_atoms.items[index],
+ };
+}
+
+fn getAtomPtr(self: *Dwarf, comptime kind: Kind, index: Atom.Index) *Atom {
+ return switch (kind) {
+ .src_fn => &self.src_fns.items[index],
+ .di_atom => &self.di_atoms.items[index],
};
}
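// Typical flow through the new kind-tagged accessors, as initDeclState and
// commitDeclState above use them: resolve a Decl to its per-kind atom index
// once, then fetch by value or by mutable pointer as needed.
//
//   const di_atom_index = try self.getOrCreateAtomForDecl(.di_atom, decl_index);
//   const off = self.getAtom(.di_atom, di_atom_index).off;    // read a copy
//   self.getAtomPtr(.di_atom, di_atom_index).len = new_len;   // mutate in place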
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 2c55e55f83..45952da6c0 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1,43 +1,89 @@
const Elf = @This();
const std = @import("std");
+const build_options = @import("build_options");
const builtin = @import("builtin");
-const math = std.math;
-const mem = std.mem;
const assert = std.debug.assert;
-const Allocator = std.mem.Allocator;
-const fs = std.fs;
const elf = std.elf;
+const fs = std.fs;
const log = std.log.scoped(.link);
+const math = std.math;
+const mem = std.mem;
-const Atom = @import("Elf/Atom.zig");
-const Module = @import("../Module.zig");
-const Compilation = @import("../Compilation.zig");
-const Dwarf = @import("Dwarf.zig");
const codegen = @import("../codegen.zig");
-const lldMain = @import("../main.zig").lldMain;
-const trace = @import("../tracy.zig").trace;
-const Package = @import("../Package.zig");
-const Value = @import("../value.zig").Value;
-const Type = @import("../type.zig").Type;
-const TypedValue = @import("../TypedValue.zig");
-const link = @import("../link.zig");
-const File = link.File;
-const build_options = @import("build_options");
-const target_util = @import("../target.zig");
const glibc = @import("../glibc.zig");
+const link = @import("../link.zig");
+const lldMain = @import("../main.zig").lldMain;
const musl = @import("../musl.zig");
-const Cache = @import("../Cache.zig");
+const target_util = @import("../target.zig");
+const trace = @import("../tracy.zig").trace;
+
const Air = @import("../Air.zig");
+const Allocator = std.mem.Allocator;
+pub const Atom = @import("Elf/Atom.zig");
+const Cache = @import("../Cache.zig");
+const Compilation = @import("../Compilation.zig");
+const Dwarf = @import("Dwarf.zig");
+const File = link.File;
const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
-
-pub const TextBlock = Atom;
+const Module = @import("../Module.zig");
+const Package = @import("../Package.zig");
+const StringTable = @import("strtab.zig").StringTable;
+const Type = @import("../type.zig").Type;
+const TypedValue = @import("../TypedValue.zig");
+const Value = @import("../value.zig").Value;
const default_entry_addr = 0x8000000;
pub const base_tag: File.Tag = .elf;
+const Section = struct {
+ shdr: elf.Elf64_Shdr,
+ phdr_index: u16,
+
+ /// Index of the last allocated atom in this section.
+ last_atom_index: ?Atom.Index = null,
+
+ /// A list of atoms that have surplus capacity. This list can have false
+ /// positives, as functions grow and shrink over time, only sometimes being added
+ /// or removed from the freelist.
+ ///
+ /// An atom has surplus capacity when its overcapacity value is greater than
+    /// padToIdeal(minimum_atom_size). That is, when it has so
+    /// much extra capacity that we could fit a small new symbol in it, itself with
+ /// ideal_capacity or more.
+ ///
+ /// Ideal capacity is defined by size + (size / ideal_factor)
+ ///
+ /// Overcapacity is measured by actual_capacity - ideal_capacity. Note that
+ /// overcapacity can be negative. A simple way to have negative overcapacity is to
+    /// allocate a fresh atom, which will have ideal capacity, and then grow it
+ /// by 1 byte. It will then have -1 overcapacity.
+ free_list: std.ArrayListUnmanaged(Atom.Index) = .{},
+};
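// The doc comment above in numbers: with minimum_atom_size = 64 and
// ideal_factor = 3 (both declared later in this file), a free-list entry is
// worth keeping when its surplus fits a minimal new atom. A sketch of that
// test, assuming a capacity value computed by the atom allocator (which is
// outside this hunk):
//
//   const ideal = atom.size + atom.size / ideal_factor;  // padToIdeal
//   const surplus = capacity - ideal;
//   const keep = surplus >= min_text_capacity;           // padToIdeal(64) == 85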
+
+const DeclMetadata = struct {
+ atom: Atom.Index,
+ shdr: u16,
+    /// A list of all export aliases of this Decl.
+ exports: std.ArrayListUnmanaged(u32) = .{},
+
+ fn getExport(m: DeclMetadata, elf_file: *const Elf, name: []const u8) ?u32 {
+ for (m.exports.items) |exp| {
+ if (mem.eql(u8, name, elf_file.getGlobalName(exp))) return exp;
+ }
+ return null;
+ }
+
+ fn getExportPtr(m: *DeclMetadata, elf_file: *Elf, name: []const u8) ?*u32 {
+ for (m.exports.items) |*exp| {
+ if (mem.eql(u8, name, elf_file.getGlobalName(exp.*))) return exp;
+ }
+ return null;
+ }
+};
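// Usage sketch for the new per-Decl export table (the decl_index and "main"
// lookup are hypothetical; only getExport itself comes from this diff):
//
//   if (elf_file.decls.get(decl_index)) |metadata| {
//       if (metadata.getExport(elf_file, "main")) |sym_index| {
//           // sym_index names the global symbol backing this export alias
//       }
//   }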
+
base: File,
dwarf: ?Dwarf = null,
@@ -48,12 +94,12 @@ llvm_object: ?*LlvmObject = null,
/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
/// Same order as in the file.
-sections: std.ArrayListUnmanaged(elf.Elf64_Shdr) = std.ArrayListUnmanaged(elf.Elf64_Shdr){},
+sections: std.MultiArrayList(Section) = .{},
shdr_table_offset: ?u64 = null,
/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
/// Same order as in the file.
-program_headers: std.ArrayListUnmanaged(elf.Elf64_Phdr) = std.ArrayListUnmanaged(elf.Elf64_Phdr){},
+program_headers: std.ArrayListUnmanaged(elf.Elf64_Phdr) = .{},
phdr_table_offset: ?u64 = null,
/// The index into the program headers of a PT_LOAD program header with Read and Execute flags
phdr_load_re_index: ?u16 = null,
@@ -65,12 +111,10 @@ phdr_load_ro_index: ?u16 = null,
/// The index into the program headers of a PT_LOAD program header with Write flag
phdr_load_rw_index: ?u16 = null,
-phdr_shdr_table: std.AutoHashMapUnmanaged(u16, u16) = .{},
-
entry_addr: ?u64 = null,
page_size: u32,
-shstrtab: std.ArrayListUnmanaged(u8) = std.ArrayListUnmanaged(u8){},
+shstrtab: StringTable(.strtab) = .{},
shstrtab_index: ?u16 = null,
symtab_section_index: ?u16 = null,
@@ -113,39 +157,14 @@ debug_line_header_dirty: bool = false,
error_flags: File.ErrorFlags = File.ErrorFlags{},
-/// Pointer to the last allocated atom
-atoms: std.AutoHashMapUnmanaged(u16, *TextBlock) = .{},
-
-/// A list of text blocks that have surplus capacity. This list can have false
-/// positives, as functions grow and shrink over time, only sometimes being added
-/// or removed from the freelist.
-///
-/// A text block has surplus capacity when its overcapacity value is greater than
-/// padToIdeal(minimum_text_block_size). That is, when it has so
-/// much extra capacity, that we could fit a small new symbol in it, itself with
-/// ideal_capacity or more.
-///
-/// Ideal capacity is defined by size + (size / ideal_factor)
-///
-/// Overcapacity is measured by actual_capacity - ideal_capacity. Note that
-/// overcapacity can be negative. A simple way to have negative overcapacity is to
-/// allocate a fresh text block, which will have ideal capacity, and then grow it
-/// by 1 byte. It will then have -1 overcapacity.
-atom_free_lists: std.AutoHashMapUnmanaged(u16, std.ArrayListUnmanaged(*TextBlock)) = .{},
-
-/// Table of Decls that are currently alive.
-/// We store them here so that we can properly dispose of any allocated
-/// memory within the atom in the incremental linker.
-/// TODO consolidate this.
-decls: std.AutoHashMapUnmanaged(Module.Decl.Index, ?u16) = .{},
+/// Table of tracked Decls.
+decls: std.AutoHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
/// List of atoms that are owned directly by the linker.
-/// Currently these are only atoms that are the result of linking
-/// object files. Atoms which take part in incremental linking are
-/// at present owned by Module.Decl.
-/// TODO consolidate this.
-managed_atoms: std.ArrayListUnmanaged(*TextBlock) = .{},
-atom_by_index_table: std.AutoHashMapUnmanaged(u32, *TextBlock) = .{},
+atoms: std.ArrayListUnmanaged(Atom) = .{},
+
+/// Table of atoms indexed by the symbol index.
+atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},
/// Table of unnamed constants associated with a parent `Decl`.
/// We store them here so that we can free the constants whenever the `Decl`
@@ -173,15 +192,8 @@ unnamed_const_atoms: UnnamedConstTable = .{},
/// this will be a table indexed by index into the list of Atoms.
relocs: RelocTable = .{},
-const Reloc = struct {
- target: u32,
- offset: u64,
- addend: u32,
- prev_vaddr: u64,
-};
-
-const RelocTable = std.AutoHashMapUnmanaged(*TextBlock, std.ArrayListUnmanaged(Reloc));
-const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(*TextBlock));
+const RelocTable = std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Atom.Reloc));
+const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Atom.Index));
/// When allocating, the ideal_capacity is calculated by
/// actual_capacity + (actual_capacity / ideal_factor)
@@ -190,15 +202,11 @@ const ideal_factor = 3;
/// In order for a slice of bytes to be considered eligible to keep metadata pointing at
/// it as a possible place to put new symbols, it must have enough room for this many bytes
/// (plus extra for reserved capacity).
-const minimum_text_block_size = 64;
-pub const min_text_capacity = padToIdeal(minimum_text_block_size);
+const minimum_atom_size = 64;
+pub const min_text_capacity = padToIdeal(minimum_atom_size);
pub const PtrWidth = enum { p32, p64 };
-pub const Export = struct {
- sym_index: ?u32 = null,
-};
-
pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Elf {
assert(options.target.ofmt == .elf);
@@ -230,16 +238,19 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
// There must always be a null section in index 0
try self.sections.append(allocator, .{
- .sh_name = 0,
- .sh_type = elf.SHT_NULL,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = 0,
- .sh_size = 0,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = 0,
- .sh_entsize = 0,
+ .shdr = .{
+ .sh_name = 0,
+ .sh_type = elf.SHT_NULL,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = 0,
+ .sh_size = 0,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = 0,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
});
try self.populateMissingMetadata();
@@ -286,75 +297,67 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Elf {
}
pub fn deinit(self: *Elf) void {
+ const gpa = self.base.allocator;
+
if (build_options.have_llvm) {
- if (self.llvm_object) |llvm_object| llvm_object.destroy(self.base.allocator);
- }
-
- self.sections.deinit(self.base.allocator);
- self.program_headers.deinit(self.base.allocator);
- self.shstrtab.deinit(self.base.allocator);
- self.local_symbols.deinit(self.base.allocator);
- self.global_symbols.deinit(self.base.allocator);
- self.global_symbol_free_list.deinit(self.base.allocator);
- self.local_symbol_free_list.deinit(self.base.allocator);
- self.offset_table_free_list.deinit(self.base.allocator);
- self.offset_table.deinit(self.base.allocator);
- self.phdr_shdr_table.deinit(self.base.allocator);
- self.decls.deinit(self.base.allocator);
-
- self.atoms.deinit(self.base.allocator);
+ if (self.llvm_object) |llvm_object| llvm_object.destroy(gpa);
+ }
+
+ for (self.sections.items(.free_list)) |*free_list| {
+ free_list.deinit(gpa);
+ }
+ self.sections.deinit(gpa);
+
+ self.program_headers.deinit(gpa);
+ self.shstrtab.deinit(gpa);
+ self.local_symbols.deinit(gpa);
+ self.global_symbols.deinit(gpa);
+ self.global_symbol_free_list.deinit(gpa);
+ self.local_symbol_free_list.deinit(gpa);
+ self.offset_table_free_list.deinit(gpa);
+ self.offset_table.deinit(gpa);
+
{
- var it = self.atom_free_lists.valueIterator();
- while (it.next()) |free_list| {
- free_list.deinit(self.base.allocator);
+ var it = self.decls.iterator();
+ while (it.next()) |entry| {
+ entry.value_ptr.exports.deinit(gpa);
}
- self.atom_free_lists.deinit(self.base.allocator);
+ self.decls.deinit(gpa);
}
- for (self.managed_atoms.items) |atom| {
- self.base.allocator.destroy(atom);
- }
- self.managed_atoms.deinit(self.base.allocator);
+ self.atoms.deinit(gpa);
+ self.atom_by_index_table.deinit(gpa);
{
var it = self.unnamed_const_atoms.valueIterator();
while (it.next()) |atoms| {
- atoms.deinit(self.base.allocator);
+ atoms.deinit(gpa);
}
- self.unnamed_const_atoms.deinit(self.base.allocator);
+ self.unnamed_const_atoms.deinit(gpa);
}
{
var it = self.relocs.valueIterator();
while (it.next()) |relocs| {
- relocs.deinit(self.base.allocator);
+ relocs.deinit(gpa);
}
- self.relocs.deinit(self.base.allocator);
+ self.relocs.deinit(gpa);
}
- self.atom_by_index_table.deinit(self.base.allocator);
-
if (self.dwarf) |*dw| {
dw.deinit();
}
}
pub fn getDeclVAddr(self: *Elf, decl_index: Module.Decl.Index, reloc_info: File.RelocInfo) !u64 {
- const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
-
assert(self.llvm_object == null);
- try decl.link.elf.ensureInitialized(self);
- const target = decl.link.elf.getSymbolIndex().?;
-
- const vaddr = self.local_symbols.items[target].st_value;
- const atom = self.atom_by_index_table.get(reloc_info.parent_atom_index).?;
- const gop = try self.relocs.getOrPut(self.base.allocator, atom);
- if (!gop.found_existing) {
- gop.value_ptr.* = .{};
- }
- try gop.value_ptr.append(self.base.allocator, .{
+ const this_atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const this_atom = self.getAtom(this_atom_index);
+ const target = this_atom.getSymbolIndex().?;
+ const vaddr = this_atom.getSymbol(self).st_value;
+ const atom_index = self.getAtomIndexForSymbol(reloc_info.parent_atom_index).?;
+ try Atom.addRelocation(self, atom_index, .{
.target = target,
.offset = reloc_info.offset,
.addend = reloc_info.addend,
@@ -375,7 +378,7 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) ?u64 {
if (self.shdr_table_offset) |off| {
const shdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Shdr) else @sizeOf(elf.Elf64_Shdr);
- const tight_size = self.sections.items.len * shdr_size;
+ const tight_size = self.sections.slice().len * shdr_size;
const increased_size = padToIdeal(tight_size);
const test_end = off + increased_size;
if (end > off and start < test_end) {
@@ -385,7 +388,7 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) ?u64 {
if (self.phdr_table_offset) |off| {
const phdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Phdr) else @sizeOf(elf.Elf64_Phdr);
- const tight_size = self.sections.items.len * phdr_size;
+ const tight_size = self.sections.slice().len * phdr_size;
const increased_size = padToIdeal(tight_size);
const test_end = off + increased_size;
if (end > off and start < test_end) {
@@ -393,7 +396,7 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) ?u64 {
}
}
- for (self.sections.items) |section| {
+ for (self.sections.items(.shdr)) |section| {
const increased_size = padToIdeal(section.sh_size);
const test_end = section.sh_offset + increased_size;
if (end > section.sh_offset and start < test_end) {
@@ -420,7 +423,7 @@ pub fn allocatedSize(self: *Elf, start: u64) u64 {
if (self.phdr_table_offset) |off| {
if (off > start and off < min_pos) min_pos = off;
}
- for (self.sections.items) |section| {
+ for (self.sections.items(.shdr)) |section| {
if (section.sh_offset <= start) continue;
if (section.sh_offset < min_pos) min_pos = section.sh_offset;
}
@@ -439,31 +442,10 @@ pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u32) u64 {
return start;
}
-/// TODO Improve this to use a table.
-fn makeString(self: *Elf, bytes: []const u8) !u32 {
- try self.shstrtab.ensureUnusedCapacity(self.base.allocator, bytes.len + 1);
- const result = self.shstrtab.items.len;
- self.shstrtab.appendSliceAssumeCapacity(bytes);
- self.shstrtab.appendAssumeCapacity(0);
- return @intCast(u32, result);
-}
-
-pub fn getString(self: Elf, str_off: u32) []const u8 {
- assert(str_off < self.shstrtab.items.len);
- return mem.sliceTo(@ptrCast([*:0]const u8, self.shstrtab.items.ptr + str_off), 0);
-}
-
-fn updateString(self: *Elf, old_str_off: u32, new_name: []const u8) !u32 {
- const existing_name = self.getString(old_str_off);
- if (mem.eql(u8, existing_name, new_name)) {
- return old_str_off;
- }
- return self.makeString(new_name);
-}
-
pub fn populateMissingMetadata(self: *Elf) !void {
assert(self.llvm_object == null);
+ const gpa = self.base.allocator;
const small_ptr = switch (self.ptr_width) {
.p32 => true,
.p64 => false,
@@ -477,7 +459,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
const off = self.findFreeSpace(file_size, p_align);
log.debug("found PT_LOAD RE free space 0x{x} to 0x{x}", .{ off, off + file_size });
const entry_addr: u64 = self.entry_addr orelse if (self.base.options.target.cpu.arch == .spu_2) @as(u64, 0) else default_entry_addr;
- try self.program_headers.append(self.base.allocator, .{
+ try self.program_headers.append(gpa, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
.p_filesz = file_size,
@@ -487,7 +469,6 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.p_align = p_align,
.p_flags = elf.PF_X | elf.PF_R,
});
- try self.atom_free_lists.putNoClobber(self.base.allocator, self.phdr_load_re_index.?, .{});
self.entry_addr = null;
self.phdr_table_dirty = true;
}
@@ -504,7 +485,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
// we'll need to re-use that function anyway, in case the GOT grows and overlaps something
// else in virtual memory.
const got_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0x4000000 else 0x8000;
- try self.program_headers.append(self.base.allocator, .{
+ try self.program_headers.append(gpa, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
.p_filesz = file_size,
@@ -527,7 +508,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
log.debug("found PT_LOAD RO free space 0x{x} to 0x{x}", .{ off, off + file_size });
// TODO Same as for GOT
const rodata_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0xc000000 else 0xa000;
- try self.program_headers.append(self.base.allocator, .{
+ try self.program_headers.append(gpa, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
.p_filesz = file_size,
@@ -537,7 +518,6 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.p_align = p_align,
.p_flags = elf.PF_R,
});
- try self.atom_free_lists.putNoClobber(self.base.allocator, self.phdr_load_ro_index.?, .{});
self.phdr_table_dirty = true;
}
@@ -551,7 +531,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
log.debug("found PT_LOAD RW free space 0x{x} to 0x{x}", .{ off, off + file_size });
// TODO Same as for GOT
const rwdata_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0x10000000 else 0xc000;
- try self.program_headers.append(self.base.allocator, .{
+ try self.program_headers.append(gpa, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
.p_filesz = file_size,
@@ -561,148 +541,145 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.p_align = p_align,
.p_flags = elf.PF_R | elf.PF_W,
});
- try self.atom_free_lists.putNoClobber(self.base.allocator, self.phdr_load_rw_index.?, .{});
self.phdr_table_dirty = true;
}
if (self.shstrtab_index == null) {
- self.shstrtab_index = @intCast(u16, self.sections.items.len);
- assert(self.shstrtab.items.len == 0);
- try self.shstrtab.append(self.base.allocator, 0); // need a 0 at position 0
- const off = self.findFreeSpace(self.shstrtab.items.len, 1);
- log.debug("found shstrtab free space 0x{x} to 0x{x}", .{ off, off + self.shstrtab.items.len });
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".shstrtab"),
- .sh_type = elf.SHT_STRTAB,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = self.shstrtab.items.len,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = 1,
- .sh_entsize = 0,
+ self.shstrtab_index = @intCast(u16, self.sections.slice().len);
+ assert(self.shstrtab.buffer.items.len == 0);
+ try self.shstrtab.buffer.append(gpa, 0); // need a 0 at position 0
+ const off = self.findFreeSpace(self.shstrtab.buffer.items.len, 1);
+ log.debug("found shstrtab free space 0x{x} to 0x{x}", .{ off, off + self.shstrtab.buffer.items.len });
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".shstrtab"),
+ .sh_type = elf.SHT_STRTAB,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = self.shstrtab.buffer.items.len,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = 1,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
});
self.shstrtab_dirty = true;
self.shdr_table_dirty = true;
}
if (self.text_section_index == null) {
- self.text_section_index = @intCast(u16, self.sections.items.len);
+ self.text_section_index = @intCast(u16, self.sections.slice().len);
const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".text"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
- .sh_addr = phdr.p_vaddr,
- .sh_offset = phdr.p_offset,
- .sh_size = phdr.p_filesz,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = 1,
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".text"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
+ .sh_addr = phdr.p_vaddr,
+ .sh_offset = phdr.p_offset,
+ .sh_size = phdr.p_filesz,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = 1,
+ .sh_entsize = 0,
+ },
+ .phdr_index = self.phdr_load_re_index.?,
});
- try self.phdr_shdr_table.putNoClobber(
- self.base.allocator,
- self.phdr_load_re_index.?,
- self.text_section_index.?,
- );
self.shdr_table_dirty = true;
}
if (self.got_section_index == null) {
- self.got_section_index = @intCast(u16, self.sections.items.len);
+ self.got_section_index = @intCast(u16, self.sections.slice().len);
const phdr = &self.program_headers.items[self.phdr_got_index.?];
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".got"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_ALLOC,
- .sh_addr = phdr.p_vaddr,
- .sh_offset = phdr.p_offset,
- .sh_size = phdr.p_filesz,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = @as(u16, ptr_size),
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".got"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_ALLOC,
+ .sh_addr = phdr.p_vaddr,
+ .sh_offset = phdr.p_offset,
+ .sh_size = phdr.p_filesz,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = @as(u16, ptr_size),
+ .sh_entsize = 0,
+ },
+ .phdr_index = self.phdr_got_index.?,
});
- try self.phdr_shdr_table.putNoClobber(
- self.base.allocator,
- self.phdr_got_index.?,
- self.got_section_index.?,
- );
self.shdr_table_dirty = true;
}
if (self.rodata_section_index == null) {
- self.rodata_section_index = @intCast(u16, self.sections.items.len);
+ self.rodata_section_index = @intCast(u16, self.sections.slice().len);
const phdr = &self.program_headers.items[self.phdr_load_ro_index.?];
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".rodata"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_ALLOC,
- .sh_addr = phdr.p_vaddr,
- .sh_offset = phdr.p_offset,
- .sh_size = phdr.p_filesz,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = 1,
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".rodata"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_ALLOC,
+ .sh_addr = phdr.p_vaddr,
+ .sh_offset = phdr.p_offset,
+ .sh_size = phdr.p_filesz,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = 1,
+ .sh_entsize = 0,
+ },
+ .phdr_index = self.phdr_load_ro_index.?,
});
- try self.phdr_shdr_table.putNoClobber(
- self.base.allocator,
- self.phdr_load_ro_index.?,
- self.rodata_section_index.?,
- );
self.shdr_table_dirty = true;
}
if (self.data_section_index == null) {
- self.data_section_index = @intCast(u16, self.sections.items.len);
+ self.data_section_index = @intCast(u16, self.sections.slice().len);
const phdr = &self.program_headers.items[self.phdr_load_rw_index.?];
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".data"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_WRITE | elf.SHF_ALLOC,
- .sh_addr = phdr.p_vaddr,
- .sh_offset = phdr.p_offset,
- .sh_size = phdr.p_filesz,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = @as(u16, ptr_size),
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".data"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_WRITE | elf.SHF_ALLOC,
+ .sh_addr = phdr.p_vaddr,
+ .sh_offset = phdr.p_offset,
+ .sh_size = phdr.p_filesz,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = @as(u16, ptr_size),
+ .sh_entsize = 0,
+ },
+ .phdr_index = self.phdr_load_rw_index.?,
});
- try self.phdr_shdr_table.putNoClobber(
- self.base.allocator,
- self.phdr_load_rw_index.?,
- self.data_section_index.?,
- );
self.shdr_table_dirty = true;
}
if (self.symtab_section_index == null) {
- self.symtab_section_index = @intCast(u16, self.sections.items.len);
+ self.symtab_section_index = @intCast(u16, self.sections.slice().len);
const min_align: u16 = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym);
const each_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym);
const file_size = self.base.options.symbol_count_hint * each_size;
const off = self.findFreeSpace(file_size, min_align);
log.debug("found symtab free space 0x{x} to 0x{x}", .{ off, off + file_size });
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".symtab"),
- .sh_type = elf.SHT_SYMTAB,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = file_size,
- // The section header index of the associated string table.
- .sh_link = self.shstrtab_index.?,
- .sh_info = @intCast(u32, self.local_symbols.items.len),
- .sh_addralign = min_align,
- .sh_entsize = each_size,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".symtab"),
+ .sh_type = elf.SHT_SYMTAB,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = file_size,
+ // The section header index of the associated string table.
+ .sh_link = self.shstrtab_index.?,
+ .sh_info = @intCast(u32, self.local_symbols.items.len),
+ .sh_addralign = min_align,
+ .sh_entsize = each_size,
+ },
+ .phdr_index = undefined,
});
self.shdr_table_dirty = true;
try self.writeSymbol(0);
@@ -710,27 +687,30 @@ pub fn populateMissingMetadata(self: *Elf) !void {
if (self.dwarf) |*dw| {
if (self.debug_str_section_index == null) {
- self.debug_str_section_index = @intCast(u16, self.sections.items.len);
- assert(dw.strtab.items.len == 0);
- try dw.strtab.append(self.base.allocator, 0);
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".debug_str"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_MERGE | elf.SHF_STRINGS,
- .sh_addr = 0,
- .sh_offset = 0,
- .sh_size = 0,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = 1,
- .sh_entsize = 1,
+ self.debug_str_section_index = @intCast(u16, self.sections.slice().len);
+ assert(dw.strtab.buffer.items.len == 0);
+ try dw.strtab.buffer.append(gpa, 0);
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".debug_str"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_MERGE | elf.SHF_STRINGS,
+ .sh_addr = 0,
+ .sh_offset = 0,
+ .sh_size = 0,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = 1,
+ .sh_entsize = 1,
+ },
+ .phdr_index = undefined,
});
self.debug_strtab_dirty = true;
self.shdr_table_dirty = true;
}
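
Note the string-table migration running through these hunks: `makeString`/`getString` on a raw byte list give way to a `StringTable` wrapper (`self.shstrtab`, and `dw.strtab` for DWARF). A hedged sketch of the surface this diff relies on, inferred from the call sites:

    // insert() interns a string and returns its byte offset into the table.
    const name_off = try self.shstrtab.insert(gpa, ".rodata");
    // get() returns ?[]const u8, hence the switch from {s} to {?s} in the
    // log format strings throughout this diff.
    log.debug("section {?s}", .{self.shstrtab.get(name_off)});
    // The raw backing bytes are still reachable for writing to disk:
    try self.base.file.?.pwriteAll(self.shstrtab.buffer.items, shdr.sh_offset);
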
if (self.debug_info_section_index == null) {
- self.debug_info_section_index = @intCast(u16, self.sections.items.len);
+ self.debug_info_section_index = @intCast(u16, self.sections.slice().len);
const file_size_hint = 200;
const p_align = 1;
@@ -739,24 +719,27 @@ pub fn populateMissingMetadata(self: *Elf) !void {
off,
off + file_size_hint,
});
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".debug_info"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = file_size_hint,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = p_align,
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".debug_info"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = file_size_hint,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = p_align,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
});
self.shdr_table_dirty = true;
self.debug_info_header_dirty = true;
}
if (self.debug_abbrev_section_index == null) {
- self.debug_abbrev_section_index = @intCast(u16, self.sections.items.len);
+ self.debug_abbrev_section_index = @intCast(u16, self.sections.slice().len);
const file_size_hint = 128;
const p_align = 1;
@@ -765,24 +748,27 @@ pub fn populateMissingMetadata(self: *Elf) !void {
off,
off + file_size_hint,
});
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".debug_abbrev"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = file_size_hint,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = p_align,
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".debug_abbrev"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = file_size_hint,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = p_align,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
});
self.shdr_table_dirty = true;
self.debug_abbrev_section_dirty = true;
}
if (self.debug_aranges_section_index == null) {
- self.debug_aranges_section_index = @intCast(u16, self.sections.items.len);
+ self.debug_aranges_section_index = @intCast(u16, self.sections.slice().len);
const file_size_hint = 160;
const p_align = 16;
@@ -791,24 +777,27 @@ pub fn populateMissingMetadata(self: *Elf) !void {
off,
off + file_size_hint,
});
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".debug_aranges"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = file_size_hint,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = p_align,
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".debug_aranges"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = file_size_hint,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = p_align,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
});
self.shdr_table_dirty = true;
self.debug_aranges_section_dirty = true;
}
if (self.debug_line_section_index == null) {
- self.debug_line_section_index = @intCast(u16, self.sections.items.len);
+ self.debug_line_section_index = @intCast(u16, self.sections.slice().len);
const file_size_hint = 250;
const p_align = 1;
@@ -817,17 +806,20 @@ pub fn populateMissingMetadata(self: *Elf) !void {
off,
off + file_size_hint,
});
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".debug_line"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = file_size_hint,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = p_align,
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".debug_line"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = file_size_hint,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = p_align,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
});
self.shdr_table_dirty = true;
self.debug_line_header_dirty = true;
@@ -843,7 +835,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.p64 => @alignOf(elf.Elf64_Shdr),
};
if (self.shdr_table_offset == null) {
- self.shdr_table_offset = self.findFreeSpace(self.sections.items.len * shsize, shalign);
+ self.shdr_table_offset = self.findFreeSpace(self.sections.slice().len * shsize, shalign);
self.shdr_table_dirty = true;
}
@@ -874,7 +866,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
// offset + its file size.
var max_file_offset: u64 = 0;
- for (self.sections.items) |shdr| {
+ for (self.sections.items(.shdr)) |shdr| {
if (shdr.sh_offset + shdr.sh_size > max_file_offset) {
max_file_offset = shdr.sh_offset + shdr.sh_size;
}
@@ -884,15 +876,18 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
}
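
With the program-header index stored on the section row, `growAllocSection` loses a parameter and its call sites shrink accordingly, as the `writeOffsetTableEntry` hunk further down shows:

    // before: try self.growAllocSection(self.got_section_index.?, self.phdr_got_index.?, needed_size);
    // after:
    try self.growAllocSection(self.got_section_index.?, needed_size);
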
-fn growAllocSection(self: *Elf, shdr_index: u16, phdr_index: u16, needed_size: u64) !void {
+fn growAllocSection(self: *Elf, shdr_index: u16, needed_size: u64) !void {
// TODO Also detect virtual address collisions.
- const shdr = &self.sections.items[shdr_index];
+ const shdr = &self.sections.items(.shdr)[shdr_index];
+ const phdr_index = self.sections.items(.phdr_index)[shdr_index];
const phdr = &self.program_headers.items[phdr_index];
+ const maybe_last_atom_index = self.sections.items(.last_atom_index)[shdr_index];
if (needed_size > self.allocatedSize(shdr.sh_offset)) {
// Must move the entire section.
const new_offset = self.findFreeSpace(needed_size, self.page_size);
- const existing_size = if (self.atoms.get(phdr_index)) |last| blk: {
+ const existing_size = if (maybe_last_atom_index) |last_atom_index| blk: {
+ const last = self.getAtom(last_atom_index);
const sym = last.getSymbol(self);
break :blk (sym.st_value + sym.st_size) - phdr.p_vaddr;
} else if (shdr_index == self.got_section_index.?) blk: {
@@ -900,8 +895,8 @@ fn growAllocSection(self: *Elf, shdr_index: u16, phdr_index: u16, needed_size: u
} else 0;
shdr.sh_size = 0;
- log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{
- self.getString(shdr.sh_name),
+ log.debug("new '{?s}' file offset 0x{x} to 0x{x}", .{
+ self.shstrtab.get(shdr.sh_name),
new_offset,
new_offset + existing_size,
});
@@ -927,7 +922,7 @@ pub fn growNonAllocSection(
min_alignment: u32,
requires_file_copy: bool,
) !void {
- const shdr = &self.sections.items[shdr_index];
+ const shdr = &self.sections.items(.shdr)[shdr_index];
if (needed_size > self.allocatedSize(shdr.sh_offset)) {
const existing_size = if (self.symtab_section_index.? == shdr_index) blk: {
@@ -940,7 +935,7 @@ pub fn growNonAllocSection(
shdr.sh_size = 0;
// Move all the symbols to a new file location.
const new_offset = self.findFreeSpace(needed_size, min_alignment);
- log.debug("moving '{s}' from 0x{x} to 0x{x}", .{ self.getString(shdr.sh_name), shdr.sh_offset, new_offset });
+ log.debug("moving '{?s}' from 0x{x} to 0x{x}", .{ self.shstrtab.get(shdr.sh_name), shdr.sh_offset, new_offset });
if (requires_file_copy) {
const amt = try self.base.file.?.copyRangeAll(
@@ -1011,6 +1006,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
}
}
+ const gpa = self.base.allocator;
var sub_prog_node = prog_node.start("ELF Flush", 0);
sub_prog_node.activate();
defer sub_prog_node.end();
@@ -1029,12 +1025,13 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
{
var it = self.relocs.iterator();
while (it.next()) |entry| {
- const atom = entry.key_ptr.*;
+ const atom_index = entry.key_ptr.*;
const relocs = entry.value_ptr.*;
+ const atom = self.getAtom(atom_index);
const source_sym = atom.getSymbol(self);
- const source_shdr = self.sections.items[source_sym.st_shndx];
+ const source_shdr = self.sections.items(.shdr)[source_sym.st_shndx];
- log.debug("relocating '{s}'", .{self.getString(source_sym.st_name)});
+ log.debug("relocating '{?s}'", .{self.shstrtab.get(source_sym.st_name)});
for (relocs.items) |*reloc| {
const target_sym = self.local_symbols.items[reloc.target];
@@ -1045,10 +1042,10 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
const section_offset = (source_sym.st_value + reloc.offset) - source_shdr.sh_addr;
const file_offset = source_shdr.sh_offset + section_offset;
- log.debug(" ({x}: [() => 0x{x}] ({s}))", .{
+ log.debug(" ({x}: [() => 0x{x}] ({?s}))", .{
reloc.offset,
target_vaddr,
- self.getString(target_sym.st_name),
+ self.shstrtab.get(target_sym.st_name),
});
switch (self.ptr_width) {
@@ -1126,8 +1123,8 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
switch (self.ptr_width) {
.p32 => {
- const buf = try self.base.allocator.alloc(elf.Elf32_Phdr, self.program_headers.items.len);
- defer self.base.allocator.free(buf);
+ const buf = try gpa.alloc(elf.Elf32_Phdr, self.program_headers.items.len);
+ defer gpa.free(buf);
for (buf) |*phdr, i| {
phdr.* = progHeaderTo32(self.program_headers.items[i]);
@@ -1138,8 +1135,8 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.phdr_table_offset.?);
},
.p64 => {
- const buf = try self.base.allocator.alloc(elf.Elf64_Phdr, self.program_headers.items.len);
- defer self.base.allocator.free(buf);
+ const buf = try gpa.alloc(elf.Elf64_Phdr, self.program_headers.items.len);
+ defer gpa.free(buf);
for (buf) |*phdr, i| {
phdr.* = self.program_headers.items[i];
@@ -1155,20 +1152,20 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
{
const shdr_index = self.shstrtab_index.?;
- if (self.shstrtab_dirty or self.shstrtab.items.len != self.sections.items[shdr_index].sh_size) {
- try self.growNonAllocSection(shdr_index, self.shstrtab.items.len, 1, false);
- const shstrtab_sect = self.sections.items[shdr_index];
- try self.base.file.?.pwriteAll(self.shstrtab.items, shstrtab_sect.sh_offset);
+ if (self.shstrtab_dirty or self.shstrtab.buffer.items.len != self.sections.items(.shdr)[shdr_index].sh_size) {
+ try self.growNonAllocSection(shdr_index, self.shstrtab.buffer.items.len, 1, false);
+ const shstrtab_sect = self.sections.items(.shdr)[shdr_index];
+ try self.base.file.?.pwriteAll(self.shstrtab.buffer.items, shstrtab_sect.sh_offset);
self.shstrtab_dirty = false;
}
}
if (self.dwarf) |dwarf| {
const shdr_index = self.debug_str_section_index.?;
- if (self.debug_strtab_dirty or dwarf.strtab.items.len != self.sections.items[shdr_index].sh_size) {
- try self.growNonAllocSection(shdr_index, dwarf.strtab.items.len, 1, false);
- const debug_strtab_sect = self.sections.items[shdr_index];
- try self.base.file.?.pwriteAll(dwarf.strtab.items, debug_strtab_sect.sh_offset);
+ if (self.debug_strtab_dirty or dwarf.strtab.buffer.items.len != self.sections.items(.shdr)[shdr_index].sh_size) {
+ try self.growNonAllocSection(shdr_index, dwarf.strtab.buffer.items.len, 1, false);
+ const debug_strtab_sect = self.sections.items(.shdr)[shdr_index];
+ try self.base.file.?.pwriteAll(dwarf.strtab.buffer.items, debug_strtab_sect.sh_offset);
self.debug_strtab_dirty = false;
}
}
@@ -1183,7 +1180,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
.p64 => @alignOf(elf.Elf64_Shdr),
};
const allocated_size = self.allocatedSize(self.shdr_table_offset.?);
- const needed_size = self.sections.items.len * shsize;
+ const needed_size = self.sections.slice().len * shsize;
if (needed_size > allocated_size) {
self.shdr_table_offset = null; // free the space
@@ -1192,12 +1189,13 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
switch (self.ptr_width) {
.p32 => {
- const buf = try self.base.allocator.alloc(elf.Elf32_Shdr, self.sections.items.len);
- defer self.base.allocator.free(buf);
+ const slice = self.sections.slice();
+ const buf = try gpa.alloc(elf.Elf32_Shdr, slice.len);
+ defer gpa.free(buf);
for (buf) |*shdr, i| {
- shdr.* = sectHeaderTo32(self.sections.items[i]);
- log.debug("writing section {s}: {}", .{ self.getString(shdr.sh_name), shdr.* });
+ shdr.* = sectHeaderTo32(slice.items(.shdr)[i]);
+ log.debug("writing section {?s}: {}", .{ self.shstrtab.get(shdr.sh_name), shdr.* });
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf32_Shdr, shdr);
}
@@ -1205,12 +1203,13 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
},
.p64 => {
- const buf = try self.base.allocator.alloc(elf.Elf64_Shdr, self.sections.items.len);
- defer self.base.allocator.free(buf);
+ const slice = self.sections.slice();
+ const buf = try gpa.alloc(elf.Elf64_Shdr, slice.len);
+ defer gpa.free(buf);
for (buf) |*shdr, i| {
- shdr.* = self.sections.items[i];
- log.debug("writing section {s}: {}", .{ self.getString(shdr.sh_name), shdr.* });
+ shdr.* = slice.items(.shdr)[i];
+ log.debug("writing section {?s}: {}", .{ self.shstrtab.get(shdr.sh_name), shdr.* });
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf64_Shdr, shdr);
}
@@ -2021,7 +2020,7 @@ fn writeElfHeader(self: *Elf) !void {
mem.writeInt(u16, hdr_buf[index..][0..2], e_shentsize, endian);
index += 2;
- const e_shnum = @intCast(u16, self.sections.items.len);
+ const e_shnum = @intCast(u16, self.sections.slice().len);
mem.writeInt(u16, hdr_buf[index..][0..2], e_shnum, endian);
index += 2;
@@ -2033,124 +2032,145 @@ fn writeElfHeader(self: *Elf) !void {
try self.base.file.?.pwriteAll(hdr_buf[0..index], 0);
}
-fn freeTextBlock(self: *Elf, text_block: *TextBlock, phdr_index: u16) void {
- const local_sym = text_block.getSymbol(self);
- const name_str_index = local_sym.st_name;
- const name = self.getString(name_str_index);
- log.debug("freeTextBlock {*} ({s})", .{ text_block, name });
+fn freeAtom(self: *Elf, atom_index: Atom.Index) void {
+ const atom = self.getAtom(atom_index);
+ log.debug("freeAtom {d} ({s})", .{ atom_index, atom.getName(self) });
- self.freeRelocationsForTextBlock(text_block);
+ Atom.freeRelocations(self, atom_index);
- const free_list = self.atom_free_lists.getPtr(phdr_index).?;
+ const gpa = self.base.allocator;
+ const shndx = atom.getSymbol(self).st_shndx;
+ const free_list = &self.sections.items(.free_list)[shndx];
var already_have_free_list_node = false;
{
var i: usize = 0;
// TODO turn free_list into a hash map
while (i < free_list.items.len) {
- if (free_list.items[i] == text_block) {
+ if (free_list.items[i] == atom_index) {
_ = free_list.swapRemove(i);
continue;
}
- if (free_list.items[i] == text_block.prev) {
+ if (free_list.items[i] == atom.prev_index) {
already_have_free_list_node = true;
}
i += 1;
}
}
- if (self.atoms.getPtr(phdr_index)) |last_block| {
- if (last_block.* == text_block) {
- if (text_block.prev) |prev| {
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[shndx];
+ if (maybe_last_atom_index.*) |last_atom_index| {
+ if (last_atom_index == atom_index) {
+ if (atom.prev_index) |prev_index| {
// TODO shrink the section size here
- last_block.* = prev;
+ maybe_last_atom_index.* = prev_index;
} else {
- _ = self.atoms.fetchRemove(phdr_index);
+ maybe_last_atom_index.* = null;
}
}
}
- if (text_block.prev) |prev| {
- prev.next = text_block.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
- if (!already_have_free_list_node and prev.freeListEligible(self)) {
+ if (!already_have_free_list_node and prev.*.freeListEligible(self)) {
// The free list is a heuristic; it doesn't have to be perfect, so we can
// ignore the OOM here.
- free_list.append(self.base.allocator, prev) catch {};
+ free_list.append(gpa, prev_index) catch {};
}
} else {
- text_block.prev = null;
+ self.getAtomPtr(atom_index).prev_index = null;
}
- if (text_block.next) |next| {
- next.prev = text_block.prev;
+ if (atom.next_index) |next_index| {
+ self.getAtomPtr(next_index).prev_index = atom.prev_index;
} else {
- text_block.next = null;
+ self.getAtomPtr(atom_index).next_index = null;
}
// Appending to free lists is allowed to fail because the free lists are heuristics-based anyway.
- const local_sym_index = text_block.getSymbolIndex().?;
- self.local_symbol_free_list.append(self.base.allocator, local_sym_index) catch {};
+ const local_sym_index = atom.getSymbolIndex().?;
+
+ self.local_symbol_free_list.append(gpa, local_sym_index) catch {};
self.local_symbols.items[local_sym_index].st_info = 0;
+ self.local_symbols.items[local_sym_index].st_shndx = 0;
_ = self.atom_by_index_table.remove(local_sym_index);
- text_block.local_sym_index = 0;
+ self.getAtomPtr(atom_index).local_sym_index = 0;
- self.offset_table_free_list.append(self.base.allocator, text_block.offset_table_index) catch {};
-
- if (self.dwarf) |*dw| {
- dw.freeAtom(&text_block.dbg_info_atom);
- }
+ self.offset_table_free_list.append(self.base.allocator, atom.offset_table_index) catch {};
}
-fn shrinkTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, phdr_index: u16) void {
+fn shrinkAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64) void {
_ = self;
- _ = text_block;
+ _ = atom_index;
_ = new_block_size;
- _ = phdr_index;
}
-fn growTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, alignment: u64, phdr_index: u16) !u64 {
- const sym = text_block.getSymbol(self);
+fn growAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignment: u64) !u64 {
+ const atom = self.getAtom(atom_index);
+ const sym = atom.getSymbol(self);
const align_ok = mem.alignBackwardGeneric(u64, sym.st_value, alignment) == sym.st_value;
- const need_realloc = !align_ok or new_block_size > text_block.capacity(self);
+ const need_realloc = !align_ok or new_block_size > atom.capacity(self);
if (!need_realloc) return sym.st_value;
- return self.allocateTextBlock(text_block, new_block_size, alignment, phdr_index);
+ return self.allocateAtom(atom_index, new_block_size, alignment);
+}
+
+pub fn createAtom(self: *Elf) !Atom.Index {
+ const gpa = self.base.allocator;
+ const atom_index = @intCast(Atom.Index, self.atoms.items.len);
+ const atom = try self.atoms.addOne(gpa);
+ const local_sym_index = try self.allocateLocalSymbol();
+ const offset_table_index = try self.allocateGotOffset();
+ try self.atom_by_index_table.putNoClobber(gpa, local_sym_index, atom_index);
+ atom.* = .{
+ .local_sym_index = local_sym_index,
+ .offset_table_index = offset_table_index,
+ .prev_index = null,
+ .next_index = null,
+ };
+ log.debug("creating ATOM(%{d}) at index {d}", .{ local_sym_index, atom_index });
+ return atom_index;
}
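
Taken together, `createAtom`, `allocateAtom`, and `freeAtom` define the new index-based atom lifecycle. A hedged sketch of a typical round trip inside Elf.zig (error handling elided):

    const atom_index = try self.createAtom(); // reserves a local symbol and a GOT slot
    const sym = self.getAtomPtr(atom_index).getSymbolPtr(self);
    sym.st_shndx = self.rodata_section_index.?; // allocateAtom reads the target section from here
    sym.st_value = try self.allocateAtom(atom_index, code.len, required_alignment);
    sym.st_size = code.len;
    // write `code` at the file offset derived from the section, then eventually:
    self.freeAtom(atom_index); // recycles the symbol and the address range
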
-fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, alignment: u64, phdr_index: u16) !u64 {
- const shdr_index = self.phdr_shdr_table.get(phdr_index).?;
+fn allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignment: u64) !u64 {
+ const atom = self.getAtom(atom_index);
+ const sym = atom.getSymbol(self);
+ const phdr_index = self.sections.items(.phdr_index)[sym.st_shndx];
const phdr = &self.program_headers.items[phdr_index];
- const shdr = &self.sections.items[shdr_index];
- const new_block_ideal_capacity = padToIdeal(new_block_size);
+ const shdr = &self.sections.items(.shdr)[sym.st_shndx];
+ const free_list = &self.sections.items(.free_list)[sym.st_shndx];
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sym.st_shndx];
+ const new_atom_ideal_capacity = padToIdeal(new_block_size);
- // We use these to indicate our intention to update metadata, placing the new block,
+ // We use these to indicate our intention to update metadata, placing the new atom,
// and possibly removing a free list node.
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
- var block_placement: ?*TextBlock = null;
+ var atom_placement: ?Atom.Index = null;
var free_list_removal: ?usize = null;
- var free_list = self.atom_free_lists.get(phdr_index).?;
// First we look for an appropriately sized free list node.
// The list is unordered. We'll just take the first thing that works.
const vaddr = blk: {
var i: usize = 0;
while (i < free_list.items.len) {
- const big_block = free_list.items[i];
- // We now have a pointer to a live text block that has too much capacity.
- // Is it enough that we could fit this new text block?
- const sym = big_block.getSymbol(self);
- const capacity = big_block.capacity(self);
+ const big_atom_index = free_list.items[i];
+ const big_atom = self.getAtom(big_atom_index);
+ // We now have an index to a live atom that has too much capacity.
+ // Is there enough room left to fit this new atom?
+ const big_atom_sym = big_atom.getSymbol(self);
+ const capacity = big_atom.capacity(self);
const ideal_capacity = padToIdeal(capacity);
- const ideal_capacity_end_vaddr = std.math.add(u64, sym.st_value, ideal_capacity) catch ideal_capacity;
- const capacity_end_vaddr = sym.st_value + capacity;
- const new_start_vaddr_unaligned = capacity_end_vaddr - new_block_ideal_capacity;
+ const ideal_capacity_end_vaddr = std.math.add(u64, big_atom_sym.st_value, ideal_capacity) catch ideal_capacity;
+ const capacity_end_vaddr = big_atom_sym.st_value + capacity;
+ const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
const new_start_vaddr = mem.alignBackwardGeneric(u64, new_start_vaddr_unaligned, alignment);
if (new_start_vaddr < ideal_capacity_end_vaddr) {
// Additional bookkeeping here to notice if this free list node
// should be deleted because the block that it points to has grown to take up
// more of the extra capacity.
- if (!big_block.freeListEligible(self)) {
+ if (!big_atom.freeListEligible(self)) {
_ = free_list.swapRemove(i);
} else {
i += 1;
@@ -2164,29 +2184,33 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
const keep_free_list_node = remaining_capacity >= min_text_capacity;
// Set up the metadata to be updated, after errors are no longer possible.
- block_placement = big_block;
+ atom_placement = big_atom_index;
if (!keep_free_list_node) {
free_list_removal = i;
}
break :blk new_start_vaddr;
- } else if (self.atoms.get(phdr_index)) |last| {
- const sym = last.getSymbol(self);
- const ideal_capacity = padToIdeal(sym.st_size);
- const ideal_capacity_end_vaddr = sym.st_value + ideal_capacity;
+ } else if (maybe_last_atom_index.*) |last_index| {
+ const last = self.getAtom(last_index);
+ const last_sym = last.getSymbol(self);
+ const ideal_capacity = padToIdeal(last_sym.st_size);
+ const ideal_capacity_end_vaddr = last_sym.st_value + ideal_capacity;
const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment);
// Set up the metadata to be updated, after errors are no longer possible.
- block_placement = last;
+ atom_placement = last_index;
break :blk new_start_vaddr;
} else {
break :blk phdr.p_vaddr;
}
};
- const expand_text_section = block_placement == null or block_placement.?.next == null;
- if (expand_text_section) {
+ const expand_section = if (atom_placement) |placement_index|
+ self.getAtom(placement_index).next_index == null
+ else
+ true;
+ if (expand_section) {
const needed_size = (vaddr + new_block_size) - phdr.p_vaddr;
- try self.growAllocSection(shdr_index, phdr_index, needed_size);
- _ = try self.atoms.put(self.base.allocator, phdr_index, text_block);
+ try self.growAllocSection(sym.st_shndx, needed_size);
+ maybe_last_atom_index.* = atom_index;
if (self.dwarf) |_| {
// The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
@@ -2201,23 +2225,28 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
}
shdr.sh_addralign = math.max(shdr.sh_addralign, alignment);
- // This function can also reallocate a text block.
+ // This function can also reallocate an atom.
// In this case we need to "unplug" it from its previous location before
// plugging it in to its new location.
- if (text_block.prev) |prev| {
- prev.next = text_block.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
}
- if (text_block.next) |next| {
- next.prev = text_block.prev;
+ if (atom.next_index) |next_index| {
+ const next = self.getAtomPtr(next_index);
+ next.prev_index = atom.prev_index;
}
- if (block_placement) |big_block| {
- text_block.prev = big_block;
- text_block.next = big_block.next;
- big_block.next = text_block;
+ if (atom_placement) |big_atom_index| {
+ const big_atom = self.getAtomPtr(big_atom_index);
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = big_atom_index;
+ atom_ptr.next_index = big_atom.next_index;
+ big_atom.next_index = atom_index;
} else {
- text_block.prev = null;
- text_block.next = null;
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = null;
+ atom_ptr.next_index = null;
}
if (free_list_removal) |i| {
_ = free_list.swapRemove(i);
@@ -2272,15 +2301,10 @@ pub fn allocateGotOffset(self: *Elf) !u32 {
return index;
}
-fn freeRelocationsForTextBlock(self: *Elf, text_block: *TextBlock) void {
- var removed_relocs = self.relocs.fetchRemove(text_block);
- if (removed_relocs) |*relocs| relocs.value.deinit(self.base.allocator);
-}
-
fn freeUnnamedConsts(self: *Elf, decl_index: Module.Decl.Index) void {
const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return;
for (unnamed_consts.items) |atom| {
- self.freeTextBlock(atom, self.phdr_load_ro_index.?);
+ self.freeAtom(atom);
}
unnamed_consts.clearAndFree(self.base.allocator);
}
@@ -2295,43 +2319,57 @@ pub fn freeDecl(self: *Elf, decl_index: Module.Decl.Index) void {
log.debug("freeDecl {*}", .{decl});
- if (self.decls.fetchRemove(decl_index)) |kv| {
- if (kv.value) |index| {
- self.freeTextBlock(&decl.link.elf, index);
- self.freeUnnamedConsts(decl_index);
- }
+ if (self.decls.fetchRemove(decl_index)) |const_kv| {
+ var kv = const_kv;
+ self.freeAtom(kv.value.atom);
+ self.freeUnnamedConsts(decl_index);
+ kv.value.exports.deinit(self.base.allocator);
}
if (self.dwarf) |*dw| {
- dw.freeDecl(decl);
+ dw.freeDecl(decl_index);
+ }
+}
+
+pub fn getOrCreateAtomForDecl(self: *Elf, decl_index: Module.Decl.Index) !Atom.Index {
+ const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{
+ .atom = try self.createAtom(),
+ .shdr = self.getDeclShdrIndex(decl_index),
+ .exports = .{},
+ };
}
+ return gop.value_ptr.atom;
}
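
The `decls` map now stores per-decl metadata rather than an optional program-header index. The Elf-side struct is not shown in this hunk; judging by the fields used here (`atom`, `shdr`, `exports`), it presumably mirrors the MachO `DeclMetadata` further down:

    // Assumed shape, inferred from the call sites in this file:
    const DeclMetadata = struct {
        atom: Atom.Index,
        shdr: u16, // owning section header index
        /// Global symbol indices for this Decl's export aliases.
        exports: std.ArrayListUnmanaged(u32) = .{},
    };
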
-fn getDeclPhdrIndex(self: *Elf, decl: *Module.Decl) !u16 {
+fn getDeclShdrIndex(self: *Elf, decl_index: Module.Decl.Index) u16 {
+ const decl = self.base.options.module.?.declPtr(decl_index);
const ty = decl.ty;
const zig_ty = ty.zigTypeTag();
const val = decl.val;
- const phdr_index: u16 = blk: {
+ const shdr_index: u16 = blk: {
if (val.isUndefDeep()) {
// TODO in release-fast and release-small, we should put undef in .bss
- break :blk self.phdr_load_rw_index.?;
+ break :blk self.data_section_index.?;
}
switch (zig_ty) {
// TODO: what if this is a function pointer?
- .Fn => break :blk self.phdr_load_re_index.?,
+ .Fn => break :blk self.text_section_index.?,
else => {
if (val.castTag(.variable)) |_| {
- break :blk self.phdr_load_rw_index.?;
+ break :blk self.data_section_index.?;
}
- break :blk self.phdr_load_ro_index.?;
+ break :blk self.rodata_section_index.?;
},
}
};
- return phdr_index;
+ return shdr_index;
}
fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, stt_bits: u8) !*elf.Elf64_Sym {
+ const gpa = self.base.allocator;
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
@@ -2341,60 +2379,65 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
const required_alignment = decl.getAlignment(self.base.options.target);
- const decl_ptr = self.decls.getPtr(decl_index).?;
- if (decl_ptr.* == null) {
- decl_ptr.* = try self.getDeclPhdrIndex(decl);
- }
- const phdr_index = decl_ptr.*.?;
- const shdr_index = self.phdr_shdr_table.get(phdr_index).?;
+ const decl_metadata = self.decls.get(decl_index).?;
+ const atom_index = decl_metadata.atom;
+ const atom = self.getAtom(atom_index);
+
+ const shdr_index = decl_metadata.shdr;
+ if (atom.getSymbol(self).st_size != 0) {
+ const local_sym = atom.getSymbolPtr(self);
+ local_sym.st_name = try self.shstrtab.insert(gpa, decl_name);
+ local_sym.st_info = (elf.STB_LOCAL << 4) | stt_bits;
+ local_sym.st_other = 0;
+ local_sym.st_shndx = shdr_index;
- const local_sym = decl.link.elf.getSymbolPtr(self);
- if (local_sym.st_size != 0) {
- const capacity = decl.link.elf.capacity(self);
+ const capacity = atom.capacity(self);
const need_realloc = code.len > capacity or
!mem.isAlignedGeneric(u64, local_sym.st_value, required_alignment);
+
if (need_realloc) {
- const vaddr = try self.growTextBlock(&decl.link.elf, code.len, required_alignment, phdr_index);
+ const vaddr = try self.growAtom(atom_index, code.len, required_alignment);
log.debug("growing {s} from 0x{x} to 0x{x}", .{ decl_name, local_sym.st_value, vaddr });
if (vaddr != local_sym.st_value) {
local_sym.st_value = vaddr;
log.debug(" (writing new offset table entry)", .{});
- self.offset_table.items[decl.link.elf.offset_table_index] = vaddr;
- try self.writeOffsetTableEntry(decl.link.elf.offset_table_index);
+ self.offset_table.items[atom.offset_table_index] = vaddr;
+ try self.writeOffsetTableEntry(atom.offset_table_index);
}
} else if (code.len < local_sym.st_size) {
- self.shrinkTextBlock(&decl.link.elf, code.len, phdr_index);
+ self.shrinkAtom(atom_index, code.len);
}
local_sym.st_size = code.len;
- local_sym.st_name = try self.updateString(local_sym.st_name, decl_name);
- local_sym.st_info = (elf.STB_LOCAL << 4) | stt_bits;
- local_sym.st_other = 0;
- local_sym.st_shndx = shdr_index;
+
// TODO this write could be avoided if no fields of the symbol were changed.
- try self.writeSymbol(decl.link.elf.getSymbolIndex().?);
+ try self.writeSymbol(atom.getSymbolIndex().?);
} else {
- const name_str_index = try self.makeString(decl_name);
- const vaddr = try self.allocateTextBlock(&decl.link.elf, code.len, required_alignment, phdr_index);
- errdefer self.freeTextBlock(&decl.link.elf, phdr_index);
- log.debug("allocated text block for {s} at 0x{x}", .{ decl_name, vaddr });
-
+ const local_sym = atom.getSymbolPtr(self);
local_sym.* = .{
- .st_name = name_str_index,
+ .st_name = try self.shstrtab.insert(gpa, decl_name),
.st_info = (elf.STB_LOCAL << 4) | stt_bits,
.st_other = 0,
.st_shndx = shdr_index,
- .st_value = vaddr,
- .st_size = code.len,
+ .st_value = 0,
+ .st_size = 0,
};
- self.offset_table.items[decl.link.elf.offset_table_index] = vaddr;
+ const vaddr = try self.allocateAtom(atom_index, code.len, required_alignment);
+ errdefer self.freeAtom(atom_index);
+ log.debug("allocated text block for {s} at 0x{x}", .{ decl_name, vaddr });
- try self.writeSymbol(decl.link.elf.getSymbolIndex().?);
- try self.writeOffsetTableEntry(decl.link.elf.offset_table_index);
+ self.offset_table.items[atom.offset_table_index] = vaddr;
+ local_sym.st_value = vaddr;
+ local_sym.st_size = code.len;
+
+ try self.writeSymbol(atom.getSymbolIndex().?);
+ try self.writeOffsetTableEntry(atom.offset_table_index);
}
+ const local_sym = atom.getSymbolPtr(self);
+ const phdr_index = self.sections.items(.phdr_index)[shdr_index];
const section_offset = local_sym.st_value - self.program_headers.items[phdr_index].p_vaddr;
- const file_offset = self.sections.items[shdr_index].sh_offset + section_offset;
+ const file_offset = self.sections.items(.shdr)[shdr_index].sh_offset + section_offset;
try self.base.file.?.pwriteAll(code, file_offset);
return local_sym;
@@ -2413,15 +2456,10 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
- const atom = &decl.link.elf;
- try atom.ensureInitialized(self);
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (gop.found_existing) {
- self.freeUnnamedConsts(decl_index);
- self.freeRelocationsForTextBlock(atom);
- } else {
- gop.value_ptr.* = null;
- }
+
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ self.freeUnnamedConsts(decl_index);
+ Atom.freeRelocations(self, atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -2483,16 +2521,9 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v
}
}
- assert(!self.unnamed_const_atoms.contains(decl_index));
-
- const atom = &decl.link.elf;
- try atom.ensureInitialized(self);
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (gop.found_existing) {
- self.freeRelocationsForTextBlock(atom);
- } else {
- gop.value_ptr.* = null;
- }
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ Atom.freeRelocations(self, atom_index);
+ const atom = self.getAtom(atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -2509,14 +2540,14 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v
}, &code_buffer, .{
.dwarf = ds,
}, .{
- .parent_atom_index = decl.link.elf.getSymbolIndex().?,
+ .parent_atom_index = atom.getSymbolIndex().?,
})
else
try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none, .{
- .parent_atom_index = decl.link.elf.getSymbolIndex().?,
+ .parent_atom_index = atom.getSymbolIndex().?,
});
const code = switch (res) {
@@ -2545,41 +2576,35 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v
}
pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module.Decl.Index) !u32 {
- var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+ const gpa = self.base.allocator;
+
+ var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
-
- const gop = try self.unnamed_const_atoms.getOrPut(self.base.allocator, decl_index);
+ const gop = try self.unnamed_const_atoms.getOrPut(gpa, decl_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
const unnamed_consts = gop.value_ptr;
- const atom = try self.base.allocator.create(TextBlock);
- errdefer self.base.allocator.destroy(atom);
- atom.* = TextBlock.empty;
- // TODO for unnamed consts we don't need GOT offset/entry allocated
- try atom.ensureInitialized(self);
- try self.managed_atoms.append(self.base.allocator, atom);
-
+ const decl = mod.declPtr(decl_index);
const name_str_index = blk: {
const decl_name = try decl.getFullyQualifiedName(mod);
- defer self.base.allocator.free(decl_name);
-
+ defer gpa.free(decl_name);
const index = unnamed_consts.items.len;
- const name = try std.fmt.allocPrint(self.base.allocator, "__unnamed_{s}_{d}", .{ decl_name, index });
- defer self.base.allocator.free(name);
-
- break :blk try self.makeString(name);
+ const name = try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
+ defer gpa.free(name);
+ break :blk try self.shstrtab.insert(gpa, name);
};
- const name = self.getString(name_str_index);
+ const name = self.shstrtab.get(name_str_index).?;
+
+ const atom_index = try self.createAtom();
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .{
.none = {},
}, .{
- .parent_atom_index = atom.getSymbolIndex().?,
+ .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
});
const code = switch (res) {
.ok => code_buffer.items,
@@ -2592,31 +2617,27 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
};
const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
- const phdr_index = self.phdr_load_ro_index.?;
- const shdr_index = self.phdr_shdr_table.get(phdr_index).?;
- const vaddr = try self.allocateTextBlock(atom, code.len, required_alignment, phdr_index);
- errdefer self.freeTextBlock(atom, phdr_index);
-
- log.debug("allocated text block for {s} at 0x{x}", .{ name, vaddr });
-
- const local_sym = atom.getSymbolPtr(self);
- local_sym.* = .{
- .st_name = name_str_index,
- .st_info = (elf.STB_LOCAL << 4) | elf.STT_OBJECT,
- .st_other = 0,
- .st_shndx = shdr_index,
- .st_value = vaddr,
- .st_size = code.len,
- };
-
- try self.writeSymbol(atom.getSymbolIndex().?);
- try unnamed_consts.append(self.base.allocator, atom);
+ const shdr_index = self.rodata_section_index.?;
+ const phdr_index = self.sections.items(.phdr_index)[shdr_index];
+ const local_sym = self.getAtom(atom_index).getSymbolPtr(self);
+ local_sym.st_name = name_str_index;
+ local_sym.st_info = (elf.STB_LOCAL << 4) | elf.STT_OBJECT;
+ local_sym.st_other = 0;
+ local_sym.st_shndx = shdr_index;
+ local_sym.st_size = code.len;
+ local_sym.st_value = try self.allocateAtom(atom_index, code.len, required_alignment);
+ errdefer self.freeAtom(atom_index);
+
+ log.debug("allocated text block for {s} at 0x{x}", .{ name, local_sym.st_value });
+
+ try self.writeSymbol(self.getAtom(atom_index).getSymbolIndex().?);
+ try unnamed_consts.append(gpa, atom_index);
const section_offset = local_sym.st_value - self.program_headers.items[phdr_index].p_vaddr;
- const file_offset = self.sections.items[shdr_index].sh_offset + section_offset;
+ const file_offset = self.sections.items(.shdr)[shdr_index].sh_offset + section_offset;
try self.base.file.?.pwriteAll(code, file_offset);
- return atom.getSymbolIndex().?;
+ return self.getAtom(atom_index).getSymbolIndex().?;
}
pub fn updateDeclExports(
@@ -2635,20 +2656,16 @@ pub fn updateDeclExports(
const tracy = trace(@src());
defer tracy.end();
- const decl = module.declPtr(decl_index);
- const atom = &decl.link.elf;
-
- if (atom.getSymbolIndex() == null) return;
+ const gpa = self.base.allocator;
+ const decl = module.declPtr(decl_index);
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const atom = self.getAtom(atom_index);
const decl_sym = atom.getSymbol(self);
- try self.global_symbols.ensureUnusedCapacity(self.base.allocator, exports.len);
+ const decl_metadata = self.decls.getPtr(decl_index).?;
+ const shdr_index = decl_metadata.shdr;
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (!gop.found_existing) {
- gop.value_ptr.* = try self.getDeclPhdrIndex(decl);
- }
- const phdr_index = gop.value_ptr.*.?;
- const shdr_index = self.phdr_shdr_table.get(phdr_index).?;
+ try self.global_symbols.ensureUnusedCapacity(gpa, exports.len);
for (exports) |exp| {
if (exp.options.section) |section_name| {
@@ -2681,10 +2698,10 @@ pub fn updateDeclExports(
},
};
const stt_bits: u8 = @truncate(u4, decl_sym.st_info);
- if (exp.link.elf.sym_index) |i| {
+ if (decl_metadata.getExport(self, exp.options.name)) |i| {
const sym = &self.global_symbols.items[i];
sym.* = .{
- .st_name = try self.updateString(sym.st_name, exp.options.name),
+ .st_name = try self.shstrtab.insert(gpa, exp.options.name),
.st_info = (stb_bits << 4) | stt_bits,
.st_other = 0,
.st_shndx = shdr_index,
@@ -2692,30 +2709,29 @@ pub fn updateDeclExports(
.st_size = decl_sym.st_size,
};
} else {
- const name = try self.makeString(exp.options.name);
const i = if (self.global_symbol_free_list.popOrNull()) |i| i else blk: {
_ = self.global_symbols.addOneAssumeCapacity();
break :blk self.global_symbols.items.len - 1;
};
+ try decl_metadata.exports.append(gpa, @intCast(u32, i));
self.global_symbols.items[i] = .{
- .st_name = name,
+ .st_name = try self.shstrtab.insert(gpa, exp.options.name),
.st_info = (stb_bits << 4) | stt_bits,
.st_other = 0,
.st_shndx = shdr_index,
.st_value = decl_sym.st_value,
.st_size = decl_sym.st_size,
};
-
- exp.link.elf.sym_index = @intCast(u32, i);
}
}
}
/// Must be called only after a successful call to `updateDecl`.
-pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl: *const Module.Decl) !void {
+pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: Module.Decl.Index) !void {
const tracy = trace(@src());
defer tracy.end();
+ const decl = mod.declPtr(decl_index);
const decl_name = try decl.getFullyQualifiedName(mod);
defer self.base.allocator.free(decl_name);
@@ -2723,16 +2739,18 @@ pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl: *const Module.Decl)
if (self.llvm_object) |_| return;
if (self.dwarf) |*dw| {
- try dw.updateDeclLineNumber(decl);
+ try dw.updateDeclLineNumber(mod, decl_index);
}
}
-pub fn deleteExport(self: *Elf, exp: Export) void {
+pub fn deleteDeclExport(self: *Elf, decl_index: Module.Decl.Index, name: []const u8) void {
if (self.llvm_object) |_| return;
-
- const sym_index = exp.sym_index orelse return;
- self.global_symbol_free_list.append(self.base.allocator, sym_index) catch {};
- self.global_symbols.items[sym_index].st_info = 0;
+ const metadata = self.decls.getPtr(decl_index) orelse return;
+ const sym_index = metadata.getExportPtr(self, name) orelse return;
+ log.debug("deleting export '{s}'", .{name});
+ self.global_symbol_free_list.append(self.base.allocator, sym_index.*) catch {};
+ self.global_symbols.items[sym_index.*].st_info = 0;
+ sym_index.* = 0;
}
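
`getExport`/`getExportPtr` on the Elf side are likewise not shown in this diff; presumably they scan the decl's export list comparing global symbol names, as the MachO versions below do. A hedged sketch using the `getGlobalName` helper added at the end of this file:

    fn getExportPtr(m: *DeclMetadata, elf_file: *Elf, name: []const u8) ?*u32 {
        for (m.exports.items) |*exp| {
            if (mem.eql(u8, name, elf_file.getGlobalName(exp.*))) return exp;
        }
        return null;
    }
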
fn writeProgHeader(self: *Elf, index: usize) !void {
@@ -2761,7 +2779,7 @@ fn writeSectHeader(self: *Elf, index: usize) !void {
switch (self.ptr_width) {
.p32 => {
var shdr: [1]elf.Elf32_Shdr = undefined;
- shdr[0] = sectHeaderTo32(self.sections.items[index]);
+ shdr[0] = sectHeaderTo32(self.sections.items(.shdr)[index]);
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf32_Shdr, &shdr[0]);
}
@@ -2769,7 +2787,7 @@ fn writeSectHeader(self: *Elf, index: usize) !void {
return self.base.file.?.pwriteAll(mem.sliceAsBytes(&shdr), offset);
},
.p64 => {
- var shdr = [1]elf.Elf64_Shdr{self.sections.items[index]};
+ var shdr = [1]elf.Elf64_Shdr{self.sections.items(.shdr)[index]};
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf64_Shdr, &shdr[0]);
}
@@ -2783,11 +2801,11 @@ fn writeOffsetTableEntry(self: *Elf, index: usize) !void {
const entry_size: u16 = self.archPtrWidthBytes();
if (self.offset_table_count_dirty) {
const needed_size = self.offset_table.items.len * entry_size;
- try self.growAllocSection(self.got_section_index.?, self.phdr_got_index.?, needed_size);
+ try self.growAllocSection(self.got_section_index.?, needed_size);
self.offset_table_count_dirty = false;
}
const endian = self.base.options.target.cpu.arch.endian();
- const shdr = &self.sections.items[self.got_section_index.?];
+ const shdr = &self.sections.items(.shdr)[self.got_section_index.?];
const off = shdr.sh_offset + @as(u64, entry_size) * index;
switch (entry_size) {
2 => {
@@ -2813,7 +2831,7 @@ fn writeSymbol(self: *Elf, index: usize) !void {
const tracy = trace(@src());
defer tracy.end();
- const syms_sect = &self.sections.items[self.symtab_section_index.?];
+ const syms_sect = &self.sections.items(.shdr)[self.symtab_section_index.?];
// Make sure we are not pointlessly writing symbol data that will have to get relocated
// due to running out of space.
if (self.local_symbols.items.len != syms_sect.sh_info) {
@@ -2835,7 +2853,7 @@ fn writeSymbol(self: *Elf, index: usize) !void {
.p64 => syms_sect.sh_offset + @sizeOf(elf.Elf64_Sym) * index,
};
const local = self.local_symbols.items[index];
- log.debug("writing symbol {d}, '{s}' at 0x{x}", .{ index, self.getString(local.st_name), off });
+ log.debug("writing symbol {d}, '{?s}' at 0x{x}", .{ index, self.shstrtab.get(local.st_name), off });
log.debug(" ({})", .{local});
switch (self.ptr_width) {
.p32 => {
@@ -2865,7 +2883,7 @@ fn writeSymbol(self: *Elf, index: usize) !void {
}
fn writeAllGlobalSymbols(self: *Elf) !void {
- const syms_sect = &self.sections.items[self.symtab_section_index.?];
+ const syms_sect = &self.sections.items(.shdr)[self.symtab_section_index.?];
const sym_size: u64 = switch (self.ptr_width) {
.p32 => @sizeOf(elf.Elf32_Sym),
.p64 => @sizeOf(elf.Elf64_Sym),
@@ -3215,10 +3233,58 @@ const CsuObjects = struct {
fn logSymtab(self: Elf) void {
log.debug("locals:", .{});
for (self.local_symbols.items) |sym, id| {
- log.debug(" {d}: {s}: @{x} in {d}", .{ id, self.getString(sym.st_name), sym.st_value, sym.st_shndx });
+ log.debug(" {d}: {?s}: @{x} in {d}", .{ id, self.shstrtab.get(sym.st_name), sym.st_value, sym.st_shndx });
}
log.debug("globals:", .{});
for (self.global_symbols.items) |sym, id| {
- log.debug(" {d}: {s}: @{x} in {d}", .{ id, self.getString(sym.st_name), sym.st_value, sym.st_shndx });
+ log.debug(" {d}: {?s}: @{x} in {d}", .{ id, self.shstrtab.get(sym.st_name), sym.st_value, sym.st_shndx });
}
}
+
+pub fn getProgramHeader(self: *const Elf, shdr_index: u16) elf.Elf64_Phdr {
+ const index = self.sections.items(.phdr_index)[shdr_index];
+ return self.program_headers.items[index];
+}
+
+pub fn getProgramHeaderPtr(self: *Elf, shdr_index: u16) *elf.Elf64_Phdr {
+ const index = self.sections.items(.phdr_index)[shdr_index];
+ return &self.program_headers.items[index];
+}
+
+/// Returns a pointer to the symbol at sym_index.
+pub fn getSymbolPtr(self: *Elf, sym_index: u32) *elf.Elf64_Sym {
+ return &self.local_symbols.items[sym_index];
+}
+
+/// Returns symbol at sym_index.
+pub fn getSymbol(self: *const Elf, sym_index: u32) elf.Elf64_Sym {
+ return self.local_symbols.items[sym_index];
+}
+
+/// Returns name of the symbol at sym_index.
+pub fn getSymbolName(self: *const Elf, sym_index: u32) []const u8 {
+ const sym = self.local_symbols.items[sym_index];
+ return self.shstrtab.get(sym.st_name).?;
+}
+
+/// Returns name of the global symbol at index.
+pub fn getGlobalName(self: *const Elf, index: u32) []const u8 {
+ const sym = self.global_symbols.items[index];
+ return self.shstrtab.get(sym.st_name).?;
+}
+
+pub fn getAtom(self: *const Elf, atom_index: Atom.Index) Atom {
+ assert(atom_index < self.atoms.items.len);
+ return self.atoms.items[atom_index];
+}
+
+pub fn getAtomPtr(self: *Elf, atom_index: Atom.Index) *Atom {
+ assert(atom_index < self.atoms.items.len);
+ return &self.atoms.items[atom_index];
+}
+
+/// Returns the atom index if the symbol references an atom.
+/// Returns null otherwise.
+pub fn getAtomIndexForSymbol(self: *Elf, sym_index: u32) ?Atom.Index {
+ return self.atom_by_index_table.get(sym_index);
+}
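
The new public accessors keep all pointer derivation behind the file object, so callers hold only indices. An illustrative round trip:

    if (elf_file.getAtomIndexForSymbol(sym_index)) |atom_index| {
        const atom = elf_file.getAtom(atom_index); // by-value copy
        const vaddr = atom.getSymbol(elf_file).st_value;
        log.debug("{s} lives at 0x{x}", .{ elf_file.getSymbolName(sym_index), vaddr });
    }
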
diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig
index caeb3bfbc5..4ab304ef71 100644
--- a/src/link/Elf/Atom.zig
+++ b/src/link/Elf/Atom.zig
@@ -4,7 +4,6 @@ const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;
-const Dwarf = @import("../Dwarf.zig");
const Elf = @import("../Elf.zig");
/// Each decl always gets a local symbol with the fully qualified name.
@@ -20,44 +19,33 @@ offset_table_index: u32,
/// Points to the previous and next neighbors, based on the symbol's virtual address.
/// This can be used to find, for example, the capacity of this `Atom`.
-prev: ?*Atom,
-next: ?*Atom,
+prev_index: ?Index,
+next_index: ?Index,
-dbg_info_atom: Dwarf.Atom,
+pub const Index = u32;
-pub const empty = Atom{
- .local_sym_index = 0,
- .offset_table_index = undefined,
- .prev = null,
- .next = null,
- .dbg_info_atom = undefined,
+pub const Reloc = struct {
+ target: u32,
+ offset: u64,
+ addend: u32,
+ prev_vaddr: u64,
};
-pub fn ensureInitialized(self: *Atom, elf_file: *Elf) !void {
- if (self.getSymbolIndex() != null) return; // Already initialized
- self.local_sym_index = try elf_file.allocateLocalSymbol();
- self.offset_table_index = try elf_file.allocateGotOffset();
- try elf_file.atom_by_index_table.putNoClobber(elf_file.base.allocator, self.local_sym_index, self);
-}
-
pub fn getSymbolIndex(self: Atom) ?u32 {
if (self.local_sym_index == 0) return null;
return self.local_sym_index;
}
-pub fn getSymbol(self: Atom, elf_file: *Elf) elf.Elf64_Sym {
- const sym_index = self.getSymbolIndex().?;
- return elf_file.local_symbols.items[sym_index];
+pub fn getSymbol(self: Atom, elf_file: *const Elf) elf.Elf64_Sym {
+ return elf_file.getSymbol(self.getSymbolIndex().?);
}
pub fn getSymbolPtr(self: Atom, elf_file: *Elf) *elf.Elf64_Sym {
- const sym_index = self.getSymbolIndex().?;
- return &elf_file.local_symbols.items[sym_index];
+ return elf_file.getSymbolPtr(self.getSymbolIndex().?);
}
-pub fn getName(self: Atom, elf_file: *Elf) []const u8 {
- const sym = self.getSymbol();
- return elf_file.getString(sym.st_name);
+pub fn getName(self: Atom, elf_file: *const Elf) []const u8 {
+ return elf_file.getSymbolName(self.getSymbolIndex().?);
}
pub fn getOffsetTableAddress(self: Atom, elf_file: *Elf) u64 {
@@ -72,9 +60,10 @@ pub fn getOffsetTableAddress(self: Atom, elf_file: *Elf) u64 {
/// Returns how much room there is to grow in virtual address space.
/// File offset relocation happens transparently, so it is not included in
/// this calculation.
-pub fn capacity(self: Atom, elf_file: *Elf) u64 {
+pub fn capacity(self: Atom, elf_file: *const Elf) u64 {
const self_sym = self.getSymbol(elf_file);
- if (self.next) |next| {
+ if (self.next_index) |next_index| {
+ const next = elf_file.getAtom(next_index);
const next_sym = next.getSymbol(elf_file);
return next_sym.st_value - self_sym.st_value;
} else {
@@ -83,9 +72,10 @@ pub fn capacity(self: Atom, elf_file: *Elf) u64 {
}
}
-pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
+pub fn freeListEligible(self: Atom, elf_file: *const Elf) bool {
// No need to keep a free list node for the last atom.
- const next = self.next orelse return false;
+ const next_index = self.next_index orelse return false;
+ const next = elf_file.getAtom(next_index);
const self_sym = self.getSymbol(elf_file);
const next_sym = next.getSymbol(elf_file);
const cap = next_sym.st_value - self_sym.st_value;
@@ -94,3 +84,17 @@ pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
const surplus = cap - ideal_cap;
return surplus >= Elf.min_text_capacity;
}
+
+pub fn addRelocation(elf_file: *Elf, atom_index: Index, reloc: Reloc) !void {
+ const gpa = elf_file.base.allocator;
+ const gop = try elf_file.relocs.getOrPut(gpa, atom_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{};
+ }
+ try gop.value_ptr.append(gpa, reloc);
+}
+
+pub fn freeRelocations(elf_file: *Elf, atom_index: Index) void {
+ var removed_relocs = elf_file.relocs.fetchRemove(atom_index);
+ if (removed_relocs) |*relocs| relocs.value.deinit(elf_file.base.allocator);
+}
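
Relocations are now keyed by `Atom.Index` instead of atom pointers, so entries stay valid across any reallocation of the backing atom array. A hedged usage sketch:

    // Record a relocation against an atom; the list is created on first use.
    try Atom.addRelocation(elf_file, atom_index, .{
        .target = target_sym_index, // local symbol index of the target
        .offset = 0,                // byte offset within this atom
        .addend = 0,
        .prev_vaddr = 0,            // last written address (bookkeeping)
    });
    // On a decl update the whole list is simply discarded and rebuilt:
    Atom.freeRelocations(elf_file, atom_index);
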
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 543cb473d7..24ef275c5b 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -66,7 +66,7 @@ const Section = struct {
// TODO is null here necessary, or can we do away with tracking via section
// size in incremental context?
- last_atom: ?*Atom = null,
+ last_atom_index: ?Atom.Index = null,
/// A list of atoms that have surplus capacity. This list can have false
/// positives, as functions grow and shrink over time, only sometimes being added
@@ -83,7 +83,7 @@ const Section = struct {
/// overcapacity can be negative. A simple way to have negative overcapacity is to
/// allocate a fresh atom, which will have ideal capacity, and then grow it
/// by 1 byte. It will then have -1 overcapacity.
- free_list: std.ArrayListUnmanaged(*Atom) = .{},
+ free_list: std.ArrayListUnmanaged(Atom.Index) = .{},
};
base: File,
@@ -140,8 +140,8 @@ locals_free_list: std.ArrayListUnmanaged(u32) = .{},
globals_free_list: std.ArrayListUnmanaged(u32) = .{},
dyld_stub_binder_index: ?u32 = null,
-dyld_private_atom: ?*Atom = null,
-stub_helper_preamble_atom: ?*Atom = null,
+dyld_private_atom_index: ?Atom.Index = null,
+stub_helper_preamble_atom_index: ?Atom.Index = null,
strtab: StringTable(.strtab) = .{},
@@ -164,10 +164,10 @@ segment_table_dirty: bool = false,
cold_start: bool = true,
/// List of atoms that are either synthetic or map directly to the Zig source program.
-managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
+atoms: std.ArrayListUnmanaged(Atom) = .{},
/// Table of atoms indexed by the symbol index.
-atom_by_index_table: std.AutoHashMapUnmanaged(u32, *Atom) = .{},
+atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},
/// Table of unnamed constants associated with a parent `Decl`.
/// We store them here so that we can free the constants whenever the `Decl`
@@ -210,11 +210,36 @@ bindings: BindingTable = .{},
/// this will be a table indexed by index into the list of Atoms.
lazy_bindings: BindingTable = .{},
-/// Table of Decls that are currently alive.
-/// We store them here so that we can properly dispose of any allocated
-/// memory within the atom in the incremental linker.
-/// TODO consolidate this.
-decls: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, ?u8) = .{},
+/// Table of tracked Decls.
+decls: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
+
+const DeclMetadata = struct {
+ atom: Atom.Index,
+ section: u8,
+ /// A list of all export aliases of this Decl.
+ /// TODO do we actually need this at all?
+ exports: std.ArrayListUnmanaged(u32) = .{},
+
+ fn getExport(m: DeclMetadata, macho_file: *const MachO, name: []const u8) ?u32 {
+ for (m.exports.items) |exp| {
+ if (mem.eql(u8, name, macho_file.getSymbolName(.{
+ .sym_index = exp,
+ .file = null,
+ }))) return exp;
+ }
+ return null;
+ }
+
+ fn getExportPtr(m: *DeclMetadata, macho_file: *MachO, name: []const u8) ?*u32 {
+ for (m.exports.items) |*exp| {
+ if (mem.eql(u8, name, macho_file.getSymbolName(.{
+ .sym_index = exp.*,
+ .file = null,
+ }))) return exp;
+ }
+ return null;
+ }
+};
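
Export lists are tiny (one entry per alias), so a by-name linear scan is a reasonable trade against maintaining another map. A typical lookup, mirroring how `updateDeclExports` in the ELF hunk above reuses an existing global slot:

    if (decl_metadata.getExport(macho_file, exp.options.name) == null) {
        // first time this alias is exported: allocate a global symbol slot
        // and record it, e.g. try decl_metadata.exports.append(gpa, sym_index);
    }
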
const Entry = struct {
target: SymbolWithLoc,
@@ -229,8 +254,8 @@ const Entry = struct {
return macho_file.getSymbolPtr(.{ .sym_index = entry.sym_index, .file = null });
}
- pub fn getAtom(entry: Entry, macho_file: *MachO) ?*Atom {
- return macho_file.getAtomForSymbol(.{ .sym_index = entry.sym_index, .file = null });
+ pub fn getAtomIndex(entry: Entry, macho_file: *MachO) ?Atom.Index {
+ return macho_file.getAtomIndexForSymbol(.{ .sym_index = entry.sym_index, .file = null });
}
pub fn getName(entry: Entry, macho_file: *MachO) []const u8 {
@@ -238,10 +263,10 @@ const Entry = struct {
}
};
-const BindingTable = std.AutoArrayHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(Atom.Binding));
-const UnnamedConstTable = std.AutoArrayHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(*Atom));
-const RebaseTable = std.AutoArrayHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(u32));
-const RelocationTable = std.AutoArrayHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(Relocation));
+const BindingTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Atom.Binding));
+const UnnamedConstTable = std.AutoArrayHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Atom.Index));
+const RebaseTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(u32));
+const RelocationTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Relocation));
const PendingUpdate = union(enum) {
resolve_undef: u32,
@@ -286,10 +311,6 @@ pub const default_pagezero_vmsize: u64 = 0x100000000;
/// potential future extensions.
pub const default_headerpad_size: u32 = 0x1000;
-pub const Export = struct {
- sym_index: ?u32 = null,
-};
-
pub fn openPath(allocator: Allocator, options: link.Options) !*MachO {
assert(options.target.ofmt == .macho);
@@ -547,8 +568,8 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
try self.allocateSpecialSymbols();
- for (self.relocs.keys()) |atom| {
- try atom.resolveRelocations(self);
+ for (self.relocs.keys()) |atom_index| {
+ try Atom.resolveRelocations(self, atom_index);
}
if (build_options.enable_logging) {
@@ -999,18 +1020,19 @@ pub fn parseDependentLibs(self: *MachO, syslibroot: ?[]const u8, dependent_libs:
}
}
-pub fn writeAtom(self: *MachO, atom: *Atom, code: []const u8) !void {
+pub fn writeAtom(self: *MachO, atom_index: Atom.Index, code: []const u8) !void {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const section = self.sections.get(sym.n_sect - 1);
const file_offset = section.header.offset + sym.n_value - section.header.addr;
log.debug("writing atom for symbol {s} at file offset 0x{x}", .{ atom.getName(self), file_offset });
try self.base.file.?.pwriteAll(code, file_offset);
- try atom.resolveRelocations(self);
+ try Atom.resolveRelocations(self, atom_index);
}
-fn writePtrWidthAtom(self: *MachO, atom: *Atom) !void {
+fn writePtrWidthAtom(self: *MachO, atom_index: Atom.Index) !void {
var buffer: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64);
- try self.writeAtom(atom, &buffer);
+ try self.writeAtom(atom_index, &buffer);
}
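
writeAtom converts the symbol's virtual address into a file offset through the owning section header (offset + n_value - addr). A worked sketch of that arithmetic with invented numbers:

    const std = @import("std");

    // sect_offset/sect_addr mimic section.header.offset/.addr; the values
    // are invented but shaped like a __TEXT section at the usual base.
    fn fileOffset(sect_offset: u64, sect_addr: u64, sym_value: u64) u64 {
        return sect_offset + (sym_value - sect_addr);
    }

    test "virtual address maps into the file through the section" {
        try std.testing.expect(fileOffset(0x4000, 0x100004000, 0x100004010) == 0x4010);
    }
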
fn markRelocsDirtyByTarget(self: *MachO, target: SymbolWithLoc) void {
@@ -1026,7 +1048,8 @@ fn markRelocsDirtyByTarget(self: *MachO, target: SymbolWithLoc) void {
fn markRelocsDirtyByAddress(self: *MachO, addr: u64) void {
for (self.relocs.values()) |*relocs| {
for (relocs.items) |*reloc| {
- const target_atom = reloc.getTargetAtom(self) orelse continue;
+ const target_atom_index = reloc.getTargetAtomIndex(self) orelse continue;
+ const target_atom = self.getAtom(target_atom_index);
const target_sym = target_atom.getSymbol(self);
if (target_sym.n_value < addr) continue;
reloc.dirty = true;
@@ -1053,26 +1076,38 @@ pub fn allocateSpecialSymbols(self: *MachO) !void {
}
}
-pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !*Atom {
+pub fn createAtom(self: *MachO) !Atom.Index {
const gpa = self.base.allocator;
+ const atom_index = @intCast(Atom.Index, self.atoms.items.len);
+ const atom = try self.atoms.addOne(gpa);
+ const sym_index = try self.allocateSymbol();
+ try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
+ atom.* = .{
+ .sym_index = sym_index,
+ .file = null,
+ .size = 0,
+ .alignment = 0,
+ .prev_index = null,
+ .next_index = null,
+ };
+ log.debug("creating ATOM(%{d}) at index {d}", .{ sym_index, atom_index });
+ return atom_index;
+}
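
createAtom now centralizes what every call site used to do by hand: reserve the next slot in the flat list, allocate a symbol, and record the symbol-to-atom mapping. A self-contained sketch of the same pattern, with allocateSymbol modelled as a plain counter (the real one also recycles freed indices):

    const std = @import("std");

    const Atom = struct {
        sym_index: u32,
        size: u64 = 0,
        alignment: u32 = 0,
        prev_index: ?u32 = null,
        next_index: ?u32 = null,
    };

    fn createAtom(
        gpa: std.mem.Allocator,
        atoms: *std.ArrayListUnmanaged(Atom),
        atom_by_index_table: *std.AutoHashMapUnmanaged(u32, u32),
        next_sym_index: *u32,
    ) !u32 {
        const atom_index = @intCast(u32, atoms.items.len);
        const sym_index = next_sym_index.*;
        next_sym_index.* += 1;
        try atoms.append(gpa, .{ .sym_index = sym_index });
        // The reverse mapping lets relocation code find the atom that a
        // given symbol index belongs to.
        try atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
        return atom_index;
    }

    test "createAtom registers the symbol-to-atom mapping" {
        const gpa = std.testing.allocator;
        var atoms = std.ArrayListUnmanaged(Atom){};
        defer atoms.deinit(gpa);
        var table = std.AutoHashMapUnmanaged(u32, u32){};
        defer table.deinit(gpa);
        var next_sym: u32 = 0;

        const idx = try createAtom(gpa, &atoms, &table, &next_sym);
        try std.testing.expect(table.get(atoms.items[idx].sym_index).? == idx);
    }
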
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !Atom.Index {
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
- errdefer gpa.destroy(atom);
-
- try self.managed_atoms.append(gpa, atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
sym.n_sect = self.got_section_index.? + 1;
- sym.n_value = try self.allocateAtom(atom, atom.size, @alignOf(u64));
+ sym.n_value = try self.allocateAtom(atom_index, atom.size, @alignOf(u64));
log.debug("allocated GOT atom at 0x{x}", .{sym.n_value});
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = switch (self.base.options.target.cpu.arch) {
.aarch64 => @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED),
.x86_64 => @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED),
@@ -1087,45 +1122,39 @@ pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !*Atom {
const target_sym = self.getSymbol(target);
if (target_sym.undf()) {
- try atom.addBinding(self, .{
+ try Atom.addBinding(self, atom_index, .{
.target = self.getGlobal(self.getSymbolName(target)).?,
.offset = 0,
});
} else {
- try atom.addRebase(self, 0);
+ try Atom.addRebase(self, atom_index, 0);
}
- return atom;
+ return atom_index;
}
pub fn createDyldPrivateAtom(self: *MachO) !void {
if (self.dyld_stub_binder_index == null) return;
- if (self.dyld_private_atom != null) return;
-
- const gpa = self.base.allocator;
+ if (self.dyld_private_atom_index != null) return;
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
sym.n_sect = self.data_section_index.? + 1;
- self.dyld_private_atom = atom;
+ self.dyld_private_atom_index = atom_index;
- try self.managed_atoms.append(gpa, atom);
-
- sym.n_value = try self.allocateAtom(atom, atom.size, @alignOf(u64));
+ sym.n_value = try self.allocateAtom(atom_index, atom.size, @alignOf(u64));
log.debug("allocated dyld_private atom at 0x{x}", .{sym.n_value});
- try self.writePtrWidthAtom(atom);
+ try self.writePtrWidthAtom(atom_index);
}
pub fn createStubHelperPreambleAtom(self: *MachO) !void {
if (self.dyld_stub_binder_index == null) return;
- if (self.stub_helper_preamble_atom != null) return;
+ if (self.stub_helper_preamble_atom_index != null) return;
const gpa = self.base.allocator;
const arch = self.base.options.target.cpu.arch;
@@ -1134,22 +1163,23 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
.aarch64 => 6 * @sizeOf(u32),
else => unreachable,
};
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = size;
atom.alignment = switch (arch) {
.x86_64 => 1,
.aarch64 => @alignOf(u32),
else => unreachable,
};
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
sym.n_sect = self.stub_helper_section_index.? + 1;
- const dyld_private_sym_index = self.dyld_private_atom.?.getSymbolIndex().?;
+ const dyld_private_sym_index = if (self.dyld_private_atom_index) |dyld_index|
+ self.getAtom(dyld_index).getSymbolIndex().?
+ else
+ unreachable;
const code = try gpa.alloc(u8, size);
defer gpa.free(code);
@@ -1168,7 +1198,7 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
code[9] = 0xff;
code[10] = 0x25;
- try atom.addRelocations(self, 2, .{ .{
+ try Atom.addRelocations(self, atom_index, 2, .{ .{
.type = @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
.target = .{ .sym_index = dyld_private_sym_index, .file = null },
.offset = 3,
@@ -1208,7 +1238,7 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
// br x16
mem.writeIntLittle(u32, code[20..][0..4], aarch64.Instruction.br(.x16).toU32());
- try atom.addRelocations(self, 4, .{ .{
+ try Atom.addRelocations(self, atom_index, 4, .{ .{
.type = @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_PAGE21),
.target = .{ .sym_index = dyld_private_sym_index, .file = null },
.offset = 0,
@@ -1241,16 +1271,14 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
else => unreachable,
}
- self.stub_helper_preamble_atom = atom;
+ self.stub_helper_preamble_atom_index = atom_index;
- try self.managed_atoms.append(gpa, atom);
-
- sym.n_value = try self.allocateAtom(atom, size, atom.alignment);
+ sym.n_value = try self.allocateAtom(atom_index, size, atom.alignment);
log.debug("allocated stub preamble atom at 0x{x}", .{sym.n_value});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
}
-pub fn createStubHelperAtom(self: *MachO) !*Atom {
+pub fn createStubHelperAtom(self: *MachO) !Atom.Index {
const gpa = self.base.allocator;
const arch = self.base.options.target.cpu.arch;
const size: u4 = switch (arch) {
@@ -1258,16 +1286,14 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
.aarch64 => 3 * @sizeOf(u32),
else => unreachable,
};
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = size;
atom.alignment = switch (arch) {
.x86_64 => 1,
.aarch64 => @alignOf(u32),
else => unreachable,
};
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
@@ -1277,6 +1303,11 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
defer gpa.free(code);
mem.set(u8, code, 0);
+ const stub_helper_preamble_atom_sym_index = if (self.stub_helper_preamble_atom_index) |stub_index|
+ self.getAtom(stub_index).getSymbolIndex().?
+ else
+ unreachable;
+
switch (arch) {
.x86_64 => {
// pushq
@@ -1285,9 +1316,9 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
// jmpq
code[5] = 0xe9;
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
- .target = .{ .sym_index = self.stub_helper_preamble_atom.?.getSymbolIndex().?, .file = null },
+ .target = .{ .sym_index = stub_helper_preamble_atom_sym_index, .file = null },
.offset = 6,
.addend = 0,
.pcrel = true,
@@ -1308,9 +1339,9 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.b(0).toU32());
// Next 4 bytes 8..12 are just a placeholder populated in `populateLazyBindOffsetsInStubHelper`.
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_BRANCH26),
- .target = .{ .sym_index = self.stub_helper_preamble_atom.?.getSymbolIndex().?, .file = null },
+ .target = .{ .sym_index = stub_helper_preamble_atom_sym_index, .file = null },
.offset = 4,
.addend = 0,
.pcrel = true,
@@ -1320,29 +1351,24 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
else => unreachable,
}
- try self.managed_atoms.append(gpa, atom);
-
- sym.n_value = try self.allocateAtom(atom, size, atom.alignment);
+ sym.n_value = try self.allocateAtom(atom_index, size, atom.alignment);
log.debug("allocated stub helper atom at 0x{x}", .{sym.n_value});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
- return atom;
+ return atom_index;
}
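
The x86_64 stub helper entry is a tiny trampoline: push an immediate (patched later with the lazy-bind offset) and jump back to the shared preamble via the branch relocation at offset 6. A sketch of the byte layout, assuming the 10-byte shape implied by the offsets above:

    const std = @import("std");

    fn writeStubHelperX8664(code: *[10]u8) void {
        std.mem.set(u8, code, 0);
        // pushq imm32: the immediate is filled in later by
        // populateLazyBindOffsetsInStubHelper with the lazy-bind offset.
        code[0] = 0x68;
        // jmpq rel32: the displacement is patched when the BRANCH reloc
        // at offset 6 resolves against the preamble symbol.
        code[5] = 0xe9;
    }

    test "opcodes sit at the patch-relative positions" {
        var code: [10]u8 = undefined;
        writeStubHelperX8664(&code);
        try std.testing.expect(code[0] == 0x68 and code[5] == 0xe9);
    }
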
-pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWithLoc) !*Atom {
- const gpa = self.base.allocator;
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWithLoc) !Atom.Index {
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
sym.n_sect = self.la_symbol_ptr_section_index.? + 1;
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = switch (self.base.options.target.cpu.arch) {
.aarch64 => @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED),
.x86_64 => @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED),
@@ -1354,22 +1380,20 @@ pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWi
.pcrel = false,
.length = 3,
});
- try atom.addRebase(self, 0);
- try atom.addLazyBinding(self, .{
+ try Atom.addRebase(self, atom_index, 0);
+ try Atom.addLazyBinding(self, atom_index, .{
.target = self.getGlobal(self.getSymbolName(target)).?,
.offset = 0,
});
- try self.managed_atoms.append(gpa, atom);
-
- sym.n_value = try self.allocateAtom(atom, atom.size, @alignOf(u64));
+ sym.n_value = try self.allocateAtom(atom_index, atom.size, @alignOf(u64));
log.debug("allocated lazy pointer atom at 0x{x} ({s})", .{ sym.n_value, self.getSymbolName(target) });
- try self.writePtrWidthAtom(atom);
+ try self.writePtrWidthAtom(atom_index);
- return atom;
+ return atom_index;
}
-pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
+pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !Atom.Index {
const gpa = self.base.allocator;
const arch = self.base.options.target.cpu.arch;
const size: u4 = switch (arch) {
@@ -1377,9 +1401,8 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
.aarch64 => 3 * @sizeOf(u32),
else => unreachable, // unhandled architecture type
};
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = size;
atom.alignment = switch (arch) {
.x86_64 => 1,
@@ -1387,7 +1410,6 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
else => unreachable, // unhandled architecture type
};
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
@@ -1403,7 +1425,7 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
code[0] = 0xff;
code[1] = 0x25;
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
.target = .{ .sym_index = laptr_sym_index, .file = null },
.offset = 2,
@@ -1424,7 +1446,7 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
// br x16
mem.writeIntLittle(u32, code[8..12], aarch64.Instruction.br(.x16).toU32());
- try atom.addRelocations(self, 2, .{
+ try Atom.addRelocations(self, atom_index, 2, .{
.{
.type = @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_PAGE21),
.target = .{ .sym_index = laptr_sym_index, .file = null },
@@ -1446,13 +1468,11 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
else => unreachable,
}
- try self.managed_atoms.append(gpa, atom);
-
- sym.n_value = try self.allocateAtom(atom, size, atom.alignment);
+ sym.n_value = try self.allocateAtom(atom_index, size, atom.alignment);
log.debug("allocated stub atom at 0x{x}", .{sym.n_value});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
- return atom;
+ return atom_index;
}
pub fn createMhExecuteHeaderSymbol(self: *MachO) !void {
@@ -1586,9 +1606,12 @@ pub fn resolveSymbolsInDylibs(self: *MachO) !void {
if (self.stubs_table.contains(global)) break :blk;
const stub_index = try self.allocateStubEntry(global);
- const stub_helper_atom = try self.createStubHelperAtom();
- const laptr_atom = try self.createLazyPointerAtom(stub_helper_atom.getSymbolIndex().?, global);
- const stub_atom = try self.createStubAtom(laptr_atom.getSymbolIndex().?);
+ const stub_helper_atom_index = try self.createStubHelperAtom();
+ const stub_helper_atom = self.getAtom(stub_helper_atom_index);
+ const laptr_atom_index = try self.createLazyPointerAtom(stub_helper_atom.getSymbolIndex().?, global);
+ const laptr_atom = self.getAtom(laptr_atom_index);
+ const stub_atom_index = try self.createStubAtom(laptr_atom.getSymbolIndex().?);
+ const stub_atom = self.getAtom(stub_atom_index);
self.stubs.items[stub_index].sym_index = stub_atom.getSymbolIndex().?;
self.markRelocsDirtyByTarget(global);
}
@@ -1686,10 +1709,11 @@ pub fn resolveDyldStubBinder(self: *MachO) !void {
// Add dyld_stub_binder as the final GOT entry.
const got_index = try self.allocateGotEntry(global);
- const got_atom = try self.createGotAtom(global);
+ const got_atom_index = try self.createGotAtom(global);
+ const got_atom = self.getAtom(got_atom_index);
self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
- try self.writePtrWidthAtom(got_atom);
+ try self.writePtrWidthAtom(got_atom_index);
}
pub fn deinit(self: *MachO) void {
@@ -1739,12 +1763,12 @@ pub fn deinit(self: *MachO) void {
}
self.sections.deinit(gpa);
- for (self.managed_atoms.items) |atom| {
- gpa.destroy(atom);
- }
- self.managed_atoms.deinit(gpa);
+ self.atoms.deinit(gpa);
if (self.base.options.module) |_| {
+ for (self.decls.values()) |*m| {
+ m.exports.deinit(gpa);
+ }
self.decls.deinit(gpa);
} else {
assert(self.decls.count() == 0);
@@ -1778,14 +1802,14 @@ pub fn deinit(self: *MachO) void {
self.lazy_bindings.deinit(gpa);
}
-fn freeAtom(self: *MachO, atom: *Atom) void {
- log.debug("freeAtom {*}", .{atom});
-
+fn freeAtom(self: *MachO, atom_index: Atom.Index) void {
const gpa = self.base.allocator;
+ log.debug("freeAtom {d}", .{atom_index});
// Remove any relocs and base relocs associated with this Atom
- self.freeRelocationsForAtom(atom);
+ Atom.freeRelocations(self, atom_index);
+ const atom = self.getAtom(atom_index);
const sect_id = atom.getSymbol(self).n_sect - 1;
const free_list = &self.sections.items(.free_list)[sect_id];
var already_have_free_list_node = false;
@@ -1793,45 +1817,46 @@ fn freeAtom(self: *MachO, atom: *Atom) void {
var i: usize = 0;
// TODO turn free_list into a hash map
while (i < free_list.items.len) {
- if (free_list.items[i] == atom) {
+ if (free_list.items[i] == atom_index) {
_ = free_list.swapRemove(i);
continue;
}
- if (free_list.items[i] == atom.prev) {
+ if (free_list.items[i] == atom.prev_index) {
already_have_free_list_node = true;
}
i += 1;
}
}
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
- if (maybe_last_atom.*) |last_atom| {
- if (last_atom == atom) {
- if (atom.prev) |prev| {
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
+ if (maybe_last_atom_index.*) |last_atom_index| {
+ if (last_atom_index == atom_index) {
+ if (atom.prev_index) |prev_index| {
// TODO shrink the section size here
- maybe_last_atom.* = prev;
+ maybe_last_atom_index.* = prev_index;
} else {
- maybe_last_atom.* = null;
+ maybe_last_atom_index.* = null;
}
}
}
- if (atom.prev) |prev| {
- prev.next = atom.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
- if (!already_have_free_list_node and prev.freeListEligible(self)) {
+ if (!already_have_free_list_node and prev.*.freeListEligible(self)) {
// The free list is heuristics, it doesn't have to be perfect, so we can ignore
// the OOM here.
- free_list.append(gpa, prev) catch {};
+ free_list.append(gpa, prev_index) catch {};
}
} else {
- atom.prev = null;
+ self.getAtomPtr(atom_index).prev_index = null;
}
- if (atom.next) |next| {
- next.prev = atom.prev;
+ if (atom.next_index) |next_index| {
+ self.getAtomPtr(next_index).prev_index = atom.prev_index;
} else {
- atom.next = null;
+ self.getAtomPtr(atom_index).next_index = null;
}
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
@@ -1859,27 +1884,24 @@ fn freeAtom(self: *MachO, atom: *Atom) void {
self.locals.items[sym_index].n_type = 0;
_ = self.atom_by_index_table.remove(sym_index);
log.debug(" adding local symbol index {d} to free list", .{sym_index});
- atom.sym_index = 0;
-
- if (self.d_sym) |*d_sym| {
- d_sym.dwarf.freeAtom(&atom.dbg_info_atom);
- }
+ self.getAtomPtr(atom_index).sym_index = 0;
}
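
freeAtom's list surgery is the classic doubly-linked unlink, just expressed through indices and getAtomPtr instead of pointers. A standalone sketch of the unlink step (free-list and symbol bookkeeping omitted):

    const std = @import("std");

    const Atom = struct {
        prev_index: ?u32 = null,
        next_index: ?u32 = null,
    };

    // The intrusive list lives inside the atoms slice itself, so
    // unlinking is a matter of rewriting the neighbours' indices.
    fn unlink(atoms: []Atom, atom_index: u32) void {
        const atom = atoms[atom_index];
        if (atom.prev_index) |prev| atoms[prev].next_index = atom.next_index;
        if (atom.next_index) |next| atoms[next].prev_index = atom.prev_index;
        atoms[atom_index].prev_index = null;
        atoms[atom_index].next_index = null;
    }

    test "unlink keeps the neighbours connected" {
        var atoms = [_]Atom{
            .{ .next_index = 1 },
            .{ .prev_index = 0, .next_index = 2 },
            .{ .prev_index = 1 },
        };
        unlink(&atoms, 1);
        try std.testing.expect(atoms[0].next_index.? == 2);
        try std.testing.expect(atoms[2].prev_index.? == 0);
    }
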
-fn shrinkAtom(self: *MachO, atom: *Atom, new_block_size: u64) void {
+fn shrinkAtom(self: *MachO, atom_index: Atom.Index, new_block_size: u64) void {
_ = self;
- _ = atom;
+ _ = atom_index;
_ = new_block_size;
// TODO check the new capacity, and if it crosses the size threshold into a big enough
// capacity, insert a free list node for it.
}
-fn growAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !u64 {
+fn growAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const align_ok = mem.alignBackwardGeneric(u64, sym.n_value, alignment) == sym.n_value;
const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
if (!need_realloc) return sym.n_value;
- return self.allocateAtom(atom, new_atom_size, alignment);
+ return self.allocateAtom(atom_index, new_atom_size, alignment);
}
pub fn allocateSymbol(self: *MachO) !u32 {
@@ -1986,15 +2008,12 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
- const atom = &decl.link.macho;
- try atom.ensureInitialized(self);
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (gop.found_existing) {
- self.freeUnnamedConsts(decl_index);
- self.freeRelocationsForAtom(atom);
- } else {
- gop.value_ptr.* = null;
- }
+
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ self.freeUnnamedConsts(decl_index);
+ Atom.freeRelocations(self, atom_index);
+
+ const atom = self.getAtom(atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -2024,13 +2043,7 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
const addr = try self.updateDeclCode(decl_index, code);
if (decl_state) |*ds| {
- try self.d_sym.?.dwarf.commitDeclState(
- module,
- decl_index,
- addr,
- decl.link.macho.size,
- ds,
- );
+ try self.d_sym.?.dwarf.commitDeclState(module, decl_index, addr, atom.size, ds);
}
// Since we updated the vaddr and the size, each corresponding export symbol also
@@ -2065,14 +2078,10 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu
log.debug("allocating symbol indexes for {?s}", .{name});
- const atom = try gpa.create(Atom);
- errdefer gpa.destroy(atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
- try self.managed_atoms.append(gpa, atom);
+ const atom_index = try self.createAtom();
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .none, .{
- .parent_atom_index = atom.getSymbolIndex().?,
+ .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
});
const code = switch (res) {
.ok => code_buffer.items,
@@ -2085,24 +2094,25 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu
};
const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
+ const atom = self.getAtomPtr(atom_index);
atom.size = code.len;
atom.alignment = required_alignment;
// TODO: work out logic for disambiguating functions from function pointers
- // const sect_id = self.getDeclOutputSection(decl);
+ // const sect_id = self.getDeclOutputSection(decl_index);
const sect_id = self.data_const_section_index.?;
const symbol = atom.getSymbolPtr(self);
symbol.n_strx = name_str_index;
symbol.n_type = macho.N_SECT;
symbol.n_sect = sect_id + 1;
- symbol.n_value = try self.allocateAtom(atom, code.len, required_alignment);
- errdefer self.freeAtom(atom);
+ symbol.n_value = try self.allocateAtom(atom_index, code.len, required_alignment);
+ errdefer self.freeAtom(atom_index);
- try unnamed_consts.append(gpa, atom);
+ try unnamed_consts.append(gpa, atom_index);
log.debug("allocated atom for {?s} at 0x{x}", .{ name, symbol.n_value });
log.debug(" (required alignment 0x{x})", .{required_alignment});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
return atom.getSymbolIndex().?;
}
@@ -2129,14 +2139,9 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
}
}
- const atom = &decl.link.macho;
- try atom.ensureInitialized(self);
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (gop.found_existing) {
- self.freeRelocationsForAtom(atom);
- } else {
- gop.value_ptr.* = null;
- }
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ Atom.freeRelocations(self, atom_index);
+ const atom = self.getAtom(atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -2155,14 +2160,14 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
}, &code_buffer, .{
.dwarf = ds,
}, .{
- .parent_atom_index = decl.link.macho.getSymbolIndex().?,
+ .parent_atom_index = atom.getSymbolIndex().?,
})
else
try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none, .{
- .parent_atom_index = decl.link.macho.getSymbolIndex().?,
+ .parent_atom_index = atom.getSymbolIndex().?,
});
const code = switch (res) {
@@ -2176,13 +2181,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
const addr = try self.updateDeclCode(decl_index, code);
if (decl_state) |*ds| {
- try self.d_sym.?.dwarf.commitDeclState(
- module,
- decl_index,
- addr,
- decl.link.macho.size,
- ds,
- );
+ try self.d_sym.?.dwarf.commitDeclState(module, decl_index, addr, atom.size, ds);
}
// Since we updated the vaddr and the size, each corresponding export symbol also
@@ -2190,7 +2189,20 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}
-fn getDeclOutputSection(self: *MachO, decl: *Module.Decl) u8 {
+pub fn getOrCreateAtomForDecl(self: *MachO, decl_index: Module.Decl.Index) !Atom.Index {
+ const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{
+ .atom = try self.createAtom(),
+ .section = self.getDeclOutputSection(decl_index),
+ .exports = .{},
+ };
+ }
+ return gop.value_ptr.atom;
+}
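
getOrCreateAtomForDecl leans on getOrPut so the atom is created exactly once per Decl and every later update reuses it. A minimal sketch with invented Metadata and a counter in place of createAtom:

    const std = @import("std");

    const Metadata = struct { atom: u32, section: u8 };

    fn getOrCreateAtomForDecl(
        gpa: std.mem.Allocator,
        decls: *std.AutoArrayHashMapUnmanaged(u32, Metadata),
        decl_index: u32,
        next_atom: *u32,
    ) !u32 {
        const gop = try decls.getOrPut(gpa, decl_index);
        if (!gop.found_existing) {
            // First sighting of this Decl: create its atom once; later
            // updateFunc/updateDecl calls reuse it.
            gop.value_ptr.* = .{ .atom = next_atom.*, .section = 0 };
            next_atom.* += 1;
        }
        return gop.value_ptr.atom;
    }

    test "repeated calls hand back the same atom" {
        const gpa = std.testing.allocator;
        var decls = std.AutoArrayHashMapUnmanaged(u32, Metadata){};
        defer decls.deinit(gpa);
        var next_atom: u32 = 0;

        const a = try getOrCreateAtomForDecl(gpa, &decls, 42, &next_atom);
        const b = try getOrCreateAtomForDecl(gpa, &decls, 42, &next_atom);
        try std.testing.expect(a == b and next_atom == 1);
    }
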
+
+fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 {
+ const decl = self.base.options.module.?.declPtr(decl_index);
const ty = decl.ty;
const val = decl.val;
const zig_ty = ty.zigTypeTag();
@@ -2341,13 +2353,11 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
const sym_name = try decl.getFullyQualifiedName(mod);
defer self.base.allocator.free(sym_name);
- const atom = &decl.link.macho;
- const sym_index = atom.getSymbolIndex().?; // Atom was not initialized
- const decl_ptr = self.decls.getPtr(decl_index).?;
- if (decl_ptr.* == null) {
- decl_ptr.* = self.getDeclOutputSection(decl);
- }
- const sect_id = decl_ptr.*.?;
+ const decl_metadata = self.decls.get(decl_index).?;
+ const atom_index = decl_metadata.atom;
+ const atom = self.getAtom(atom_index);
+ const sym_index = atom.getSymbolIndex().?;
+ const sect_id = decl_metadata.section;
const code_len = code.len;
if (atom.size != 0) {
@@ -2357,11 +2367,11 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
sym.n_sect = sect_id + 1;
sym.n_desc = 0;
- const capacity = decl.link.macho.capacity(self);
+ const capacity = atom.capacity(self);
const need_realloc = code_len > capacity or !mem.isAlignedGeneric(u64, sym.n_value, required_alignment);
if (need_realloc) {
- const vaddr = try self.growAtom(atom, code_len, required_alignment);
+ const vaddr = try self.growAtom(atom_index, code_len, required_alignment);
log.debug("growing {s} and moving from 0x{x} to 0x{x}", .{ sym_name, sym.n_value, vaddr });
log.debug(" (required alignment 0x{x})", .{required_alignment});
@@ -2369,19 +2379,19 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
sym.n_value = vaddr;
log.debug(" (updating GOT entry)", .{});
const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
- const got_atom = self.getGotAtomForSymbol(got_target).?;
+ const got_atom_index = self.getGotAtomIndexForSymbol(got_target).?;
self.markRelocsDirtyByTarget(got_target);
- try self.writePtrWidthAtom(got_atom);
+ try self.writePtrWidthAtom(got_atom_index);
}
} else if (code_len < atom.size) {
- self.shrinkAtom(atom, code_len);
- } else if (atom.next == null) {
+ self.shrinkAtom(atom_index, code_len);
+ } else if (atom.next_index == null) {
const header = &self.sections.items(.header)[sect_id];
const segment = self.getSegment(sect_id);
const needed_size = (sym.n_value + code_len) - segment.vmaddr;
header.size = needed_size;
}
- atom.size = code_len;
+ self.getAtomPtr(atom_index).size = code_len;
} else {
const name_str_index = try self.strtab.insert(gpa, sym_name);
const sym = atom.getSymbolPtr(self);
@@ -2390,32 +2400,32 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
sym.n_sect = sect_id + 1;
sym.n_desc = 0;
- const vaddr = try self.allocateAtom(atom, code_len, required_alignment);
- errdefer self.freeAtom(atom);
+ const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
+ errdefer self.freeAtom(atom_index);
log.debug("allocated atom for {s} at 0x{x}", .{ sym_name, vaddr });
log.debug(" (required alignment 0x{x})", .{required_alignment});
- atom.size = code_len;
+ self.getAtomPtr(atom_index).size = code_len;
sym.n_value = vaddr;
const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
const got_index = try self.allocateGotEntry(got_target);
- const got_atom = try self.createGotAtom(got_target);
+ const got_atom_index = try self.createGotAtom(got_target);
+ const got_atom = self.getAtom(got_atom_index);
self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
- try self.writePtrWidthAtom(got_atom);
+ try self.writePtrWidthAtom(got_atom_index);
}
self.markRelocsDirtyByTarget(atom.getSymbolWithLoc());
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
return atom.getSymbol(self).n_value;
}
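
updateDeclCode's sizing logic reduces to a three-way policy: grow (possibly moving the atom and refreshing its GOT entry) when the new code is too big or misaligned, shrink when it got smaller, otherwise write in place. A sketch of just that decision, with invented parameters:

    const std = @import("std");
    const mem = std.mem;

    const Action = enum { grow, shrink, in_place };

    fn sizeAction(addr: u64, capacity: u64, old_size: u64, new_size: u64, alignment: u64) Action {
        const aligned = mem.isAlignedGeneric(u64, addr, alignment);
        if (new_size > capacity or !aligned) return .grow;
        if (new_size < old_size) return .shrink;
        return .in_place;
    }

    test "grow on overflow or misalignment, shrink on smaller code" {
        try std.testing.expect(sizeAction(0x1000, 64, 32, 80, 16) == .grow);
        try std.testing.expect(sizeAction(0x1004, 64, 32, 32, 16) == .grow);
        try std.testing.expect(sizeAction(0x1000, 64, 32, 16, 16) == .shrink);
        try std.testing.expect(sizeAction(0x1000, 64, 32, 48, 16) == .in_place);
    }
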
-pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void {
- _ = module;
+pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl_index: Module.Decl.Index) !void {
if (self.d_sym) |*d_sym| {
- try d_sym.dwarf.updateDeclLineNumber(decl);
+ try d_sym.dwarf.updateDeclLineNumber(module, decl_index);
}
}
@@ -2432,22 +2442,17 @@ pub fn updateDeclExports(
if (self.llvm_object) |llvm_object|
return llvm_object.updateDeclExports(module, decl_index, exports);
}
+
const tracy = trace(@src());
defer tracy.end();
const gpa = self.base.allocator;
const decl = module.declPtr(decl_index);
- const atom = &decl.link.macho;
-
- if (atom.getSymbolIndex() == null) return;
-
- const gop = try self.decls.getOrPut(gpa, decl_index);
- if (!gop.found_existing) {
- gop.value_ptr.* = self.getDeclOutputSection(decl);
- }
-
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const atom = self.getAtom(atom_index);
const decl_sym = atom.getSymbol(self);
+ const decl_metadata = self.decls.getPtr(decl_index).?;
for (exports) |exp| {
const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{exp.options.name});
@@ -2485,9 +2490,9 @@ pub fn updateDeclExports(
continue;
}
- const sym_index = exp.link.macho.sym_index orelse blk: {
+ const sym_index = decl_metadata.getExport(self, exp_name) orelse blk: {
const sym_index = try self.allocateSymbol();
- exp.link.macho.sym_index = sym_index;
+ try decl_metadata.exports.append(gpa, sym_index);
break :blk sym_index;
};
const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
@@ -2535,16 +2540,18 @@ pub fn updateDeclExports(
}
}
-pub fn deleteExport(self: *MachO, exp: Export) void {
+pub fn deleteDeclExport(self: *MachO, decl_index: Module.Decl.Index, name: []const u8) Allocator.Error!void {
if (self.llvm_object) |_| return;
- const sym_index = exp.sym_index orelse return;
+ const metadata = self.decls.getPtr(decl_index) orelse return;
const gpa = self.base.allocator;
+ const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{name});
+ defer gpa.free(exp_name);
+ const sym_index = metadata.getExportPtr(self, exp_name) orelse return;
- const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
+ const sym_loc = SymbolWithLoc{ .sym_index = sym_index.*, .file = null };
const sym = self.getSymbolPtr(sym_loc);
- const sym_name = self.getSymbolName(sym_loc);
- log.debug("deleting export '{s}'", .{sym_name});
+ log.debug("deleting export '{s}'", .{exp_name});
assert(sym.sect() and sym.ext());
sym.* = .{
.n_strx = 0,
@@ -2553,9 +2560,9 @@ pub fn deleteExport(self: *MachO, exp: Export) void {
.n_desc = 0,
.n_value = 0,
};
- self.locals_free_list.append(gpa, sym_index) catch {};
+ self.locals_free_list.append(gpa, sym_index.*) catch {};
- if (self.resolver.fetchRemove(sym_name)) |entry| {
+ if (self.resolver.fetchRemove(exp_name)) |entry| {
defer gpa.free(entry.key);
self.globals_free_list.append(gpa, entry.value) catch {};
self.globals.items[entry.value] = .{
@@ -2563,17 +2570,8 @@ pub fn deleteExport(self: *MachO, exp: Export) void {
.file = null,
};
}
-}
-fn freeRelocationsForAtom(self: *MachO, atom: *Atom) void {
- var removed_relocs = self.relocs.fetchOrderedRemove(atom);
- if (removed_relocs) |*relocs| relocs.value.deinit(self.base.allocator);
- var removed_rebases = self.rebases.fetchOrderedRemove(atom);
- if (removed_rebases) |*rebases| rebases.value.deinit(self.base.allocator);
- var removed_bindings = self.bindings.fetchOrderedRemove(atom);
- if (removed_bindings) |*bindings| bindings.value.deinit(self.base.allocator);
- var removed_lazy_bindings = self.lazy_bindings.fetchOrderedRemove(atom);
- if (removed_lazy_bindings) |*lazy_bindings| lazy_bindings.value.deinit(self.base.allocator);
+ sym_index.* = 0;
}
fn freeUnnamedConsts(self: *MachO, decl_index: Module.Decl.Index) void {
@@ -2594,29 +2592,25 @@ pub fn freeDecl(self: *MachO, decl_index: Module.Decl.Index) void {
log.debug("freeDecl {*}", .{decl});
- if (self.decls.fetchSwapRemove(decl_index)) |kv| {
- if (kv.value) |_| {
- self.freeAtom(&decl.link.macho);
- self.freeUnnamedConsts(decl_index);
- }
+ if (self.decls.fetchSwapRemove(decl_index)) |const_kv| {
+ var kv = const_kv;
+ self.freeAtom(kv.value.atom);
+ self.freeUnnamedConsts(decl_index);
+ kv.value.exports.deinit(self.base.allocator);
}
if (self.d_sym) |*d_sym| {
- d_sym.dwarf.freeDecl(decl);
+ d_sym.dwarf.freeDecl(decl_index);
}
}
pub fn getDeclVAddr(self: *MachO, decl_index: Module.Decl.Index, reloc_info: File.RelocInfo) !u64 {
- const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
-
assert(self.llvm_object == null);
- try decl.link.macho.ensureInitialized(self);
- const sym_index = decl.link.macho.getSymbolIndex().?;
-
- const atom = self.getAtomForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
- try atom.addRelocation(self, .{
+ const this_atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const sym_index = self.getAtom(this_atom_index).getSymbolIndex().?;
+ const atom_index = self.getAtomIndexForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
+ try Atom.addRelocation(self, atom_index, .{
.type = switch (self.base.options.target.cpu.arch) {
.aarch64 => @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED),
.x86_64 => @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED),
@@ -2628,7 +2622,7 @@ pub fn getDeclVAddr(self: *MachO, decl_index: Module.Decl.Index, reloc_info: Fil
.pcrel = false,
.length = 3,
});
- try atom.addRebase(self, @intCast(u32, reloc_info.offset));
+ try Atom.addRebase(self, atom_index, @intCast(u32, reloc_info.offset));
return 0;
}
@@ -2860,34 +2854,36 @@ fn moveSectionInVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void
// TODO: enforce order by increasing VM addresses in self.sections container.
for (self.sections.items(.header)[sect_id + 1 ..]) |*next_header, next_sect_id| {
const index = @intCast(u8, sect_id + 1 + next_sect_id);
- const maybe_last_atom = &self.sections.items(.last_atom)[index];
const next_segment = self.getSegmentPtr(index);
next_header.addr += diff;
next_segment.vmaddr += diff;
- if (maybe_last_atom.*) |last_atom| {
- var atom = last_atom;
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[index];
+ if (maybe_last_atom_index.*) |last_atom_index| {
+ var atom_index = last_atom_index;
while (true) {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbolPtr(self);
sym.n_value += diff;
- if (atom.prev) |prev| {
- atom = prev;
+ if (atom.prev_index) |prev_index| {
+ atom_index = prev_index;
} else break;
}
}
}
}
-fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !u64 {
+fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 {
const tracy = trace(@src());
defer tracy.end();
+ const atom = self.getAtom(atom_index);
const sect_id = atom.getSymbol(self).n_sect - 1;
const segment = self.getSegmentPtr(sect_id);
const header = &self.sections.items(.header)[sect_id];
const free_list = &self.sections.items(.free_list)[sect_id];
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
const requires_padding = blk: {
if (!header.isCode()) break :blk false;
if (header.isSymbolStubs()) break :blk false;
@@ -2901,7 +2897,7 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
- var atom_placement: ?*Atom = null;
+ var atom_placement: ?Atom.Index = null;
var free_list_removal: ?usize = null;
// First we look for an appropriately sized free list node.
@@ -2909,7 +2905,8 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
var vaddr = blk: {
var i: usize = 0;
while (i < free_list.items.len) {
- const big_atom = free_list.items[i];
+ const big_atom_index = free_list.items[i];
+ const big_atom = self.getAtom(big_atom_index);
// We now have a pointer to a live atom that has too much capacity.
// Is it enough that we could fit this new atom?
const sym = big_atom.getSymbol(self);
@@ -2937,30 +2934,35 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
const keep_free_list_node = remaining_capacity >= min_text_capacity;
// Set up the metadata to be updated, after errors are no longer possible.
- atom_placement = big_atom;
+ atom_placement = big_atom_index;
if (!keep_free_list_node) {
free_list_removal = i;
}
break :blk new_start_vaddr;
- } else if (maybe_last_atom.*) |last| {
+ } else if (maybe_last_atom_index.*) |last_index| {
+ const last = self.getAtom(last_index);
const last_symbol = last.getSymbol(self);
const ideal_capacity = if (requires_padding) padToIdeal(last.size) else last.size;
const ideal_capacity_end_vaddr = last_symbol.n_value + ideal_capacity;
const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment);
- atom_placement = last;
+ atom_placement = last_index;
break :blk new_start_vaddr;
} else {
break :blk mem.alignForwardGeneric(u64, segment.vmaddr, alignment);
}
};
- const expand_section = atom_placement == null or atom_placement.?.next == null;
+ const expand_section = if (atom_placement) |placement_index|
+ self.getAtom(placement_index).next_index == null
+ else
+ true;
if (expand_section) {
const sect_capacity = self.allocatedSize(header.offset);
const needed_size = (vaddr + new_atom_size) - segment.vmaddr;
if (needed_size > sect_capacity) {
const new_offset = self.findFreeSpace(needed_size, self.page_size);
- const current_size = if (maybe_last_atom.*) |last_atom| blk: {
+ const current_size = if (maybe_last_atom_index.*) |last_atom_index| blk: {
+ const last_atom = self.getAtom(last_atom_index);
const sym = last_atom.getSymbol(self);
break :blk (sym.n_value + last_atom.size) - segment.vmaddr;
} else 0;
@@ -2992,7 +2994,7 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
header.size = needed_size;
segment.filesize = mem.alignForwardGeneric(u64, needed_size, self.page_size);
segment.vmsize = mem.alignForwardGeneric(u64, needed_size, self.page_size);
- maybe_last_atom.* = atom;
+ maybe_last_atom_index.* = atom_index;
self.segment_table_dirty = true;
}
@@ -3001,21 +3003,31 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
if (header.@"align" < align_pow) {
header.@"align" = align_pow;
}
+ {
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.size = new_atom_size;
+ atom_ptr.alignment = @intCast(u32, alignment);
+ }
- if (atom.prev) |prev| {
- prev.next = atom.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
}
- if (atom.next) |next| {
- next.prev = atom.prev;
+ if (atom.next_index) |next_index| {
+ const next = self.getAtomPtr(next_index);
+ next.prev_index = atom.prev_index;
}
- if (atom_placement) |big_atom| {
- atom.prev = big_atom;
- atom.next = big_atom.next;
- big_atom.next = atom;
+ if (atom_placement) |big_atom_index| {
+ const big_atom = self.getAtomPtr(big_atom_index);
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = big_atom_index;
+ atom_ptr.next_index = big_atom.next_index;
+ big_atom.next_index = atom_index;
} else {
- atom.prev = null;
- atom.next = null;
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = null;
+ atom_ptr.next_index = null;
}
if (free_list_removal) |i| {
_ = free_list.swapRemove(i);
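
The free-list scan above is a first-fit allocator: take the first gap whose aligned start still fits the new atom, otherwise append after the last atom. A reduced sketch without the padding and ideal-capacity bookkeeping:

    const std = @import("std");
    const mem = std.mem;

    // Each candidate is an (addr, capacity) gap; types are invented.
    const Gap = struct { addr: u64, capacity: u64 };

    fn firstFit(gaps: []const Gap, size: u64, alignment: u64) ?u64 {
        for (gaps) |gap| {
            const start = mem.alignForwardGeneric(u64, gap.addr, alignment);
            const end = gap.addr + gap.capacity;
            if (start + size <= end) return start;
        }
        return null;
    }

    test "skips gaps that are too small once aligned" {
        const gaps = [_]Gap{
            .{ .addr = 0x1001, .capacity = 0x10 },
            .{ .addr = 0x2000, .capacity = 0x100 },
        };
        try std.testing.expect(firstFit(&gaps, 0x40, 0x10).? == 0x2000);
    }
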
@@ -3155,7 +3167,8 @@ fn collectRebaseData(self: *MachO, rebase: *Rebase) !void {
const gpa = self.base.allocator;
const slice = self.sections.slice();
- for (self.rebases.keys()) |atom, i| {
+ for (self.rebases.keys()) |atom_index, i| {
+ const atom = self.getAtom(atom_index);
log.debug(" ATOM(%{?d}, '{s}')", .{ atom.getSymbolIndex(), atom.getName(self) });
const sym = atom.getSymbol(self);
@@ -3184,7 +3197,8 @@ fn collectBindData(self: *MachO, bind: anytype, raw_bindings: anytype) !void {
const gpa = self.base.allocator;
const slice = self.sections.slice();
- for (raw_bindings.keys()) |atom, i| {
+ for (raw_bindings.keys()) |atom_index, i| {
+ const atom = self.getAtom(atom_index);
log.debug(" ATOM(%{?d}, '{s}')", .{ atom.getSymbolIndex(), atom.getName(self) });
const sym = atom.getSymbol(self);
@@ -3359,7 +3373,7 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, lazy_bind: LazyBind) !void
if (lazy_bind.size() == 0) return;
const stub_helper_section_index = self.stub_helper_section_index.?;
- assert(self.stub_helper_preamble_atom != null);
+ assert(self.stub_helper_preamble_atom_index != null);
const section = self.sections.get(stub_helper_section_index);
@@ -3369,10 +3383,11 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, lazy_bind: LazyBind) !void
else => unreachable,
};
const header = section.header;
- var atom = section.last_atom.?;
+ var atom_index = section.last_atom_index.?;
var index: usize = lazy_bind.offsets.items.len;
while (index > 0) : (index -= 1) {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const file_offset = header.offset + sym.n_value - header.addr + stub_offset;
const bind_offset = lazy_bind.offsets.items[index - 1];
@@ -3385,7 +3400,7 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, lazy_bind: LazyBind) !void
try self.base.file.?.pwriteAll(mem.asBytes(&bind_offset), file_offset);
- atom = atom.prev.?;
+ atom_index = atom.prev_index.?;
}
}
@@ -3828,25 +3843,35 @@ pub fn getOrPutGlobalPtr(self: *MachO, name: []const u8) !GetOrPutGlobalPtrResul
return GetOrPutGlobalPtrResult{ .found_existing = false, .value_ptr = ptr };
}
+pub fn getAtom(self: *MachO, atom_index: Atom.Index) Atom {
+ assert(atom_index < self.atoms.items.len);
+ return self.atoms.items[atom_index];
+}
+
+pub fn getAtomPtr(self: *MachO, atom_index: Atom.Index) *Atom {
+ assert(atom_index < self.atoms.items.len);
+ return &self.atoms.items[atom_index];
+}
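
Note the asymmetry these accessors introduce: getAtom returns a value copy that later writes will not update, while getAtomPtr returns a pointer that the next createAtom may invalidate; that is why the code above re-fetches with getAtomPtr immediately before each mutation. A sketch of the two behaviours:

    const std = @import("std");

    const Atom = struct { size: u64 };

    fn getAtom(atoms: []const Atom, index: u32) Atom {
        return atoms[index];
    }

    fn getAtomPtr(atoms: []Atom, index: u32) *Atom {
        return &atoms[index];
    }

    test "copies go stale; pointers are live but short-lived" {
        var atoms = [_]Atom{.{ .size = 8 }};
        const copy = getAtom(&atoms, 0);
        getAtomPtr(&atoms, 0).size = 16;
        try std.testing.expect(copy.size == 8 and atoms[0].size == 16);
    }
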
+
/// Returns atom if there is an atom referenced by the symbol described by `sym_with_loc` descriptor.
/// Returns null on failure.
-pub fn getAtomForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?*Atom {
+pub fn getAtomIndexForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?Atom.Index {
assert(sym_with_loc.file == null);
return self.atom_by_index_table.get(sym_with_loc.sym_index);
}
/// Returns GOT atom that references `sym_with_loc` if one exists.
/// Returns null otherwise.
-pub fn getGotAtomForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?*Atom {
+pub fn getGotAtomIndexForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?Atom.Index {
const got_index = self.got_entries_table.get(sym_with_loc) orelse return null;
- return self.got_entries.items[got_index].getAtom(self);
+ return self.got_entries.items[got_index].getAtomIndex(self);
}
/// Returns stubs atom that references `sym_with_loc` if one exists.
/// Returns null otherwise.
-pub fn getStubsAtomForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?*Atom {
+pub fn getStubsAtomIndexForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?Atom.Index {
const stubs_index = self.stubs_table.get(sym_with_loc) orelse return null;
- return self.stubs.items[stubs_index].getAtom(self);
+ return self.stubs.items[stubs_index].getAtomIndex(self);
}
/// Returns symbol location corresponding to the set entrypoint.
@@ -4232,26 +4257,31 @@ pub fn logAtoms(self: *MachO) void {
log.debug("atoms:", .{});
const slice = self.sections.slice();
- for (slice.items(.last_atom)) |last, i| {
- var atom = last orelse continue;
+ for (slice.items(.last_atom_index)) |last_atom_index, i| {
+ var atom_index = last_atom_index orelse continue;
const header = slice.items(.header)[i];
- while (atom.prev) |prev| {
- atom = prev;
+ while (true) {
+ const atom = self.getAtom(atom_index);
+ if (atom.prev_index) |prev_index| {
+ atom_index = prev_index;
+ } else break;
}
log.debug("{s},{s}", .{ header.segName(), header.sectName() });
while (true) {
- self.logAtom(atom);
- if (atom.next) |next| {
- atom = next;
+ self.logAtom(atom_index);
+ const atom = self.getAtom(atom_index);
+ if (atom.next_index) |next_index| {
+ atom_index = next_index;
} else break;
}
}
}
-pub fn logAtom(self: *MachO, atom: *const Atom) void {
+pub fn logAtom(self: *MachO, atom_index: Atom.Index) void {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const sym_name = atom.getName(self);
log.debug(" ATOM(%{?d}, '{s}') @ {x} (sizeof({x}), alignof({x})) in object({?d}) in sect({d})", .{
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index f15958b3df..5fb94b7c13 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -13,7 +13,6 @@ const trace = @import("../../tracy.zig").trace;
const Allocator = mem.Allocator;
const Arch = std.Target.Cpu.Arch;
-const Dwarf = @import("../Dwarf.zig");
const MachO = @import("../MachO.zig");
const Relocation = @import("Relocation.zig");
const SymbolWithLoc = MachO.SymbolWithLoc;
@@ -39,10 +38,11 @@ size: u64,
alignment: u32,
/// Points to the previous and next neighbours
-next: ?*Atom,
-prev: ?*Atom,
+/// TODO use the same trick as with symbols: reserve index 0 as null atom
+next_index: ?Index,
+prev_index: ?Index,
-dbg_info_atom: Dwarf.Atom,
+pub const Index = u32;
pub const Binding = struct {
target: SymbolWithLoc,
@@ -54,22 +54,6 @@ pub const SymbolAtOffset = struct {
offset: u64,
};
-pub const empty = Atom{
- .sym_index = 0,
- .file = null,
- .size = 0,
- .alignment = 0,
- .prev = null,
- .next = null,
- .dbg_info_atom = undefined,
-};
-
-pub fn ensureInitialized(self: *Atom, macho_file: *MachO) !void {
- if (self.getSymbolIndex() != null) return; // Already initialized
- self.sym_index = try macho_file.allocateSymbol();
- try macho_file.atom_by_index_table.putNoClobber(macho_file.base.allocator, self.sym_index, self);
-}
-
pub fn getSymbolIndex(self: Atom) ?u32 {
if (self.sym_index == 0) return null;
return self.sym_index;
@@ -108,7 +92,8 @@ pub fn getName(self: Atom, macho_file: *MachO) []const u8 {
/// this calculation.
pub fn capacity(self: Atom, macho_file: *MachO) u64 {
const self_sym = self.getSymbol(macho_file);
- if (self.next) |next| {
+ if (self.next_index) |next_index| {
+ const next = macho_file.getAtom(next_index);
const next_sym = next.getSymbol(macho_file);
return next_sym.n_value - self_sym.n_value;
} else {
@@ -120,7 +105,8 @@ pub fn capacity(self: Atom, macho_file: *MachO) u64 {
pub fn freeListEligible(self: Atom, macho_file: *MachO) bool {
// No need to keep a free list node for the last atom.
- const next = self.next orelse return false;
+ const next_index = self.next_index orelse return false;
+ const next = macho_file.getAtom(next_index);
const self_sym = self.getSymbol(macho_file);
const next_sym = next.getSymbol(macho_file);
const cap = next_sym.n_value - self_sym.n_value;
@@ -130,19 +116,19 @@ pub fn freeListEligible(self: Atom, macho_file: *MachO) bool {
return surplus >= MachO.min_text_capacity;
}
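
freeListEligible keeps a free-list node only when the gap up to the next atom exceeds the atom's ideal (padded) size by a useful margin. A sketch with an invented padding rule and threshold standing in for padToIdeal and MachO.min_text_capacity:

    const std = @import("std");

    const min_capacity = 64; // stand-in for MachO.min_text_capacity

    fn freeListEligible(self_addr: u64, next_addr: u64, self_size: u64) bool {
        const cap = next_addr - self_addr;
        const ideal = self_size + self_size / 3; // stand-in for padToIdeal
        if (cap <= ideal) return false;
        return cap - ideal >= min_capacity;
    }

    test "only comfortably large gaps are tracked" {
        try std.testing.expect(!freeListEligible(0x1000, 0x1020, 0x20));
        try std.testing.expect(freeListEligible(0x1000, 0x2000, 0x20));
    }
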
-pub fn addRelocation(self: *Atom, macho_file: *MachO, reloc: Relocation) !void {
- return self.addRelocations(macho_file, 1, .{reloc});
+pub fn addRelocation(macho_file: *MachO, atom_index: Index, reloc: Relocation) !void {
+ return addRelocations(macho_file, atom_index, 1, .{reloc});
}
pub fn addRelocations(
- self: *Atom,
macho_file: *MachO,
+ atom_index: Index,
comptime count: comptime_int,
relocs: [count]Relocation,
) !void {
const gpa = macho_file.base.allocator;
const target = macho_file.base.options.target;
- const gop = try macho_file.relocs.getOrPut(gpa, self);
+ const gop = try macho_file.relocs.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
@@ -156,56 +142,72 @@ pub fn addRelocations(
}
}
-pub fn addRebase(self: *Atom, macho_file: *MachO, offset: u32) !void {
+pub fn addRebase(macho_file: *MachO, atom_index: Index, offset: u32) !void {
const gpa = macho_file.base.allocator;
- log.debug(" (adding rebase at offset 0x{x} in %{?d})", .{ offset, self.getSymbolIndex() });
- const gop = try macho_file.rebases.getOrPut(gpa, self);
+ const atom = macho_file.getAtom(atom_index);
+ log.debug(" (adding rebase at offset 0x{x} in %{?d})", .{ offset, atom.getSymbolIndex() });
+ const gop = try macho_file.rebases.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, offset);
}
-pub fn addBinding(self: *Atom, macho_file: *MachO, binding: Binding) !void {
+pub fn addBinding(macho_file: *MachO, atom_index: Index, binding: Binding) !void {
const gpa = macho_file.base.allocator;
+ const atom = macho_file.getAtom(atom_index);
log.debug(" (adding binding to symbol {s} at offset 0x{x} in %{?d})", .{
macho_file.getSymbolName(binding.target),
binding.offset,
- self.getSymbolIndex(),
+ atom.getSymbolIndex(),
});
- const gop = try macho_file.bindings.getOrPut(gpa, self);
+ const gop = try macho_file.bindings.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, binding);
}
-pub fn addLazyBinding(self: *Atom, macho_file: *MachO, binding: Binding) !void {
+pub fn addLazyBinding(macho_file: *MachO, atom_index: Index, binding: Binding) !void {
const gpa = macho_file.base.allocator;
+ const atom = macho_file.getAtom(atom_index);
log.debug(" (adding lazy binding to symbol {s} at offset 0x{x} in %{?d})", .{
macho_file.getSymbolName(binding.target),
binding.offset,
- self.getSymbolIndex(),
+ atom.getSymbolIndex(),
});
- const gop = try macho_file.lazy_bindings.getOrPut(gpa, self);
+ const gop = try macho_file.lazy_bindings.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, binding);
}
-pub fn resolveRelocations(self: *Atom, macho_file: *MachO) !void {
- const relocs = macho_file.relocs.get(self) orelse return;
- const source_sym = self.getSymbol(macho_file);
+pub fn resolveRelocations(macho_file: *MachO, atom_index: Index) !void {
+ const atom = macho_file.getAtom(atom_index);
+ const relocs = macho_file.relocs.get(atom_index) orelse return;
+ const source_sym = atom.getSymbol(macho_file);
const source_section = macho_file.sections.get(source_sym.n_sect - 1).header;
const file_offset = source_section.offset + source_sym.n_value - source_section.addr;
- log.debug("relocating '{s}'", .{self.getName(macho_file)});
+ log.debug("relocating '{s}'", .{atom.getName(macho_file)});
for (relocs.items) |*reloc| {
if (!reloc.dirty) continue;
- try reloc.resolve(self, macho_file, file_offset);
+ try reloc.resolve(macho_file, atom_index, file_offset);
reloc.dirty = false;
}
}
+
+pub fn freeRelocations(macho_file: *MachO, atom_index: Index) void {
+ const gpa = macho_file.base.allocator;
+ var removed_relocs = macho_file.relocs.fetchOrderedRemove(atom_index);
+ if (removed_relocs) |*relocs| relocs.value.deinit(gpa);
+ var removed_rebases = macho_file.rebases.fetchOrderedRemove(atom_index);
+ if (removed_rebases) |*rebases| rebases.value.deinit(gpa);
+ var removed_bindings = macho_file.bindings.fetchOrderedRemove(atom_index);
+ if (removed_bindings) |*bindings| bindings.value.deinit(gpa);
+ var removed_lazy_bindings = macho_file.lazy_bindings.fetchOrderedRemove(atom_index);
+ if (removed_lazy_bindings) |*lazy_bindings| lazy_bindings.value.deinit(gpa);
+}
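
freeRelocations drains one atom's entries from all four side tables; fetchOrderedRemove hands back the owned value so its list can be deinitialized exactly once. A self-contained sketch of that pattern on one table:

    const std = @import("std");

    test "fetchOrderedRemove returns the owned list for deinit" {
        const gpa = std.testing.allocator;
        var map = std.AutoArrayHashMapUnmanaged(u32, std.ArrayListUnmanaged(u32)){};
        defer map.deinit(gpa);

        var list = std.ArrayListUnmanaged(u32){};
        try list.append(gpa, 123);
        try map.put(gpa, 0, list); // the map now owns the list

        var removed = map.fetchOrderedRemove(0);
        if (removed) |*kv| kv.value.deinit(gpa);
        try std.testing.expect(map.count() == 0);
    }
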
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index 7c22f441cd..0a5c8b0372 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -82,11 +82,11 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void {
}
if (self.debug_str_section_index == null) {
- assert(self.dwarf.strtab.items.len == 0);
- try self.dwarf.strtab.append(self.allocator, 0);
+ assert(self.dwarf.strtab.buffer.items.len == 0);
+ try self.dwarf.strtab.buffer.append(self.allocator, 0);
self.debug_str_section_index = try self.allocateSection(
"__debug_str",
- @intCast(u32, self.dwarf.strtab.items.len),
+ @intCast(u32, self.dwarf.strtab.buffer.items.len),
0,
);
self.debug_string_table_dirty = true;
@@ -291,10 +291,10 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {
{
const sect_index = self.debug_str_section_index.?;
- if (self.debug_string_table_dirty or self.dwarf.strtab.items.len != self.getSection(sect_index).size) {
- const needed_size = @intCast(u32, self.dwarf.strtab.items.len);
+ if (self.debug_string_table_dirty or self.dwarf.strtab.buffer.items.len != self.getSection(sect_index).size) {
+ const needed_size = @intCast(u32, self.dwarf.strtab.buffer.items.len);
try self.growSection(sect_index, needed_size, false);
- try self.file.pwriteAll(self.dwarf.strtab.items, self.getSection(sect_index).offset);
+ try self.file.pwriteAll(self.dwarf.strtab.buffer.items, self.getSection(sect_index).offset);
self.debug_string_table_dirty = false;
}
}
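
These hunks assume the Dwarf string table is now a wrapper whose bytes live in `strtab.buffer`, with offset 0 reserved for the empty name. A sketch of such a wrapper (the insert method here is invented; the real one may also deduplicate):

    const std = @import("std");

    const StringTable = struct {
        buffer: std.ArrayListUnmanaged(u8) = .{},

        // Returns the byte offset of the NUL-terminated copy.
        fn insert(self: *StringTable, gpa: std.mem.Allocator, name: []const u8) !u32 {
            if (self.buffer.items.len == 0) try self.buffer.append(gpa, 0);
            const off = @intCast(u32, self.buffer.items.len);
            try self.buffer.appendSlice(gpa, name);
            try self.buffer.append(gpa, 0);
            return off;
        }
    };

    test "offsets index NUL-terminated names" {
        const gpa = std.testing.allocator;
        var strtab = StringTable{};
        defer strtab.buffer.deinit(gpa);
        const off = try strtab.insert(gpa, "_main");
        try std.testing.expect(off == 1);
        try std.testing.expectEqualStrings("_main", strtab.buffer.items[off .. off + 5]);
    }
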
diff --git a/src/link/MachO/Relocation.zig b/src/link/MachO/Relocation.zig
index ca6bf9d681..07e5cf1aa2 100644
--- a/src/link/MachO/Relocation.zig
+++ b/src/link/MachO/Relocation.zig
@@ -29,33 +29,35 @@ pub fn fmtType(self: Relocation, target: std.Target) []const u8 {
}
}
-pub fn getTargetAtom(self: Relocation, macho_file: *MachO) ?*Atom {
+pub fn getTargetAtomIndex(self: Relocation, macho_file: *MachO) ?Atom.Index {
switch (macho_file.base.options.target.cpu.arch) {
.aarch64 => switch (@intToEnum(macho.reloc_type_arm64, self.type)) {
.ARM64_RELOC_GOT_LOAD_PAGE21,
.ARM64_RELOC_GOT_LOAD_PAGEOFF12,
.ARM64_RELOC_POINTER_TO_GOT,
- => return macho_file.getGotAtomForSymbol(self.target),
+ => return macho_file.getGotAtomIndexForSymbol(self.target),
else => {},
},
.x86_64 => switch (@intToEnum(macho.reloc_type_x86_64, self.type)) {
.X86_64_RELOC_GOT,
.X86_64_RELOC_GOT_LOAD,
- => return macho_file.getGotAtomForSymbol(self.target),
+ => return macho_file.getGotAtomIndexForSymbol(self.target),
else => {},
},
else => unreachable,
}
- if (macho_file.getStubsAtomForSymbol(self.target)) |stubs_atom| return stubs_atom;
- return macho_file.getAtomForSymbol(self.target);
+ if (macho_file.getStubsAtomIndexForSymbol(self.target)) |stubs_atom| return stubs_atom;
+ return macho_file.getAtomIndexForSymbol(self.target);
}
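
getTargetAtomIndex is a priority dispatch: GOT-flavoured relocation types resolve through the GOT table only, while everything else prefers a stub entry and falls back to the atom defining the symbol. A toy model of that ordering, with invented tables:

    const std = @import("std");

    const RelocKind = enum { got, branch, unsigned };

    fn targetAtom(kind: RelocKind, got: ?u32, stub: ?u32, plain: ?u32) ?u32 {
        switch (kind) {
            .got => return got, // never falls through to stubs
            else => {},
        }
        if (stub) |s| return s;
        return plain;
    }

    test "GOT relocs never fall through to stubs" {
        try std.testing.expect(targetAtom(.got, 3, 5, 7).? == 3);
        try std.testing.expect(targetAtom(.branch, 3, 5, 7).? == 5);
        try std.testing.expect(targetAtom(.unsigned, null, null, 7).? == 7);
    }
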
-pub fn resolve(self: Relocation, atom: *Atom, macho_file: *MachO, base_offset: u64) !void {
+pub fn resolve(self: Relocation, macho_file: *MachO, atom_index: Atom.Index, base_offset: u64) !void {
const arch = macho_file.base.options.target.cpu.arch;
+ const atom = macho_file.getAtom(atom_index);
const source_sym = atom.getSymbol(macho_file);
const source_addr = source_sym.n_value + self.offset;
- const target_atom = self.getTargetAtom(macho_file) orelse return;
+ const target_atom_index = self.getTargetAtomIndex(macho_file) orelse return;
+ const target_atom = macho_file.getAtom(target_atom_index);
const target_addr = @intCast(i64, target_atom.getSymbol(macho_file).n_value) + self.addend;
log.debug(" ({x}: [() => 0x{x} ({s})) ({s})", .{
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index a8b8caafab..87e3ca5c22 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -21,14 +21,7 @@ const Allocator = std.mem.Allocator;
const log = std.log.scoped(.link);
const assert = std.debug.assert;
-const FnDeclOutput = struct {
- /// this code is modified when relocated so it is mutable
- code: []u8,
- /// this might have to be modified in the linker, so thats why its mutable
- lineinfo: []u8,
- start_line: u32,
- end_line: u32,
-};
+pub const base_tag = .plan9;
base: link.File,
sixtyfour_bit: bool,
@@ -101,6 +94,9 @@ got_index_free_list: std.ArrayListUnmanaged(usize) = .{},
syms_index_free_list: std.ArrayListUnmanaged(usize) = .{},
+decl_blocks: std.ArrayListUnmanaged(DeclBlock) = .{},
+decls: std.AutoHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
+
const Reloc = struct {
target: Module.Decl.Index,
offset: u64,
@@ -115,6 +111,42 @@ const Bases = struct {
const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(struct { info: DeclBlock, code: []const u8 }));
+pub const PtrWidth = enum { p32, p64 };
+
+pub const DeclBlock = struct {
+ type: aout.Sym.Type,
+ /// offset in the text or data sections
+ offset: ?u64,
+ /// offset into syms
+ sym_index: ?usize,
+ /// offset into got
+ got_index: ?usize,
+
+ pub const Index = u32;
+};
+
+const DeclMetadata = struct {
+ index: DeclBlock.Index,
+ exports: std.ArrayListUnmanaged(usize) = .{},
+
+ fn getExport(m: DeclMetadata, p9: *const Plan9, name: []const u8) ?usize {
+ for (m.exports.items) |exp| {
+ const sym = p9.syms.items[exp];
+ if (mem.eql(u8, name, sym.name)) return exp;
+ }
+ return null;
+ }
+};
+
+const FnDeclOutput = struct {
+ /// this code is modified when relocated, so it is mutable
+ code: []u8,
+ /// this might have to be modified in the linker, so that's why it's mutable
+ lineinfo: []u8,
+ start_line: u32,
+ end_line: u32,
+};
+
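
Exports are now looked up by name through linker-owned metadata instead of `exp.link.plan9`; a usage sketch (hypothetical `p9: *Plan9` and `decl_index`, not part of the patch):

    const metadata = p9.decls.get(decl_index).?;
    if (metadata.getExport(p9, "_start")) |sym_index| {
        const exported_sym = p9.syms.items[sym_index]; // aout.Sym for this export
        _ = exported_sym;
    }
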
fn getAddr(self: Plan9, addr: u64, t: aout.Sym.Type) u64 {
return addr + switch (t) {
.T, .t, .l, .L => self.bases.text,
@@ -127,22 +159,6 @@ fn getSymAddr(self: Plan9, s: aout.Sym) u64 {
return self.getAddr(s.value, s.type);
}
-pub const DeclBlock = struct {
- type: aout.Sym.Type,
- /// offset in the text or data sections
- offset: ?u64,
- /// offset into syms
- sym_index: ?usize,
- /// offset into got
- got_index: ?usize,
- pub const empty = DeclBlock{
- .type = .t,
- .offset = null,
- .sym_index = null,
- .got_index = null,
- };
-};
-
pub fn defaultBaseAddrs(arch: std.Target.Cpu.Arch) Bases {
return switch (arch) {
.x86_64 => .{
@@ -164,8 +180,6 @@ pub fn defaultBaseAddrs(arch: std.Target.Cpu.Arch) Bases {
};
}
-pub const PtrWidth = enum { p32, p64 };
-
pub fn createEmpty(gpa: Allocator, options: link.Options) !*Plan9 {
if (options.use_llvm)
return error.LLVMBackendDoesNotSupportPlan9;
@@ -271,7 +285,7 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
const decl = module.declPtr(decl_index);
self.freeUnnamedConsts(decl_index);
- try self.seeDecl(decl_index);
+ _ = try self.seeDecl(decl_index);
log.debug("codegen decl {*} ({s})", .{ decl, decl.name });
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
@@ -313,11 +327,11 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
.end_line = end_line,
};
try self.putFn(decl_index, out);
- return self.updateFinish(decl);
+ return self.updateFinish(decl_index);
}
pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.Index) !u32 {
- try self.seeDecl(decl_index);
+ _ = try self.seeDecl(decl_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -387,7 +401,7 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index)
}
}
- try self.seeDecl(decl_index);
+ _ = try self.seeDecl(decl_index);
log.debug("codegen decl {*} ({s}) ({d})", .{ decl, decl.name, decl_index });
@@ -414,28 +428,31 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index)
if (self.data_decl_table.fetchPutAssumeCapacity(decl_index, duped_code)) |old_entry| {
self.base.allocator.free(old_entry.value);
}
- return self.updateFinish(decl);
+ return self.updateFinish(decl_index);
}
/// called at the end of update{Decl,Func}
-fn updateFinish(self: *Plan9, decl: *Module.Decl) !void {
+fn updateFinish(self: *Plan9, decl_index: Module.Decl.Index) !void {
+ const decl = self.base.options.module.?.declPtr(decl_index);
const is_fn = (decl.ty.zigTypeTag() == .Fn);
log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name });
const sym_t: aout.Sym.Type = if (is_fn) .t else .d;
+
+ const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index);
// write the internal linker metadata
- decl.link.plan9.type = sym_t;
+ decl_block.type = sym_t;
// write the symbol
// we already have the got index
const sym: aout.Sym = .{
.value = undefined, // the actual value is filled in during flushModule
- .type = decl.link.plan9.type,
+ .type = decl_block.type,
.name = mem.span(decl.name),
};
- if (decl.link.plan9.sym_index) |s| {
+ if (decl_block.sym_index) |s| {
self.syms.items[s] = sym;
} else {
const s = try self.allocateSymbolIndex();
- decl.link.plan9.sym_index = s;
+ decl_block.sym_index = s;
self.syms.items[s] = sym;
}
}
@@ -550,6 +567,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
while (it.next()) |entry| {
const decl_index = entry.key_ptr.*;
const decl = mod.declPtr(decl_index);
+ const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index);
const out = entry.value_ptr.*;
log.debug("write text decl {*} ({s}), lines {d} to {d}", .{ decl, decl.name, out.start_line + 1, out.end_line });
{
@@ -568,16 +586,16 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
iovecs_i += 1;
const off = self.getAddr(text_i, .t);
text_i += out.code.len;
- decl.link.plan9.offset = off;
+ decl_block.offset = off;
if (!self.sixtyfour_bit) {
- mem.writeIntNative(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off));
- mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
+ mem.writeInt(u32, got_table[decl_block.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
} else {
- mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
+ mem.writeInt(u64, got_table[decl_block.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
}
- self.syms.items[decl.link.plan9.sym_index.?].value = off;
+ self.syms.items[decl_block.sym_index.?].value = off;
if (mod.decl_exports.get(decl_index)) |exports| {
- try self.addDeclExports(mod, decl, exports.items);
+ try self.addDeclExports(mod, decl_index, exports.items);
}
}
}
@@ -598,6 +616,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
while (it.next()) |entry| {
const decl_index = entry.key_ptr.*;
const decl = mod.declPtr(decl_index);
+ const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index);
const code = entry.value_ptr.*;
log.debug("write data decl {*} ({s})", .{ decl, decl.name });
@@ -606,15 +625,15 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
iovecs_i += 1;
const off = self.getAddr(data_i, .d);
data_i += code.len;
- decl.link.plan9.offset = off;
+ decl_block.offset = off;
if (!self.sixtyfour_bit) {
- mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
+ mem.writeInt(u32, got_table[decl_block.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
} else {
- mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
+ mem.writeInt(u64, got_table[decl_block.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
}
- self.syms.items[decl.link.plan9.sym_index.?].value = off;
+ self.syms.items[decl_block.sym_index.?].value = off;
if (mod.decl_exports.get(decl_index)) |exports| {
- try self.addDeclExports(mod, decl, exports.items);
+ try self.addDeclExports(mod, decl_index, exports.items);
}
}
// write the unnamed constants after the other data decls
@@ -676,7 +695,8 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
for (kv.value_ptr.items) |reloc| {
const target_decl_index = reloc.target;
const target_decl = mod.declPtr(target_decl_index);
- const target_decl_offset = target_decl.link.plan9.offset.?;
+ const target_decl_block = self.getDeclBlock(self.decls.get(target_decl_index).?.index);
+ const target_decl_offset = target_decl_block.offset.?;
const offset = reloc.offset;
const addend = reloc.addend;
@@ -709,28 +729,36 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
fn addDeclExports(
self: *Plan9,
module: *Module,
- decl: *Module.Decl,
+ decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
+ const metadata = self.decls.getPtr(decl_index).?;
+ const decl_block = self.getDeclBlock(metadata.index);
+
for (exports) |exp| {
// plan9 does not support custom sections
if (exp.options.section) |section_name| {
if (!mem.eql(u8, section_name, ".text") and !mem.eql(u8, section_name, ".data")) {
- try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "plan9 does not support extra sections", .{}));
+ try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(
+ self.base.allocator,
+ module.declPtr(decl_index).srcLoc(),
+ "plan9 does not support extra sections",
+ .{},
+ ));
break;
}
}
const sym = .{
- .value = decl.link.plan9.offset.?,
- .type = decl.link.plan9.type.toGlobal(),
+ .value = decl_block.offset.?,
+ .type = decl_block.type.toGlobal(),
.name = exp.options.name,
};
- if (exp.link.plan9) |i| {
+ if (metadata.getExport(self, exp.options.name)) |i| {
self.syms.items[i] = sym;
} else {
try self.syms.append(self.base.allocator, sym);
- exp.link.plan9 = self.syms.items.len - 1;
+ try metadata.exports.append(self.base.allocator, self.syms.items.len - 1);
}
}
}
@@ -760,13 +788,18 @@ pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void {
self.base.allocator.free(removed_entry.value);
}
}
- if (decl.link.plan9.got_index) |i| {
- // TODO: if this catch {} is triggered, an assertion in flushModule will be triggered, because got_index_free_list will have the wrong length
- self.got_index_free_list.append(self.base.allocator, i) catch {};
- }
- if (decl.link.plan9.sym_index) |i| {
- self.syms_index_free_list.append(self.base.allocator, i) catch {};
- self.syms.items[i] = aout.Sym.undefined_symbol;
+ if (self.decls.fetchRemove(decl_index)) |const_kv| {
+ var kv = const_kv;
+ const decl_block = self.getDeclBlock(kv.value.index);
+ if (decl_block.got_index) |i| {
+ // TODO: if this catch {} is triggered, an assertion in flushModule will be triggered, because got_index_free_list will have the wrong length
+ self.got_index_free_list.append(self.base.allocator, i) catch {};
+ }
+ if (decl_block.sym_index) |i| {
+ self.syms_index_free_list.append(self.base.allocator, i) catch {};
+ self.syms.items[i] = aout.Sym.undefined_symbol;
+ }
+ kv.value.exports.deinit(self.base.allocator);
}
self.freeUnnamedConsts(decl_index);
{
@@ -786,12 +819,30 @@ fn freeUnnamedConsts(self: *Plan9, decl_index: Module.Decl.Index) void {
unnamed_consts.clearAndFree(self.base.allocator);
}
-pub fn seeDecl(self: *Plan9, decl_index: Module.Decl.Index) !void {
- const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
- if (decl.link.plan9.got_index == null) {
- decl.link.plan9.got_index = self.allocateGotIndex();
+fn createDeclBlock(self: *Plan9) !DeclBlock.Index {
+ const gpa = self.base.allocator;
+ const index = @intCast(DeclBlock.Index, self.decl_blocks.items.len);
+ const decl_block = try self.decl_blocks.addOne(gpa);
+ decl_block.* = .{
+ .type = .t,
+ .offset = null,
+ .sym_index = null,
+ .got_index = null,
+ };
+ return index;
+}
+
+pub fn seeDecl(self: *Plan9, decl_index: Module.Decl.Index) !DeclBlock.Index {
+ const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
+ if (!gop.found_existing) {
+ const index = try self.createDeclBlock();
+ self.getDeclBlockPtr(index).got_index = self.allocateGotIndex();
+ gop.value_ptr.* = .{
+ .index = index,
+ .exports = .{},
+ };
}
+ return gop.value_ptr.index;
}
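
A sketch of the intended calling pattern (assuming `self: *Plan9` and a valid `decl_index`): `seeDecl` is idempotent, so update paths may discard the result while codegen paths keep the index.

    _ = try self.seeDecl(decl_index); // first call: creates the block and a GOT slot
    const block_index = try self.seeDecl(decl_index); // same index as the first call
    _ = self.getDeclBlock(block_index).got_index.?; // allocated on creation
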
pub fn updateDeclExports(
@@ -800,7 +851,7 @@ pub fn updateDeclExports(
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
- try self.seeDecl(decl_index);
+ _ = try self.seeDecl(decl_index);
// we do all the things in flush
_ = module;
_ = exports;
@@ -842,10 +893,17 @@ pub fn deinit(self: *Plan9) void {
self.syms_index_free_list.deinit(gpa);
self.file_segments.deinit(gpa);
self.path_arena.deinit();
+ self.decl_blocks.deinit(gpa);
+
+ {
+ var it = self.decls.iterator();
+ while (it.next()) |entry| {
+ entry.value_ptr.exports.deinit(gpa);
+ }
+ self.decls.deinit(gpa);
+ }
}
-pub const Export = ?usize;
-pub const base_tag = .plan9;
pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Plan9 {
if (options.use_llvm)
return error.LLVMBackendDoesNotSupportPlan9;
@@ -911,20 +969,19 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
}
}
- const mod = self.base.options.module.?;
-
// write the data symbols
{
var it = self.data_decl_table.iterator();
while (it.next()) |entry| {
const decl_index = entry.key_ptr.*;
- const decl = mod.declPtr(decl_index);
- const sym = self.syms.items[decl.link.plan9.sym_index.?];
+ const decl_metadata = self.decls.get(decl_index).?;
+ const decl_block = self.getDeclBlock(decl_metadata.index);
+ const sym = self.syms.items[decl_block.sym_index.?];
try self.writeSym(writer, sym);
if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| {
- for (exports.items) |e| {
- try self.writeSym(writer, self.syms.items[e.link.plan9.?]);
- }
+ for (exports.items) |e| if (decl_metadata.getExport(self, e.options.name)) |exp_i| {
+ try self.writeSym(writer, self.syms.items[exp_i]);
+ };
}
}
}
@@ -943,16 +1000,17 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
var submap_it = symidx_and_submap.functions.iterator();
while (submap_it.next()) |entry| {
const decl_index = entry.key_ptr.*;
- const decl = mod.declPtr(decl_index);
- const sym = self.syms.items[decl.link.plan9.sym_index.?];
+ const decl_metadata = self.decls.get(decl_index).?;
+ const decl_block = self.getDeclBlock(decl_metadata.index);
+ const sym = self.syms.items[decl_block.sym_index.?];
try self.writeSym(writer, sym);
if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| {
- for (exports.items) |e| {
- const s = self.syms.items[e.link.plan9.?];
+ for (exports.items) |e| if (decl_metadata.getExport(self, e.options.name)) |exp_i| {
+ const s = self.syms.items[exp_i];
if (mem.eql(u8, s.name, "_start"))
self.entry_val = s.value;
try self.writeSym(writer, s);
- }
+ };
}
}
}
@@ -960,10 +1018,10 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
}
/// Must be called only after a successful call to `updateDecl`.
-pub fn updateDeclLineNumber(self: *Plan9, mod: *Module, decl: *const Module.Decl) !void {
+pub fn updateDeclLineNumber(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !void {
_ = self;
_ = mod;
- _ = decl;
+ _ = decl_index;
}
pub fn getDeclVAddr(
@@ -1004,3 +1062,11 @@ pub fn getDeclVAddr(
});
return undefined;
}
+
+pub fn getDeclBlock(self: *const Plan9, index: DeclBlock.Index) DeclBlock {
+ return self.decl_blocks.items[index];
+}
+
+fn getDeclBlockPtr(self: *Plan9, index: DeclBlock.Index) *DeclBlock {
+ return &self.decl_blocks.items[index];
+}
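
Note that `getDeclBlock` returns the block by value; mutation has to go through `getDeclBlockPtr`, as in this sketch:

    var copy = self.getDeclBlock(index);
    copy.type = .d; // changes only the local copy
    _ = copy;
    self.getDeclBlockPtr(index).type = .d; // changes the stored block
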
diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig
index 7dbd3a42ce..14a29e4498 100644
--- a/src/link/SpirV.zig
+++ b/src/link/SpirV.zig
@@ -42,13 +42,6 @@ const SpvModule = @import("../codegen/spirv/Module.zig");
const spec = @import("../codegen/spirv/spec.zig");
const IdResult = spec.IdResult;
-// TODO: Should this struct be used at all rather than just a hashmap of aux data for every decl?
-pub const FnData = struct {
- // We're going to fill these in flushModule, and we're going to fill them unconditionally,
- // so just set it to undefined.
- id: IdResult = undefined,
-};
-
base: link.File,
/// This linker backend does not try to incrementally link output SPIR-V code.
@@ -209,16 +202,19 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No
// so that we can access them before processing them.
// TODO: We're allocating an ID unconditionally now, are there
// declarations which don't generate a result?
- // TODO: fn_link is used here, but that's probably not the right field. It will work anyway though.
+ var ids = std.AutoHashMap(Module.Decl.Index, IdResult).init(self.base.allocator);
+ defer ids.deinit();
+ try ids.ensureTotalCapacity(@intCast(u32, self.decl_table.count()));
+
for (self.decl_table.keys()) |decl_index| {
const decl = module.declPtr(decl_index);
if (decl.has_tv) {
- decl.fn_link.spirv.id = spv.allocId();
+ ids.putAssumeCapacityNoClobber(decl_index, spv.allocId());
}
}
// Now, actually generate the code for all declarations.
- var decl_gen = codegen.DeclGen.init(self.base.allocator, module, &spv);
+ var decl_gen = codegen.DeclGen.init(self.base.allocator, module, &spv, &ids);
defer decl_gen.deinit();
var it = self.decl_table.iterator();
@@ -231,7 +227,7 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No
const liveness = entry.value_ptr.liveness;
// Note, if `decl` is not a function, air/liveness may be undefined.
- if (try decl_gen.gen(decl, air, liveness)) |msg| {
+ if (try decl_gen.gen(decl_index, air, liveness)) |msg| {
try module.failed_decls.put(module.gpa, decl_index, msg);
return; // TODO: Attempt to generate more decls?
}
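
The replacement scheme, sketched below (how `DeclGen` consumes the map, and `some_decl_index`, are assumptions): result IDs are reserved up front in a transient map so declarations can be referenced before their bodies exist, then looked up during codegen.

    var ids = std.AutoHashMap(Module.Decl.Index, IdResult).init(self.base.allocator);
    defer ids.deinit();
    for (self.decl_table.keys()) |decl_index| {
        try ids.put(decl_index, spv.allocId()); // pass 1: reserve an ID per decl
    }
    const result_id = ids.get(some_decl_index).?; // pass 2: resolve on demand
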
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 31dfb87659..17391b017a 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -9,7 +9,7 @@ const fs = std.fs;
const leb = std.leb;
const log = std.log.scoped(.link);
-const Atom = @import("Wasm/Atom.zig");
+pub const Atom = @import("Wasm/Atom.zig");
const Dwarf = @import("Dwarf.zig");
const Module = @import("../Module.zig");
const Compilation = @import("../Compilation.zig");
@@ -31,10 +31,7 @@ const Object = @import("Wasm/Object.zig");
const Archive = @import("Wasm/Archive.zig");
const types = @import("Wasm/types.zig");
-pub const base_tag = link.File.Tag.wasm;
-
-/// deprecated: Use `@import("Wasm/Atom.zig");`
-pub const DeclBlock = Atom;
+pub const base_tag: link.File.Tag = .wasm;
base: link.File,
/// Output name of the file
@@ -47,18 +44,16 @@ llvm_object: ?*LlvmObject = null,
/// TODO: Allow setting this through a flag?
host_name: []const u8 = "env",
/// List of all `Decl`s that are currently alive.
-/// This is meant for bookkeeping so we can safely clean up all codegen memory
-/// when calling `deinit`
-decls: std.AutoHashMapUnmanaged(Module.Decl.Index, void) = .{},
+/// Each index maps to the corresponding `Atom.Index`.
+decls: std.AutoHashMapUnmanaged(Module.Decl.Index, Atom.Index) = .{},
/// List of all symbols generated by Zig code.
symbols: std.ArrayListUnmanaged(Symbol) = .{},
/// List of symbol indexes which are free to be used.
symbols_free_list: std.ArrayListUnmanaged(u32) = .{},
/// Maps atoms to their segment index
-atoms: std.AutoHashMapUnmanaged(u32, *Atom) = .{},
-/// Atoms managed and created by the linker. This contains atoms
-/// from object files, and not Atoms generated by a Decl.
-managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
+atoms: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},
+/// List of all atoms.
+managed_atoms: std.ArrayListUnmanaged(Atom) = .{},
/// Represents the index into `segments` where the 'code' section
/// lives.
code_section_index: ?u32 = null,
@@ -148,7 +143,7 @@ undefs: std.StringArrayHashMapUnmanaged(SymbolLoc) = .{},
/// Maps a symbol's location to an atom. This can be used to find the
/// metadata of a symbol, such as its size, or its offset to perform a relocation.
/// Undefined (and synthetic) symbols do not have an Atom and therefore cannot be mapped.
-symbol_atom: std.AutoHashMapUnmanaged(SymbolLoc, *Atom) = .{},
+symbol_atom: std.AutoHashMapUnmanaged(SymbolLoc, Atom.Index) = .{},
/// Maps a symbol's location to its export name, which may differ from the decl's name
/// which does the exporting.
/// Note: The value represents the offset into the string table, rather than the actual string.
@@ -165,14 +160,14 @@ error_table_symbol: ?u32 = null,
// unit contains Zig code. The lifetime of these atoms are extended
// until the end of the compiler's lifetime. Meaning they're not freed
// during `flush()` in incremental-mode.
-debug_info_atom: ?*Atom = null,
-debug_line_atom: ?*Atom = null,
-debug_loc_atom: ?*Atom = null,
-debug_ranges_atom: ?*Atom = null,
-debug_abbrev_atom: ?*Atom = null,
-debug_str_atom: ?*Atom = null,
-debug_pubnames_atom: ?*Atom = null,
-debug_pubtypes_atom: ?*Atom = null,
+debug_info_atom: ?Atom.Index = null,
+debug_line_atom: ?Atom.Index = null,
+debug_loc_atom: ?Atom.Index = null,
+debug_ranges_atom: ?Atom.Index = null,
+debug_abbrev_atom: ?Atom.Index = null,
+debug_str_atom: ?Atom.Index = null,
+debug_pubnames_atom: ?Atom.Index = null,
+debug_pubtypes_atom: ?Atom.Index = null,
pub const Segment = struct {
alignment: u32,
@@ -183,13 +178,9 @@ pub const Segment = struct {
pub const FnData = struct {
/// Reference to the wasm type that represents this function.
type_index: u32,
- /// Contains debug information related to this function.
- /// For Wasm, the offset is relative to the code-section.
- src_fn: Dwarf.SrcFn,
pub const empty: FnData = .{
.type_index = undefined,
- .src_fn = Dwarf.SrcFn.empty,
};
};
@@ -434,10 +425,10 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
// at the end during `initializeCallCtorsFunction`.
}
- if (!options.strip and options.module != null) {
- wasm_bin.dwarf = Dwarf.init(allocator, &wasm_bin.base, options.target);
- try wasm_bin.initDebugSections();
- }
+ // if (!options.strip and options.module != null) {
+ // wasm_bin.dwarf = Dwarf.init(allocator, &wasm_bin.base, options.target);
+ // try wasm_bin.initDebugSections();
+ // }
return wasm_bin;
}
@@ -478,6 +469,7 @@ fn createSyntheticSymbol(wasm: *Wasm, name: []const u8, tag: Symbol.Tag) !Symbol
try wasm.globals.put(wasm.base.allocator, name_offset, loc);
return loc;
}
+
/// Initializes symbols and atoms for the debug sections
/// Initialization is only done when compiling Zig code.
/// When Zig is invoked as a linker instead, the atoms
@@ -520,6 +512,36 @@ fn parseObjectFile(wasm: *Wasm, path: []const u8) !bool {
return true;
}
+/// For a given `Module.Decl.Index`, returns its corresponding `Atom.Index`.
+/// When no atom exists yet, a new `Atom` is created and its index returned.
+/// The newly created Atom is empty with default fields as specified by `Atom.empty`.
+pub fn getOrCreateAtomForDecl(wasm: *Wasm, decl_index: Module.Decl.Index) !Atom.Index {
+ const gop = try wasm.decls.getOrPut(wasm.base.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = try wasm.createAtom();
+ }
+ return gop.value_ptr.*;
+}
+
+/// Creates a new empty `Atom` and returns its `Atom.Index`
+fn createAtom(wasm: *Wasm) !Atom.Index {
+ const index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+ const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
+ atom.* = Atom.empty;
+ atom.sym_index = try wasm.allocateSymbol();
+ try wasm.symbol_atom.putNoClobber(wasm.base.allocator, .{ .file = null, .index = atom.sym_index }, index);
+
+ return index;
+}
+
+pub inline fn getAtom(wasm: *const Wasm, index: Atom.Index) Atom {
+ return wasm.managed_atoms.items[index];
+}
+
+pub inline fn getAtomPtr(wasm: *Wasm, index: Atom.Index) *Atom {
+ return &wasm.managed_atoms.items[index];
+}
+
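
The reason these helpers traffic in `Atom.Index` rather than `*Atom`: `managed_atoms` is an array list, so any append may reallocate it and invalidate raw pointers. A sketch of the safe pattern (a valid `decl_index` is assumed):

    const atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
    _ = try wasm.createAtom(); // may grow managed_atoms and move every Atom
    const atom = wasm.getAtomPtr(atom_index); // re-derived from the index, so still valid
    atom.alignment = 1;
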
/// Parses an archive file and will then parse each object file
/// that was found in the archive file.
/// Returns false when the file is not an archive file.
@@ -861,15 +883,16 @@ fn resolveLazySymbols(wasm: *Wasm) !void {
try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
_ = wasm.resolved_symbols.swapRemove(loc); // we don't want to emit this symbol, only use it for relocations.
- const atom = try wasm.base.allocator.create(Atom);
- errdefer wasm.base.allocator.destroy(atom);
- try wasm.managed_atoms.append(wasm.base.allocator, atom);
+ // TODO: Can we use `createAtom` here while also re-using the symbol
+ // from `createSyntheticSymbol`?
+ const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+ const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
atom.* = Atom.empty;
atom.sym_index = loc.index;
atom.alignment = 1;
- try wasm.parseAtom(atom, .{ .data = .synthetic });
- try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom);
+ try wasm.parseAtom(atom_index, .{ .data = .synthetic });
+ try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom_index);
}
if (wasm.undefs.fetchSwapRemove("__heap_end")) |kv| {
@@ -877,15 +900,14 @@ fn resolveLazySymbols(wasm: *Wasm) !void {
try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
_ = wasm.resolved_symbols.swapRemove(loc);
- const atom = try wasm.base.allocator.create(Atom);
- errdefer wasm.base.allocator.destroy(atom);
- try wasm.managed_atoms.append(wasm.base.allocator, atom);
+ const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+ const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
atom.* = Atom.empty;
atom.sym_index = loc.index;
atom.alignment = 1;
- try wasm.parseAtom(atom, .{ .data = .synthetic });
- try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom);
+ try wasm.parseAtom(atom_index, .{ .data = .synthetic });
+ try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom_index);
}
}
@@ -924,16 +946,6 @@ pub fn deinit(wasm: *Wasm) void {
if (wasm.llvm_object) |llvm_object| llvm_object.destroy(gpa);
}
- if (wasm.base.options.module) |mod| {
- var decl_it = wasm.decls.keyIterator();
- while (decl_it.next()) |decl_index_ptr| {
- const decl = mod.declPtr(decl_index_ptr.*);
- decl.link.wasm.deinit(gpa);
- }
- } else {
- assert(wasm.decls.count() == 0);
- }
-
for (wasm.func_types.items) |*func_type| {
func_type.deinit(gpa);
}
@@ -958,9 +970,8 @@ pub fn deinit(wasm: *Wasm) void {
wasm.symbol_atom.deinit(gpa);
wasm.export_names.deinit(gpa);
wasm.atoms.deinit(gpa);
- for (wasm.managed_atoms.items) |managed_atom| {
- managed_atom.deinit(gpa);
- gpa.destroy(managed_atom);
+ for (wasm.managed_atoms.items) |*managed_atom| {
+ managed_atom.deinit(wasm);
}
wasm.managed_atoms.deinit(gpa);
wasm.segments.deinit(gpa);
@@ -1018,18 +1029,24 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
- const atom = &decl.link.wasm;
- try atom.ensureInitialized(wasm);
- const gop = try wasm.decls.getOrPut(wasm.base.allocator, decl_index);
- if (gop.found_existing) {
- atom.clear();
- } else gop.value_ptr.* = {};
+ const atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
+ const atom = wasm.getAtomPtr(atom_index);
+ atom.clear();
- var decl_state: ?Dwarf.DeclState = if (wasm.dwarf) |*dwarf| try dwarf.initDeclState(mod, decl_index) else null;
- defer if (decl_state) |*ds| ds.deinit();
+ // var decl_state: ?Dwarf.DeclState = if (wasm.dwarf) |*dwarf| try dwarf.initDeclState(mod, decl_index) else null;
+ // defer if (decl_state) |*ds| ds.deinit();
var code_writer = std.ArrayList(u8).init(wasm.base.allocator);
defer code_writer.deinit();
+ // const result = try codegen.generateFunction(
+ // &wasm.base,
+ // decl.srcLoc(),
+ // func,
+ // air,
+ // liveness,
+ // &code_writer,
+ // if (decl_state) |*ds| .{ .dwarf = ds } else .none,
+ // );
const result = try codegen.generateFunction(
&wasm.base,
decl.srcLoc(),
@@ -1037,7 +1054,7 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes
air,
liveness,
&code_writer,
- if (decl_state) |*ds| .{ .dwarf = ds } else .none,
+ .none,
);
const code = switch (result) {
@@ -1049,19 +1066,19 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes
},
};
- if (wasm.dwarf) |*dwarf| {
- try dwarf.commitDeclState(
- mod,
- decl_index,
- // Actual value will be written after relocation.
- // For Wasm, this is the offset relative to the code section
- // which isn't known until flush().
- 0,
- code.len,
- &decl_state.?,
- );
- }
- return wasm.finishUpdateDecl(decl, code);
+ // if (wasm.dwarf) |*dwarf| {
+ // try dwarf.commitDeclState(
+ // mod,
+ // decl_index,
+ // // Actual value will be written after relocation.
+ // // For Wasm, this is the offset relative to the code section
+ // // which isn't known until flush().
+ // 0,
+ // code.len,
+ // &decl_state.?,
+ // );
+ // }
+ return wasm.finishUpdateDecl(decl_index, code);
}
// Generate code for the Decl, storing it in memory to be later written to
@@ -1084,17 +1101,14 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi
return;
}
- const atom = &decl.link.wasm;
- try atom.ensureInitialized(wasm);
- const gop = try wasm.decls.getOrPut(wasm.base.allocator, decl_index);
- if (gop.found_existing) {
- atom.clear();
- } else gop.value_ptr.* = {};
+ const atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
+ const atom = wasm.getAtomPtr(atom_index);
+ atom.clear();
if (decl.isExtern()) {
const variable = decl.getVariable().?;
const name = mem.sliceTo(decl.name, 0);
- return wasm.addOrUpdateImport(name, decl.link.wasm.sym_index, variable.lib_name, null);
+ return wasm.addOrUpdateImport(name, atom.sym_index, variable.lib_name, null);
}
const val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
@@ -1107,7 +1121,7 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi
.{ .ty = decl.ty, .val = val },
&code_writer,
.none,
- .{ .parent_atom_index = decl.link.wasm.sym_index },
+ .{ .parent_atom_index = atom.sym_index },
);
const code = switch (res) {
@@ -1119,26 +1133,29 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi
},
};
- return wasm.finishUpdateDecl(decl, code);
+ return wasm.finishUpdateDecl(decl_index, code);
}
-pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Module, decl: *const Module.Decl) !void {
+pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !void {
if (wasm.llvm_object) |_| return;
if (wasm.dwarf) |*dw| {
const tracy = trace(@src());
defer tracy.end();
+ const decl = mod.declPtr(decl_index);
const decl_name = try decl.getFullyQualifiedName(mod);
defer wasm.base.allocator.free(decl_name);
log.debug("updateDeclLineNumber {s}{*}", .{ decl_name, decl });
- try dw.updateDeclLineNumber(decl);
+ try dw.updateDeclLineNumber(mod, decl_index);
}
}
-fn finishUpdateDecl(wasm: *Wasm, decl: *Module.Decl, code: []const u8) !void {
+fn finishUpdateDecl(wasm: *Wasm, decl_index: Module.Decl.Index, code: []const u8) !void {
const mod = wasm.base.options.module.?;
- const atom: *Atom = &decl.link.wasm;
+ const decl = mod.declPtr(decl_index);
+ const atom_index = wasm.decls.get(decl_index).?;
+ const atom = wasm.getAtomPtr(atom_index);
const symbol = &wasm.symbols.items[atom.sym_index];
const full_name = try decl.getFullyQualifiedName(mod);
defer wasm.base.allocator.free(full_name);
@@ -1204,48 +1221,51 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In
const decl = mod.declPtr(decl_index);
// Create and initialize a new local symbol and atom
- const local_index = decl.link.wasm.locals.items.len;
+ const atom_index = try wasm.createAtom();
+ const parent_atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
+ const parent_atom = wasm.getAtomPtr(parent_atom_index);
+ const local_index = parent_atom.locals.items.len;
+ try parent_atom.locals.append(wasm.base.allocator, atom_index);
const fqdn = try decl.getFullyQualifiedName(mod);
defer wasm.base.allocator.free(fqdn);
const name = try std.fmt.allocPrintZ(wasm.base.allocator, "__unnamed_{s}_{d}", .{ fqdn, local_index });
defer wasm.base.allocator.free(name);
-
- const atom = try decl.link.wasm.locals.addOne(wasm.base.allocator);
- atom.* = Atom.empty;
- try atom.ensureInitialized(wasm);
- atom.alignment = tv.ty.abiAlignment(wasm.base.options.target);
- wasm.symbols.items[atom.sym_index] = .{
- .name = try wasm.string_table.put(wasm.base.allocator, name),
- .flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
- .tag = .data,
- .index = undefined,
- };
-
- try wasm.resolved_symbols.putNoClobber(wasm.base.allocator, atom.symbolLoc(), {});
-
var value_bytes = std.ArrayList(u8).init(wasm.base.allocator);
defer value_bytes.deinit();
- const result = try codegen.generateSymbol(
- &wasm.base,
- decl.srcLoc(),
- tv,
- &value_bytes,
- .none,
- .{
- .parent_atom_index = atom.sym_index,
- .addend = null,
- },
- );
- const code = switch (result) {
- .ok => value_bytes.items,
- .fail => |em| {
- decl.analysis = .codegen_failure;
- try mod.failed_decls.put(mod.gpa, decl_index, em);
- return error.AnalysisFail;
- },
+ const code = code: {
+ const atom = wasm.getAtomPtr(atom_index);
+ atom.alignment = tv.ty.abiAlignment(wasm.base.options.target);
+ wasm.symbols.items[atom.sym_index] = .{
+ .name = try wasm.string_table.put(wasm.base.allocator, name),
+ .flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
+ .tag = .data,
+ .index = undefined,
+ };
+ try wasm.resolved_symbols.putNoClobber(wasm.base.allocator, atom.symbolLoc(), {});
+
+ const result = try codegen.generateSymbol(
+ &wasm.base,
+ decl.srcLoc(),
+ tv,
+ &value_bytes,
+ .none,
+ .{
+ .parent_atom_index = atom.sym_index,
+ .addend = null,
+ },
+ );
+ break :code switch (result) {
+ .ok => value_bytes.items,
+ .fail => |em| {
+ decl.analysis = .codegen_failure;
+ try mod.failed_decls.put(mod.gpa, decl_index, em);
+ return error.AnalysisFail;
+ },
+ };
};
+ const atom = wasm.getAtomPtr(atom_index);
atom.size = @intCast(u32, code.len);
try atom.code.appendSlice(wasm.base.allocator, code);
return atom.sym_index;
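
Unnamed constants now hang off the parent decl's atom via `locals`, which lets `freeDecl` release them in one pass; a traversal sketch (assuming a valid `parent_atom_index`):

    const parent = wasm.getAtom(parent_atom_index);
    for (parent.locals.items) |local_atom_index| {
        const local = wasm.getAtom(local_atom_index);
        wasm.symbols.items[local.sym_index].tag = .dead; // as freeDecl does
    }
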
@@ -1293,10 +1313,13 @@ pub fn getDeclVAddr(
) !u64 {
const mod = wasm.base.options.module.?;
const decl = mod.declPtr(decl_index);
- try decl.link.wasm.ensureInitialized(wasm);
- const target_symbol_index = decl.link.wasm.sym_index;
+
+ const target_atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
+ const target_symbol_index = wasm.getAtom(target_atom_index).sym_index;
+
assert(reloc_info.parent_atom_index != 0);
- const atom = wasm.symbol_atom.get(.{ .file = null, .index = reloc_info.parent_atom_index }).?;
+ const atom_index = wasm.symbol_atom.get(.{ .file = null, .index = reloc_info.parent_atom_index }).?;
+ const atom = wasm.getAtomPtr(atom_index);
const is_wasm32 = wasm.base.options.target.cpu.arch == .wasm32;
if (decl.ty.zigTypeTag() == .Fn) {
assert(reloc_info.addend == 0); // addend not allowed for function relocations
@@ -1324,9 +1347,10 @@ pub fn getDeclVAddr(
return target_symbol_index;
}
-pub fn deleteExport(wasm: *Wasm, exp: Export) void {
+pub fn deleteDeclExport(wasm: *Wasm, decl_index: Module.Decl.Index) void {
if (wasm.llvm_object) |_| return;
- const sym_index = exp.sym_index orelse return;
+ const atom_index = wasm.decls.get(decl_index) orelse return;
+ const sym_index = wasm.getAtom(atom_index).sym_index;
const loc: SymbolLoc = .{ .file = null, .index = sym_index };
const symbol = loc.getSymbol(wasm);
const symbol_name = wasm.string_table.get(symbol.name);
@@ -1352,7 +1376,8 @@ pub fn updateDeclExports(
}
const decl = mod.declPtr(decl_index);
- if (decl.link.wasm.getSymbolIndex() == null) return; // uninitialized
+ const atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
+ const atom = wasm.getAtom(atom_index);
for (exports) |exp| {
if (exp.options.section) |section| {
@@ -1367,7 +1392,7 @@ pub fn updateDeclExports(
const export_name = try wasm.string_table.put(wasm.base.allocator, exp.options.name);
if (wasm.globals.getPtr(export_name)) |existing_loc| {
- if (existing_loc.index == decl.link.wasm.sym_index) continue;
+ if (existing_loc.index == atom.sym_index) continue;
const existing_sym: Symbol = existing_loc.getSymbol(wasm).*;
const exp_is_weak = exp.options.linkage == .Internal or exp.options.linkage == .Weak;
@@ -1388,15 +1413,16 @@ pub fn updateDeclExports(
} else if (exp_is_weak) {
continue; // to-be-exported symbol is weak, so we keep the existing symbol
} else {
- existing_loc.index = decl.link.wasm.sym_index;
+ // TODO: Revisit this, why was this needed?
+ existing_loc.index = atom.sym_index;
existing_loc.file = null;
- exp.link.wasm.sym_index = existing_loc.index;
+ // exp.link.wasm.sym_index = existing_loc.index;
}
}
- const exported_decl = mod.declPtr(exp.exported_decl);
- const sym_index = exported_decl.link.wasm.sym_index;
- const sym_loc = exported_decl.link.wasm.symbolLoc();
+ const exported_atom_index = try wasm.getOrCreateAtomForDecl(exp.exported_decl);
+ const exported_atom = wasm.getAtom(exported_atom_index);
+ const sym_loc = exported_atom.symbolLoc();
const symbol = sym_loc.getSymbol(wasm);
switch (exp.options.linkage) {
.Internal => {
@@ -1432,7 +1458,6 @@ pub fn updateDeclExports(
// if the symbol was previously undefined, remove it as an import
_ = wasm.imports.remove(sym_loc);
_ = wasm.undefs.swapRemove(exp.options.name);
- exp.link.wasm.sym_index = sym_index;
}
}
@@ -1442,11 +1467,13 @@ pub fn freeDecl(wasm: *Wasm, decl_index: Module.Decl.Index) void {
}
const mod = wasm.base.options.module.?;
const decl = mod.declPtr(decl_index);
- const atom = &decl.link.wasm;
+ const atom_index = wasm.decls.get(decl_index).?;
+ const atom = wasm.getAtomPtr(atom_index);
wasm.symbols_free_list.append(wasm.base.allocator, atom.sym_index) catch {};
_ = wasm.decls.remove(decl_index);
wasm.symbols.items[atom.sym_index].tag = .dead;
- for (atom.locals.items) |local_atom| {
+ for (atom.locals.items) |local_atom_index| {
+ const local_atom = wasm.getAtom(local_atom_index);
const local_symbol = &wasm.symbols.items[local_atom.sym_index];
local_symbol.tag = .dead; // also for any local symbol
wasm.symbols_free_list.append(wasm.base.allocator, local_atom.sym_index) catch {};
@@ -1460,12 +1487,20 @@ pub fn freeDecl(wasm: *Wasm, decl_index: Module.Decl.Index) void {
_ = wasm.resolved_symbols.swapRemove(atom.symbolLoc());
_ = wasm.symbol_atom.remove(atom.symbolLoc());
- if (wasm.dwarf) |*dwarf| {
- dwarf.freeDecl(decl);
- dwarf.freeAtom(&atom.dbg_info_atom);
- }
+ // if (wasm.dwarf) |*dwarf| {
+ // dwarf.freeDecl(decl_index);
+ // }
- atom.deinit(wasm.base.allocator);
+ if (atom.next) |next_atom_index| {
+ const next_atom = wasm.getAtomPtr(next_atom_index);
+ next_atom.prev = atom.prev;
+ atom.next = null;
+ }
+ if (atom.prev) |prev_index| {
+ const prev_atom = wasm.getAtomPtr(prev_index);
+ prev_atom.next = atom.next;
+ atom.prev = null;
+ }
}
/// Appends a new entry to the indirect function table
@@ -1587,7 +1622,8 @@ const Kind = union(enum) {
};
/// Parses an Atom and inserts its metadata into the corresponding sections.
-fn parseAtom(wasm: *Wasm, atom: *Atom, kind: Kind) !void {
+fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
+ const atom = wasm.getAtomPtr(atom_index);
const symbol = (SymbolLoc{ .file = null, .index = atom.sym_index }).getSymbol(wasm);
const final_index: u32 = switch (kind) {
.function => |fn_data| result: {
@@ -1662,18 +1698,20 @@ fn parseAtom(wasm: *Wasm, atom: *Atom, kind: Kind) !void {
const segment: *Segment = &wasm.segments.items[final_index];
segment.alignment = std.math.max(segment.alignment, atom.alignment);
- try wasm.appendAtomAtIndex(final_index, atom);
+ try wasm.appendAtomAtIndex(final_index, atom_index);
}
/// From a given index, append the given `Atom` at the back of the linked list.
/// Simply inserts it into the map of atoms when it doesn't exist yet.
-pub fn appendAtomAtIndex(wasm: *Wasm, index: u32, atom: *Atom) !void {
- if (wasm.atoms.getPtr(index)) |last| {
- last.*.next = atom;
- atom.prev = last.*;
- last.* = atom;
+pub fn appendAtomAtIndex(wasm: *Wasm, index: u32, atom_index: Atom.Index) !void {
+ const atom = wasm.getAtomPtr(atom_index);
+ if (wasm.atoms.getPtr(index)) |last_index_ptr| {
+ const last = wasm.getAtomPtr(last_index_ptr.*);
+ last.*.next = atom_index;
+ atom.prev = last_index_ptr.*;
+ last_index_ptr.* = atom_index;
} else {
- try wasm.atoms.putNoClobber(wasm.base.allocator, index, atom);
+ try wasm.atoms.putNoClobber(wasm.base.allocator, index, atom_index);
}
}
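
One subtlety worth spelling out: `atoms` maps a segment to the *most recently appended* atom, and earlier atoms are reached through `prev`. That is why the traversal loops below follow `prev` where the old pointer-based code called `getFirst()` and walked `next`. Iteration sketch (assuming a valid `segment_index`):

    var atom_index = wasm.atoms.get(segment_index).?;
    while (true) {
        const atom = wasm.getAtom(atom_index);
        // visit `atom` here, newest first
        atom_index = atom.prev orelse break;
    }
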
@@ -1683,16 +1721,17 @@ fn allocateDebugAtoms(wasm: *Wasm) !void {
if (wasm.dwarf == null) return;
const allocAtom = struct {
- fn f(bin: *Wasm, maybe_index: *?u32, atom: *Atom) !void {
+ fn f(bin: *Wasm, maybe_index: *?u32, atom_index: Atom.Index) !void {
const index = maybe_index.* orelse idx: {
const index = @intCast(u32, bin.segments.items.len);
try bin.appendDummySegment();
maybe_index.* = index;
break :idx index;
};
+ const atom = bin.getAtomPtr(atom_index);
atom.size = @intCast(u32, atom.code.items.len);
bin.symbols.items[atom.sym_index].index = index;
- try bin.appendAtomAtIndex(index, atom);
+ try bin.appendAtomAtIndex(index, atom_index);
}
}.f;
@@ -1714,15 +1753,16 @@ fn allocateAtoms(wasm: *Wasm) !void {
var it = wasm.atoms.iterator();
while (it.next()) |entry| {
const segment = &wasm.segments.items[entry.key_ptr.*];
- var atom: *Atom = entry.value_ptr.*.getFirst();
+ var atom_index = entry.value_ptr.*;
var offset: u32 = 0;
while (true) {
+ const atom = wasm.getAtomPtr(atom_index);
const symbol_loc = atom.symbolLoc();
if (wasm.code_section_index) |index| {
if (index == entry.key_ptr.*) {
if (!wasm.resolved_symbols.contains(symbol_loc)) {
// only allocate resolved function bodies.
- atom = atom.next orelse break;
+ atom_index = atom.prev orelse break;
continue;
}
}
@@ -1736,8 +1776,7 @@ fn allocateAtoms(wasm: *Wasm) !void {
atom.size,
});
offset += atom.size;
- try wasm.symbol_atom.put(wasm.base.allocator, symbol_loc, atom); // Update atom pointers
- atom = atom.next orelse break;
+ atom_index = atom.prev orelse break;
}
segment.size = std.mem.alignForwardGeneric(u32, offset, segment.alignment);
}
@@ -1871,8 +1910,8 @@ fn initializeCallCtorsFunction(wasm: *Wasm) !void {
symbol.index = func_index;
// create the atom that will be output into the final binary
- const atom = try wasm.base.allocator.create(Atom);
- errdefer wasm.base.allocator.destroy(atom);
+ const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+ const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
atom.* = .{
.size = @intCast(u32, function_body.items.len),
.offset = 0,
@@ -1882,15 +1921,14 @@ fn initializeCallCtorsFunction(wasm: *Wasm) !void {
.next = null,
.prev = null,
.code = function_body.moveToUnmanaged(),
- .dbg_info_atom = undefined,
};
- try wasm.managed_atoms.append(wasm.base.allocator, atom);
- try wasm.appendAtomAtIndex(wasm.code_section_index.?, atom);
- try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom);
+ try wasm.appendAtomAtIndex(wasm.code_section_index.?, atom_index);
+ try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom_index);
// `allocateAtoms` has already been called, set the atom's offset manually.
// This is fine to do manually as we insert the atom at the very end.
- atom.offset = atom.prev.?.offset + atom.prev.?.size;
+ const prev_atom = wasm.getAtom(atom.prev.?);
+ atom.offset = prev_atom.offset + prev_atom.size;
}
fn setupImports(wasm: *Wasm) !void {
@@ -2093,7 +2131,8 @@ fn setupExports(wasm: *Wasm) !void {
break :blk try wasm.string_table.put(wasm.base.allocator, sym_name);
};
const exp: types.Export = if (symbol.tag == .data) exp: {
- const atom = wasm.symbol_atom.get(sym_loc).?;
+ const atom_index = wasm.symbol_atom.get(sym_loc).?;
+ const atom = wasm.getAtom(atom_index);
const va = atom.getVA(wasm, symbol);
const global_index = @intCast(u32, wasm.imported_globals_count + wasm.wasm_globals.items.len);
try wasm.wasm_globals.append(wasm.base.allocator, .{
@@ -2198,7 +2237,8 @@ fn setupMemory(wasm: *Wasm) !void {
const segment_index = wasm.data_segments.get(".synthetic").?;
const segment = &wasm.segments.items[segment_index];
segment.offset = 0; // for simplicity we store the entire VA into atom's offset.
- const atom = wasm.symbol_atom.get(loc).?;
+ const atom_index = wasm.symbol_atom.get(loc).?;
+ const atom = wasm.getAtomPtr(atom_index);
atom.offset = @intCast(u32, mem.alignForwardGeneric(u64, memory_ptr, heap_alignment));
}
@@ -2231,7 +2271,8 @@ fn setupMemory(wasm: *Wasm) !void {
const segment_index = wasm.data_segments.get(".synthetic").?;
const segment = &wasm.segments.items[segment_index];
segment.offset = 0;
- const atom = wasm.symbol_atom.get(loc).?;
+ const atom_index = wasm.symbol_atom.get(loc).?;
+ const atom = wasm.getAtomPtr(atom_index);
atom.offset = @intCast(u32, memory_ptr);
}
@@ -2357,15 +2398,14 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
// and then return said symbol's index. The final table will be populated
// during `flush` when we know all possible error names.
- // As sym_index '0' is reserved, we use it for our stack pointer symbol
- const symbol_index = wasm.symbols_free_list.popOrNull() orelse blk: {
- const index = @intCast(u32, wasm.symbols.items.len);
- _ = try wasm.symbols.addOne(wasm.base.allocator);
- break :blk index;
- };
+ const atom_index = try wasm.createAtom();
+ const atom = wasm.getAtomPtr(atom_index);
+ const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+ atom.alignment = slice_ty.abiAlignment(wasm.base.options.target);
+ const sym_index = atom.sym_index;
const sym_name = try wasm.string_table.put(wasm.base.allocator, "__zig_err_name_table");
- const symbol = &wasm.symbols.items[symbol_index];
+ const symbol = &wasm.symbols.items[sym_index];
symbol.* = .{
.name = sym_name,
.tag = .data,
@@ -2374,20 +2414,11 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
};
symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
- const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+ try wasm.resolved_symbols.put(wasm.base.allocator, atom.symbolLoc(), {});
- const atom = try wasm.base.allocator.create(Atom);
- atom.* = Atom.empty;
- atom.sym_index = symbol_index;
- atom.alignment = slice_ty.abiAlignment(wasm.base.options.target);
- try wasm.managed_atoms.append(wasm.base.allocator, atom);
- const loc = atom.symbolLoc();
- try wasm.resolved_symbols.put(wasm.base.allocator, loc, {});
- try wasm.symbol_atom.put(wasm.base.allocator, loc, atom);
-
- log.debug("Error name table was created with symbol index: ({d})", .{symbol_index});
- wasm.error_table_symbol = symbol_index;
- return symbol_index;
+ log.debug("Error name table was created with symbol index: ({d})", .{sym_index});
+ wasm.error_table_symbol = sym_index;
+ return sym_index;
}
/// Populates the error name table, when `error_table_symbol` is not null.
@@ -2396,22 +2427,17 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
/// The table is what is being pointed to within the runtime bodies that are generated.
fn populateErrorNameTable(wasm: *Wasm) !void {
const symbol_index = wasm.error_table_symbol orelse return;
- const atom: *Atom = wasm.symbol_atom.get(.{ .file = null, .index = symbol_index }).?;
+ const atom_index = wasm.symbol_atom.get(.{ .file = null, .index = symbol_index }).?;
+ const atom = wasm.getAtomPtr(atom_index);
+
// Rather than creating a symbol for each individual error name,
// we create a symbol for the entire region of error names. We then calculate
// the pointers into the list using addends which are appended to the relocation.
- const names_atom = try wasm.base.allocator.create(Atom);
- names_atom.* = Atom.empty;
- try wasm.managed_atoms.append(wasm.base.allocator, names_atom);
- const names_symbol_index = wasm.symbols_free_list.popOrNull() orelse blk: {
- const index = @intCast(u32, wasm.symbols.items.len);
- _ = try wasm.symbols.addOne(wasm.base.allocator);
- break :blk index;
- };
- names_atom.sym_index = names_symbol_index;
+ const names_atom_index = try wasm.createAtom();
+ const names_atom = wasm.getAtomPtr(names_atom_index);
names_atom.alignment = 1;
const sym_name = try wasm.string_table.put(wasm.base.allocator, "__zig_err_names");
- const names_symbol = &wasm.symbols.items[names_symbol_index];
+ const names_symbol = &wasm.symbols.items[names_atom.sym_index];
names_symbol.* = .{
.name = sym_name,
.tag = .data,
@@ -2435,7 +2461,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
try atom.code.writer(wasm.base.allocator).writeIntLittle(u32, len - 1);
// create relocation to the error name
try atom.relocs.append(wasm.base.allocator, .{
- .index = names_symbol_index,
+ .index = names_atom.sym_index,
.relocation_type = .R_WASM_MEMORY_ADDR_I32,
.offset = offset,
.addend = @intCast(i32, addend),
@@ -2454,61 +2480,53 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
const name_loc = names_atom.symbolLoc();
try wasm.resolved_symbols.put(wasm.base.allocator, name_loc, {});
- try wasm.symbol_atom.put(wasm.base.allocator, name_loc, names_atom);
+ try wasm.symbol_atom.put(wasm.base.allocator, name_loc, names_atom_index);
// link the atoms with the rest of the binary so they can be allocated
// and relocations will be performed.
- try wasm.parseAtom(atom, .{ .data = .read_only });
- try wasm.parseAtom(names_atom, .{ .data = .read_only });
+ try wasm.parseAtom(atom_index, .{ .data = .read_only });
+ try wasm.parseAtom(names_atom_index, .{ .data = .read_only });
}
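
The addend scheme restated as a sketch (field names from this file; `offset` is the running position in the table atom and `addend` the name's byte offset into the `__zig_err_names` blob):

    try atom.relocs.append(wasm.base.allocator, .{
        .index = names_atom.sym_index, // one symbol covers the whole name region
        .relocation_type = .R_WASM_MEMORY_ADDR_I32,
        .offset = offset,
        .addend = @intCast(i32, addend), // selects the individual name
    });
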
/// From a given index variable, creates a new debug section.
/// This initializes the index, appends a new segment,
/// and finally, creates a managed `Atom`.
-pub fn createDebugSectionForIndex(wasm: *Wasm, index: *?u32, name: []const u8) !*Atom {
+pub fn createDebugSectionForIndex(wasm: *Wasm, index: *?u32, name: []const u8) !Atom.Index {
const new_index = @intCast(u32, wasm.segments.items.len);
index.* = new_index;
try wasm.appendDummySegment();
- const sym_index = wasm.symbols_free_list.popOrNull() orelse idx: {
- const tmp_index = @intCast(u32, wasm.symbols.items.len);
- _ = try wasm.symbols.addOne(wasm.base.allocator);
- break :idx tmp_index;
- };
- wasm.symbols.items[sym_index] = .{
+ const atom_index = try wasm.createAtom();
+ const atom = wasm.getAtomPtr(atom_index);
+ wasm.symbols.items[atom.sym_index] = .{
.tag = .section,
.name = try wasm.string_table.put(wasm.base.allocator, name),
.index = 0,
.flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
};
- const atom = try wasm.base.allocator.create(Atom);
- atom.* = Atom.empty;
atom.alignment = 1; // debug sections are always 1-byte-aligned
- atom.sym_index = sym_index;
- try wasm.managed_atoms.append(wasm.base.allocator, atom);
- try wasm.symbol_atom.put(wasm.base.allocator, atom.symbolLoc(), atom);
- return atom;
+ return atom_index;
}
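
Presumed usage from `initDebugSections` (the `*?u32` index fields passed here are assumptions, not shown in this diff): each debug section is created once and only its atom index is retained.

    wasm.debug_line_atom = try wasm.createDebugSectionForIndex(&wasm.debug_line_index, ".debug_line");
    wasm.debug_str_atom = try wasm.createDebugSectionForIndex(&wasm.debug_str_index, ".debug_str");
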
fn resetState(wasm: *Wasm) void {
for (wasm.segment_info.values()) |segment_info| {
wasm.base.allocator.free(segment_info.name);
}
- if (wasm.base.options.module) |mod| {
- var decl_it = wasm.decls.keyIterator();
- while (decl_it.next()) |decl_index_ptr| {
- const decl = mod.declPtr(decl_index_ptr.*);
- const atom = &decl.link.wasm;
- atom.next = null;
- atom.prev = null;
-
- for (atom.locals.items) |*local_atom| {
- local_atom.next = null;
- local_atom.prev = null;
- }
+
+ var atom_it = wasm.decls.valueIterator();
+ while (atom_it.next()) |atom_index| {
+ const atom = wasm.getAtomPtr(atom_index.*);
+ atom.next = null;
+ atom.prev = null;
+
+ for (atom.locals.items) |local_atom_index| {
+ const local_atom = wasm.getAtomPtr(local_atom_index);
+ local_atom.next = null;
+ local_atom.prev = null;
}
}
+
wasm.functions.clearRetainingCapacity();
wasm.exports.clearRetainingCapacity();
wasm.segments.clearRetainingCapacity();
@@ -2805,28 +2823,29 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
try wasm.setupStart();
try wasm.setupImports();
if (wasm.base.options.module) |mod| {
- var decl_it = wasm.decls.keyIterator();
- while (decl_it.next()) |decl_index_ptr| {
- const decl = mod.declPtr(decl_index_ptr.*);
+ var decl_it = wasm.decls.iterator();
+ while (decl_it.next()) |entry| {
+ const decl = mod.declPtr(entry.key_ptr.*);
if (decl.isExtern()) continue;
- const atom = &decl.*.link.wasm;
+ const atom_index = entry.value_ptr.*;
if (decl.ty.zigTypeTag() == .Fn) {
- try wasm.parseAtom(atom, .{ .function = decl.fn_link.wasm });
+ try wasm.parseAtom(atom_index, .{ .function = decl.fn_link.? });
} else if (decl.getVariable()) |variable| {
if (!variable.is_mutable) {
- try wasm.parseAtom(atom, .{ .data = .read_only });
+ try wasm.parseAtom(atom_index, .{ .data = .read_only });
} else if (variable.init.isUndefDeep()) {
- try wasm.parseAtom(atom, .{ .data = .uninitialized });
+ try wasm.parseAtom(atom_index, .{ .data = .uninitialized });
} else {
- try wasm.parseAtom(atom, .{ .data = .initialized });
+ try wasm.parseAtom(atom_index, .{ .data = .initialized });
}
} else {
- try wasm.parseAtom(atom, .{ .data = .read_only });
+ try wasm.parseAtom(atom_index, .{ .data = .read_only });
}
// also parse atoms for a decl's locals
- for (atom.locals.items) |*local_atom| {
- try wasm.parseAtom(local_atom, .{ .data = .read_only });
+ const atom = wasm.getAtomPtr(atom_index);
+ for (atom.locals.items) |local_atom_index| {
+ try wasm.parseAtom(local_atom_index, .{ .data = .read_only });
}
}
@@ -3071,20 +3090,22 @@ fn writeToFile(
var code_section_size: u32 = 0;
if (wasm.code_section_index) |code_index| {
const header_offset = try reserveVecSectionHeader(&binary_bytes);
- var atom: *Atom = wasm.atoms.get(code_index).?.getFirst();
+ var atom_index = wasm.atoms.get(code_index).?;
// The code section must be sorted in line with the function order.
var sorted_atoms = try std.ArrayList(*Atom).initCapacity(wasm.base.allocator, wasm.functions.count());
defer sorted_atoms.deinit();
while (true) {
+ var atom = wasm.getAtomPtr(atom_index);
if (wasm.resolved_symbols.contains(atom.symbolLoc())) {
if (!is_obj) {
atom.resolveRelocs(wasm);
}
sorted_atoms.appendAssumeCapacity(atom);
}
- atom = atom.next orelse break;
+ atom_index = atom.prev orelse break;
}
const atom_sort_fn = struct {
@@ -3124,11 +3145,11 @@ fn writeToFile(
// do not output 'bss' section unless we import memory and therefore
// want to guarantee the data is zero initialized
if (!import_memory and std.mem.eql(u8, entry.key_ptr.*, ".bss")) continue;
- const atom_index = entry.value_ptr.*;
- const segment = wasm.segments.items[atom_index];
+ const segment_index = entry.value_ptr.*;
+ const segment = wasm.segments.items[segment_index];
if (segment.size == 0) continue; // do not emit empty segments
segment_count += 1;
- var atom: *Atom = wasm.atoms.getPtr(atom_index).?.*.getFirst();
+ var atom_index = wasm.atoms.get(segment_index).?;
// flag and index to memory section (currently, there can only be 1 memory section in wasm)
try leb.writeULEB128(binary_writer, @as(u32, 0));
@@ -3139,6 +3160,7 @@ fn writeToFile(
// fill in the offset table and the data segments
var current_offset: u32 = 0;
while (true) {
+ const atom = wasm.getAtomPtr(atom_index);
if (!is_obj) {
atom.resolveRelocs(wasm);
}
@@ -3154,8 +3176,8 @@ fn writeToFile(
try binary_writer.writeAll(atom.code.items);
current_offset += atom.size;
- if (atom.next) |next| {
- atom = next;
+ if (atom.prev) |prev| {
+ atom_index = prev;
} else {
// also pad with zeroes when this is the last atom to ensure
// segments are aligned.
@@ -3197,15 +3219,15 @@ fn writeToFile(
}
if (!wasm.base.options.strip) {
- if (wasm.dwarf) |*dwarf| {
- const mod = wasm.base.options.module.?;
- try dwarf.writeDbgAbbrev();
- // for debug info and ranges, the address is always 0,
- // as locations are always offsets relative to 'code' section.
- try dwarf.writeDbgInfoHeader(mod, 0, code_section_size);
- try dwarf.writeDbgAranges(0, code_section_size);
- try dwarf.writeDbgLineHeader();
- }
+ // if (wasm.dwarf) |*dwarf| {
+ // const mod = wasm.base.options.module.?;
+ // try dwarf.writeDbgAbbrev();
+ // // for debug info and ranges, the address is always 0,
+ // // as locations are always offsets relative to 'code' section.
+ // try dwarf.writeDbgInfoHeader(mod, 0, code_section_size);
+ // try dwarf.writeDbgAranges(0, code_section_size);
+ // try dwarf.writeDbgLineHeader();
+ // }
var debug_bytes = std.ArrayList(u8).init(wasm.base.allocator);
defer debug_bytes.deinit();
@@ -3228,11 +3250,11 @@ fn writeToFile(
for (debug_sections) |item| {
if (item.index) |index| {
- var atom = wasm.atoms.get(index).?.getFirst();
+ var atom = wasm.getAtomPtr(wasm.atoms.get(index).?);
while (true) {
atom.resolveRelocs(wasm);
try debug_bytes.appendSlice(atom.code.items);
- atom = atom.next orelse break;
+ atom = if (atom.prev) |prev| wasm.getAtomPtr(prev) else break;
}
try emitDebugSection(&binary_bytes, debug_bytes.items, item.name);
debug_bytes.clearRetainingCapacity();
@@ -3964,7 +3986,8 @@ fn emitSymbolTable(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table:
if (symbol.isDefined()) {
try leb.writeULEB128(writer, symbol.index);
- const atom = wasm.symbol_atom.get(sym_loc).?;
+ const atom_index = wasm.symbol_atom.get(sym_loc).?;
+ const atom = wasm.getAtom(atom_index);
try leb.writeULEB128(writer, @as(u32, atom.offset));
try leb.writeULEB128(writer, @as(u32, atom.size));
}
@@ -4042,7 +4065,7 @@ fn emitCodeRelocations(
const reloc_start = binary_bytes.items.len;
var count: u32 = 0;
- var atom: *Atom = wasm.atoms.get(code_index).?.getFirst();
+ var atom: *Atom = wasm.getAtomPtr(wasm.atoms.get(code_index).?);
// for each atom, we calculate the uleb size and append that
var size_offset: u32 = 5; // account for code section size leb128
while (true) {
@@ -4060,7 +4083,7 @@ fn emitCodeRelocations(
}
log.debug("Emit relocation: {}", .{relocation});
}
- atom = atom.next orelse break;
+ atom = if (atom.prev) |prev| wasm.getAtomPtr(prev) else break;
}
if (count == 0) return;
var buf: [5]u8 = undefined;
@@ -4091,7 +4114,7 @@ fn emitDataRelocations(
// for each atom, we calculate the uleb size and append that
var size_offset: u32 = 5; // account for code section size leb128
for (wasm.data_segments.values()) |segment_index| {
- var atom: *Atom = wasm.atoms.get(segment_index).?.getFirst();
+ var atom: *Atom = wasm.getAtomPtr(wasm.atoms.get(segment_index).?);
while (true) {
size_offset += getULEB128Size(atom.size);
for (atom.relocs.items) |relocation| {
@@ -4110,7 +4133,7 @@ fn emitDataRelocations(
}
log.debug("Emit relocation: {}", .{relocation});
}
- atom = atom.next orelse break;
+ atom = if (atom.prev) |prev| wasm.getAtomPtr(prev) else break;
}
}
if (count == 0) return;
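
The hunks above all apply the same refactor: atoms are now referenced by an
`Atom.Index` into the linker-owned `managed_atoms` list instead of by `*Atom`,
and each segment's chain is walked from the head stored in `wasm.atoms` by
following `prev` links through `getAtomPtr`. A minimal sketch of that
traversal pattern, assuming it sits inside Wasm.zig where `Atom` and the
fields used here are already in scope (the helper name `walkSegmentAtoms` is
hypothetical):

fn walkSegmentAtoms(wasm: *Wasm, segment_index: u32) void {
    // The map stores the head of each chain as an index, not a pointer.
    var atom_index: Atom.Index = wasm.atoms.get(segment_index).?;
    while (true) {
        const atom = wasm.getAtomPtr(atom_index);
        // ... visit atom.code.items and atom.relocs.items here ...
        // Chains are linked back to front, so traversal follows `prev`.
        atom_index = atom.prev orelse break;
    }
}

Unlike a stored `*Atom`, the index stays valid when `managed_atoms` grows and
reallocates, which is what makes the pointer-free bookkeeping above safe.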
diff --git a/src/link/Wasm/Atom.zig b/src/link/Wasm/Atom.zig
index 20f847e475..e719f8dfcc 100644
--- a/src/link/Wasm/Atom.zig
+++ b/src/link/Wasm/Atom.zig
@@ -4,7 +4,6 @@ const std = @import("std");
const types = @import("types.zig");
const Wasm = @import("../Wasm.zig");
const Symbol = @import("Symbol.zig");
-const Dwarf = @import("../Dwarf.zig");
const leb = std.leb;
const log = std.log.scoped(.link);
@@ -30,17 +29,17 @@ file: ?u16,
/// Next atom in relation to this atom.
/// When null, this atom is the last atom
-next: ?*Atom,
+next: ?Atom.Index,
/// Previous atom in relation to this atom.
/// When null, this atom is the first in its order.
-prev: ?*Atom,
+prev: ?Atom.Index,
/// Contains atoms local to a decl, all managed by this `Atom`.
/// When the parent atom is being freed, it will also do so for all local atoms.
-locals: std.ArrayListUnmanaged(Atom) = .{},
+locals: std.ArrayListUnmanaged(Atom.Index) = .{},
-/// Represents the debug Atom that holds all debug information of this Atom.
-dbg_info_atom: Dwarf.Atom,
+/// Index into the linker's list of managed atoms; an alias for an unsigned 32-bit integer.
+pub const Index = u32;
/// Represents a default empty wasm `Atom`
pub const empty: Atom = .{
@@ -51,18 +50,15 @@ pub const empty: Atom = .{
.prev = null,
.size = 0,
.sym_index = 0,
- .dbg_info_atom = undefined,
};
/// Frees all resources owned by this `Atom`.
-pub fn deinit(atom: *Atom, gpa: Allocator) void {
+pub fn deinit(atom: *Atom, wasm: *Wasm) void {
+ const gpa = wasm.base.allocator;
atom.relocs.deinit(gpa);
atom.code.deinit(gpa);
-
- for (atom.locals.items) |*local| {
- local.deinit(gpa);
- }
atom.locals.deinit(gpa);
+ atom.* = undefined;
}
/// Sets the length of relocations and code to '0',
@@ -83,24 +79,11 @@ pub fn format(atom: Atom, comptime fmt: []const u8, options: std.fmt.FormatOptio
});
}
-/// Returns the first `Atom` from a given atom
-pub fn getFirst(atom: *Atom) *Atom {
- var tmp = atom;
- while (tmp.prev) |prev| tmp = prev;
- return tmp;
-}
-
/// Returns the location of the symbol that represents this `Atom`
pub fn symbolLoc(atom: Atom) Wasm.SymbolLoc {
return .{ .file = atom.file, .index = atom.sym_index };
}
-pub fn ensureInitialized(atom: *Atom, wasm_bin: *Wasm) !void {
- if (atom.getSymbolIndex() != null) return; // already initialized
- atom.sym_index = try wasm_bin.allocateSymbol();
- try wasm_bin.symbol_atom.putNoClobber(wasm_bin.base.allocator, atom.symbolLoc(), atom);
-}
-
pub fn getSymbolIndex(atom: Atom) ?u32 {
if (atom.sym_index == 0) return null;
return atom.sym_index;
@@ -203,20 +186,28 @@ fn relocationValue(atom: Atom, relocation: types.Relocation, wasm_bin: *const Wa
if (symbol.isUndefined()) {
return 0;
}
- const target_atom = wasm_bin.symbol_atom.get(target_loc).?;
+ const target_atom_index = wasm_bin.symbol_atom.get(target_loc) orelse {
+ // This can only occur during incremental compilation when a relocation
+ // still points to a freed decl. It is fine to emit the value 0 here,
+ // as no actual code will reference it.
+ return 0;
+ };
+ const target_atom = wasm_bin.getAtom(target_atom_index);
const va = @intCast(i32, target_atom.getVA(wasm_bin, symbol));
return @intCast(u32, va + relocation.addend);
},
.R_WASM_EVENT_INDEX_LEB => return symbol.index,
.R_WASM_SECTION_OFFSET_I32 => {
- const target_atom = wasm_bin.symbol_atom.get(target_loc).?;
+ const target_atom_index = wasm_bin.symbol_atom.get(target_loc).?;
+ const target_atom = wasm_bin.getAtom(target_atom_index);
const rel_value = @intCast(i32, target_atom.offset) + relocation.addend;
return @intCast(u32, rel_value);
},
.R_WASM_FUNCTION_OFFSET_I32 => {
- const target_atom = wasm_bin.symbol_atom.get(target_loc) orelse {
+ const target_atom_index = wasm_bin.symbol_atom.get(target_loc) orelse {
return @bitCast(u32, @as(i32, -1));
};
+ const target_atom = wasm_bin.getAtom(target_atom_index);
const offset: u32 = 11 + Wasm.getULEB128Size(target_atom.size); // Header (11 bytes fixed-size) + body size (leb-encoded)
const rel_value = @intCast(i32, target_atom.offset + offset) + relocation.addend;
return @intCast(u32, rel_value);
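
The `getAtom`/`getAtomPtr` accessors this file now calls are defined in
Wasm.zig and are not part of this excerpt; under the assumption that
`managed_atoms` is the `std.ArrayListUnmanaged(Atom)` backing store implied
by the Object.zig hunk below, they would reduce to a sketch like:

pub fn getAtom(wasm: *const Wasm, index: Atom.Index) Atom {
    return wasm.managed_atoms.items[index];
}

pub fn getAtomPtr(wasm: *Wasm, index: Atom.Index) *Atom {
    // Only valid until managed_atoms reallocates; hold the index, not this pointer.
    return &wasm.managed_atoms.items[index];
}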
diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig
index 8f49d68712..7d4f6a4e36 100644
--- a/src/link/Wasm/Object.zig
+++ b/src/link/Wasm/Object.zig
@@ -901,14 +901,9 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
continue; // found an unknown section, so skip parsing it into an atom as we do not know how to handle it.
};
- const atom = try gpa.create(Atom);
+ const atom_index = @intCast(Atom.Index, wasm_bin.managed_atoms.items.len);
+ const atom = try wasm_bin.managed_atoms.addOne(gpa);
atom.* = Atom.empty;
- errdefer {
- atom.deinit(gpa);
- gpa.destroy(atom);
- }
-
- try wasm_bin.managed_atoms.append(gpa, atom);
atom.file = object_index;
atom.size = relocatable_data.size;
atom.alignment = relocatable_data.getAlignment(object);
@@ -938,12 +933,12 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
.index = relocatable_data.getIndex(),
})) |symbols| {
atom.sym_index = symbols.pop();
- try wasm_bin.symbol_atom.putNoClobber(gpa, atom.symbolLoc(), atom);
+ try wasm_bin.symbol_atom.putNoClobber(gpa, atom.symbolLoc(), atom_index);
// symbols referencing the same atom will be added as aliases,
// or as 'parent' when they are global.
while (symbols.popOrNull()) |idx| {
- try wasm_bin.symbol_atom.putNoClobber(gpa, .{ .file = atom.file, .index = idx }, atom);
+ try wasm_bin.symbol_atom.putNoClobber(gpa, .{ .file = atom.file, .index = idx }, atom_index);
const alias_symbol = object.symtable[idx];
if (alias_symbol.isGlobal()) {
atom.sym_index = idx;
@@ -956,7 +951,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
segment.alignment = std.math.max(segment.alignment, atom.alignment);
}
- try wasm_bin.appendAtomAtIndex(final_index, atom);
+ try wasm_bin.appendAtomAtIndex(final_index, atom_index);
log.debug("Parsed into atom: '{s}' at segment index {d}", .{ object.string_table.get(object.symtable[atom.sym_index].name), final_index });
}
}
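
The parseIntoAtoms change above captures the new atom's index before any
further appends can move the backing memory. A hedged distillation of that
allocation pattern (the `createAtom` helper is hypothetical; `managed_atoms`
and `Atom.empty` come from this diff):

fn createAtom(wasm_bin: *Wasm, gpa: std.mem.Allocator) !Atom.Index {
    const atom_index = @intCast(Atom.Index, wasm_bin.managed_atoms.items.len);
    const atom = try wasm_bin.managed_atoms.addOne(gpa);
    atom.* = Atom.empty;
    // Return the stable index; `atom` itself may be invalidated by the next append.
    return atom_index;
}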