author    Jakub Konka <kubkon@jakubkonka.com>  2023-01-30 18:22:50 +0100
committer Jakub Konka <kubkon@jakubkonka.com>  2023-01-31 00:43:25 +0100
commit    d42a93105142e3e8f1d02efeecc0c0e52457a5d9 (patch)
tree      2ea425b3a763ce455bcc3c7a36416d0a26e26f4b /src/link
parent    23b7d28896609e3f01765730599119baf53a56c9 (diff)
link: make MachO atoms fully owned by the linker
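
This commit migrates MachO atoms from individually heap-allocated *Atom
pointers (tracked in managed_atoms and destroyed one by one in deinit) to a
single linker-owned std.ArrayListUnmanaged(Atom), with every reference
expressed as a stable Atom.Index (a u32). What follows is a minimal,
self-contained sketch of that ownership pattern, not code from the commit:
createAtom mirrors the function added in MachO.zig below, while the test
harness is illustrative only. The point is that the backing list may
reallocate as it grows, so an index stays valid where a raw pointer would
dangle.

    const std = @import("std");

    const Atom = struct {
        size: u64 = 0,
    };

    /// Stable handle into the linker-owned atom list.
    const Index = u32;

    fn createAtom(gpa: std.mem.Allocator, atoms: *std.ArrayListUnmanaged(Atom)) !Index {
        const index = @intCast(Index, atoms.items.len);
        const atom = try atoms.addOne(gpa);
        atom.* = .{};
        return index;
    }

    test "indices survive list growth where pointers would dangle" {
        const gpa = std.testing.allocator;
        var atoms: std.ArrayListUnmanaged(Atom) = .{};
        defer atoms.deinit(gpa);

        const first = try createAtom(gpa, &atoms);

        // Grow far enough to force reallocation of the backing buffer;
        // any *Atom captured before this point could now be dangling.
        var i: usize = 0;
        while (i < 1000) : (i += 1) {
            _ = try createAtom(gpa, &atoms);
        }

        atoms.items[first].size = 8; // still valid via the stable index
        try std.testing.expectEqual(@as(u64, 8), atoms.items[first].size);
    }

The same reasoning shows up in deinit below: instead of destroying each
managed atom individually, the whole list is freed with self.atoms.deinit(gpa).
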
Diffstat (limited to 'src/link')
 src/link/Dwarf.zig            |   2
 src/link/MachO.zig            | 719
 src/link/MachO/Atom.zig       |  81
 src/link/MachO/Relocation.zig |  16
 4 files changed, 430 insertions(+), 388 deletions(-)
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 1b65bbb04b..2595cd8ba5 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -2639,7 +2639,7 @@ fn getDbgInfoAtom(tag: File.Tag, mod: *Module, decl_index: Module.Decl.Index) *A
const decl = mod.declPtr(decl_index);
return switch (tag) {
.elf => &decl.link.elf.dbg_info_atom,
- .macho => &decl.link.macho.dbg_info_atom,
+ .macho => unreachable,
.wasm => &decl.link.wasm.dbg_info_atom,
else => unreachable,
};
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 543cb473d7..29aed25b31 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -66,7 +66,7 @@ const Section = struct {
// TODO is null here necessary, or can we do away with tracking via section
// size in incremental context?
- last_atom: ?*Atom = null,
+ last_atom_index: ?Atom.Index = null,
/// A list of atoms that have surplus capacity. This list can have false
/// positives, as functions grow and shrink over time, only sometimes being added
@@ -83,7 +83,7 @@ const Section = struct {
/// overcapacity can be negative. A simple way to have negative overcapacity is to
/// allocate a fresh atom, which will have ideal capacity, and then grow it
/// by 1 byte. It will then have -1 overcapacity.
- free_list: std.ArrayListUnmanaged(*Atom) = .{},
+ free_list: std.ArrayListUnmanaged(Atom.Index) = .{},
};
base: File,
@@ -140,8 +140,8 @@ locals_free_list: std.ArrayListUnmanaged(u32) = .{},
globals_free_list: std.ArrayListUnmanaged(u32) = .{},
dyld_stub_binder_index: ?u32 = null,
-dyld_private_atom: ?*Atom = null,
-stub_helper_preamble_atom: ?*Atom = null,
+dyld_private_atom_index: ?Atom.Index = null,
+stub_helper_preamble_atom_index: ?Atom.Index = null,
strtab: StringTable(.strtab) = .{},
@@ -164,10 +164,10 @@ segment_table_dirty: bool = false,
cold_start: bool = true,
/// List of atoms that are either synthetic or map directly to the Zig source program.
-managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
+atoms: std.ArrayListUnmanaged(Atom) = .{},
/// Table of atoms indexed by the symbol index.
-atom_by_index_table: std.AutoHashMapUnmanaged(u32, *Atom) = .{},
+atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},
/// Table of unnamed constants associated with a parent `Decl`.
/// We store them here so that we can free the constants whenever the `Decl`
@@ -210,11 +210,36 @@ bindings: BindingTable = .{},
/// this will be a table indexed by index into the list of Atoms.
lazy_bindings: BindingTable = .{},
-/// Table of Decls that are currently alive.
-/// We store them here so that we can properly dispose of any allocated
-/// memory within the atom in the incremental linker.
-/// TODO consolidate this.
-decls: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, ?u8) = .{},
+/// Table of tracked Decls.
+decls: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
+
+const DeclMetadata = struct {
+ atom: Atom.Index,
+ section: u8,
+ /// A list of all exports aliases of this Decl.
+ /// TODO do we actually need this at all?
+ exports: std.ArrayListUnmanaged(u32) = .{},
+
+ fn getExport(m: DeclMetadata, macho_file: *const MachO, name: []const u8) ?u32 {
+ for (m.exports.items) |exp| {
+ if (mem.eql(u8, name, macho_file.getSymbolName(.{
+ .sym_index = exp,
+ .file = null,
+ }))) return exp;
+ }
+ return null;
+ }
+
+ fn getExportPtr(m: *DeclMetadata, macho_file: *MachO, name: []const u8) ?*u32 {
+ for (m.exports.items) |*exp| {
+ if (mem.eql(u8, name, macho_file.getSymbolName(.{
+ .sym_index = exp.*,
+ .file = null,
+ }))) return exp;
+ }
+ return null;
+ }
+};
const Entry = struct {
target: SymbolWithLoc,
@@ -229,8 +254,8 @@ const Entry = struct {
return macho_file.getSymbolPtr(.{ .sym_index = entry.sym_index, .file = null });
}
- pub fn getAtom(entry: Entry, macho_file: *MachO) ?*Atom {
- return macho_file.getAtomForSymbol(.{ .sym_index = entry.sym_index, .file = null });
+ pub fn getAtomIndex(entry: Entry, macho_file: *MachO) ?Atom.Index {
+ return macho_file.getAtomIndexForSymbol(.{ .sym_index = entry.sym_index, .file = null });
}
pub fn getName(entry: Entry, macho_file: *MachO) []const u8 {
@@ -238,10 +263,10 @@ const Entry = struct {
}
};
-const BindingTable = std.AutoArrayHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(Atom.Binding));
-const UnnamedConstTable = std.AutoArrayHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(*Atom));
-const RebaseTable = std.AutoArrayHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(u32));
-const RelocationTable = std.AutoArrayHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(Relocation));
+const BindingTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Atom.Binding));
+const UnnamedConstTable = std.AutoArrayHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Atom.Index));
+const RebaseTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(u32));
+const RelocationTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Relocation));
const PendingUpdate = union(enum) {
resolve_undef: u32,
@@ -286,10 +311,6 @@ pub const default_pagezero_vmsize: u64 = 0x100000000;
/// potential future extensions.
pub const default_headerpad_size: u32 = 0x1000;
-pub const Export = struct {
- sym_index: ?u32 = null,
-};
-
pub fn openPath(allocator: Allocator, options: link.Options) !*MachO {
assert(options.target.ofmt == .macho);
@@ -451,9 +472,9 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
const module = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented;
- if (self.d_sym) |*d_sym| {
- try d_sym.dwarf.flushModule(module);
- }
+ // if (self.d_sym) |*d_sym| {
+ // try d_sym.dwarf.flushModule(module);
+ // }
var libs = std.StringArrayHashMap(link.SystemLib).init(arena);
try resolveLibSystem(
@@ -547,8 +568,8 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
try self.allocateSpecialSymbols();
- for (self.relocs.keys()) |atom| {
- try atom.resolveRelocations(self);
+ for (self.relocs.keys()) |atom_index| {
+ try Atom.resolveRelocations(self, atom_index);
}
if (build_options.enable_logging) {
@@ -643,10 +664,10 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
try self.writeCodeSignature(comp, csig); // code signing always comes last
}
- if (self.d_sym) |*d_sym| {
- // Flush debug symbols bundle.
- try d_sym.flushModule(self);
- }
+ // if (self.d_sym) |*d_sym| {
+ // // Flush debug symbols bundle.
+ // try d_sym.flushModule(self);
+ // }
// if (build_options.enable_link_snapshots) {
// if (self.base.options.enable_link_snapshots)
@@ -999,18 +1020,19 @@ pub fn parseDependentLibs(self: *MachO, syslibroot: ?[]const u8, dependent_libs:
}
}
-pub fn writeAtom(self: *MachO, atom: *Atom, code: []const u8) !void {
+pub fn writeAtom(self: *MachO, atom_index: Atom.Index, code: []const u8) !void {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const section = self.sections.get(sym.n_sect - 1);
const file_offset = section.header.offset + sym.n_value - section.header.addr;
log.debug("writing atom for symbol {s} at file offset 0x{x}", .{ atom.getName(self), file_offset });
try self.base.file.?.pwriteAll(code, file_offset);
- try atom.resolveRelocations(self);
+ try Atom.resolveRelocations(self, atom_index);
}
-fn writePtrWidthAtom(self: *MachO, atom: *Atom) !void {
+fn writePtrWidthAtom(self: *MachO, atom_index: Atom.Index) !void {
var buffer: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64);
- try self.writeAtom(atom, &buffer);
+ try self.writeAtom(atom_index, &buffer);
}
fn markRelocsDirtyByTarget(self: *MachO, target: SymbolWithLoc) void {
@@ -1026,7 +1048,8 @@ fn markRelocsDirtyByTarget(self: *MachO, target: SymbolWithLoc) void {
fn markRelocsDirtyByAddress(self: *MachO, addr: u64) void {
for (self.relocs.values()) |*relocs| {
for (relocs.items) |*reloc| {
- const target_atom = reloc.getTargetAtom(self) orelse continue;
+ const target_atom_index = reloc.getTargetAtomIndex(self) orelse continue;
+ const target_atom = self.getAtom(target_atom_index);
const target_sym = target_atom.getSymbol(self);
if (target_sym.n_value < addr) continue;
reloc.dirty = true;
@@ -1053,26 +1076,39 @@ pub fn allocateSpecialSymbols(self: *MachO) !void {
}
}
-pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !*Atom {
+pub fn createAtom(self: *MachO) !Atom.Index {
const gpa = self.base.allocator;
+ const atom_index = @intCast(Atom.Index, self.atoms.items.len);
+ const atom = try self.atoms.addOne(gpa);
+ const sym_index = try self.allocateSymbol();
+ try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
+ atom.* = .{
+ .sym_index = sym_index,
+ .file = null,
+ .size = 0,
+ .alignment = 0,
+ .prev_index = null,
+ .next_index = null,
+ .dbg_info_atom = undefined,
+ };
+ log.debug("creating ATOM(%{d}) at index {d}", .{ sym_index, atom_index });
+ return atom_index;
+}
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !Atom.Index {
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
- errdefer gpa.destroy(atom);
-
- try self.managed_atoms.append(gpa, atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
sym.n_sect = self.got_section_index.? + 1;
- sym.n_value = try self.allocateAtom(atom, atom.size, @alignOf(u64));
+ sym.n_value = try self.allocateAtom(atom_index, atom.size, @alignOf(u64));
log.debug("allocated GOT atom at 0x{x}", .{sym.n_value});
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = switch (self.base.options.target.cpu.arch) {
.aarch64 => @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED),
.x86_64 => @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED),
@@ -1087,45 +1123,39 @@ pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !*Atom {
const target_sym = self.getSymbol(target);
if (target_sym.undf()) {
- try atom.addBinding(self, .{
+ try Atom.addBinding(self, atom_index, .{
.target = self.getGlobal(self.getSymbolName(target)).?,
.offset = 0,
});
} else {
- try atom.addRebase(self, 0);
+ try Atom.addRebase(self, atom_index, 0);
}
- return atom;
+ return atom_index;
}
pub fn createDyldPrivateAtom(self: *MachO) !void {
if (self.dyld_stub_binder_index == null) return;
- if (self.dyld_private_atom != null) return;
+ if (self.dyld_private_atom_index != null) return;
- const gpa = self.base.allocator;
-
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
sym.n_sect = self.data_section_index.? + 1;
- self.dyld_private_atom = atom;
-
- try self.managed_atoms.append(gpa, atom);
+ self.dyld_private_atom_index = atom_index;
- sym.n_value = try self.allocateAtom(atom, atom.size, @alignOf(u64));
+ sym.n_value = try self.allocateAtom(atom_index, atom.size, @alignOf(u64));
log.debug("allocated dyld_private atom at 0x{x}", .{sym.n_value});
- try self.writePtrWidthAtom(atom);
+ try self.writePtrWidthAtom(atom_index);
}
pub fn createStubHelperPreambleAtom(self: *MachO) !void {
if (self.dyld_stub_binder_index == null) return;
- if (self.stub_helper_preamble_atom != null) return;
+ if (self.stub_helper_preamble_atom_index != null) return;
const gpa = self.base.allocator;
const arch = self.base.options.target.cpu.arch;
@@ -1134,22 +1164,23 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
.aarch64 => 6 * @sizeOf(u32),
else => unreachable,
};
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = size;
atom.alignment = switch (arch) {
.x86_64 => 1,
.aarch64 => @alignOf(u32),
else => unreachable,
};
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
sym.n_sect = self.stub_helper_section_index.? + 1;
- const dyld_private_sym_index = self.dyld_private_atom.?.getSymbolIndex().?;
+ const dyld_private_sym_index = if (self.dyld_private_atom_index) |dyld_index|
+ self.getAtom(dyld_index).getSymbolIndex().?
+ else
+ unreachable;
const code = try gpa.alloc(u8, size);
defer gpa.free(code);
@@ -1168,7 +1199,7 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
code[9] = 0xff;
code[10] = 0x25;
- try atom.addRelocations(self, 2, .{ .{
+ try Atom.addRelocations(self, atom_index, 2, .{ .{
.type = @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
.target = .{ .sym_index = dyld_private_sym_index, .file = null },
.offset = 3,
@@ -1208,7 +1239,7 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
// br x16
mem.writeIntLittle(u32, code[20..][0..4], aarch64.Instruction.br(.x16).toU32());
- try atom.addRelocations(self, 4, .{ .{
+ try Atom.addRelocations(self, atom_index, 4, .{ .{
.type = @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_PAGE21),
.target = .{ .sym_index = dyld_private_sym_index, .file = null },
.offset = 0,
@@ -1241,16 +1272,14 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
else => unreachable,
}
- self.stub_helper_preamble_atom = atom;
+ self.stub_helper_preamble_atom_index = atom_index;
- try self.managed_atoms.append(gpa, atom);
-
- sym.n_value = try self.allocateAtom(atom, size, atom.alignment);
+ sym.n_value = try self.allocateAtom(atom_index, size, atom.alignment);
log.debug("allocated stub preamble atom at 0x{x}", .{sym.n_value});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
}
-pub fn createStubHelperAtom(self: *MachO) !*Atom {
+pub fn createStubHelperAtom(self: *MachO) !Atom.Index {
const gpa = self.base.allocator;
const arch = self.base.options.target.cpu.arch;
const size: u4 = switch (arch) {
@@ -1258,16 +1287,14 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
.aarch64 => 3 * @sizeOf(u32),
else => unreachable,
};
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = size;
atom.alignment = switch (arch) {
.x86_64 => 1,
.aarch64 => @alignOf(u32),
else => unreachable,
};
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
@@ -1277,6 +1304,11 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
defer gpa.free(code);
mem.set(u8, code, 0);
+ const stub_helper_preamble_atom_sym_index = if (self.stub_helper_preamble_atom_index) |stub_index|
+ self.getAtom(stub_index).getSymbolIndex().?
+ else
+ unreachable;
+
switch (arch) {
.x86_64 => {
// pushq
@@ -1285,9 +1317,9 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
// jmpq
code[5] = 0xe9;
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
- .target = .{ .sym_index = self.stub_helper_preamble_atom.?.getSymbolIndex().?, .file = null },
+ .target = .{ .sym_index = stub_helper_preamble_atom_sym_index, .file = null },
.offset = 6,
.addend = 0,
.pcrel = true,
@@ -1308,9 +1340,9 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.b(0).toU32());
// Next 4 bytes 8..12 are just a placeholder populated in `populateLazyBindOffsetsInStubHelper`.
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_BRANCH26),
- .target = .{ .sym_index = self.stub_helper_preamble_atom.?.getSymbolIndex().?, .file = null },
+ .target = .{ .sym_index = stub_helper_preamble_atom_sym_index, .file = null },
.offset = 4,
.addend = 0,
.pcrel = true,
@@ -1320,29 +1352,24 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
else => unreachable,
}
- try self.managed_atoms.append(gpa, atom);
-
- sym.n_value = try self.allocateAtom(atom, size, atom.alignment);
+ sym.n_value = try self.allocateAtom(atom_index, size, atom.alignment);
log.debug("allocated stub helper atom at 0x{x}", .{sym.n_value});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
- return atom;
+ return atom_index;
}
-pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWithLoc) !*Atom {
- const gpa = self.base.allocator;
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWithLoc) !Atom.Index {
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
sym.n_sect = self.la_symbol_ptr_section_index.? + 1;
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = switch (self.base.options.target.cpu.arch) {
.aarch64 => @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED),
.x86_64 => @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED),
@@ -1354,22 +1381,20 @@ pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWi
.pcrel = false,
.length = 3,
});
- try atom.addRebase(self, 0);
- try atom.addLazyBinding(self, .{
+ try Atom.addRebase(self, atom_index, 0);
+ try Atom.addLazyBinding(self, atom_index, .{
.target = self.getGlobal(self.getSymbolName(target)).?,
.offset = 0,
});
- try self.managed_atoms.append(gpa, atom);
-
- sym.n_value = try self.allocateAtom(atom, atom.size, @alignOf(u64));
+ sym.n_value = try self.allocateAtom(atom_index, atom.size, @alignOf(u64));
log.debug("allocated lazy pointer atom at 0x{x} ({s})", .{ sym.n_value, self.getSymbolName(target) });
- try self.writePtrWidthAtom(atom);
+ try self.writePtrWidthAtom(atom_index);
- return atom;
+ return atom_index;
}
-pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
+pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !Atom.Index {
const gpa = self.base.allocator;
const arch = self.base.options.target.cpu.arch;
const size: u4 = switch (arch) {
@@ -1377,9 +1402,8 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
.aarch64 => 3 * @sizeOf(u32),
else => unreachable, // unhandled architecture type
};
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = size;
atom.alignment = switch (arch) {
.x86_64 => 1,
@@ -1387,7 +1411,6 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
else => unreachable, // unhandled architecture type
};
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
@@ -1403,7 +1426,7 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
code[0] = 0xff;
code[1] = 0x25;
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
.target = .{ .sym_index = laptr_sym_index, .file = null },
.offset = 2,
@@ -1424,7 +1447,7 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
// br x16
mem.writeIntLittle(u32, code[8..12], aarch64.Instruction.br(.x16).toU32());
- try atom.addRelocations(self, 2, .{
+ try Atom.addRelocations(self, atom_index, 2, .{
.{
.type = @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_PAGE21),
.target = .{ .sym_index = laptr_sym_index, .file = null },
@@ -1446,13 +1469,11 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
else => unreachable,
}
- try self.managed_atoms.append(gpa, atom);
-
- sym.n_value = try self.allocateAtom(atom, size, atom.alignment);
+ sym.n_value = try self.allocateAtom(atom_index, size, atom.alignment);
log.debug("allocated stub atom at 0x{x}", .{sym.n_value});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
- return atom;
+ return atom_index;
}
pub fn createMhExecuteHeaderSymbol(self: *MachO) !void {
@@ -1586,9 +1607,12 @@ pub fn resolveSymbolsInDylibs(self: *MachO) !void {
if (self.stubs_table.contains(global)) break :blk;
const stub_index = try self.allocateStubEntry(global);
- const stub_helper_atom = try self.createStubHelperAtom();
- const laptr_atom = try self.createLazyPointerAtom(stub_helper_atom.getSymbolIndex().?, global);
- const stub_atom = try self.createStubAtom(laptr_atom.getSymbolIndex().?);
+ const stub_helper_atom_index = try self.createStubHelperAtom();
+ const stub_helper_atom = self.getAtom(stub_helper_atom_index);
+ const laptr_atom_index = try self.createLazyPointerAtom(stub_helper_atom.getSymbolIndex().?, global);
+ const laptr_atom = self.getAtom(laptr_atom_index);
+ const stub_atom_index = try self.createStubAtom(laptr_atom.getSymbolIndex().?);
+ const stub_atom = self.getAtom(stub_atom_index);
self.stubs.items[stub_index].sym_index = stub_atom.getSymbolIndex().?;
self.markRelocsDirtyByTarget(global);
}
@@ -1686,10 +1710,11 @@ pub fn resolveDyldStubBinder(self: *MachO) !void {
// Add dyld_stub_binder as the final GOT entry.
const got_index = try self.allocateGotEntry(global);
- const got_atom = try self.createGotAtom(global);
+ const got_atom_index = try self.createGotAtom(global);
+ const got_atom = self.getAtom(got_atom_index);
self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
- try self.writePtrWidthAtom(got_atom);
+ try self.writePtrWidthAtom(got_atom_index);
}
pub fn deinit(self: *MachO) void {
@@ -1699,9 +1724,9 @@ pub fn deinit(self: *MachO) void {
if (self.llvm_object) |llvm_object| llvm_object.destroy(gpa);
}
- if (self.d_sym) |*d_sym| {
- d_sym.deinit();
- }
+ // if (self.d_sym) |*d_sym| {
+ // d_sym.deinit();
+ // }
self.got_entries.deinit(gpa);
self.got_entries_free_list.deinit(gpa);
@@ -1739,12 +1764,12 @@ pub fn deinit(self: *MachO) void {
}
self.sections.deinit(gpa);
- for (self.managed_atoms.items) |atom| {
- gpa.destroy(atom);
- }
- self.managed_atoms.deinit(gpa);
+ self.atoms.deinit(gpa);
if (self.base.options.module) |_| {
+ for (self.decls.values()) |*m| {
+ m.exports.deinit(gpa);
+ }
self.decls.deinit(gpa);
} else {
assert(self.decls.count() == 0);
@@ -1778,14 +1803,15 @@ pub fn deinit(self: *MachO) void {
self.lazy_bindings.deinit(gpa);
}
-fn freeAtom(self: *MachO, atom: *Atom) void {
- log.debug("freeAtom {*}", .{atom});
+fn freeAtom(self: *MachO, atom_index: Atom.Index) void {
+ log.debug("freeAtom {d}", .{atom_index});
const gpa = self.base.allocator;
// Remove any relocs and base relocs associated with this Atom
- self.freeRelocationsForAtom(atom);
+ Atom.freeRelocations(self, atom_index);
+ const atom = self.getAtom(atom_index);
const sect_id = atom.getSymbol(self).n_sect - 1;
const free_list = &self.sections.items(.free_list)[sect_id];
var already_have_free_list_node = false;
@@ -1793,45 +1819,46 @@ fn freeAtom(self: *MachO, atom: *Atom) void {
var i: usize = 0;
// TODO turn free_list into a hash map
while (i < free_list.items.len) {
- if (free_list.items[i] == atom) {
+ if (free_list.items[i] == atom_index) {
_ = free_list.swapRemove(i);
continue;
}
- if (free_list.items[i] == atom.prev) {
+ if (free_list.items[i] == atom.prev_index) {
already_have_free_list_node = true;
}
i += 1;
}
}
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
- if (maybe_last_atom.*) |last_atom| {
- if (last_atom == atom) {
- if (atom.prev) |prev| {
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
+ if (maybe_last_atom_index.*) |last_atom_index| {
+ if (last_atom_index == atom_index) {
+ if (atom.prev_index) |prev_index| {
// TODO shrink the section size here
- maybe_last_atom.* = prev;
+ maybe_last_atom_index.* = prev_index;
} else {
- maybe_last_atom.* = null;
+ maybe_last_atom_index.* = null;
}
}
}
- if (atom.prev) |prev| {
- prev.next = atom.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
- if (!already_have_free_list_node and prev.freeListEligible(self)) {
+ if (!already_have_free_list_node and prev.*.freeListEligible(self)) {
// The free list is heuristics, it doesn't have to be perfect, so we can ignore
// the OOM here.
- free_list.append(gpa, prev) catch {};
+ free_list.append(gpa, prev_index) catch {};
}
} else {
- atom.prev = null;
+ self.getAtomPtr(atom_index).prev_index = null;
}
- if (atom.next) |next| {
- next.prev = atom.prev;
+ if (atom.next_index) |next_index| {
+ self.getAtomPtr(next_index).prev_index = atom.prev_index;
} else {
- atom.next = null;
+ self.getAtomPtr(atom_index).next_index = null;
}
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
@@ -1849,9 +1876,9 @@ fn freeAtom(self: *MachO, atom: *Atom) void {
};
_ = self.got_entries_table.remove(got_target);
- if (self.d_sym) |*d_sym| {
- d_sym.swapRemoveRelocs(sym_index);
- }
+ // if (self.d_sym) |*d_sym| {
+ // d_sym.swapRemoveRelocs(sym_index);
+ // }
log.debug(" adding GOT index {d} to free list (target local@{d})", .{ got_index, sym_index });
}
@@ -1859,27 +1886,28 @@ fn freeAtom(self: *MachO, atom: *Atom) void {
self.locals.items[sym_index].n_type = 0;
_ = self.atom_by_index_table.remove(sym_index);
log.debug(" adding local symbol index {d} to free list", .{sym_index});
- atom.sym_index = 0;
+ self.getAtomPtr(atom_index).sym_index = 0;
- if (self.d_sym) |*d_sym| {
- d_sym.dwarf.freeAtom(&atom.dbg_info_atom);
- }
+ // if (self.d_sym) |*d_sym| {
+ // d_sym.dwarf.freeAtom(&atom.dbg_info_atom);
+ // }
}
-fn shrinkAtom(self: *MachO, atom: *Atom, new_block_size: u64) void {
+fn shrinkAtom(self: *MachO, atom_index: Atom.Index, new_block_size: u64) void {
_ = self;
- _ = atom;
+ _ = atom_index;
_ = new_block_size;
// TODO check the new capacity, and if it crosses the size threshold into a big enough
// capacity, insert a free list node for it.
}
-fn growAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !u64 {
+fn growAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const align_ok = mem.alignBackwardGeneric(u64, sym.n_value, alignment) == sym.n_value;
const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
if (!need_realloc) return sym.n_value;
- return self.allocateAtom(atom, new_atom_size, alignment);
+ return self.allocateAtom(atom_index, new_atom_size, alignment);
}
pub fn allocateSymbol(self: *MachO) !u32 {
@@ -1986,31 +2014,29 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
- const atom = &decl.link.macho;
- try atom.ensureInitialized(self);
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (gop.found_existing) {
- self.freeUnnamedConsts(decl_index);
- self.freeRelocationsForAtom(atom);
- } else {
- gop.value_ptr.* = null;
- }
+
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ self.freeUnnamedConsts(decl_index);
+ Atom.freeRelocations(self, atom_index);
+
+ const atom = self.getAtom(atom_index);
+ _ = atom;
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
- var decl_state = if (self.d_sym) |*d_sym|
- try d_sym.dwarf.initDeclState(module, decl_index)
- else
- null;
- defer if (decl_state) |*ds| ds.deinit();
+ // var decl_state = if (self.d_sym) |*d_sym|
+ // try d_sym.dwarf.initDeclState(module, decl_index)
+ // else
+ // null;
+ // defer if (decl_state) |*ds| ds.deinit();
- const res = if (decl_state) |*ds|
- try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{
- .dwarf = ds,
- })
- else
- try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none);
+ // const res = if (decl_state) |*ds|
+ // try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{
+ // .dwarf = ds,
+ // })
+ // else
+ const res = try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none);
const code = switch (res) {
.ok => code_buffer.items,
@@ -2022,16 +2048,11 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
};
const addr = try self.updateDeclCode(decl_index, code);
+ _ = addr;
- if (decl_state) |*ds| {
- try self.d_sym.?.dwarf.commitDeclState(
- module,
- decl_index,
- addr,
- decl.link.macho.size,
- ds,
- );
- }
+ // if (decl_state) |*ds| {
+ // try self.d_sym.?.dwarf.commitDeclState(module, decl_index, addr, atom.size, ds);
+ // }
// Since we updated the vaddr and the size, each corresponding export symbol also
// needs to be updated.
@@ -2065,11 +2086,8 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu
log.debug("allocating symbol indexes for {?s}", .{name});
- const atom = try gpa.create(Atom);
- errdefer gpa.destroy(atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
- try self.managed_atoms.append(gpa, atom);
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .none, .{
.parent_atom_index = atom.getSymbolIndex().?,
@@ -2088,21 +2106,21 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu
atom.size = code.len;
atom.alignment = required_alignment;
// TODO: work out logic for disambiguating functions from function pointers
- // const sect_id = self.getDeclOutputSection(decl);
+ // const sect_id = self.getDeclOutputSection(decl_index);
const sect_id = self.data_const_section_index.?;
const symbol = atom.getSymbolPtr(self);
symbol.n_strx = name_str_index;
symbol.n_type = macho.N_SECT;
symbol.n_sect = sect_id + 1;
- symbol.n_value = try self.allocateAtom(atom, code.len, required_alignment);
- errdefer self.freeAtom(atom);
+ symbol.n_value = try self.allocateAtom(atom_index, code.len, required_alignment);
+ errdefer self.freeAtom(atom_index);
- try unnamed_consts.append(gpa, atom);
+ try unnamed_consts.append(gpa, atom_index);
log.debug("allocated atom for {?s} at 0x{x}", .{ name, symbol.n_value });
log.debug(" (required alignment 0x{x})", .{required_alignment});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
return atom.getSymbolIndex().?;
}
@@ -2129,41 +2147,36 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
}
}
- const atom = &decl.link.macho;
- try atom.ensureInitialized(self);
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (gop.found_existing) {
- self.freeRelocationsForAtom(atom);
- } else {
- gop.value_ptr.* = null;
- }
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ Atom.freeRelocations(self, atom_index);
+ const atom = self.getAtom(atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
- var decl_state: ?Dwarf.DeclState = if (self.d_sym) |*d_sym|
- try d_sym.dwarf.initDeclState(module, decl_index)
- else
- null;
- defer if (decl_state) |*ds| ds.deinit();
+ // var decl_state: ?Dwarf.DeclState = if (self.d_sym) |*d_sym|
+ // try d_sym.dwarf.initDeclState(module, decl_index)
+ // else
+ // null;
+ // defer if (decl_state) |*ds| ds.deinit();
const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
- const res = if (decl_state) |*ds|
- try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
- .ty = decl.ty,
- .val = decl_val,
- }, &code_buffer, .{
- .dwarf = ds,
- }, .{
- .parent_atom_index = decl.link.macho.getSymbolIndex().?,
- })
- else
- try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
- .ty = decl.ty,
- .val = decl_val,
- }, &code_buffer, .none, .{
- .parent_atom_index = decl.link.macho.getSymbolIndex().?,
- });
+ // const res = if (decl_state) |*ds|
+ // try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+ // .ty = decl.ty,
+ // .val = decl_val,
+ // }, &code_buffer, .{
+ // .dwarf = ds,
+ // }, .{
+ // .parent_atom_index = atom.getSymbolIndex().?,
+ // })
+ // else
+ const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+ .ty = decl.ty,
+ .val = decl_val,
+ }, &code_buffer, .none, .{
+ .parent_atom_index = atom.getSymbolIndex().?,
+ });
const code = switch (res) {
.ok => code_buffer.items,
@@ -2174,23 +2187,31 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
},
};
const addr = try self.updateDeclCode(decl_index, code);
+ _ = addr;
- if (decl_state) |*ds| {
- try self.d_sym.?.dwarf.commitDeclState(
- module,
- decl_index,
- addr,
- decl.link.macho.size,
- ds,
- );
- }
+ // if (decl_state) |*ds| {
+ // try self.d_sym.?.dwarf.commitDeclState(module, decl_index, addr, atom.size, ds);
+ // }
// Since we updated the vaddr and the size, each corresponding export symbol also
// needs to be updated.
try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}
-fn getDeclOutputSection(self: *MachO, decl: *Module.Decl) u8 {
+pub fn getOrCreateAtomForDecl(self: *MachO, decl_index: Module.Decl.Index) !Atom.Index {
+ const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{
+ .atom = try self.createAtom(),
+ .section = self.getDeclOutputSection(decl_index),
+ .exports = .{},
+ };
+ }
+ return gop.value_ptr.atom;
+}
+
+fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 {
+ const decl = self.base.options.module.?.declPtr(decl_index);
const ty = decl.ty;
const val = decl.val;
const zig_ty = ty.zigTypeTag();
@@ -2341,13 +2362,11 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
const sym_name = try decl.getFullyQualifiedName(mod);
defer self.base.allocator.free(sym_name);
- const atom = &decl.link.macho;
- const sym_index = atom.getSymbolIndex().?; // Atom was not initialized
- const decl_ptr = self.decls.getPtr(decl_index).?;
- if (decl_ptr.* == null) {
- decl_ptr.* = self.getDeclOutputSection(decl);
- }
- const sect_id = decl_ptr.*.?;
+ const decl_metadata = self.decls.get(decl_index).?;
+ const atom_index = decl_metadata.atom;
+ const atom = self.getAtom(atom_index);
+ const sym_index = atom.getSymbolIndex().?;
+ const sect_id = decl_metadata.section;
const code_len = code.len;
if (atom.size != 0) {
@@ -2357,11 +2376,11 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
sym.n_sect = sect_id + 1;
sym.n_desc = 0;
- const capacity = decl.link.macho.capacity(self);
+ const capacity = atom.capacity(self);
const need_realloc = code_len > capacity or !mem.isAlignedGeneric(u64, sym.n_value, required_alignment);
if (need_realloc) {
- const vaddr = try self.growAtom(atom, code_len, required_alignment);
+ const vaddr = try self.growAtom(atom_index, code_len, required_alignment);
log.debug("growing {s} and moving from 0x{x} to 0x{x}", .{ sym_name, sym.n_value, vaddr });
log.debug(" (required alignment 0x{x})", .{required_alignment});
@@ -2369,19 +2388,19 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
sym.n_value = vaddr;
log.debug(" (updating GOT entry)", .{});
const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
- const got_atom = self.getGotAtomForSymbol(got_target).?;
+ const got_atom_index = self.getGotAtomIndexForSymbol(got_target).?;
self.markRelocsDirtyByTarget(got_target);
- try self.writePtrWidthAtom(got_atom);
+ try self.writePtrWidthAtom(got_atom_index);
}
} else if (code_len < atom.size) {
- self.shrinkAtom(atom, code_len);
- } else if (atom.next == null) {
+ self.shrinkAtom(atom_index, code_len);
+ } else if (atom.next_index == null) {
const header = &self.sections.items(.header)[sect_id];
const segment = self.getSegment(sect_id);
const needed_size = (sym.n_value + code_len) - segment.vmaddr;
header.size = needed_size;
}
- atom.size = code_len;
+ self.getAtomPtr(atom_index).size = code_len;
} else {
const name_str_index = try self.strtab.insert(gpa, sym_name);
const sym = atom.getSymbolPtr(self);
@@ -2390,33 +2409,36 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
sym.n_sect = sect_id + 1;
sym.n_desc = 0;
- const vaddr = try self.allocateAtom(atom, code_len, required_alignment);
- errdefer self.freeAtom(atom);
+ const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
+ errdefer self.freeAtom(atom_index);
log.debug("allocated atom for {s} at 0x{x}", .{ sym_name, vaddr });
log.debug(" (required alignment 0x{x})", .{required_alignment});
- atom.size = code_len;
+ self.getAtomPtr(atom_index).size = code_len;
sym.n_value = vaddr;
const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
const got_index = try self.allocateGotEntry(got_target);
- const got_atom = try self.createGotAtom(got_target);
+ const got_atom_index = try self.createGotAtom(got_target);
+ const got_atom = self.getAtom(got_atom_index);
self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
- try self.writePtrWidthAtom(got_atom);
+ try self.writePtrWidthAtom(got_atom_index);
}
self.markRelocsDirtyByTarget(atom.getSymbolWithLoc());
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
return atom.getSymbol(self).n_value;
}
pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void {
+ _ = decl;
+ _ = self;
_ = module;
- if (self.d_sym) |*d_sym| {
- try d_sym.dwarf.updateDeclLineNumber(decl);
- }
+ // if (self.d_sym) |*d_sym| {
+ // try d_sym.dwarf.updateDeclLineNumber(decl);
+ // }
}
pub fn updateDeclExports(
@@ -2432,22 +2454,17 @@ pub fn updateDeclExports(
if (self.llvm_object) |llvm_object|
return llvm_object.updateDeclExports(module, decl_index, exports);
}
+
const tracy = trace(@src());
defer tracy.end();
const gpa = self.base.allocator;
const decl = module.declPtr(decl_index);
- const atom = &decl.link.macho;
-
- if (atom.getSymbolIndex() == null) return;
-
- const gop = try self.decls.getOrPut(gpa, decl_index);
- if (!gop.found_existing) {
- gop.value_ptr.* = self.getDeclOutputSection(decl);
- }
-
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const atom = self.getAtom(atom_index);
const decl_sym = atom.getSymbol(self);
+ const decl_metadata = self.decls.getPtr(decl_index).?;
for (exports) |exp| {
const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{exp.options.name});
@@ -2485,9 +2502,9 @@ pub fn updateDeclExports(
continue;
}
- const sym_index = exp.link.macho.sym_index orelse blk: {
+ const sym_index = decl_metadata.getExport(self, exp_name) orelse blk: {
const sym_index = try self.allocateSymbol();
- exp.link.macho.sym_index = sym_index;
+ try decl_metadata.exports.append(gpa, sym_index);
break :blk sym_index;
};
const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
@@ -2535,16 +2552,18 @@ pub fn updateDeclExports(
}
}
-pub fn deleteExport(self: *MachO, exp: Export) void {
+pub fn deleteDeclExport(self: *MachO, decl_index: Module.Decl.Index, name: []const u8) Allocator.Error!void {
if (self.llvm_object) |_| return;
- const sym_index = exp.sym_index orelse return;
+ const metadata = self.decls.getPtr(decl_index) orelse return;
const gpa = self.base.allocator;
+ const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{name});
+ defer gpa.free(exp_name);
+ const sym_index = metadata.getExportPtr(self, exp_name) orelse return;
- const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
+ const sym_loc = SymbolWithLoc{ .sym_index = sym_index.*, .file = null };
const sym = self.getSymbolPtr(sym_loc);
- const sym_name = self.getSymbolName(sym_loc);
- log.debug("deleting export '{s}'", .{sym_name});
+ log.debug("deleting export '{s}'", .{exp_name});
assert(sym.sect() and sym.ext());
sym.* = .{
.n_strx = 0,
@@ -2553,9 +2572,9 @@ pub fn deleteExport(self: *MachO, exp: Export) void {
.n_desc = 0,
.n_value = 0,
};
- self.locals_free_list.append(gpa, sym_index) catch {};
+ self.locals_free_list.append(gpa, sym_index.*) catch {};
- if (self.resolver.fetchRemove(sym_name)) |entry| {
+ if (self.resolver.fetchRemove(exp_name)) |entry| {
defer gpa.free(entry.key);
self.globals_free_list.append(gpa, entry.value) catch {};
self.globals.items[entry.value] = .{
@@ -2563,17 +2582,8 @@ pub fn deleteExport(self: *MachO, exp: Export) void {
.file = null,
};
}
-}
-fn freeRelocationsForAtom(self: *MachO, atom: *Atom) void {
- var removed_relocs = self.relocs.fetchOrderedRemove(atom);
- if (removed_relocs) |*relocs| relocs.value.deinit(self.base.allocator);
- var removed_rebases = self.rebases.fetchOrderedRemove(atom);
- if (removed_rebases) |*rebases| rebases.value.deinit(self.base.allocator);
- var removed_bindings = self.bindings.fetchOrderedRemove(atom);
- if (removed_bindings) |*bindings| bindings.value.deinit(self.base.allocator);
- var removed_lazy_bindings = self.lazy_bindings.fetchOrderedRemove(atom);
- if (removed_lazy_bindings) |*lazy_bindings| lazy_bindings.value.deinit(self.base.allocator);
+ sym_index.* = 0;
}
fn freeUnnamedConsts(self: *MachO, decl_index: Module.Decl.Index) void {
@@ -2595,28 +2605,22 @@ pub fn freeDecl(self: *MachO, decl_index: Module.Decl.Index) void {
log.debug("freeDecl {*}", .{decl});
if (self.decls.fetchSwapRemove(decl_index)) |kv| {
- if (kv.value) |_| {
- self.freeAtom(&decl.link.macho);
- self.freeUnnamedConsts(decl_index);
- }
+ self.freeAtom(kv.value.atom);
+ self.freeUnnamedConsts(decl_index);
}
- if (self.d_sym) |*d_sym| {
- d_sym.dwarf.freeDecl(decl);
- }
+ // if (self.d_sym) |*d_sym| {
+ // d_sym.dwarf.freeDecl(decl);
+ // }
}
pub fn getDeclVAddr(self: *MachO, decl_index: Module.Decl.Index, reloc_info: File.RelocInfo) !u64 {
- const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
-
assert(self.llvm_object == null);
- try decl.link.macho.ensureInitialized(self);
- const sym_index = decl.link.macho.getSymbolIndex().?;
-
- const atom = self.getAtomForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
- try atom.addRelocation(self, .{
+ const this_atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const sym_index = self.getAtom(this_atom_index).getSymbolIndex().?;
+ const atom_index = self.getAtomIndexForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
+ try Atom.addRelocation(self, atom_index, .{
.type = switch (self.base.options.target.cpu.arch) {
.aarch64 => @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED),
.x86_64 => @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED),
@@ -2628,7 +2632,7 @@ pub fn getDeclVAddr(self: *MachO, decl_index: Module.Decl.Index, reloc_info: Fil
.pcrel = false,
.length = 3,
});
- try atom.addRebase(self, @intCast(u32, reloc_info.offset));
+ try Atom.addRebase(self, atom_index, @intCast(u32, reloc_info.offset));
return 0;
}
@@ -2860,34 +2864,36 @@ fn moveSectionInVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void
// TODO: enforce order by increasing VM addresses in self.sections container.
for (self.sections.items(.header)[sect_id + 1 ..]) |*next_header, next_sect_id| {
const index = @intCast(u8, sect_id + 1 + next_sect_id);
- const maybe_last_atom = &self.sections.items(.last_atom)[index];
const next_segment = self.getSegmentPtr(index);
next_header.addr += diff;
next_segment.vmaddr += diff;
- if (maybe_last_atom.*) |last_atom| {
- var atom = last_atom;
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[index];
+ if (maybe_last_atom_index.*) |last_atom_index| {
+ var atom_index = last_atom_index;
while (true) {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbolPtr(self);
sym.n_value += diff;
- if (atom.prev) |prev| {
- atom = prev;
+ if (atom.prev_index) |prev_index| {
+ atom_index = prev_index;
} else break;
}
}
}
}
-fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !u64 {
+fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 {
const tracy = trace(@src());
defer tracy.end();
+ const atom = self.getAtom(atom_index);
const sect_id = atom.getSymbol(self).n_sect - 1;
const segment = self.getSegmentPtr(sect_id);
const header = &self.sections.items(.header)[sect_id];
const free_list = &self.sections.items(.free_list)[sect_id];
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
const requires_padding = blk: {
if (!header.isCode()) break :blk false;
if (header.isSymbolStubs()) break :blk false;
@@ -2901,7 +2907,7 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
- var atom_placement: ?*Atom = null;
+ var atom_placement: ?Atom.Index = null;
var free_list_removal: ?usize = null;
// First we look for an appropriately sized free list node.
@@ -2909,7 +2915,8 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
var vaddr = blk: {
var i: usize = 0;
while (i < free_list.items.len) {
- const big_atom = free_list.items[i];
+ const big_atom_index = free_list.items[i];
+ const big_atom = self.getAtom(big_atom_index);
// We now have a pointer to a live atom that has too much capacity.
// Is it enough that we could fit this new atom?
const sym = big_atom.getSymbol(self);
@@ -2937,30 +2944,35 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
const keep_free_list_node = remaining_capacity >= min_text_capacity;
// Set up the metadata to be updated, after errors are no longer possible.
- atom_placement = big_atom;
+ atom_placement = big_atom_index;
if (!keep_free_list_node) {
free_list_removal = i;
}
break :blk new_start_vaddr;
- } else if (maybe_last_atom.*) |last| {
+ } else if (maybe_last_atom_index.*) |last_index| {
+ const last = self.getAtom(last_index);
const last_symbol = last.getSymbol(self);
const ideal_capacity = if (requires_padding) padToIdeal(last.size) else last.size;
const ideal_capacity_end_vaddr = last_symbol.n_value + ideal_capacity;
const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment);
- atom_placement = last;
+ atom_placement = last_index;
break :blk new_start_vaddr;
} else {
break :blk mem.alignForwardGeneric(u64, segment.vmaddr, alignment);
}
};
- const expand_section = atom_placement == null or atom_placement.?.next == null;
+ const expand_section = if (atom_placement) |placement_index|
+ self.getAtom(placement_index).next_index == null
+ else
+ true;
if (expand_section) {
const sect_capacity = self.allocatedSize(header.offset);
const needed_size = (vaddr + new_atom_size) - segment.vmaddr;
if (needed_size > sect_capacity) {
const new_offset = self.findFreeSpace(needed_size, self.page_size);
- const current_size = if (maybe_last_atom.*) |last_atom| blk: {
+ const current_size = if (maybe_last_atom_index.*) |last_atom_index| blk: {
+ const last_atom = self.getAtom(last_atom_index);
const sym = last_atom.getSymbol(self);
break :blk (sym.n_value + last_atom.size) - segment.vmaddr;
} else 0;
@@ -2992,7 +3004,7 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
header.size = needed_size;
segment.filesize = mem.alignForwardGeneric(u64, needed_size, self.page_size);
segment.vmsize = mem.alignForwardGeneric(u64, needed_size, self.page_size);
- maybe_last_atom.* = atom;
+ maybe_last_atom_index.* = atom_index;
self.segment_table_dirty = true;
}
@@ -3002,20 +3014,25 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
header.@"align" = align_pow;
}
- if (atom.prev) |prev| {
- prev.next = atom.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
}
- if (atom.next) |next| {
- next.prev = atom.prev;
+ if (atom.next_index) |next_index| {
+ const next = self.getAtomPtr(next_index);
+ next.prev_index = atom.prev_index;
}
- if (atom_placement) |big_atom| {
- atom.prev = big_atom;
- atom.next = big_atom.next;
- big_atom.next = atom;
+ if (atom_placement) |big_atom_index| {
+ const big_atom = self.getAtomPtr(big_atom_index);
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = big_atom_index;
+ atom_ptr.next_index = big_atom.next_index;
+ big_atom.next_index = atom_index;
} else {
- atom.prev = null;
- atom.next = null;
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = null;
+ atom_ptr.next_index = null;
}
if (free_list_removal) |i| {
_ = free_list.swapRemove(i);
@@ -3155,7 +3172,8 @@ fn collectRebaseData(self: *MachO, rebase: *Rebase) !void {
const gpa = self.base.allocator;
const slice = self.sections.slice();
- for (self.rebases.keys()) |atom, i| {
+ for (self.rebases.keys()) |atom_index, i| {
+ const atom = self.getAtom(atom_index);
log.debug(" ATOM(%{?d}, '{s}')", .{ atom.getSymbolIndex(), atom.getName(self) });
const sym = atom.getSymbol(self);
@@ -3184,7 +3202,8 @@ fn collectBindData(self: *MachO, bind: anytype, raw_bindings: anytype) !void {
const gpa = self.base.allocator;
const slice = self.sections.slice();
- for (raw_bindings.keys()) |atom, i| {
+ for (raw_bindings.keys()) |atom_index, i| {
+ const atom = self.getAtom(atom_index);
log.debug(" ATOM(%{?d}, '{s}')", .{ atom.getSymbolIndex(), atom.getName(self) });
const sym = atom.getSymbol(self);
@@ -3359,7 +3378,7 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, lazy_bind: LazyBind) !void
if (lazy_bind.size() == 0) return;
const stub_helper_section_index = self.stub_helper_section_index.?;
- assert(self.stub_helper_preamble_atom != null);
+ assert(self.stub_helper_preamble_atom_index != null);
const section = self.sections.get(stub_helper_section_index);
@@ -3369,10 +3388,11 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, lazy_bind: LazyBind) !void
else => unreachable,
};
const header = section.header;
- var atom = section.last_atom.?;
+ var atom_index = section.last_atom_index.?;
var index: usize = lazy_bind.offsets.items.len;
while (index > 0) : (index -= 1) {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const file_offset = header.offset + sym.n_value - header.addr + stub_offset;
const bind_offset = lazy_bind.offsets.items[index - 1];
@@ -3385,7 +3405,7 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, lazy_bind: LazyBind) !void
try self.base.file.?.pwriteAll(mem.asBytes(&bind_offset), file_offset);
- atom = atom.prev.?;
+ atom_index = atom.prev_index.?;
}
}
@@ -3828,25 +3848,35 @@ pub fn getOrPutGlobalPtr(self: *MachO, name: []const u8) !GetOrPutGlobalPtrResul
return GetOrPutGlobalPtrResult{ .found_existing = false, .value_ptr = ptr };
}
+pub fn getAtom(self: *MachO, atom_index: Atom.Index) Atom {
+ assert(atom_index < self.atoms.items.len);
+ return self.atoms.items[atom_index];
+}
+
+pub fn getAtomPtr(self: *MachO, atom_index: Atom.Index) *Atom {
+ assert(atom_index < self.atoms.items.len);
+ return &self.atoms.items[atom_index];
+}
+
/// Returns atom if there is an atom referenced by the symbol described by `sym_with_loc` descriptor.
/// Returns null on failure.
-pub fn getAtomForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?*Atom {
+pub fn getAtomIndexForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?Atom.Index {
assert(sym_with_loc.file == null);
return self.atom_by_index_table.get(sym_with_loc.sym_index);
}
/// Returns GOT atom that references `sym_with_loc` if one exists.
/// Returns null otherwise.
-pub fn getGotAtomForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?*Atom {
+pub fn getGotAtomIndexForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?Atom.Index {
const got_index = self.got_entries_table.get(sym_with_loc) orelse return null;
- return self.got_entries.items[got_index].getAtom(self);
+ return self.got_entries.items[got_index].getAtomIndex(self);
}
/// Returns stubs atom that references `sym_with_loc` if one exists.
/// Returns null otherwise.
-pub fn getStubsAtomForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?*Atom {
+pub fn getStubsAtomIndexForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?Atom.Index {
const stubs_index = self.stubs_table.get(sym_with_loc) orelse return null;
- return self.stubs.items[stubs_index].getAtom(self);
+ return self.stubs.items[stubs_index].getAtomIndex(self);
}
/// Returns symbol location corresponding to the set entrypoint.
@@ -4232,26 +4262,31 @@ pub fn logAtoms(self: *MachO) void {
log.debug("atoms:", .{});
const slice = self.sections.slice();
- for (slice.items(.last_atom)) |last, i| {
- var atom = last orelse continue;
+ for (slice.items(.last_atom_index)) |last_atom_index, i| {
+ var atom_index = last_atom_index orelse continue;
const header = slice.items(.header)[i];
- while (atom.prev) |prev| {
- atom = prev;
+ while (true) {
+ const atom = self.getAtom(atom_index);
+ if (atom.prev_index) |prev_index| {
+ atom_index = prev_index;
+ } else break;
}
log.debug("{s},{s}", .{ header.segName(), header.sectName() });
while (true) {
- self.logAtom(atom);
- if (atom.next) |next| {
- atom = next;
+ self.logAtom(atom_index);
+ const atom = self.getAtom(atom_index);
+ if (atom.next_index) |next_index| {
+ atom_index = next_index;
} else break;
}
}
}
-pub fn logAtom(self: *MachO, atom: *const Atom) void {
+pub fn logAtom(self: *MachO, atom_index: Atom.Index) void {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const sym_name = atom.getName(self);
log.debug(" ATOM(%{?d}, '{s}') @ {x} (sizeof({x}), alignof({x})) in object({?d}) in sect({d})", .{
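
Throughout the MachO.zig hunks above, atoms are reached through the two new
accessors: getAtom returns a copy for reads, and getAtomPtr returns a pointer
into self.atoms.items for an immediate write (updateDeclCode, for example,
re-fetches with getAtomPtr before mutating size). A sketch of that discipline,
assuming a stripped-down MachO struct rather than the real one:

    const std = @import("std");

    const Atom = struct { size: u64 };
    const Index = u32;

    const MachO = struct {
        atoms: std.ArrayListUnmanaged(Atom) = .{},

        // Copy for reads: cheap, and immune to later list reallocation.
        fn getAtom(self: MachO, atom_index: Index) Atom {
            return self.atoms.items[atom_index];
        }

        // Transient pointer for writes: invalidated by anything that appends
        // to `atoms` (such as createAtom), so it must not be held across
        // such calls.
        fn getAtomPtr(self: *MachO, atom_index: Index) *Atom {
            return &self.atoms.items[atom_index];
        }
    };

    test "read via copy, write via transient pointer" {
        const gpa = std.testing.allocator;
        var macho_file = MachO{};
        defer macho_file.atoms.deinit(gpa);
        try macho_file.atoms.append(gpa, .{ .size = 0 });

        macho_file.getAtomPtr(0).size = 8; // write through, pointer not retained
        try std.testing.expectEqual(@as(u64, 8), macho_file.getAtom(0).size);
    }
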
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index f15958b3df..da0115d069 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -39,11 +39,14 @@ size: u64,
alignment: u32,
/// Points to the previous and next neighbours
-next: ?*Atom,
-prev: ?*Atom,
+/// TODO use the same trick as with symbols: reserve index 0 as null atom
+next_index: ?Atom.Index,
+prev_index: ?Atom.Index,
dbg_info_atom: Dwarf.Atom,
+pub const Index = u32;
+
pub const Binding = struct {
target: SymbolWithLoc,
offset: u64,
@@ -54,22 +57,6 @@ pub const SymbolAtOffset = struct {
offset: u64,
};
-pub const empty = Atom{
- .sym_index = 0,
- .file = null,
- .size = 0,
- .alignment = 0,
- .prev = null,
- .next = null,
- .dbg_info_atom = undefined,
-};
-
-pub fn ensureInitialized(self: *Atom, macho_file: *MachO) !void {
- if (self.getSymbolIndex() != null) return; // Already initialized
- self.sym_index = try macho_file.allocateSymbol();
- try macho_file.atom_by_index_table.putNoClobber(macho_file.base.allocator, self.sym_index, self);
-}
-
pub fn getSymbolIndex(self: Atom) ?u32 {
if (self.sym_index == 0) return null;
return self.sym_index;
@@ -108,7 +95,8 @@ pub fn getName(self: Atom, macho_file: *MachO) []const u8 {
/// this calculation.
pub fn capacity(self: Atom, macho_file: *MachO) u64 {
const self_sym = self.getSymbol(macho_file);
- if (self.next) |next| {
+ if (self.next_index) |next_index| {
+ const next = macho_file.getAtom(next_index);
const next_sym = next.getSymbol(macho_file);
return next_sym.n_value - self_sym.n_value;
} else {
@@ -120,7 +108,8 @@ pub fn capacity(self: Atom, macho_file: *MachO) u64 {
pub fn freeListEligible(self: Atom, macho_file: *MachO) bool {
// No need to keep a free list node for the last atom.
- const next = self.next orelse return false;
+ const next_index = self.next_index orelse return false;
+ const next = macho_file.getAtom(next_index);
const self_sym = self.getSymbol(macho_file);
const next_sym = next.getSymbol(macho_file);
const cap = next_sym.n_value - self_sym.n_value;
@@ -130,19 +119,19 @@ pub fn freeListEligible(self: Atom, macho_file: *MachO) bool {
return surplus >= MachO.min_text_capacity;
}
-pub fn addRelocation(self: *Atom, macho_file: *MachO, reloc: Relocation) !void {
- return self.addRelocations(macho_file, 1, .{reloc});
+pub fn addRelocation(macho_file: *MachO, atom_index: Atom.Index, reloc: Relocation) !void {
+ return addRelocations(macho_file, atom_index, 1, .{reloc});
}
pub fn addRelocations(
- self: *Atom,
macho_file: *MachO,
+ atom_index: Atom.Index,
comptime count: comptime_int,
relocs: [count]Relocation,
) !void {
const gpa = macho_file.base.allocator;
const target = macho_file.base.options.target;
- const gop = try macho_file.relocs.getOrPut(gpa, self);
+ const gop = try macho_file.relocs.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
@@ -156,56 +145,72 @@ pub fn addRelocations(
}
}
-pub fn addRebase(self: *Atom, macho_file: *MachO, offset: u32) !void {
+pub fn addRebase(macho_file: *MachO, atom_index: Atom.Index, offset: u32) !void {
const gpa = macho_file.base.allocator;
- log.debug(" (adding rebase at offset 0x{x} in %{?d})", .{ offset, self.getSymbolIndex() });
- const gop = try macho_file.rebases.getOrPut(gpa, self);
+ const atom = macho_file.getAtom(atom_index);
+ log.debug(" (adding rebase at offset 0x{x} in %{?d})", .{ offset, atom.getSymbolIndex() });
+ const gop = try macho_file.rebases.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, offset);
}
-pub fn addBinding(self: *Atom, macho_file: *MachO, binding: Binding) !void {
+pub fn addBinding(macho_file: *MachO, atom_index: Atom.Index, binding: Binding) !void {
const gpa = macho_file.base.allocator;
+ const atom = macho_file.getAtom(atom_index);
log.debug(" (adding binding to symbol {s} at offset 0x{x} in %{?d})", .{
macho_file.getSymbolName(binding.target),
binding.offset,
- self.getSymbolIndex(),
+ atom.getSymbolIndex(),
});
- const gop = try macho_file.bindings.getOrPut(gpa, self);
+ const gop = try macho_file.bindings.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, binding);
}
-pub fn addLazyBinding(self: *Atom, macho_file: *MachO, binding: Binding) !void {
+pub fn addLazyBinding(macho_file: *MachO, atom_index: Atom.Index, binding: Binding) !void {
const gpa = macho_file.base.allocator;
+ const atom = macho_file.getAtom(atom_index);
log.debug(" (adding lazy binding to symbol {s} at offset 0x{x} in %{?d})", .{
macho_file.getSymbolName(binding.target),
binding.offset,
- self.getSymbolIndex(),
+ atom.getSymbolIndex(),
});
- const gop = try macho_file.lazy_bindings.getOrPut(gpa, self);
+ const gop = try macho_file.lazy_bindings.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, binding);
}
-pub fn resolveRelocations(self: *Atom, macho_file: *MachO) !void {
- const relocs = macho_file.relocs.get(self) orelse return;
- const source_sym = self.getSymbol(macho_file);
+pub fn resolveRelocations(macho_file: *MachO, atom_index: Atom.Index) !void {
+ const atom = macho_file.getAtom(atom_index);
+ const relocs = macho_file.relocs.get(atom_index) orelse return;
+ const source_sym = atom.getSymbol(macho_file);
const source_section = macho_file.sections.get(source_sym.n_sect - 1).header;
const file_offset = source_section.offset + source_sym.n_value - source_section.addr;
- log.debug("relocating '{s}'", .{self.getName(macho_file)});
+ log.debug("relocating '{s}'", .{atom.getName(macho_file)});
for (relocs.items) |*reloc| {
if (!reloc.dirty) continue;
- try reloc.resolve(self, macho_file, file_offset);
+ try reloc.resolve(macho_file, atom_index, file_offset);
reloc.dirty = false;
}
}
+
+pub fn freeRelocations(macho_file: *MachO, atom_index: Atom.Index) void {
+ const gpa = macho_file.base.allocator;
+ var removed_relocs = macho_file.relocs.fetchOrderedRemove(atom_index);
+ if (removed_relocs) |*relocs| relocs.value.deinit(gpa);
+ var removed_rebases = macho_file.rebases.fetchOrderedRemove(atom_index);
+ if (removed_rebases) |*rebases| rebases.value.deinit(gpa);
+ var removed_bindings = macho_file.bindings.fetchOrderedRemove(atom_index);
+ if (removed_bindings) |*bindings| bindings.value.deinit(gpa);
+ var removed_lazy_bindings = macho_file.lazy_bindings.fetchOrderedRemove(atom_index);
+ if (removed_lazy_bindings) |*lazy_bindings| lazy_bindings.value.deinit(gpa);
+}
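
With atoms owned by the linker, Atom.zig's pointer-receiver methods become
free functions taking (macho_file, atom_index), and the side tables (relocs,
rebases, bindings, lazy_bindings) are keyed by Atom.Index instead of *Atom.
A self-contained sketch of the get-or-put/append/free cycle used by
addRelocations and freeRelocations above, assuming a simplified Relocation
type (the real one also carries type, target, addend, pcrel, and length):

    const std = @import("std");

    const Index = u32;
    const Relocation = struct { offset: u32 };

    // Keyed by Atom.Index rather than *Atom, so keys stay valid when the
    // atom list reallocates.
    const RelocationTable = std.AutoArrayHashMapUnmanaged(Index, std.ArrayListUnmanaged(Relocation));

    test "append and free relocations keyed by atom index" {
        const gpa = std.testing.allocator;
        var relocs: RelocationTable = .{};
        defer relocs.deinit(gpa);

        const atom_index: Index = 0;

        // As in Atom.addRelocations: get-or-put the per-atom list, then append.
        const gop = try relocs.getOrPut(gpa, atom_index);
        if (!gop.found_existing) {
            gop.value_ptr.* = .{};
        }
        try gop.value_ptr.append(gpa, .{ .offset = 4 });

        // As in Atom.freeRelocations: remove the entry and deinit its list.
        var removed = relocs.fetchOrderedRemove(atom_index);
        if (removed) |*kv| kv.value.deinit(gpa);
    }
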
diff --git a/src/link/MachO/Relocation.zig b/src/link/MachO/Relocation.zig
index ca6bf9d681..07e5cf1aa2 100644
--- a/src/link/MachO/Relocation.zig
+++ b/src/link/MachO/Relocation.zig
@@ -29,33 +29,35 @@ pub fn fmtType(self: Relocation, target: std.Target) []const u8 {
}
}
-pub fn getTargetAtom(self: Relocation, macho_file: *MachO) ?*Atom {
+pub fn getTargetAtomIndex(self: Relocation, macho_file: *MachO) ?Atom.Index {
switch (macho_file.base.options.target.cpu.arch) {
.aarch64 => switch (@intToEnum(macho.reloc_type_arm64, self.type)) {
.ARM64_RELOC_GOT_LOAD_PAGE21,
.ARM64_RELOC_GOT_LOAD_PAGEOFF12,
.ARM64_RELOC_POINTER_TO_GOT,
- => return macho_file.getGotAtomForSymbol(self.target),
+ => return macho_file.getGotAtomIndexForSymbol(self.target),
else => {},
},
.x86_64 => switch (@intToEnum(macho.reloc_type_x86_64, self.type)) {
.X86_64_RELOC_GOT,
.X86_64_RELOC_GOT_LOAD,
- => return macho_file.getGotAtomForSymbol(self.target),
+ => return macho_file.getGotAtomIndexForSymbol(self.target),
else => {},
},
else => unreachable,
}
- if (macho_file.getStubsAtomForSymbol(self.target)) |stubs_atom| return stubs_atom;
- return macho_file.getAtomForSymbol(self.target);
+ if (macho_file.getStubsAtomIndexForSymbol(self.target)) |stubs_atom| return stubs_atom;
+ return macho_file.getAtomIndexForSymbol(self.target);
}
-pub fn resolve(self: Relocation, atom: *Atom, macho_file: *MachO, base_offset: u64) !void {
+pub fn resolve(self: Relocation, macho_file: *MachO, atom_index: Atom.Index, base_offset: u64) !void {
const arch = macho_file.base.options.target.cpu.arch;
+ const atom = macho_file.getAtom(atom_index);
const source_sym = atom.getSymbol(macho_file);
const source_addr = source_sym.n_value + self.offset;
- const target_atom = self.getTargetAtom(macho_file) orelse return;
+ const target_atom_index = self.getTargetAtomIndex(macho_file) orelse return;
+ const target_atom = macho_file.getAtom(target_atom_index);
const target_addr = @intCast(i64, target_atom.getSymbol(macho_file).n_value) + self.addend;
log.debug(" ({x}: [() => 0x{x} ({s})) ({s})", .{