Diffstat (limited to 'src'):

 src/arch/aarch64/CodeGen.zig    |    1
 src/arch/arm/CodeGen.zig        |    1
 src/arch/riscv64/CodeGen.zig    |    1
 src/arch/sparc64/CodeGen.zig    |    1
 src/arch/x86_64/CodeGen.zig     |   14
 src/codegen.zig                 |    1
 src/link/Coff.zig               |  322
 src/link/Coff/Relocation.zig    |   18
 src/link/Elf.zig                |   71
 src/link/Elf/Atom.zig           |   18
 src/link/MachO.zig              | 1033
 src/link/MachO/Atom.zig         |   17
 src/link/MachO/DebugSymbols.zig |    4
 src/link/MachO/Relocation.zig   |   88
 src/link/MachO/ZldAtom.zig      |   75
 src/link/MachO/eh_frame.zig     |    3
 src/link/MachO/stubs.zig        |  161
 src/link/MachO/thunks.zig       |    7
 src/link/MachO/zld.zig          |  130
 src/link/table_section.zig      |   65
 20 files changed, 985 insertions(+), 1046 deletions(-)
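
A note before the hunks: the new src/link/table_section.zig (65 lines) is used by all three incremental linkers below, but its contents are not part of this section. Judging from the call sites (allocateEntry, freeEntry, the entries/lookup/free_list fields, the Index type) and from the removed MachO SectionTable it generalizes, a minimal sketch might look like this; field defaults and exact signatures are assumptions:

const std = @import("std");
const Allocator = std.mem.Allocator;

pub fn TableSection(comptime Entry: type) type {
    return struct {
        entries: std.ArrayListUnmanaged(Entry) = .{},
        free_list: std.ArrayListUnmanaged(Index) = .{},
        lookup: std.AutoHashMapUnmanaged(Entry, Index) = .{},

        pub const Index = u32;
        const Self = @This();

        pub fn deinit(self: *Self, allocator: Allocator) void {
            self.entries.deinit(allocator);
            self.free_list.deinit(allocator);
            self.lookup.deinit(allocator);
        }

        /// Reuses a slot from the free list when one is available.
        pub fn allocateEntry(self: *Self, allocator: Allocator, entry: Entry) !Index {
            try self.entries.ensureUnusedCapacity(allocator, 1);
            const index = blk: {
                if (self.free_list.popOrNull()) |index| break :blk index;
                const index = @intCast(Index, self.entries.items.len);
                _ = self.entries.addOneAssumeCapacity();
                break :blk index;
            };
            self.entries.items[index] = entry;
            try self.lookup.putNoClobber(allocator, entry, index);
            return index;
        }

        /// A freed slot stays in `entries` (the real file may also clear it)
        /// but leaves `lookup`, which is why the flush loops below skip
        /// entries missing from `lookup`.
        pub fn freeEntry(self: *Self, allocator: Allocator, entry: Entry) void {
            const index = self.lookup.get(entry) orelse return;
            self.free_list.append(allocator, index) catch {};
            _ = self.lookup.remove(entry);
        }
    };
}

Coff's logSymtab also formats the table directly (log.debug("{}", .{self.got_table})), so the real type presumably implements a format method as well.
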
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 1acc11d7e8..a2db3459dc 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -4290,6 +4290,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const atom = elf_file.getAtom(atom_index);
+ _ = try atom.getOrCreateOffsetTableEntry(elf_file);
const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr });
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 661e713b1c..156ad380b8 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -4270,6 +4270,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const atom = elf_file.getAtom(atom_index);
+ _ = try atom.getOrCreateOffsetTableEntry(elf_file);
const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr });
} else if (self.bin_file.cast(link.File.MachO)) |_| {
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index f0ab8b3317..e7dce48dbf 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -1734,6 +1734,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const func = func_payload.data;
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const atom = elf_file.getAtom(atom_index);
+ _ = try atom.getOrCreateOffsetTableEntry(elf_file);
const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr });
_ = try self.addInst(.{
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 2bcc1e1c4e..beb2ce2fd2 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -1254,6 +1254,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const atom = elf_file.getAtom(atom_index);
+ _ = try atom.getOrCreateOffsetTableEntry(elf_file);
break :blk @intCast(u32, atom.getOffsetTableAddress(elf_file));
} else unreachable;
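
The same three-line sequence is added to every self-hosted backend here (and to x86_64 and codegen.zig below): get or create the decl's atom, ensure it has a GOT slot, then read the slot's address. A hypothetical consolidation, sketched only to show the shared idiom; the helper name and its home are not part of the diff:

fn gotAddressForDecl(elf_file: *link.File.Elf, decl_index: Module.Decl.Index) !u64 {
    const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
    const atom = elf_file.getAtom(atom_index);
    // Ensure a GOT slot exists; the returned index is not needed here.
    _ = try atom.getOrCreateOffsetTableEntry(elf_file);
    return atom.getOffsetTableAddress(elf_file);
}
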
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 1c72e2296b..f237b98e75 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -5624,7 +5624,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
- const got_addr = elf_file.getAtom(atom_index).getOffsetTableAddress(elf_file);
+ const atom = elf_file.getAtom(atom_index);
+ _ = try atom.getOrCreateOffsetTableEntry(elf_file);
+ const got_addr = atom.getOffsetTableAddress(elf_file);
try self.asmMemory(.call, Memory.sib(.qword, .{
.base = .ds,
.disp = @intCast(i32, got_addr),
@@ -5853,7 +5855,9 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
.{ .kind = .const_data, .ty = Type.anyerror },
4, // dword alignment
);
- const got_addr = elf_file.getAtom(atom_index).getOffsetTableAddress(elf_file);
+ const atom = elf_file.getAtom(atom_index);
+ _ = try atom.getOrCreateOffsetTableEntry(elf_file);
+ const got_addr = atom.getOffsetTableAddress(elf_file);
try self.asmRegisterMemory(.mov, addr_reg.to64(), Memory.sib(.qword, .{
.base = .ds,
.disp = @intCast(i32, got_addr),
@@ -7574,7 +7578,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
const atom_index = try self.getSymbolIndexForDecl(self.mod_fn.owner_decl);
if (self.bin_file.cast(link.File.MachO)) |_| {
_ = try self.addInst(.{
- .tag = .mov_linker,
+ .tag = .lea_linker,
.ops = .tlv_reloc,
.data = .{ .payload = try self.addExtra(Mir.LeaRegisterReloc{
.reg = @enumToInt(Register.rdi),
@@ -8230,7 +8234,9 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
.{ .kind = .const_data, .ty = Type.anyerror },
4, // dword alignment
);
- const got_addr = elf_file.getAtom(atom_index).getOffsetTableAddress(elf_file);
+ const atom = elf_file.getAtom(atom_index);
+ _ = try atom.getOrCreateOffsetTableEntry(elf_file);
+ const got_addr = atom.getOffsetTableAddress(elf_file);
try self.asmRegisterMemory(.mov, addr_reg.to64(), Memory.sib(.qword, .{
.base = .ds,
.disp = @intCast(i32, got_addr),
diff --git a/src/codegen.zig b/src/codegen.zig
index 6d6238ceda..dbcd76118a 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -1006,6 +1006,7 @@ fn genDeclRef(
if (bin_file.cast(link.File.Elf)) |elf_file| {
const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
const atom = elf_file.getAtom(atom_index);
+ _ = try atom.getOrCreateOffsetTableEntry(elf_file);
return GenResult.mcv(.{ .memory = atom.getOffsetTableAddress(elf_file) });
} else if (bin_file.cast(link.File.MachO)) |macho_file| {
const atom_index = try macho_file.getOrCreateAtomForDecl(decl_index);
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 825afff36d..ac98457360 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -37,13 +37,14 @@ strtab_offset: ?u32 = null,
temp_strtab: StringTable(.temp_strtab) = .{},
-got_entries: std.ArrayListUnmanaged(Entry) = .{},
-got_entries_free_list: std.ArrayListUnmanaged(u32) = .{},
-got_entries_table: std.AutoHashMapUnmanaged(SymbolWithLoc, u32) = .{},
+got_table: TableSection(SymbolWithLoc) = .{},
/// A table of ImportTables partitioned by the library name.
/// Key is an offset into the interning string table `temp_strtab`.
import_tables: std.AutoArrayHashMapUnmanaged(u32, ImportTable) = .{},
+
+got_table_count_dirty: bool = true,
+got_table_contents_dirty: bool = true,
imports_count_dirty: bool = true,
/// Virtual address of the entry point procedure relative to image base.
@@ -106,12 +107,6 @@ const HotUpdateState = struct {
loaded_base_address: ?std.os.windows.HMODULE = null,
};
-const Entry = struct {
- target: SymbolWithLoc,
- // Index into the synthetic symbol table (i.e., file == null).
- sym_index: u32,
-};
-
const RelocTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Relocation));
const BaseRelocationTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(u32));
const UnnamedConstTable = std.AutoArrayHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Atom.Index));
@@ -188,7 +183,8 @@ pub const PtrWidth = enum {
p32,
p64,
- fn abiSize(pw: PtrWidth) u4 {
+ /// Size in bytes.
+ pub fn size(pw: PtrWidth) u4 {
return switch (pw) {
.p32 => 4,
.p64 => 8,
@@ -310,9 +306,7 @@ pub fn deinit(self: *Coff) void {
self.globals_free_list.deinit(gpa);
self.strtab.deinit(gpa);
self.temp_strtab.deinit(gpa);
- self.got_entries.deinit(gpa);
- self.got_entries_free_list.deinit(gpa);
- self.got_entries_table.deinit(gpa);
+ self.got_table.deinit(gpa);
for (self.import_tables.values()) |*itab| {
itab.deinit(gpa);
@@ -371,7 +365,7 @@ fn populateMissingMetadata(self: *Coff) !void {
}
if (self.got_section_index == null) {
- const file_size = @intCast(u32, self.base.options.symbol_count_hint) * self.ptr_width.abiSize();
+ const file_size = @intCast(u32, self.base.options.symbol_count_hint) * self.ptr_width.size();
self.got_section_index = try self.allocateSection(".got", file_size, .{
.CNT_INITIALIZED_DATA = 1,
.MEM_READ = 1,
@@ -396,7 +390,7 @@ fn populateMissingMetadata(self: *Coff) !void {
}
if (self.idata_section_index == null) {
- const file_size = @intCast(u32, self.base.options.symbol_count_hint) * self.ptr_width.abiSize();
+ const file_size = @intCast(u32, self.base.options.symbol_count_hint) * self.ptr_width.size();
self.idata_section_index = try self.allocateSection(".idata", file_size, .{
.CNT_INITIALIZED_DATA = 1,
.MEM_READ = 1,
@@ -498,8 +492,8 @@ fn growSection(self: *Coff, sect_id: u32, needed_size: u32) !void {
const sect_vm_capacity = self.allocatedVirtualSize(header.virtual_address);
if (needed_size > sect_vm_capacity) {
- try self.growSectionVirtualMemory(sect_id, needed_size);
self.markRelocsDirtyByAddress(header.virtual_address + needed_size);
+ try self.growSectionVirtualMemory(sect_id, needed_size);
}
header.virtual_size = @max(header.virtual_size, needed_size);
@@ -698,26 +692,12 @@ fn allocateGlobal(self: *Coff) !u32 {
return index;
}
-pub fn allocateGotEntry(self: *Coff, target: SymbolWithLoc) !u32 {
- const gpa = self.base.allocator;
- try self.got_entries.ensureUnusedCapacity(gpa, 1);
-
- const index: u32 = blk: {
- if (self.got_entries_free_list.popOrNull()) |index| {
- log.debug(" (reusing GOT entry index {d})", .{index});
- break :blk index;
- } else {
- log.debug(" (allocating GOT entry at index {d})", .{self.got_entries.items.len});
- const index = @intCast(u32, self.got_entries.items.len);
- _ = self.got_entries.addOneAssumeCapacity();
- break :blk index;
- }
- };
-
- self.got_entries.items[index] = .{ .target = target, .sym_index = 0 };
- try self.got_entries_table.putNoClobber(gpa, target, index);
-
- return index;
+fn addGotEntry(self: *Coff, target: SymbolWithLoc) !void {
+ if (self.got_table.lookup.contains(target)) return;
+ const got_index = try self.got_table.allocateEntry(self.base.allocator, target);
+ try self.writeOffsetTableEntry(got_index);
+ self.got_table_count_dirty = true;
+ self.markRelocsDirtyByTarget(target);
}
pub fn createAtom(self: *Coff) !Atom.Index {
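
The removed allocateGotEntry/createGotAtom pair collapses into the single idempotent addGotEntry above. A hedged sketch of a call site (illustrative only; `target` is any SymbolWithLoc):

try self.addGotEntry(target); // allocates a slot, grows .got if needed, writes the cell
try self.addGotEntry(target); // no-op: target is already in got_table.lookup
const got_index = self.got_table.lookup.get(target).?; // stable slot index
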
@@ -737,37 +717,6 @@ pub fn createAtom(self: *Coff) !Atom.Index {
return atom_index;
}
-fn createGotAtom(self: *Coff, target: SymbolWithLoc) !Atom.Index {
- const atom_index = try self.createAtom();
- const atom = self.getAtomPtr(atom_index);
- atom.size = @sizeOf(u64);
-
- const sym = atom.getSymbolPtr(self);
- sym.section_number = @intToEnum(coff.SectionNumber, self.got_section_index.? + 1);
- sym.value = try self.allocateAtom(atom_index, atom.size, @sizeOf(u64));
-
- log.debug("allocated GOT atom at 0x{x}", .{sym.value});
-
- try Atom.addRelocation(self, atom_index, .{
- .type = .direct,
- .target = target,
- .offset = 0,
- .addend = 0,
- .pcrel = false,
- .length = 3,
- });
-
- const target_sym = self.getSymbol(target);
- switch (target_sym.section_number) {
- .UNDEFINED => @panic("TODO generate a binding for undefined GOT target"),
- .ABSOLUTE => {},
- .DEBUG => unreachable, // not possible
- else => try Atom.addBaseRelocation(self, atom_index, 0),
- }
-
- return atom_index;
-}
-
fn growAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 {
const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
@@ -873,17 +822,75 @@ fn writeMem(handle: std.ChildProcess.Id, pvaddr: std.os.windows.LPVOID, code: []
if (amt != code.len) return error.InputOutput;
}
-fn writePtrWidthAtom(self: *Coff, atom_index: Atom.Index) !void {
+fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
+ const sect_id = self.got_section_index.?;
+
+ if (self.got_table_count_dirty) {
+ const needed_size = @intCast(u32, self.got_table.entries.items.len * self.ptr_width.size());
+ try self.growSection(sect_id, needed_size);
+ self.got_table_count_dirty = false;
+ }
+
+ const header = &self.sections.items(.header)[sect_id];
+ const entry = self.got_table.entries.items[index];
+ const entry_value = self.getSymbol(entry).value;
+ const entry_offset = index * self.ptr_width.size();
+ const file_offset = header.pointer_to_raw_data + entry_offset;
+ const vmaddr = header.virtual_address + entry_offset;
+
+ log.debug("writing GOT entry {d}: @{x} => {x}", .{ index, vmaddr, entry_value + self.getImageBase() });
+
switch (self.ptr_width) {
.p32 => {
- var buffer: [@sizeOf(u32)]u8 = [_]u8{0} ** @sizeOf(u32);
- try self.writeAtom(atom_index, &buffer);
+ var buf: [4]u8 = undefined;
+ mem.writeIntLittle(u32, &buf, @intCast(u32, entry_value + self.getImageBase()));
+ try self.base.file.?.pwriteAll(&buf, file_offset);
},
.p64 => {
- var buffer: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64);
- try self.writeAtom(atom_index, &buffer);
+ var buf: [8]u8 = undefined;
+ mem.writeIntLittle(u64, &buf, entry_value + self.getImageBase());
+ try self.base.file.?.pwriteAll(&buf, file_offset);
},
}
+
+ if (is_hot_update_compatible) {
+ if (self.base.child_pid) |handle| {
+ const gpa = self.base.allocator;
+ const slide = @ptrToInt(self.hot_state.loaded_base_address.?);
+ const actual_vmaddr = vmaddr + slide;
+ const pvaddr = @intToPtr(*anyopaque, actual_vmaddr);
+ log.debug("writing GOT entry to memory at address {x}", .{actual_vmaddr});
+ if (build_options.enable_logging) {
+ switch (self.ptr_width) {
+ .p32 => {
+ var buf: [4]u8 = undefined;
+ try debugMem(gpa, handle, pvaddr, &buf);
+ },
+ .p64 => {
+ var buf: [8]u8 = undefined;
+ try debugMem(gpa, handle, pvaddr, &buf);
+ },
+ }
+ }
+
+ switch (self.ptr_width) {
+ .p32 => {
+ var buf: [4]u8 = undefined;
+ mem.writeIntLittle(u32, &buf, @intCast(u32, entry_value + slide));
+ writeMem(handle, pvaddr, &buf) catch |err| {
+ log.warn("writing to protected memory failed with error: {s}", .{@errorName(err)});
+ };
+ },
+ .p64 => {
+ var buf: [8]u8 = undefined;
+ mem.writeIntLittle(u64, &buf, entry_value + slide);
+ writeMem(handle, pvaddr, &buf) catch |err| {
+ log.warn("writing to protected memory failed with error: {s}", .{@errorName(err)});
+ };
+ },
+ }
+ }
+ }
}
fn markRelocsDirtyByTarget(self: *Coff, target: SymbolWithLoc) void {
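
writeOffsetTableEntry above spells out the pointer-width switch twice, once for the file write and once for the hot-update write. If the encoding were shared, a helper might look like this (a sketch, not part of the diff; PtrWidth is the enum extended earlier in this file):

fn encodeGotCell(ptr_width: PtrWidth, value: u64, buf: *[8]u8) []const u8 {
    return switch (ptr_width) {
        .p32 => blk: {
            std.mem.writeIntLittle(u32, buf[0..4], @intCast(u32, value));
            break :blk buf[0..4];
        },
        .p64 => blk: {
            std.mem.writeIntLittle(u64, buf[0..8], value);
            break :blk buf[0..8];
        },
    };
}

Both the pwriteAll and writeMem call sites could then reuse the returned slice.
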
@@ -904,6 +911,15 @@ fn markRelocsDirtyByAddress(self: *Coff, addr: u32) void {
reloc.dirty = true;
}
}
+
+    // TODO: dirty only the GOT cells that are actually affected
+ for (self.got_table.entries.items) |entry| {
+ const target_addr = self.getSymbol(entry).value;
+ if (target_addr >= addr) {
+ self.got_table_contents_dirty = true;
+ break;
+ }
+ }
}
fn resolveRelocs(self: *Coff, atom_index: Atom.Index, relocs: []*const Relocation, code: []u8, image_base: u64) void {
@@ -994,17 +1010,7 @@ fn freeAtom(self: *Coff, atom_index: Atom.Index) void {
self.locals_free_list.append(gpa, sym_index) catch {};
// Try freeing GOT atom if this decl had one
- const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
- if (self.got_entries_table.get(got_target)) |got_index| {
- self.got_entries_free_list.append(gpa, @intCast(u32, got_index)) catch {};
- self.got_entries.items[got_index] = .{
- .target = .{ .sym_index = 0, .file = null },
- .sym_index = 0,
- };
- _ = self.got_entries_table.remove(got_target);
-
- log.debug(" adding GOT index {d} to free list (target local@{d})", .{ got_index, sym_index });
- }
+ self.got_table.freeEntry(gpa, .{ .sym_index = sym_index });
self.locals.items[sym_index].section_number = .UNDEFINED;
_ = self.atom_by_index_table.remove(sym_index);
@@ -1243,14 +1249,7 @@ fn updateLazySymbolAtom(
atom.size = code_len;
symbol.value = vaddr;
- const got_target = SymbolWithLoc{ .sym_index = local_sym_index, .file = null };
- const got_index = try self.allocateGotEntry(got_target);
- const got_atom_index = try self.createGotAtom(got_target);
- const got_atom = self.getAtom(got_atom_index);
- self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
- try self.writePtrWidthAtom(got_atom_index);
-
- self.markRelocsDirtyByTarget(atom.getSymbolWithLoc());
+ try self.addGotEntry(.{ .sym_index = local_sym_index });
try self.writeAtom(atom_index, code);
}
@@ -1321,6 +1320,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, comple
const decl_metadata = self.decls.get(decl_index).?;
const atom_index = decl_metadata.atom;
const atom = self.getAtom(atom_index);
+ const sym_index = atom.getSymbolIndex().?;
const sect_index = decl_metadata.section;
const code_len = @intCast(u32, code.len);
@@ -1340,10 +1340,9 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, comple
if (vaddr != sym.value) {
sym.value = vaddr;
log.debug(" (updating GOT entry)", .{});
- const got_target = SymbolWithLoc{ .sym_index = atom.getSymbolIndex().?, .file = null };
- const got_atom_index = self.getGotAtomIndexForSymbol(got_target).?;
- self.markRelocsDirtyByTarget(got_target);
- try self.writePtrWidthAtom(got_atom_index);
+ const got_entry_index = self.got_table.lookup.get(.{ .sym_index = sym_index }).?;
+ try self.writeOffsetTableEntry(got_entry_index);
+ self.markRelocsDirtyByTarget(.{ .sym_index = sym_index });
}
} else if (code_len < atom.size) {
self.shrinkAtom(atom_index, code_len);
@@ -1361,15 +1360,9 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, comple
self.getAtomPtr(atom_index).size = code_len;
sym.value = vaddr;
- const got_target = SymbolWithLoc{ .sym_index = atom.getSymbolIndex().?, .file = null };
- const got_index = try self.allocateGotEntry(got_target);
- const got_atom_index = try self.createGotAtom(got_target);
- const got_atom = self.getAtom(got_atom_index);
- self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
- try self.writePtrWidthAtom(got_atom_index);
+ try self.addGotEntry(.{ .sym_index = sym_index });
}
- self.markRelocsDirtyByTarget(atom.getSymbolWithLoc());
try self.writeAtom(atom_index, code);
}
@@ -1651,6 +1644,16 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
try self.writeAtom(atom_index, code.items);
}
+ // Update GOT if it got moved in memory.
+ if (self.got_table_contents_dirty) {
+ for (self.got_table.entries.items, 0..) |entry, i| {
+ if (!self.got_table.lookup.contains(entry)) continue;
+ // TODO: write all in one go rather than incrementally.
+ try self.writeOffsetTableEntry(i);
+ }
+ self.got_table_contents_dirty = false;
+ }
+
try self.writeBaseRelocations();
if (self.getEntryPoint()) |entry_sym_loc| {
@@ -1739,48 +1742,82 @@ pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl_index: Module.Dec
fn writeBaseRelocations(self: *Coff) !void {
const gpa = self.base.allocator;
- var pages = std.AutoHashMap(u32, std.ArrayList(coff.BaseRelocation)).init(gpa);
+ var page_table = std.AutoHashMap(u32, std.ArrayList(coff.BaseRelocation)).init(gpa);
defer {
- var it = pages.valueIterator();
+ var it = page_table.valueIterator();
while (it.next()) |inner| {
inner.deinit();
}
- pages.deinit();
+ page_table.deinit();
}
- var it = self.base_relocs.iterator();
- while (it.next()) |entry| {
- const atom_index = entry.key_ptr.*;
- const atom = self.getAtom(atom_index);
- const offsets = entry.value_ptr.*;
-
- for (offsets.items) |offset| {
+ {
+ var it = self.base_relocs.iterator();
+ while (it.next()) |entry| {
+ const atom_index = entry.key_ptr.*;
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
- const rva = sym.value + offset;
- const page = mem.alignBackwardGeneric(u32, rva, self.page_size);
- const gop = try pages.getOrPut(page);
- if (!gop.found_existing) {
- gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa);
+ const offsets = entry.value_ptr.*;
+
+ for (offsets.items) |offset| {
+ const rva = sym.value + offset;
+ const page = mem.alignBackwardGeneric(u32, rva, self.page_size);
+ const gop = try page_table.getOrPut(page);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa);
+ }
+ try gop.value_ptr.append(.{
+ .offset = @intCast(u12, rva - page),
+ .type = .DIR64,
+ });
+ }
+ }
+
+ {
+ const header = &self.sections.items(.header)[self.got_section_index.?];
+ for (self.got_table.entries.items, 0..) |entry, index| {
+ if (!self.got_table.lookup.contains(entry)) continue;
+
+ const sym = self.getSymbol(entry);
+ if (sym.section_number == .UNDEFINED) continue;
+
+ const rva = @intCast(u32, header.virtual_address + index * self.ptr_width.size());
+ const page = mem.alignBackwardGeneric(u32, rva, self.page_size);
+ const gop = try page_table.getOrPut(page);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa);
+ }
+ try gop.value_ptr.append(.{
+ .offset = @intCast(u12, rva - page),
+ .type = .DIR64,
+ });
}
- try gop.value_ptr.append(.{
- .offset = @intCast(u12, rva - page),
- .type = .DIR64,
- });
}
}
+ // Sort pages by address.
+ var pages = try std.ArrayList(u32).initCapacity(gpa, page_table.count());
+ defer pages.deinit();
+ {
+ var it = page_table.keyIterator();
+ while (it.next()) |page| {
+ pages.appendAssumeCapacity(page.*);
+ }
+ }
+ std.sort.sort(u32, pages.items, {}, std.sort.asc(u32));
+
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
- var pages_it = pages.iterator();
- while (pages_it.next()) |entry| {
+ for (pages.items) |page| {
+ const entries = page_table.getPtr(page).?;
// Pad to required 4-byte alignment
if (!mem.isAlignedGeneric(
usize,
- entry.value_ptr.items.len * @sizeOf(coff.BaseRelocation),
+ entries.items.len * @sizeOf(coff.BaseRelocation),
@sizeOf(u32),
)) {
- try entry.value_ptr.append(.{
+ try entries.append(.{
.offset = 0,
.type = .ABSOLUTE,
});
@@ -1788,14 +1825,14 @@ fn writeBaseRelocations(self: *Coff) !void {
const block_size = @intCast(
u32,
- entry.value_ptr.items.len * @sizeOf(coff.BaseRelocation) + @sizeOf(coff.BaseRelocationDirectoryEntry),
+ entries.items.len * @sizeOf(coff.BaseRelocation) + @sizeOf(coff.BaseRelocationDirectoryEntry),
);
try buffer.ensureUnusedCapacity(block_size);
buffer.appendSliceAssumeCapacity(mem.asBytes(&coff.BaseRelocationDirectoryEntry{
- .page_rva = entry.key_ptr.*,
+ .page_rva = page,
.block_size = block_size,
}));
- buffer.appendSliceAssumeCapacity(mem.sliceAsBytes(entry.value_ptr.items));
+ buffer.appendSliceAssumeCapacity(mem.sliceAsBytes(entries.items));
}
const header = &self.sections.items(.header)[self.reloc_section_index.?];
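
Each block emitted above follows the PE base-relocation format: an 8-byte directory entry (page_rva + block_size) followed by 2-byte entries, with an ABSOLUTE entry appended whenever the entries alone would leave block_size misaligned. A standalone check of that arithmetic (entry and header sizes are fixed by the PE format):

const std = @import("std");

test "base relocation block size" {
    const header_size: u32 = 8; // coff.BaseRelocationDirectoryEntry
    const entry_size: u32 = 2; // coff.BaseRelocation (offset: u12, type: u4)
    var n_entries: u32 = 3; // three DIR64 relocations on this page
    if ((n_entries * entry_size) % 4 != 0) n_entries += 1; // one ABSOLUTE pad
    const block_size = header_size + n_entries * entry_size;
    try std.testing.expectEqual(@as(u32, 16), block_size);
}
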
@@ -2315,14 +2352,6 @@ pub fn getAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.In
return self.atom_by_index_table.get(sym_loc.sym_index);
}
-/// Returns GOT atom that references `sym_loc` if one exists.
-/// Returns null otherwise.
-pub fn getGotAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
- const got_index = self.got_entries_table.get(sym_loc) orelse return null;
- const got_entry = self.got_entries.items[got_index];
- return self.getAtomIndexForSymbol(.{ .sym_index = got_entry.sym_index, .file = null });
-}
-
fn setSectionName(self: *Coff, header: *coff.SectionHeader, name: []const u8) !void {
if (name.len <= 8) {
mem.copy(u8, &header.name, name);
@@ -2410,25 +2439,7 @@ fn logSymtab(self: *Coff) void {
}
log.debug("GOT entries:", .{});
- for (self.got_entries.items, 0..) |entry, i| {
- const got_sym = self.getSymbol(.{ .sym_index = entry.sym_index, .file = null });
- const target_sym = self.getSymbol(entry.target);
- if (target_sym.section_number == .UNDEFINED) {
- log.debug(" {d}@{x} => import('{s}')", .{
- i,
- got_sym.value,
- self.getSymbolName(entry.target),
- });
- } else {
- log.debug(" {d}@{x} => local(%{d}) in object({?d}) {s}", .{
- i,
- got_sym.value,
- entry.target.sym_index,
- entry.target.file,
- logSymAttributes(target_sym, &buf),
- });
- }
- }
+ log.debug("{}", .{self.got_table});
}
fn logSections(self: *Coff) void {
@@ -2484,6 +2495,7 @@ const LlvmObject = @import("../codegen/llvm.zig").Object;
const Module = @import("../Module.zig");
const Object = @import("Coff/Object.zig");
const Relocation = @import("Coff/Relocation.zig");
+const TableSection = @import("table_section.zig").TableSection;
const StringTable = @import("strtab.zig").StringTable;
const Type = @import("../type.zig").Type;
const TypedValue = @import("../TypedValue.zig");
diff --git a/src/link/Coff/Relocation.zig b/src/link/Coff/Relocation.zig
index 2fafa0bbdc..4449691ac0 100644
--- a/src/link/Coff/Relocation.zig
+++ b/src/link/Coff/Relocation.zig
@@ -48,17 +48,16 @@ dirty: bool = true,
/// Returns address of the target if any.
pub fn getTargetAddress(self: Relocation, coff_file: *const Coff) ?u32 {
switch (self.type) {
- .got, .got_page, .got_pageoff, .direct, .page, .pageoff => {
- const maybe_target_atom_index = switch (self.type) {
- .got, .got_page, .got_pageoff => coff_file.getGotAtomIndexForSymbol(self.target),
- .direct, .page, .pageoff => coff_file.getAtomIndexForSymbol(self.target),
- else => unreachable,
- };
- const target_atom_index = maybe_target_atom_index orelse return null;
+ .got, .got_page, .got_pageoff => {
+ const got_index = coff_file.got_table.lookup.get(self.target) orelse return null;
+ const header = coff_file.sections.items(.header)[coff_file.got_section_index.?];
+ return header.virtual_address + got_index * coff_file.ptr_width.size();
+ },
+ .direct, .page, .pageoff => {
+ const target_atom_index = coff_file.getAtomIndexForSymbol(self.target) orelse return null;
const target_atom = coff_file.getAtom(target_atom_index);
return target_atom.getSymbol(coff_file).value;
},
-
.import, .import_page, .import_pageoff => {
const sym = coff_file.getSymbol(self.target);
const index = coff_file.import_tables.getIndex(sym.value) orelse return null;
@@ -74,7 +73,8 @@ pub fn getTargetAddress(self: Relocation, coff_file: *const Coff) ?u32 {
/// Returns true if and only if the reloc is dirty AND the target address is available.
pub fn isResolvable(self: Relocation, coff_file: *Coff) bool {
- _ = self.getTargetAddress(coff_file) orelse return false;
+ const addr = self.getTargetAddress(coff_file) orelse return false;
+ if (addr == 0) return false;
return self.dirty;
}
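
GOT-type relocations no longer resolve through a per-entry atom; the target address is pure arithmetic on the slot index. The new addr == 0 check in isResolvable presumably treats a still-zero address as "not yet allocated", keeping the reloc dirty until the target lands somewhere. A worked example of the lookup, with assumed values:

const std = @import("std");

test "got relocation target address" {
    const got_section_rva: u32 = 0x3000; // assumed .got virtual_address
    const got_index: u32 = 5; // slot from got_table.lookup
    const ptr_size: u32 = 8; // ptr_width.size() on a p64 target
    try std.testing.expectEqual(@as(u32, 0x3028), got_section_rva + got_index * ptr_size);
}
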
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index b25a6f8f8a..4a6bb99818 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -30,6 +30,7 @@ const LlvmObject = @import("../codegen/llvm.zig").Object;
const Module = @import("../Module.zig");
const Package = @import("../Package.zig");
const StringTable = @import("strtab.zig").StringTable;
+const TableSection = @import("table_section.zig").TableSection;
const Type = @import("../type.zig").Type;
const TypedValue = @import("../TypedValue.zig");
const Value = @import("../value.zig").Value;
@@ -148,17 +149,13 @@ global_symbols: std.ArrayListUnmanaged(elf.Elf64_Sym) = .{},
local_symbol_free_list: std.ArrayListUnmanaged(u32) = .{},
global_symbol_free_list: std.ArrayListUnmanaged(u32) = .{},
-offset_table_free_list: std.ArrayListUnmanaged(u32) = .{},
-/// Same order as in the file. The value is the absolute vaddr value.
-/// If the vaddr of the executable program header changes, the entire
-/// offset table needs to be rewritten.
-offset_table: std.ArrayListUnmanaged(u64) = .{},
+got_table: TableSection(u32) = .{},
phdr_table_dirty: bool = false,
shdr_table_dirty: bool = false,
shstrtab_dirty: bool = false,
-offset_table_count_dirty: bool = false,
+got_table_count_dirty: bool = false,
debug_strtab_dirty: bool = false,
debug_abbrev_section_dirty: bool = false,
@@ -329,8 +326,7 @@ pub fn deinit(self: *Elf) void {
self.global_symbols.deinit(gpa);
self.global_symbol_free_list.deinit(gpa);
self.local_symbol_free_list.deinit(gpa);
- self.offset_table_free_list.deinit(gpa);
- self.offset_table.deinit(gpa);
+ self.got_table.deinit(gpa);
{
var it = self.decls.iterator();
@@ -1289,6 +1285,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
assert(!self.shdr_table_dirty);
assert(!self.shstrtab_dirty);
assert(!self.debug_strtab_dirty);
+ assert(!self.got_table_count_dirty);
}
fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !void {
@@ -2168,7 +2165,7 @@ fn freeAtom(self: *Elf, atom_index: Atom.Index) void {
_ = self.atom_by_index_table.remove(local_sym_index);
self.getAtomPtr(atom_index).local_sym_index = 0;
- self.offset_table_free_list.append(self.base.allocator, atom.offset_table_index) catch {};
+ self.got_table.freeEntry(gpa, local_sym_index);
}
fn shrinkAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64) void {
@@ -2191,11 +2188,9 @@ pub fn createAtom(self: *Elf) !Atom.Index {
const atom_index = @intCast(Atom.Index, self.atoms.items.len);
const atom = try self.atoms.addOne(gpa);
const local_sym_index = try self.allocateLocalSymbol();
- const offset_table_index = try self.allocateGotOffset();
try self.atom_by_index_table.putNoClobber(gpa, local_sym_index, atom_index);
atom.* = .{
.local_sym_index = local_sym_index,
- .offset_table_index = offset_table_index,
.prev_index = null,
.next_index = null,
};
@@ -2352,26 +2347,6 @@ pub fn allocateLocalSymbol(self: *Elf) !u32 {
return index;
}
-pub fn allocateGotOffset(self: *Elf) !u32 {
- try self.offset_table.ensureUnusedCapacity(self.base.allocator, 1);
-
- const index = blk: {
- if (self.offset_table_free_list.popOrNull()) |index| {
- log.debug(" (reusing GOT offset at index {d})", .{index});
- break :blk index;
- } else {
- log.debug(" (allocating GOT offset at index {d})", .{self.offset_table.items.len});
- const index = @intCast(u32, self.offset_table.items.len);
- _ = self.offset_table.addOneAssumeCapacity();
- self.offset_table_count_dirty = true;
- break :blk index;
- }
- };
-
- self.offset_table.items[index] = 0;
- return index;
-}
-
fn freeUnnamedConsts(self: *Elf, decl_index: Module.Decl.Index) void {
const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return;
for (unnamed_consts.items) |atom| {
@@ -2465,6 +2440,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s
const decl_metadata = self.decls.get(decl_index).?;
const atom_index = decl_metadata.atom;
const atom = self.getAtom(atom_index);
+ const local_sym_index = atom.getSymbolIndex().?;
const shdr_index = decl_metadata.shdr;
if (atom.getSymbol(self).st_size != 0 and self.base.child_pid == null) {
@@ -2485,8 +2461,9 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s
local_sym.st_value = vaddr;
log.debug(" (writing new offset table entry)", .{});
- self.offset_table.items[atom.offset_table_index] = vaddr;
- try self.writeOffsetTableEntry(atom.offset_table_index);
+ const got_entry_index = self.got_table.lookup.get(local_sym_index).?;
+ self.got_table.entries.items[got_entry_index] = local_sym_index;
+ try self.writeOffsetTableEntry(got_entry_index);
}
} else if (code.len < local_sym.st_size) {
self.shrinkAtom(atom_index, code.len);
@@ -2494,7 +2471,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s
local_sym.st_size = code.len;
// TODO this write could be avoided if no fields of the symbol were changed.
- try self.writeSymbol(atom.getSymbolIndex().?);
+ try self.writeSymbol(local_sym_index);
} else {
const local_sym = atom.getSymbolPtr(self);
local_sym.* = .{
@@ -2509,12 +2486,12 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s
errdefer self.freeAtom(atom_index);
log.debug("allocated text block for {s} at 0x{x}", .{ decl_name, vaddr });
- self.offset_table.items[atom.offset_table_index] = vaddr;
local_sym.st_value = vaddr;
local_sym.st_size = code.len;
- try self.writeSymbol(atom.getSymbolIndex().?);
- try self.writeOffsetTableEntry(atom.offset_table_index);
+ try self.writeSymbol(local_sym_index);
+ const got_entry_index = try atom.getOrCreateOffsetTableEntry(self);
+ try self.writeOffsetTableEntry(got_entry_index);
}
const local_sym = atom.getSymbolPtr(self);
@@ -2755,12 +2732,12 @@ fn updateLazySymbolAtom(
errdefer self.freeAtom(atom_index);
log.debug("allocated text block for {s} at 0x{x}", .{ name, vaddr });
- self.offset_table.items[atom.offset_table_index] = vaddr;
local_sym.st_value = vaddr;
local_sym.st_size = code.len;
try self.writeSymbol(local_sym_index);
- try self.writeOffsetTableEntry(atom.offset_table_index);
+ const got_entry_index = try atom.getOrCreateOffsetTableEntry(self);
+ try self.writeOffsetTableEntry(got_entry_index);
const section_offset = vaddr - self.program_headers.items[phdr_index].p_vaddr;
const file_offset = self.sections.items(.shdr)[shdr_index].sh_offset + section_offset;
@@ -2989,32 +2966,34 @@ fn writeSectHeader(self: *Elf, index: usize) !void {
}
}
-fn writeOffsetTableEntry(self: *Elf, index: usize) !void {
+fn writeOffsetTableEntry(self: *Elf, index: @TypeOf(self.got_table).Index) !void {
const entry_size: u16 = self.archPtrWidthBytes();
- if (self.offset_table_count_dirty) {
- const needed_size = self.offset_table.items.len * entry_size;
+ if (self.got_table_count_dirty) {
+ const needed_size = self.got_table.entries.items.len * entry_size;
try self.growAllocSection(self.got_section_index.?, needed_size);
- self.offset_table_count_dirty = false;
+ self.got_table_count_dirty = false;
}
const endian = self.base.options.target.cpu.arch.endian();
const shdr = &self.sections.items(.shdr)[self.got_section_index.?];
const off = shdr.sh_offset + @as(u64, entry_size) * index;
const phdr = &self.program_headers.items[self.phdr_got_index.?];
const vaddr = phdr.p_vaddr + @as(u64, entry_size) * index;
+ const got_entry = self.got_table.entries.items[index];
+ const got_value = self.getSymbol(got_entry).st_value;
switch (entry_size) {
2 => {
var buf: [2]u8 = undefined;
- mem.writeInt(u16, &buf, @intCast(u16, self.offset_table.items[index]), endian);
+ mem.writeInt(u16, &buf, @intCast(u16, got_value), endian);
try self.base.file.?.pwriteAll(&buf, off);
},
4 => {
var buf: [4]u8 = undefined;
- mem.writeInt(u32, &buf, @intCast(u32, self.offset_table.items[index]), endian);
+ mem.writeInt(u32, &buf, @intCast(u32, got_value), endian);
try self.base.file.?.pwriteAll(&buf, off);
},
8 => {
var buf: [8]u8 = undefined;
- mem.writeInt(u64, &buf, self.offset_table.items[index], endian);
+ mem.writeInt(u64, &buf, got_value, endian);
try self.base.file.?.pwriteAll(&buf, off);
if (self.base.child_pid) |pid| {
diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig
index 4ab304ef71..70be5abbca 100644
--- a/src/link/Elf/Atom.zig
+++ b/src/link/Elf/Atom.zig
@@ -14,9 +14,6 @@ const Elf = @import("../Elf.zig");
/// offset table entry.
local_sym_index: u32,
-/// This field is undefined for symbols with size = 0.
-offset_table_index: u32,
-
/// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `TextBlock`.
prev_index: ?Index,
@@ -48,13 +45,24 @@ pub fn getName(self: Atom, elf_file: *const Elf) []const u8 {
return elf_file.getSymbolName(self.getSymbolIndex().?);
}
+/// If an entry already exists, returns its index.
+/// Otherwise, creates a new entry in the Global Offset Table for this Atom.
+pub fn getOrCreateOffsetTableEntry(self: Atom, elf_file: *Elf) !u32 {
+ const sym_index = self.getSymbolIndex().?;
+ if (elf_file.got_table.lookup.get(sym_index)) |index| return index;
+ const index = try elf_file.got_table.allocateEntry(elf_file.base.allocator, sym_index);
+ elf_file.got_table_count_dirty = true;
+ return index;
+}
+
pub fn getOffsetTableAddress(self: Atom, elf_file: *Elf) u64 {
- assert(self.getSymbolIndex() != null);
+ const sym_index = self.getSymbolIndex().?;
+ const got_entry_index = elf_file.got_table.lookup.get(sym_index).?;
const target = elf_file.base.options.target;
const ptr_bits = target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got = elf_file.program_headers.items[elf_file.phdr_got_index.?];
- return got.p_vaddr + self.offset_table_index * ptr_bytes;
+ return got.p_vaddr + got_entry_index * ptr_bytes;
}
/// Returns how much room there is to grow in virtual address space.
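
The atom no longer caches an offset_table_index: the ELF got_table stores plain u32 symbol indices, and the address is derived on demand from the GOT program header, so a symbol that moves only needs its one GOT cell rewritten. The formula above, checked standalone with assumed values:

const std = @import("std");

test "elf got entry address" {
    const p_vaddr: u64 = 0x202000; // assumed vaddr of the GOT phdr
    const got_entry_index: u64 = 7;
    const ptr_bytes: u64 = 8; // 64-bit target
    try std.testing.expectEqual(@as(u64, 0x202038), p_vaddr + got_entry_index * ptr_bytes);
}
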
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 0ffb72f087..47954f8871 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -19,6 +19,7 @@ const fat = @import("MachO/fat.zig");
const link = @import("../link.zig");
const llvm_backend = @import("../codegen/llvm.zig");
const load_commands = @import("MachO/load_commands.zig");
+const stubs = @import("MachO/stubs.zig");
const target_util = @import("../target.zig");
const trace = @import("../tracy.zig").trace;
const zld = @import("MachO/zld.zig");
@@ -41,6 +42,7 @@ const Md5 = std.crypto.hash.Md5;
const Module = @import("../Module.zig");
const Relocation = @import("MachO/Relocation.zig");
const StringTable = @import("strtab.zig").StringTable;
+const TableSection = @import("table_section.zig").TableSection;
const Trie = @import("MachO/Trie.zig");
const Type = @import("../type.zig").Type;
const TypedValue = @import("../TypedValue.zig");
@@ -150,17 +152,20 @@ globals_free_list: std.ArrayListUnmanaged(u32) = .{},
dyld_stub_binder_index: ?u32 = null,
dyld_private_atom_index: ?Atom.Index = null,
-stub_helper_preamble_atom_index: ?Atom.Index = null,
strtab: StringTable(.strtab) = .{},
-got_table: SectionTable = .{},
-stubs_table: SectionTable = .{},
-tlv_table: SectionTable = .{},
+got_table: TableSection(SymbolWithLoc) = .{},
+stub_table: TableSection(SymbolWithLoc) = .{},
error_flags: File.ErrorFlags = File.ErrorFlags{},
segment_table_dirty: bool = false,
+got_table_count_dirty: bool = false,
+got_table_contents_dirty: bool = false,
+stub_table_count_dirty: bool = false,
+stub_table_contents_dirty: bool = false,
+stub_helper_preamble_allocated: bool = false,
/// A helper var to indicate if we are at the start of the incremental updates, or
/// already somewhere further along the update-and-run chain.
@@ -210,17 +215,16 @@ rebases: RebaseTable = .{},
/// this will be a table indexed by index into the list of Atoms.
bindings: BindingTable = .{},
-/// A table of lazy bindings indexed by the owning them `Atom`.
-/// Note that once we refactor `Atom`'s lifetime and ownership rules,
-/// this will be a table indexed by index into the list of Atoms.
-lazy_bindings: BindingTable = .{},
-
/// Table of tracked LazySymbols.
lazy_syms: LazySymbolTable = .{},
/// Table of tracked Decls.
decls: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
+/// Table of threadlocal variables descriptors.
+/// They are emitted in the `__thread_vars` section.
+tlv_table: TlvSymbolTable = .{},
+
/// Hot-code swapping state.
hot_state: if (is_hot_update_compatible) HotUpdateState else struct {} = .{},
@@ -237,6 +241,8 @@ const LazySymbolMetadata = struct {
alignment: u32,
};
+const TlvSymbolTable = std.AutoArrayHashMapUnmanaged(SymbolWithLoc, Atom.Index);
+
const DeclMetadata = struct {
atom: Atom.Index,
section: u8,
@@ -265,122 +271,6 @@ const DeclMetadata = struct {
}
};
-const SectionTable = struct {
- entries: std.ArrayListUnmanaged(Entry) = .{},
- free_list: std.ArrayListUnmanaged(u32) = .{},
- lookup: std.AutoHashMapUnmanaged(SymbolWithLoc, u32) = .{},
-
- pub fn deinit(st: *ST, allocator: Allocator) void {
- st.entries.deinit(allocator);
- st.free_list.deinit(allocator);
- st.lookup.deinit(allocator);
- }
-
- pub fn allocateEntry(st: *ST, allocator: Allocator, target: SymbolWithLoc) !u32 {
- try st.entries.ensureUnusedCapacity(allocator, 1);
- const index = blk: {
- if (st.free_list.popOrNull()) |index| {
- log.debug(" (reusing entry index {d})", .{index});
- break :blk index;
- } else {
- log.debug(" (allocating entry at index {d})", .{st.entries.items.len});
- const index = @intCast(u32, st.entries.items.len);
- _ = st.entries.addOneAssumeCapacity();
- break :blk index;
- }
- };
- st.entries.items[index] = .{ .target = target, .sym_index = 0 };
- try st.lookup.putNoClobber(allocator, target, index);
- return index;
- }
-
- pub fn freeEntry(st: *ST, allocator: Allocator, target: SymbolWithLoc) void {
- const index = st.lookup.get(target) orelse return;
- st.free_list.append(allocator, index) catch {};
- st.entries.items[index] = .{
- .target = .{ .sym_index = 0 },
- .sym_index = 0,
- };
- _ = st.lookup.remove(target);
- }
-
- pub fn getAtomIndex(st: *const ST, macho_file: *MachO, target: SymbolWithLoc) ?Atom.Index {
- const index = st.lookup.get(target) orelse return null;
- return st.entries.items[index].getAtomIndex(macho_file);
- }
-
- const FormatContext = struct {
- macho_file: *MachO,
- st: *const ST,
- };
-
- fn fmt(
- ctx: FormatContext,
- comptime unused_format_string: []const u8,
- options: std.fmt.FormatOptions,
- writer: anytype,
- ) @TypeOf(writer).Error!void {
- _ = options;
- comptime assert(unused_format_string.len == 0);
- try writer.writeAll("SectionTable:\n");
- for (ctx.st.entries.items, 0..) |entry, i| {
- const atom_sym = entry.getSymbol(ctx.macho_file);
- const target_sym = ctx.macho_file.getSymbol(entry.target);
- try writer.print(" {d}@{x} => ", .{ i, atom_sym.n_value });
- if (target_sym.undf()) {
- try writer.print("import('{s}')", .{
- ctx.macho_file.getSymbolName(entry.target),
- });
- } else {
- try writer.print("local(%{d}) in object({?d})", .{
- entry.target.sym_index,
- entry.target.file,
- });
- }
- try writer.writeByte('\n');
- }
- }
-
- fn format(st: *const ST, comptime unused_format_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
- _ = st;
- _ = unused_format_string;
- _ = options;
- _ = writer;
- @compileError("do not format SectionTable directly; use st.fmtDebug()");
- }
-
- pub fn fmtDebug(st: *const ST, macho_file: *MachO) std.fmt.Formatter(fmt) {
- return .{ .data = .{
- .macho_file = macho_file,
- .st = st,
- } };
- }
-
- const ST = @This();
-
- const Entry = struct {
- target: SymbolWithLoc,
- // Index into the synthetic symbol table (i.e., file == null).
- sym_index: u32,
-
- pub fn getSymbol(entry: Entry, macho_file: *MachO) macho.nlist_64 {
- return macho_file.getSymbol(.{ .sym_index = entry.sym_index });
- }
-
- pub fn getSymbolPtr(entry: Entry, macho_file: *MachO) *macho.nlist_64 {
- return macho_file.getSymbolPtr(.{ .sym_index = entry.sym_index });
- }
-
- pub fn getAtomIndex(entry: Entry, macho_file: *MachO) ?Atom.Index {
- return macho_file.getAtomIndexForSymbol(.{ .sym_index = entry.sym_index });
- }
-
- pub fn getName(entry: Entry, macho_file: *MachO) []const u8 {
- return macho_file.getSymbolName(.{ .sym_index = entry.sym_index });
- }
- };
-};
-
const BindingTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Atom.Binding));
const UnnamedConstTable = std.AutoArrayHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Atom.Index));
const RebaseTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(u32));
@@ -722,15 +612,15 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
return error.UndefinedSymbolReference;
}
- try self.createDyldPrivateAtom();
- try self.createStubHelperPreambleAtom();
-
for (actions.items) |action| switch (action.kind) {
.none => {},
.add_got => try self.addGotEntry(action.target),
.add_stub => try self.addStubEntry(action.target),
};
+ try self.createDyldPrivateAtom();
+ try self.writeStubHelperPreamble();
+
try self.allocateSpecialSymbols();
for (self.relocs.keys()) |atom_index| {
@@ -756,6 +646,27 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
try self.writeAtom(atom_index, code.items);
}
+ // Update GOT if it got moved in memory.
+ if (self.got_table_contents_dirty) {
+ for (self.got_table.entries.items, 0..) |entry, i| {
+ if (!self.got_table.lookup.contains(entry)) continue;
+ // TODO: write all in one go rather than incrementally.
+ try self.writeOffsetTableEntry(i);
+ }
+ self.got_table_contents_dirty = false;
+ }
+
+ // Update stubs if we moved any section in memory.
+ // TODO: we probably don't need to update all sections if only one got moved.
+ if (self.stub_table_contents_dirty) {
+ for (self.stub_table.entries.items, 0..) |entry, i| {
+ if (!self.stub_table.lookup.contains(entry)) continue;
+ // TODO: write all in one go rather than incrementally.
+ try self.writeStubTableEntry(i);
+ }
+ self.stub_table_contents_dirty = false;
+ }
+
if (build_options.enable_logging) {
self.logSymtab();
self.logSections();
@@ -1239,19 +1150,16 @@ pub fn writeAtom(self: *MachO, atom_index: Atom.Index, code: []u8) !void {
}
}
+ Atom.resolveRelocations(self, atom_index, relocs.items, code);
+
if (is_hot_update_compatible) {
- if (self.base.child_pid) |pid| blk: {
- const task = self.hot_state.mach_task orelse {
- log.warn("cannot hot swap: no Mach task acquired for child process with pid {d}", .{pid});
- break :blk;
- };
- self.updateAtomInMemory(task, section.segment_index, sym.n_value, code) catch |err| {
+ if (self.hot_state.mach_task) |task| {
+ self.writeToMemory(task, section.segment_index, sym.n_value, code) catch |err| {
log.warn("cannot hot swap: writing to memory failed: {s}", .{@errorName(err)});
};
}
}
- Atom.resolveRelocations(self, atom_index, relocs.items, code);
try self.base.file.?.pwriteAll(code, file_offset);
// Now we can mark the relocs as resolved.
@@ -1260,7 +1168,7 @@ pub fn writeAtom(self: *MachO, atom_index: Atom.Index, code: []u8) !void {
}
}
-fn updateAtomInMemory(self: *MachO, task: std.os.darwin.MachTask, segment_index: u8, addr: u64, code: []const u8) !void {
+fn writeToMemory(self: *MachO, task: std.os.darwin.MachTask, segment_index: u8, addr: u64, code: []const u8) !void {
const segment = self.segments.items[segment_index];
const cpu_arch = self.base.options.target.cpu.arch;
const nwritten = if (!segment.isWriteable())
@@ -1270,9 +1178,145 @@ fn updateAtomInMemory(self: *MachO, task: std.os.darwin.MachTask, segment_index:
if (nwritten != code.len) return error.InputOutput;
}
-fn writePtrWidthAtom(self: *MachO, atom_index: Atom.Index) !void {
- var buffer: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64);
- try self.writeAtom(atom_index, &buffer);
+fn writeOffsetTableEntry(self: *MachO, index: usize) !void {
+ const sect_id = self.got_section_index.?;
+
+ if (self.got_table_count_dirty) {
+ const needed_size = self.got_table.entries.items.len * @sizeOf(u64);
+ try self.growSection(sect_id, needed_size);
+ self.got_table_count_dirty = false;
+ }
+
+ const header = &self.sections.items(.header)[sect_id];
+ const segment_index = self.sections.items(.segment_index)[sect_id];
+ const entry = self.got_table.entries.items[index];
+ const entry_value = self.getSymbol(entry).n_value;
+ const entry_offset = index * @sizeOf(u64);
+ const file_offset = header.offset + entry_offset;
+ const vmaddr = header.addr + entry_offset;
+
+ log.debug("writing GOT entry {d}: @{x} => {x}", .{ index, vmaddr, entry_value });
+
+ var buf: [@sizeOf(u64)]u8 = undefined;
+ mem.writeIntLittle(u64, &buf, entry_value);
+ try self.base.file.?.pwriteAll(&buf, file_offset);
+
+ if (is_hot_update_compatible) {
+ if (self.hot_state.mach_task) |task| {
+ self.writeToMemory(task, segment_index, vmaddr, &buf) catch |err| {
+ log.warn("cannot hot swap: writing to memory failed: {s}", .{@errorName(err)});
+ };
+ }
+ }
+}
+
+fn writeStubHelperPreamble(self: *MachO) !void {
+ if (self.stub_helper_preamble_allocated) return;
+
+ const gpa = self.base.allocator;
+ const cpu_arch = self.base.options.target.cpu.arch;
+ const size = stubs.calcStubHelperPreambleSize(cpu_arch);
+
+ var buf = try std.ArrayList(u8).initCapacity(gpa, size);
+ defer buf.deinit();
+
+ const dyld_private_addr = self.getAtom(self.dyld_private_atom_index.?).getSymbol(self).n_value;
+ const dyld_stub_binder_got_addr = blk: {
+ const index = self.got_table.lookup.get(self.getGlobalByIndex(self.dyld_stub_binder_index.?)).?;
+ const header = self.sections.items(.header)[self.got_section_index.?];
+ break :blk header.addr + @sizeOf(u64) * index;
+ };
+ const header = self.sections.items(.header)[self.stub_helper_section_index.?];
+
+ try stubs.writeStubHelperPreambleCode(.{
+ .cpu_arch = cpu_arch,
+ .source_addr = header.addr,
+ .dyld_private_addr = dyld_private_addr,
+ .dyld_stub_binder_got_addr = dyld_stub_binder_got_addr,
+ }, buf.writer());
+ try self.base.file.?.pwriteAll(buf.items, header.offset);
+
+ self.stub_helper_preamble_allocated = true;
+}
+
+fn writeStubTableEntry(self: *MachO, index: usize) !void {
+ const stubs_sect_id = self.stubs_section_index.?;
+ const stub_helper_sect_id = self.stub_helper_section_index.?;
+ const laptr_sect_id = self.la_symbol_ptr_section_index.?;
+
+ const cpu_arch = self.base.options.target.cpu.arch;
+ const stub_entry_size = stubs.calcStubEntrySize(cpu_arch);
+ const stub_helper_entry_size = stubs.calcStubHelperEntrySize(cpu_arch);
+ const stub_helper_preamble_size = stubs.calcStubHelperPreambleSize(cpu_arch);
+
+ if (self.stub_table_count_dirty) {
+ // We grow all 3 sections one by one.
+ {
+ const needed_size = stub_entry_size * self.stub_table.entries.items.len;
+ try self.growSection(stubs_sect_id, needed_size);
+ }
+ {
+ const needed_size = stub_helper_preamble_size + stub_helper_entry_size * self.stub_table.entries.items.len;
+ try self.growSection(stub_helper_sect_id, needed_size);
+ }
+ {
+ const needed_size = @sizeOf(u64) * self.stub_table.entries.items.len;
+ try self.growSection(laptr_sect_id, needed_size);
+ }
+ self.stub_table_count_dirty = false;
+ }
+
+ const gpa = self.base.allocator;
+
+ const stubs_header = self.sections.items(.header)[stubs_sect_id];
+ const stub_helper_header = self.sections.items(.header)[stub_helper_sect_id];
+ const laptr_header = self.sections.items(.header)[laptr_sect_id];
+
+ const entry = self.stub_table.entries.items[index];
+ const stub_addr: u64 = stubs_header.addr + stub_entry_size * index;
+ const stub_helper_addr: u64 = stub_helper_header.addr + stub_helper_preamble_size + stub_helper_entry_size * index;
+ const laptr_addr: u64 = laptr_header.addr + @sizeOf(u64) * index;
+
+ log.debug("writing stub entry {d}: @{x} => '{s}'", .{ index, stub_addr, self.getSymbolName(entry) });
+
+ {
+ var buf = try std.ArrayList(u8).initCapacity(gpa, stub_entry_size);
+ defer buf.deinit();
+ try stubs.writeStubCode(.{
+ .cpu_arch = cpu_arch,
+ .source_addr = stub_addr,
+ .target_addr = laptr_addr,
+ }, buf.writer());
+ const off = stubs_header.offset + stub_entry_size * index;
+ try self.base.file.?.pwriteAll(buf.items, off);
+ }
+
+ {
+ var buf = try std.ArrayList(u8).initCapacity(gpa, stub_helper_entry_size);
+ defer buf.deinit();
+ try stubs.writeStubHelperCode(.{
+ .cpu_arch = cpu_arch,
+ .source_addr = stub_helper_addr,
+ .target_addr = stub_helper_header.addr,
+ }, buf.writer());
+ const off = stub_helper_header.offset + stub_helper_preamble_size + stub_helper_entry_size * index;
+ try self.base.file.?.pwriteAll(buf.items, off);
+ }
+
+ {
+ var buf: [@sizeOf(u64)]u8 = undefined;
+ mem.writeIntLittle(u64, &buf, stub_helper_addr);
+ const off = laptr_header.offset + @sizeOf(u64) * index;
+ try self.base.file.?.pwriteAll(&buf, off);
+ }
+
+    // TODO: generating a new stub entry will require pulling the address of the symbol from the
+ // target dylib when updating directly in memory.
+ if (is_hot_update_compatible) {
+ if (self.hot_state.mach_task) |_| {
+ @panic("TODO: update a stub entry in memory");
+ }
+ }
}
fn markRelocsDirtyByTarget(self: *MachO, target: SymbolWithLoc) void {
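
writeStubTableEntry keeps three sections in lockstep: the __stubs entry jumps through its __la_symbol_ptr slot, which initially points at the matching __stub_helper entry; that entry pushes its lazy-bind offset and jumps to the preamble, which tail-calls dyld_stub_binder through the GOT; dyld then overwrites the lazy pointer with the resolved address, so later calls bypass the helper. The parallel address arithmetic, checked standalone with assumed section bases and the x86_64 sizes from the removed atoms below (15-byte preamble, 10-byte helper entry; the 6-byte jmp stub size is an assumption here, since calcStubEntrySize lives in the new stubs.zig):

const std = @import("std");

test "parallel stub table layout" {
    const index: u64 = 2;
    const stub_entry_size: u64 = 6; // assumed x86_64 `jmp *rip+disp`
    const helper_entry_size: u64 = 10; // push imm32 + jmp rel32
    const preamble_size: u64 = 15;
    const stubs_base: u64 = 0x100001000; // assumed section addresses
    const helper_base: u64 = 0x100002000;
    const laptr_base: u64 = 0x100003000;

    try std.testing.expectEqual(@as(u64, 0x10000100c), stubs_base + stub_entry_size * index);
    try std.testing.expectEqual(@as(u64, 0x100002023), helper_base + preamble_size + helper_entry_size * index);
    try std.testing.expectEqual(@as(u64, 0x100003010), laptr_base + @sizeOf(u64) * index);
}
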
@@ -1290,13 +1334,28 @@ fn markRelocsDirtyByAddress(self: *MachO, addr: u64) void {
log.debug("marking relocs dirty by address: {x}", .{addr});
for (self.relocs.values()) |*relocs| {
for (relocs.items) |*reloc| {
- const target_atom_index = reloc.getTargetAtomIndex(self) orelse continue;
- const target_atom = self.getAtom(target_atom_index);
- const target_sym = target_atom.getSymbol(self);
- if (target_sym.n_value < addr) continue;
+ const target_addr = reloc.getTargetBaseAddress(self) orelse continue;
+ if (target_addr < addr) continue;
reloc.dirty = true;
}
}
+
+    // TODO: dirty only the GOT cells that are actually affected
+ for (self.got_table.entries.items) |entry| {
+ const target_addr = self.getSymbol(entry).n_value;
+ if (target_addr >= addr) {
+ self.got_table_contents_dirty = true;
+ break;
+ }
+ }
+
+ {
+ const stubs_addr = self.getSegment(self.stubs_section_index.?).vmaddr;
+ const stub_helper_addr = self.getSegment(self.stub_helper_section_index.?).vmaddr;
+ const laptr_addr = self.getSegment(self.la_symbol_ptr_section_index.?).vmaddr;
+ if (stubs_addr >= addr or stub_helper_addr >= addr or laptr_addr >= addr)
+ self.stub_table_contents_dirty = true;
+ }
}
pub fn allocateSpecialSymbols(self: *MachO) !void {
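
The dirtying strategy above is deliberately coarse: any GOT cell whose target sits at or above the moved address flags the whole table, and the stub machinery is flagged whenever any of its three sections could be affected. A standalone check of the GOT predicate with assumed addresses:

const std = @import("std");

test "dirty got cells by address" {
    const moved_addr: u64 = 0x4000;
    const target_addrs = [_]u64{ 0x1000, 0x4000, 0x9000 };
    var any_dirty = false;
    for (target_addrs) |t| {
        if (t >= moved_addr) any_dirty = true;
    }
    try std.testing.expect(any_dirty); // 0x4000 and 0x9000 trip the flag
}
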
@@ -1335,40 +1394,6 @@ pub fn createAtom(self: *MachO) !Atom.Index {
return atom_index;
}
-pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !Atom.Index {
- const atom_index = try self.createAtom();
- self.getAtomPtr(atom_index).size = @sizeOf(u64);
-
- const sym = self.getAtom(atom_index).getSymbolPtr(self);
- sym.n_type = macho.N_SECT;
- sym.n_sect = self.got_section_index.? + 1;
- sym.n_value = try self.allocateAtom(atom_index, @sizeOf(u64), @alignOf(u64));
-
- log.debug("allocated GOT atom at 0x{x}", .{sym.n_value});
-
- try Atom.addRelocation(self, atom_index, .{
- .type = .unsigned,
- .target = target,
- .offset = 0,
- .addend = 0,
- .pcrel = false,
- .length = 3,
- });
-
- const target_sym = self.getSymbol(target);
- if (target_sym.undf()) {
- try Atom.addBinding(self, atom_index, .{
- .target = self.getGlobal(self.getSymbolName(target)).?,
- .offset = 0,
- });
- } else {
- try Atom.addRebase(self, atom_index, 0);
- }
- try self.writePtrWidthAtom(atom_index);
-
- return atom_index;
-}
-
fn createDyldPrivateAtom(self: *MachO) !void {
if (self.dyld_private_atom_index != null) return;
@@ -1383,339 +1408,17 @@ fn createDyldPrivateAtom(self: *MachO) !void {
sym.n_value = try self.allocateAtom(atom_index, atom.size, @alignOf(u64));
log.debug("allocated dyld_private atom at 0x{x}", .{sym.n_value});
- try self.writePtrWidthAtom(atom_index);
-}
-
-fn createStubHelperPreambleAtom(self: *MachO) !void {
- if (self.stub_helper_preamble_atom_index != null) return;
-
- const gpa = self.base.allocator;
- const arch = self.base.options.target.cpu.arch;
- const size: u5 = switch (arch) {
- .x86_64 => 15,
- .aarch64 => 6 * @sizeOf(u32),
- else => unreachable,
- };
- const atom_index = try self.createAtom();
- const atom = self.getAtomPtr(atom_index);
- atom.size = size;
-
- const required_alignment: u32 = switch (arch) {
- .x86_64 => 1,
- .aarch64 => @alignOf(u32),
- else => unreachable,
- };
-
- const sym = atom.getSymbolPtr(self);
- sym.n_type = macho.N_SECT;
- sym.n_sect = self.stub_helper_section_index.? + 1;
-
- const dyld_private = self.getAtom(self.dyld_private_atom_index.?).getSymbolWithLoc();
- const dyld_stub_binder = self.globals.items[self.dyld_stub_binder_index.?];
-
- const code = try gpa.alloc(u8, size);
- defer gpa.free(code);
- mem.set(u8, code, 0);
-
- switch (arch) {
- .x86_64 => {
- // lea %r11, [rip + disp]
- code[0] = 0x4c;
- code[1] = 0x8d;
- code[2] = 0x1d;
- // push %r11
- code[7] = 0x41;
- code[8] = 0x53;
- // jmp [rip + disp]
- code[9] = 0xff;
- code[10] = 0x25;
-
- try Atom.addRelocations(self, atom_index, &[_]Relocation{ .{
- .type = .signed,
- .target = dyld_private,
- .offset = 3,
- .addend = 0,
- .pcrel = true,
- .length = 2,
- }, .{
- .type = .got,
- .target = dyld_stub_binder,
- .offset = 11,
- .addend = 0,
- .pcrel = true,
- .length = 2,
- } });
- },
-
- .aarch64 => {
- // adrp x17, 0
- mem.writeIntLittle(u32, code[0..][0..4], aarch64.Instruction.adrp(.x17, 0).toU32());
- // add x17, x17, 0
- mem.writeIntLittle(u32, code[4..][0..4], aarch64.Instruction.add(.x17, .x17, 0, false).toU32());
- // stp x16, x17, [sp, #-16]!
- mem.writeIntLittle(u32, code[8..][0..4], aarch64.Instruction.stp(
- .x16,
- .x17,
- aarch64.Register.sp,
- aarch64.Instruction.LoadStorePairOffset.pre_index(-16),
- ).toU32());
- // adrp x16, 0
- mem.writeIntLittle(u32, code[12..][0..4], aarch64.Instruction.adrp(.x16, 0).toU32());
- // ldr x16, [x16, 0]
- mem.writeIntLittle(u32, code[16..][0..4], aarch64.Instruction.ldr(
- .x16,
- .x16,
- aarch64.Instruction.LoadStoreOffset.imm(0),
- ).toU32());
- // br x16
- mem.writeIntLittle(u32, code[20..][0..4], aarch64.Instruction.br(.x16).toU32());
-
- try Atom.addRelocations(self, atom_index, &[_]Relocation{ .{
- .type = .page,
- .target = dyld_private,
- .offset = 0,
- .addend = 0,
- .pcrel = true,
- .length = 2,
- }, .{
- .type = .pageoff,
- .target = dyld_private,
- .offset = 4,
- .addend = 0,
- .pcrel = false,
- .length = 2,
- }, .{
- .type = .got_page,
- .target = dyld_stub_binder,
- .offset = 12,
- .addend = 0,
- .pcrel = true,
- .length = 2,
- }, .{
- .type = .got_pageoff,
- .target = dyld_stub_binder,
- .offset = 16,
- .addend = 0,
- .pcrel = false,
- .length = 2,
- } });
- },
-
- else => unreachable,
- }
- self.stub_helper_preamble_atom_index = atom_index;
-
- sym.n_value = try self.allocateAtom(atom_index, size, required_alignment);
- log.debug("allocated stub preamble atom at 0x{x}", .{sym.n_value});
- try self.writeAtom(atom_index, code);
-}
-
-fn createStubHelperAtom(self: *MachO) !Atom.Index {
- const gpa = self.base.allocator;
- const arch = self.base.options.target.cpu.arch;
- const size: u4 = switch (arch) {
- .x86_64 => 10,
- .aarch64 => 3 * @sizeOf(u32),
- else => unreachable,
- };
- const atom_index = try self.createAtom();
- const atom = self.getAtomPtr(atom_index);
- atom.size = size;
-
- const required_alignment: u32 = switch (arch) {
- .x86_64 => 1,
- .aarch64 => @alignOf(u32),
- else => unreachable,
- };
-
- const sym = atom.getSymbolPtr(self);
- sym.n_type = macho.N_SECT;
- sym.n_sect = self.stub_helper_section_index.? + 1;
-
- const code = try gpa.alloc(u8, size);
- defer gpa.free(code);
- mem.set(u8, code, 0);
-
- const stub_helper_preamble_atom_sym_index = if (self.stub_helper_preamble_atom_index) |stub_index|
- self.getAtom(stub_index).getSymbolIndex().?
- else
- unreachable;
-
- switch (arch) {
- .x86_64 => {
- // pushq
- code[0] = 0x68;
- // Next 4 bytes 1..4 are just a placeholder populated in `populateLazyBindOffsetsInStubHelper`.
- // jmpq
- code[5] = 0xe9;
-
- try Atom.addRelocation(self, atom_index, .{
- .type = .branch,
- .target = .{ .sym_index = stub_helper_preamble_atom_sym_index },
- .offset = 6,
- .addend = 0,
- .pcrel = true,
- .length = 2,
- });
- },
- .aarch64 => {
- const literal = blk: {
- const div_res = try math.divExact(u64, size - @sizeOf(u32), 4);
- break :blk math.cast(u18, div_res) orelse return error.Overflow;
- };
- // ldr w16, literal
- mem.writeIntLittle(u32, code[0..4], aarch64.Instruction.ldrLiteral(
- .w16,
- literal,
- ).toU32());
- // b disp
- mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.b(0).toU32());
- // Next 4 bytes 8..12 are just a placeholder populated in `populateLazyBindOffsetsInStubHelper`.
-
- try Atom.addRelocation(self, atom_index, .{
- .type = .branch,
- .target = .{ .sym_index = stub_helper_preamble_atom_sym_index },
- .offset = 4,
- .addend = 0,
- .pcrel = true,
- .length = 2,
- });
- },
- else => unreachable,
- }
-
- sym.n_value = try self.allocateAtom(atom_index, size, required_alignment);
- log.debug("allocated stub helper atom at 0x{x}", .{sym.n_value});
- try self.writeAtom(atom_index, code);
-
- return atom_index;
-}
-
-fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWithLoc) !Atom.Index {
- const atom_index = try self.createAtom();
- const atom = self.getAtomPtr(atom_index);
- atom.size = @sizeOf(u64);
-
- const sym = atom.getSymbolPtr(self);
- sym.n_type = macho.N_SECT;
- sym.n_sect = self.la_symbol_ptr_section_index.? + 1;
-
- try Atom.addRelocation(self, atom_index, .{
- .type = .unsigned,
- .target = .{ .sym_index = stub_sym_index },
- .offset = 0,
- .addend = 0,
- .pcrel = false,
- .length = 3,
- });
- try Atom.addRebase(self, atom_index, 0);
- try Atom.addLazyBinding(self, atom_index, .{
- .target = self.getGlobal(self.getSymbolName(target)).?,
- .offset = 0,
- });
-
- sym.n_value = try self.allocateAtom(atom_index, atom.size, @alignOf(u64));
- log.debug("allocated lazy pointer atom at 0x{x} ({s})", .{ sym.n_value, self.getSymbolName(target) });
- try self.writePtrWidthAtom(atom_index);
-
- return atom_index;
-}
-
-fn createStubAtom(self: *MachO, laptr_sym_index: u32) !Atom.Index {
- const gpa = self.base.allocator;
- const arch = self.base.options.target.cpu.arch;
- const size: u4 = switch (arch) {
- .x86_64 => 6,
- .aarch64 => 3 * @sizeOf(u32),
- else => unreachable, // unhandled architecture type
- };
- const atom_index = try self.createAtom();
- const atom = self.getAtomPtr(atom_index);
- atom.size = size;
-
- const required_alignment: u32 = switch (arch) {
- .x86_64 => 1,
- .aarch64 => @alignOf(u32),
- else => unreachable, // unhandled architecture type
-
- };
-
- const sym = atom.getSymbolPtr(self);
- sym.n_type = macho.N_SECT;
- sym.n_sect = self.stubs_section_index.? + 1;
-
- const code = try gpa.alloc(u8, size);
- defer gpa.free(code);
- mem.set(u8, code, 0);
-
- switch (arch) {
- .x86_64 => {
- // jmp
- code[0] = 0xff;
- code[1] = 0x25;
-
- try Atom.addRelocation(self, atom_index, .{
- .type = .branch,
- .target = .{ .sym_index = laptr_sym_index },
- .offset = 2,
- .addend = 0,
- .pcrel = true,
- .length = 2,
- });
- },
- .aarch64 => {
- // adrp x16, pages
- mem.writeIntLittle(u32, code[0..4], aarch64.Instruction.adrp(.x16, 0).toU32());
- // ldr x16, x16, offset
- mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.ldr(
- .x16,
- .x16,
- aarch64.Instruction.LoadStoreOffset.imm(0),
- ).toU32());
- // br x16
- mem.writeIntLittle(u32, code[8..12], aarch64.Instruction.br(.x16).toU32());
-
- try Atom.addRelocations(self, atom_index, &[_]Relocation{
- .{
- .type = .page,
- .target = .{ .sym_index = laptr_sym_index },
- .offset = 0,
- .addend = 0,
- .pcrel = true,
- .length = 2,
- },
- .{
- .type = .pageoff,
- .target = .{ .sym_index = laptr_sym_index },
- .offset = 4,
- .addend = 0,
- .pcrel = false,
- .length = 2,
- },
- });
- },
- else => unreachable,
- }
-
- sym.n_value = try self.allocateAtom(atom_index, size, required_alignment);
- log.debug("allocated stub atom at 0x{x}", .{sym.n_value});
- try self.writeAtom(atom_index, code);
-
- return atom_index;
+ var buffer: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64);
+ try self.writeAtom(atom_index, &buffer);
}
-fn createThreadLocalDescriptorAtom(self: *MachO, target: SymbolWithLoc) !Atom.Index {
+fn createThreadLocalDescriptorAtom(self: *MachO, sym_name: []const u8, target: SymbolWithLoc) !Atom.Index {
const gpa = self.base.allocator;
const size = 3 * @sizeOf(u64);
const required_alignment: u32 = 1;
const atom_index = try self.createAtom();
self.getAtomPtr(atom_index).size = size;
- const target_sym_name = self.getSymbolName(target);
- const name_delimiter = mem.indexOf(u8, target_sym_name, "$").?;
- const sym_name = try gpa.dupe(u8, target_sym_name[0..name_delimiter]);
- defer gpa.free(sym_name);
-
const sym = self.getAtom(atom_index).getSymbolPtr(self);
sym.n_type = macho.N_SECT;
sym.n_sect = self.thread_vars_section_index.? + 1;
@@ -1889,8 +1592,7 @@ pub fn deinit(self: *MachO) void {
}
self.got_table.deinit(gpa);
- self.stubs_table.deinit(gpa);
- self.tlv_table.deinit(gpa);
+ self.stub_table.deinit(gpa);
self.strtab.deinit(gpa);
self.locals.deinit(gpa);
@@ -1923,14 +1625,12 @@ pub fn deinit(self: *MachO) void {
self.atoms.deinit(gpa);
- if (self.base.options.module) |_| {
- for (self.decls.values()) |*m| {
- m.exports.deinit(gpa);
- }
- self.decls.deinit(gpa);
- } else {
- assert(self.decls.count() == 0);
+ for (self.decls.values()) |*m| {
+ m.exports.deinit(gpa);
}
+ self.decls.deinit(gpa);
+ self.lazy_syms.deinit(gpa);
+ self.tlv_table.deinit(gpa);
for (self.unnamed_const_atoms.values()) |*atoms| {
atoms.deinit(gpa);
@@ -1953,11 +1653,6 @@ pub fn deinit(self: *MachO) void {
bindings.deinit(gpa);
}
self.bindings.deinit(gpa);
-
- for (self.lazy_bindings.values()) |*bindings| {
- bindings.deinit(gpa);
- }
- self.lazy_bindings.deinit(gpa);
}
fn freeAtom(self: *MachO, atom_index: Atom.Index) void {
@@ -2104,31 +1799,16 @@ fn allocateGlobal(self: *MachO) !u32 {
fn addGotEntry(self: *MachO, target: SymbolWithLoc) !void {
if (self.got_table.lookup.contains(target)) return;
const got_index = try self.got_table.allocateEntry(self.base.allocator, target);
- const got_atom_index = try self.createGotAtom(target);
- const got_atom = self.getAtom(got_atom_index);
- self.got_table.entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
+ try self.writeOffsetTableEntry(got_index);
+ self.got_table_count_dirty = true;
self.markRelocsDirtyByTarget(target);
}
fn addStubEntry(self: *MachO, target: SymbolWithLoc) !void {
- if (self.stubs_table.lookup.contains(target)) return;
- const stub_index = try self.stubs_table.allocateEntry(self.base.allocator, target);
- const stub_helper_atom_index = try self.createStubHelperAtom();
- const stub_helper_atom = self.getAtom(stub_helper_atom_index);
- const laptr_atom_index = try self.createLazyPointerAtom(stub_helper_atom.getSymbolIndex().?, target);
- const laptr_atom = self.getAtom(laptr_atom_index);
- const stub_atom_index = try self.createStubAtom(laptr_atom.getSymbolIndex().?);
- const stub_atom = self.getAtom(stub_atom_index);
- self.stubs_table.entries.items[stub_index].sym_index = stub_atom.getSymbolIndex().?;
- self.markRelocsDirtyByTarget(target);
-}
-
-fn addTlvEntry(self: *MachO, target: SymbolWithLoc) !void {
- if (self.tlv_table.lookup.contains(target)) return;
- const tlv_index = try self.tlv_table.allocateEntry(self.base.allocator, target);
- const tlv_atom_index = try self.createThreadLocalDescriptorAtom(target);
- const tlv_atom = self.getAtom(tlv_atom_index);
- self.tlv_table.entries.items[tlv_index].sym_index = tlv_atom.getSymbolIndex().?;
+ if (self.stub_table.lookup.contains(target)) return;
+ const stub_index = try self.stub_table.allocateEntry(self.base.allocator, target);
+ try self.writeStubTableEntry(stub_index);
+ self.stub_table_count_dirty = true;
self.markRelocsDirtyByTarget(target);
}
@@ -2278,6 +1958,12 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
}
}
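+    // Thread-local variables take a separate lowering path since the
+    // initializer and the TLV descriptor land in different sections.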
+ const is_threadlocal = if (decl.val.castTag(.variable)) |payload|
+ payload.data.is_threadlocal and !self.base.options.single_threaded
+ else
+ false;
+ if (is_threadlocal) return self.updateThreadlocalVariable(module, decl_index);
+
const atom_index = try self.getOrCreateAtomForDecl(decl_index);
const sym_index = self.getAtom(atom_index).getSymbolIndex().?;
Atom.freeRelocations(self, atom_index);
@@ -2426,6 +2112,101 @@ pub fn getOrCreateAtomForLazySymbol(self: *MachO, sym: File.LazySymbol, alignmen
return atom.*.?;
}
+fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.Decl.Index) !void {
+ // Lowering a TLV on macOS involves two stages:
+    // 1. first we lower the initializer into the appropriate section (__thread_data or __thread_bss)
+ // 2. next, we create a corresponding threadlocal variable descriptor in __thread_vars
+
+ // 1. Lower the initializer value.
+ const init_atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const init_atom = self.getAtomPtr(init_atom_index);
+ const init_sym_index = init_atom.getSymbolIndex().?;
+ Atom.freeRelocations(self, init_atom_index);
+
+ const gpa = self.base.allocator;
+
+ var code_buffer = std.ArrayList(u8).init(gpa);
+ defer code_buffer.deinit();
+
+ var decl_state: ?Dwarf.DeclState = if (self.d_sym) |*d_sym|
+ try d_sym.dwarf.initDeclState(module, decl_index)
+ else
+ null;
+ defer if (decl_state) |*ds| ds.deinit();
+
+ const decl = module.declPtr(decl_index);
+ const decl_metadata = self.decls.get(decl_index).?;
+ const decl_val = decl.val.castTag(.variable).?.data.init;
+ const res = if (decl_state) |*ds|
+ try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+ .ty = decl.ty,
+ .val = decl_val,
+ }, &code_buffer, .{
+ .dwarf = ds,
+ }, .{
+ .parent_atom_index = init_sym_index,
+ })
+ else
+ try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+ .ty = decl.ty,
+ .val = decl_val,
+ }, &code_buffer, .none, .{
+ .parent_atom_index = init_sym_index,
+ });
+
+ var code = switch (res) {
+ .ok => code_buffer.items,
+ .fail => |em| {
+ decl.analysis = .codegen_failure;
+ try module.failed_decls.put(module.gpa, decl_index, em);
+ return;
+ },
+ };
+
+ const required_alignment = decl.getAlignment(self.base.options.target);
+
+ const decl_name = try decl.getFullyQualifiedName(module);
+ defer gpa.free(decl_name);
+
+ const init_sym_name = try std.fmt.allocPrint(gpa, "{s}$tlv$init", .{decl_name});
+ defer gpa.free(init_sym_name);
+
+ const sect_id = decl_metadata.section;
+ const init_sym = init_atom.getSymbolPtr(self);
+ init_sym.n_strx = try self.strtab.insert(gpa, init_sym_name);
+ init_sym.n_type = macho.N_SECT;
+ init_sym.n_sect = sect_id + 1;
+ init_sym.n_desc = 0;
+ init_atom.size = code.len;
+
+ init_sym.n_value = try self.allocateAtom(init_atom_index, code.len, required_alignment);
+ errdefer self.freeAtom(init_atom_index);
+
+ log.debug("allocated atom for {s} at 0x{x}", .{ init_sym_name, init_sym.n_value });
+ log.debug(" (required alignment 0x{x})", .{required_alignment});
+
+ try self.writeAtom(init_atom_index, code);
+
+ if (decl_state) |*ds| {
+ try self.d_sym.?.dwarf.commitDeclState(
+ module,
+ decl_index,
+ init_sym.n_value,
+ self.getAtom(init_atom_index).size,
+ ds,
+ );
+ }
+
+ try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
+
+ // 2. Create a TLV descriptor.
+ const init_atom_sym_loc = init_atom.getSymbolWithLoc();
+ const gop = try self.tlv_table.getOrPut(gpa, init_atom_sym_loc);
+ assert(!gop.found_existing);
+ gop.value_ptr.* = try self.createThreadLocalDescriptorAtom(decl_name, init_atom_sym_loc);
+ self.markRelocsDirtyByTarget(init_atom_sym_loc);
+}
+
pub fn getOrCreateAtomForDecl(self: *MachO, decl_index: Module.Decl.Index) !Atom.Index {
const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
if (!gop.found_existing) {
@@ -2493,21 +2274,11 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []u8) !u64
const sect_id = decl_metadata.section;
const header = &self.sections.items(.header)[sect_id];
const segment = self.getSegment(sect_id);
- const is_threadlocal = if (!self.base.options.single_threaded)
- header.flags == macho.S_THREAD_LOCAL_REGULAR or header.flags == macho.S_THREAD_LOCAL_ZEROFILL
- else
- false;
const code_len = code.len;
- const sym_name = if (is_threadlocal)
- try std.fmt.allocPrint(gpa, "{s}$tlv$init", .{decl_name})
- else
- decl_name;
- defer if (is_threadlocal) gpa.free(sym_name);
-
if (atom.size != 0) {
const sym = atom.getSymbolPtr(self);
- sym.n_strx = try self.strtab.insert(gpa, sym_name);
+ sym.n_strx = try self.strtab.insert(gpa, decl_name);
sym.n_type = macho.N_SECT;
sym.n_sect = sect_id + 1;
sym.n_desc = 0;
@@ -2517,23 +2288,15 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []u8) !u64
if (need_realloc) {
const vaddr = try self.growAtom(atom_index, code_len, required_alignment);
- log.debug("growing {s} and moving from 0x{x} to 0x{x}", .{ sym_name, sym.n_value, vaddr });
+ log.debug("growing {s} and moving from 0x{x} to 0x{x}", .{ decl_name, sym.n_value, vaddr });
log.debug(" (required alignment 0x{x})", .{required_alignment});
if (vaddr != sym.n_value) {
sym.n_value = vaddr;
- // TODO: I think we should update the offset to the initializer here too.
- const target: SymbolWithLoc = if (is_threadlocal) blk: {
- const tlv_atom_index = self.tlv_table.getAtomIndex(self, .{
- .sym_index = sym_index,
- }).?;
- const tlv_atom = self.getAtom(tlv_atom_index);
- break :blk tlv_atom.getSymbolWithLoc();
- } else .{ .sym_index = sym_index };
- self.markRelocsDirtyByTarget(target);
log.debug(" (updating GOT entry)", .{});
- const got_atom_index = self.got_table.getAtomIndex(self, target).?;
- try self.writePtrWidthAtom(got_atom_index);
+ const got_atom_index = self.got_table.lookup.get(.{ .sym_index = sym_index }).?;
+ try self.writeOffsetTableEntry(got_atom_index);
+ self.markRelocsDirtyByTarget(.{ .sym_index = sym_index });
}
} else if (code_len < atom.size) {
self.shrinkAtom(atom_index, code_len);
@@ -2544,7 +2307,7 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []u8) !u64
self.getAtomPtr(atom_index).size = code_len;
} else {
const sym = atom.getSymbolPtr(self);
- sym.n_strx = try self.strtab.insert(gpa, sym_name);
+ sym.n_strx = try self.strtab.insert(gpa, decl_name);
sym.n_type = macho.N_SECT;
sym.n_sect = sect_id + 1;
sym.n_desc = 0;
@@ -2552,21 +2315,13 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []u8) !u64
const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
errdefer self.freeAtom(atom_index);
- log.debug("allocated atom for {s} at 0x{x}", .{ sym_name, vaddr });
+ log.debug("allocated atom for {s} at 0x{x}", .{ decl_name, vaddr });
log.debug(" (required alignment 0x{x})", .{required_alignment});
self.getAtomPtr(atom_index).size = code_len;
sym.n_value = vaddr;
- if (is_threadlocal) {
- try self.addTlvEntry(.{ .sym_index = sym_index });
- }
- const target: SymbolWithLoc = if (is_threadlocal) blk: {
- const tlv_atom_index = self.tlv_table.getAtomIndex(self, .{ .sym_index = sym_index }).?;
- const tlv_atom = self.getAtom(tlv_atom_index);
- break :blk tlv_atom.getSymbolWithLoc();
- } else .{ .sym_index = sym_index };
- try self.addGotEntry(target);
+ try self.addGotEntry(.{ .sym_index = sym_index });
}
try self.writeAtom(atom_index, code);
@@ -2828,11 +2583,7 @@ fn populateMissingMetadata(self: *MachO) !void {
}
if (self.stubs_section_index == null) {
- const stub_size: u32 = switch (cpu_arch) {
- .x86_64 => 6,
- .aarch64 => 3 * @sizeOf(u32),
- else => unreachable, // unhandled architecture type
- };
+ const stub_size = stubs.calcStubEntrySize(cpu_arch);
self.stubs_section_index = try self.allocateSection("__TEXT2", "__stubs", .{
.size = stub_size,
.alignment = switch (cpu_arch) {
@@ -3021,7 +2772,7 @@ fn growSection(self: *MachO, sect_id: u8, needed_size: u64) !void {
const last_atom = self.getAtom(last_atom_index);
const sym = last_atom.getSymbol(self);
break :blk (sym.n_value + last_atom.size) - segment.vmaddr;
- } else 0;
+ } else header.size;
log.debug("moving {s},{s} from 0x{x} to 0x{x}", .{
header.segName(),
@@ -3249,6 +3000,28 @@ fn writeLinkeditSegmentData(self: *MachO) !void {
seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size);
}
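+/// Collects rebase entries for the live pointer slots of a synthetic table
+/// section (__got or __la_symbol_ptr). GOT slots targeting undefined symbols
+/// are skipped: those are bound by dyld rather than rebased.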
+fn collectRebaseDataFromTableSection(self: *MachO, sect_id: u8, rebase: *Rebase, table: anytype) !void {
+ const header = self.sections.items(.header)[sect_id];
+ const segment_index = self.sections.items(.segment_index)[sect_id];
+ const segment = self.segments.items[segment_index];
+ const base_offset = header.addr - segment.vmaddr;
+ const is_got = if (self.got_section_index) |index| index == sect_id else false;
+
+ try rebase.entries.ensureUnusedCapacity(self.base.allocator, table.entries.items.len);
+
+ for (table.entries.items, 0..) |entry, i| {
+ if (!table.lookup.contains(entry)) continue;
+ const sym = self.getSymbol(entry);
+ if (is_got and sym.undf()) continue;
+ const offset = i * @sizeOf(u64);
+ log.debug(" | rebase at {x}", .{base_offset + offset});
+ rebase.entries.appendAssumeCapacity(.{
+ .offset = base_offset + offset,
+ .segment_id = segment_index,
+ });
+ }
+}
+
fn collectRebaseData(self: *MachO, rebase: *Rebase) !void {
const gpa = self.base.allocator;
const slice = self.sections.slice();
@@ -3276,9 +3049,42 @@ fn collectRebaseData(self: *MachO, rebase: *Rebase) !void {
}
}
+ try self.collectRebaseDataFromTableSection(self.got_section_index.?, rebase, self.got_table);
+ try self.collectRebaseDataFromTableSection(self.la_symbol_ptr_section_index.?, rebase, self.stub_table);
+
try rebase.finalize(gpa);
}
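+/// Collects bind entries for the slots of a synthetic table section that
+/// target undefined (imported) symbols; defined targets are skipped since
+/// they are covered by rebase opcodes instead.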
+fn collectBindDataFromTableSection(self: *MachO, sect_id: u8, bind: anytype, table: anytype) !void {
+ const header = self.sections.items(.header)[sect_id];
+ const segment_index = self.sections.items(.segment_index)[sect_id];
+ const segment = self.segments.items[segment_index];
+ const base_offset = header.addr - segment.vmaddr;
+
+ try bind.entries.ensureUnusedCapacity(self.base.allocator, table.entries.items.len);
+
+ for (table.entries.items, 0..) |entry, i| {
+ if (!table.lookup.contains(entry)) continue;
+ const bind_sym = self.getSymbol(entry);
+ if (!bind_sym.undf()) continue;
+ const offset = i * @sizeOf(u64);
+ log.debug(" | bind at {x}, import('{s}') in dylib({d})", .{
+ base_offset + offset,
+ self.getSymbolName(entry),
+ @divTrunc(@bitCast(i16, bind_sym.n_desc), macho.N_SYMBOL_RESOLVER),
+ });
+ if (bind_sym.weakRef()) {
+ log.debug(" | marking as weak ref ", .{});
+ }
+ bind.entries.appendAssumeCapacity(.{
+ .target = entry,
+ .offset = base_offset + offset,
+ .segment_id = segment_index,
+ .addend = 0,
+ });
+ }
+}
+
fn collectBindData(self: *MachO, bind: anytype, raw_bindings: anytype) !void {
const gpa = self.base.allocator;
const slice = self.sections.slice();
@@ -3320,9 +3126,16 @@ fn collectBindData(self: *MachO, bind: anytype, raw_bindings: anytype) !void {
}
}
+ // Gather GOT pointers
+ try self.collectBindDataFromTableSection(self.got_section_index.?, bind, self.got_table);
try bind.finalize(gpa, self);
}
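+/// Lazy bind data is sourced exclusively from the stub table backing the
+/// __la_symbol_ptr section.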
+fn collectLazyBindData(self: *MachO, bind: anytype) !void {
+ try self.collectBindDataFromTableSection(self.la_symbol_ptr_section_index.?, bind, self.stub_table);
+ try bind.finalize(self.base.allocator, self);
+}
+
fn collectExportData(self: *MachO, trie: *Trie) !void {
const gpa = self.base.allocator;
@@ -3366,7 +3179,7 @@ fn writeDyldInfoData(self: *MachO) !void {
var lazy_bind = LazyBind{};
defer lazy_bind.deinit(gpa);
- try self.collectBindData(&lazy_bind, self.lazy_bindings);
+ try self.collectLazyBindData(&lazy_bind);
var trie: Trie = .{};
defer trie.deinit(gpa);
@@ -3442,34 +3255,26 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, lazy_bind: LazyBind) !void
if (lazy_bind.size() == 0) return;
const stub_helper_section_index = self.stub_helper_section_index.?;
- assert(self.stub_helper_preamble_atom_index != null);
+ assert(self.stub_helper_preamble_allocated);
- const section = self.sections.get(stub_helper_section_index);
+ const header = self.sections.items(.header)[stub_helper_section_index];
- const stub_offset: u4 = switch (self.base.options.target.cpu.arch) {
- .x86_64 => 1,
- .aarch64 => 2 * @sizeOf(u32),
- else => unreachable,
- };
- const header = section.header;
- var atom_index = section.last_atom_index.?;
+ const cpu_arch = self.base.options.target.cpu.arch;
+ const preamble_size = stubs.calcStubHelperPreambleSize(cpu_arch);
+ const stub_size = stubs.calcStubHelperEntrySize(cpu_arch);
+ const stub_offset = stubs.calcStubOffsetInStubHelper(cpu_arch);
+ const base_offset = header.offset + preamble_size;
- var index: usize = lazy_bind.offsets.items.len;
- while (index > 0) : (index -= 1) {
- const atom = self.getAtom(atom_index);
- const sym = atom.getSymbol(self);
- const file_offset = header.offset + sym.n_value - header.addr + stub_offset;
- const bind_offset = lazy_bind.offsets.items[index - 1];
+ for (lazy_bind.offsets.items, 0..) |bind_offset, index| {
+ const file_offset = base_offset + index * stub_size + stub_offset;
log.debug("writing lazy bind offset 0x{x} ({s}) in stub helper at 0x{x}", .{
bind_offset,
- self.getSymbolName(lazy_bind.entries.items[index - 1].target),
+ self.getSymbolName(lazy_bind.entries.items[index].target),
file_offset,
});
try self.base.file.?.pwriteAll(mem.asBytes(&bind_offset), file_offset);
-
- atom_index = atom.prev_index.?;
}
}
@@ -3585,7 +3390,7 @@ const SymtabCtx = struct {
fn writeDysymtab(self: *MachO, ctx: SymtabCtx) !void {
const gpa = self.base.allocator;
- const nstubs = @intCast(u32, self.stubs_table.lookup.count());
+ const nstubs = @intCast(u32, self.stub_table.lookup.count());
const ngot_entries = @intCast(u32, self.got_table.lookup.count());
const nindirectsyms = nstubs * 2 + ngot_entries;
const iextdefsym = ctx.nlocalsym;
@@ -3606,13 +3411,13 @@ fn writeDysymtab(self: *MachO, ctx: SymtabCtx) !void {
const writer = buf.writer();
if (self.stubs_section_index) |sect_id| {
- const stubs = &self.sections.items(.header)[sect_id];
- stubs.reserved1 = 0;
- for (self.stubs_table.entries.items) |entry| {
- if (entry.sym_index == 0) continue;
- const target_sym = self.getSymbol(entry.target);
+ const stubs_header = &self.sections.items(.header)[sect_id];
+ stubs_header.reserved1 = 0;
+ for (self.stub_table.entries.items) |entry| {
+ if (!self.stub_table.lookup.contains(entry)) continue;
+ const target_sym = self.getSymbol(entry);
assert(target_sym.undf());
- try writer.writeIntLittle(u32, iundefsym + ctx.imports_table.get(entry.target).?);
+ try writer.writeIntLittle(u32, iundefsym + ctx.imports_table.get(entry).?);
}
}
@@ -3620,10 +3425,10 @@ fn writeDysymtab(self: *MachO, ctx: SymtabCtx) !void {
const got = &self.sections.items(.header)[sect_id];
got.reserved1 = nstubs;
for (self.got_table.entries.items) |entry| {
- if (entry.sym_index == 0) continue;
- const target_sym = self.getSymbol(entry.target);
+ if (!self.got_table.lookup.contains(entry)) continue;
+ const target_sym = self.getSymbol(entry);
if (target_sym.undf()) {
- try writer.writeIntLittle(u32, iundefsym + ctx.imports_table.get(entry.target).?);
+ try writer.writeIntLittle(u32, iundefsym + ctx.imports_table.get(entry).?);
} else {
try writer.writeIntLittle(u32, macho.INDIRECT_SYMBOL_LOCAL);
}
@@ -3633,11 +3438,11 @@ fn writeDysymtab(self: *MachO, ctx: SymtabCtx) !void {
if (self.la_symbol_ptr_section_index) |sect_id| {
const la_symbol_ptr = &self.sections.items(.header)[sect_id];
la_symbol_ptr.reserved1 = nstubs + ngot_entries;
- for (self.stubs_table.entries.items) |entry| {
- if (entry.sym_index == 0) continue;
- const target_sym = self.getSymbol(entry.target);
+ for (self.stub_table.entries.items) |entry| {
+ if (!self.stub_table.lookup.contains(entry)) continue;
+ const target_sym = self.getSymbol(entry);
assert(target_sym.undf());
- try writer.writeIntLittle(u32, iundefsym + ctx.imports_table.get(entry.target).?);
+ try writer.writeIntLittle(u32, iundefsym + ctx.imports_table.get(entry).?);
}
}
@@ -4321,13 +4126,13 @@ pub fn logSymtab(self: *MachO) void {
}
log.debug("GOT entries:", .{});
- log.debug("{}", .{self.got_table.fmtDebug(self)});
+ log.debug("{}", .{self.got_table});
log.debug("stubs entries:", .{});
- log.debug("{}", .{self.stubs_table.fmtDebug(self)});
+ log.debug("{}", .{self.stub_table});
- log.debug("threadlocal entries:", .{});
- log.debug("{}", .{self.tlv_table.fmtDebug(self)});
+ // log.debug("threadlocal entries:", .{});
+ // log.debug("{}", .{self.tlv_table});
}
pub fn logAtoms(self: *MachO) void {
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index 5b17dc689d..fb05595b7d 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -158,21 +158,6 @@ pub fn addBinding(macho_file: *MachO, atom_index: Index, binding: Binding) !void
try gop.value_ptr.append(gpa, binding);
}
-pub fn addLazyBinding(macho_file: *MachO, atom_index: Index, binding: Binding) !void {
- const gpa = macho_file.base.allocator;
- const atom = macho_file.getAtom(atom_index);
- log.debug(" (adding lazy binding to symbol {s} at offset 0x{x} in %{?d})", .{
- macho_file.getSymbolName(binding.target),
- binding.offset,
- atom.getSymbolIndex(),
- });
- const gop = try macho_file.lazy_bindings.getOrPut(gpa, atom_index);
- if (!gop.found_existing) {
- gop.value_ptr.* = .{};
- }
- try gop.value_ptr.append(gpa, binding);
-}
-
pub fn resolveRelocations(
macho_file: *MachO,
atom_index: Index,
@@ -193,6 +178,4 @@ pub fn freeRelocations(macho_file: *MachO, atom_index: Index) void {
if (removed_rebases) |*rebases| rebases.value.deinit(gpa);
var removed_bindings = macho_file.bindings.fetchOrderedRemove(atom_index);
if (removed_bindings) |*bindings| bindings.value.deinit(gpa);
- var removed_lazy_bindings = macho_file.lazy_bindings.fetchOrderedRemove(atom_index);
- if (removed_lazy_bindings) |*lazy_bindings| lazy_bindings.value.deinit(gpa);
}
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index 98abf2b1cc..24a0c9ea34 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -230,7 +230,7 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {
.got_load => blk: {
const got_index = macho_file.got_table.lookup.get(.{ .sym_index = reloc.target }).?;
const got_entry = macho_file.got_table.entries.items[got_index];
- break :blk got_entry.getSymbol(macho_file);
+ break :blk macho_file.getSymbol(got_entry);
},
};
if (sym.n_value == reloc.prev_vaddr) continue;
@@ -240,7 +240,7 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {
.got_load => blk: {
const got_index = macho_file.got_table.lookup.get(.{ .sym_index = reloc.target }).?;
const got_entry = macho_file.got_table.entries.items[got_index];
- break :blk got_entry.getName(macho_file);
+ break :blk macho_file.getSymbolName(got_entry);
},
};
const sect = &self.sections.items[self.debug_info_section_index.?];
diff --git a/src/link/MachO/Relocation.zig b/src/link/MachO/Relocation.zig
index 81340b1120..e511901009 100644
--- a/src/link/MachO/Relocation.zig
+++ b/src/link/MachO/Relocation.zig
@@ -15,7 +15,7 @@ pub const Type = enum {
got,
/// RIP-relative displacement
signed,
- /// RIP-relative displacement to GOT pointer to TLV thunk
+ /// RIP-relative displacement to a TLV thunk
tlv,
// aarch64
@@ -39,25 +39,35 @@ pub const Type = enum {
/// Returns true if and only if the reloc is dirty AND the target address is available.
pub fn isResolvable(self: Relocation, macho_file: *MachO) bool {
- _ = self.getTargetAtomIndex(macho_file) orelse return false;
+ const addr = self.getTargetBaseAddress(macho_file) orelse return false;
+ if (addr == 0) return false;
return self.dirty;
}
-pub fn getTargetAtomIndex(self: Relocation, macho_file: *MachO) ?Atom.Index {
- return switch (self.type) {
- .got, .got_page, .got_pageoff => macho_file.got_table.getAtomIndex(macho_file, self.target),
+pub fn getTargetBaseAddress(self: Relocation, macho_file: *MachO) ?u64 {
+ switch (self.type) {
+ .got, .got_page, .got_pageoff => {
+ const got_index = macho_file.got_table.lookup.get(self.target) orelse return null;
+ const header = macho_file.sections.items(.header)[macho_file.got_section_index.?];
+ return header.addr + got_index * @sizeOf(u64);
+ },
.tlv => {
- const thunk_atom_index = macho_file.tlv_table.getAtomIndex(macho_file, self.target) orelse
- return null;
- const thunk_atom = macho_file.getAtom(thunk_atom_index);
- return macho_file.got_table.getAtomIndex(macho_file, thunk_atom.getSymbolWithLoc());
+ const atom_index = macho_file.tlv_table.get(self.target) orelse return null;
+ const atom = macho_file.getAtom(atom_index);
+ return atom.getSymbol(macho_file).n_value;
},
- .branch => if (macho_file.stubs_table.getAtomIndex(macho_file, self.target)) |index|
- index
- else
- macho_file.getAtomIndexForSymbol(self.target),
- else => macho_file.getAtomIndexForSymbol(self.target),
- };
+ .branch => {
+ if (macho_file.stub_table.lookup.get(self.target)) |index| {
+ const header = macho_file.sections.items(.header)[macho_file.stubs_section_index.?];
+ return header.addr +
+ index * @import("stubs.zig").calcStubEntrySize(macho_file.base.options.target.cpu.arch);
+ }
+ const atom_index = macho_file.getAtomIndexForSymbol(self.target) orelse return null;
+ const atom = macho_file.getAtom(atom_index);
+ return atom.getSymbol(macho_file).n_value;
+ },
+ else => return macho_file.getSymbol(self.target).n_value,
+ }
}
pub fn resolve(self: Relocation, macho_file: *MachO, atom_index: Atom.Index, code: []u8) void {
@@ -66,17 +76,14 @@ pub fn resolve(self: Relocation, macho_file: *MachO, atom_index: Atom.Index, cod
const source_sym = atom.getSymbol(macho_file);
const source_addr = source_sym.n_value + self.offset;
- const target_atom_index = self.getTargetAtomIndex(macho_file).?; // Oops, you didn't check if the relocation can be resolved with isResolvable().
- const target_atom = macho_file.getAtom(target_atom_index);
-
+ const target_base_addr = self.getTargetBaseAddress(macho_file).?; // Oops, you didn't check if the relocation can be resolved with isResolvable().
const target_addr: i64 = switch (self.type) {
.tlv_initializer => blk: {
assert(self.addend == 0); // Addend here makes no sense.
const header = macho_file.sections.items(.header)[macho_file.thread_data_section_index.?];
- const target_sym = target_atom.getSymbol(macho_file);
- break :blk @intCast(i64, target_sym.n_value - header.addr);
+ break :blk @intCast(i64, target_base_addr - header.addr);
},
- else => @intCast(i64, target_atom.getSymbol(macho_file).n_value) + self.addend,
+ else => @intCast(i64, target_base_addr) + self.addend,
};
log.debug(" ({x}: [() => 0x{x} ({s})) ({s})", .{
@@ -189,11 +196,48 @@ fn resolveX8664(self: Relocation, source_addr: u64, target_addr: i64, code: []u8
}
}
-inline fn isArithmeticOp(inst: *const [4]u8) bool {
+pub inline fn isArithmeticOp(inst: *const [4]u8) bool {
const group_decode = @truncate(u5, inst[3]);
return ((group_decode >> 2) == 4);
}
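+/// On x86_64 the displacement is relative to the end of the instruction;
+/// `correction` accounts for any immediate bytes that follow the 4-byte
+/// displacement field.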
+pub fn calcPcRelativeDisplacementX86(source_addr: u64, target_addr: u64, correction: u3) error{Overflow}!i32 {
+ const disp = @intCast(i64, target_addr) - @intCast(i64, source_addr + 4 + correction);
+ return math.cast(i32, disp) orelse error.Overflow;
+}
+
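+/// ARM64 branch displacements are relative to the branch instruction itself
+/// and must fit in 28 bits (+-128MiB).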
+pub fn calcPcRelativeDisplacementArm64(source_addr: u64, target_addr: u64) error{Overflow}!i28 {
+ const disp = @intCast(i64, target_addr) - @intCast(i64, source_addr);
+ return math.cast(i28, disp) orelse error.Overflow;
+}
+
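+/// Number of 4KiB pages between the source and target addresses, as encoded
+/// by ADRP.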
+pub fn calcNumberOfPages(source_addr: u64, target_addr: u64) i21 {
+ const source_page = @intCast(i32, source_addr >> 12);
+ const target_page = @intCast(i32, target_addr >> 12);
+ const pages = @intCast(i21, target_page - source_page);
+ return pages;
+}
+
+pub const PageOffsetInstKind = enum {
+ arithmetic,
+ load_store_8,
+ load_store_16,
+ load_store_32,
+ load_store_64,
+ load_store_128,
+};
+
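+/// Within-page offset of the target, scaled by the access size, since ARM64
+/// load/store immediates are encoded in units of the access size.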
+pub fn calcPageOffset(target_addr: u64, kind: PageOffsetInstKind) !u12 {
+ const narrowed = @truncate(u12, target_addr);
+ return switch (kind) {
+ .arithmetic, .load_store_8 => narrowed,
+ .load_store_16 => try math.divExact(u12, narrowed, 2),
+ .load_store_32 => try math.divExact(u12, narrowed, 4),
+ .load_store_64 => try math.divExact(u12, narrowed, 8),
+ .load_store_128 => try math.divExact(u12, narrowed, 16),
+ };
+}
+
const Relocation = @This();
const std = @import("std");
diff --git a/src/link/MachO/ZldAtom.zig b/src/link/MachO/ZldAtom.zig
index 7e784ded05..baa6340a13 100644
--- a/src/link/MachO/ZldAtom.zig
+++ b/src/link/MachO/ZldAtom.zig
@@ -21,6 +21,7 @@ const Allocator = mem.Allocator;
const Arch = std.Target.Cpu.Arch;
const AtomIndex = @import("zld.zig").AtomIndex;
const Object = @import("Object.zig");
+const Relocation = @import("Relocation.zig");
const SymbolWithLoc = @import("zld.zig").SymbolWithLoc;
const Zld = @import("zld.zig").Zld;
@@ -571,7 +572,7 @@ fn resolveRelocsArm64(
zld.getAtom(getRelocTargetAtomIndex(zld, target, is_via_got).?).getFile(),
});
- const displacement = if (calcPcRelativeDisplacementArm64(
+ const displacement = if (Relocation.calcPcRelativeDisplacementArm64(
source_addr,
zld.getSymbol(actual_target).n_value,
)) |disp| blk: {
@@ -585,7 +586,7 @@ fn resolveRelocsArm64(
actual_target,
).?);
log.debug(" | target_addr = 0x{x} (thunk)", .{thunk_sym.n_value});
- break :blk try calcPcRelativeDisplacementArm64(source_addr, thunk_sym.n_value);
+ break :blk try Relocation.calcPcRelativeDisplacementArm64(source_addr, thunk_sym.n_value);
};
const code = atom_code[rel_offset..][0..4];
@@ -607,7 +608,7 @@ fn resolveRelocsArm64(
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
- const pages = @bitCast(u21, calcNumberOfPages(source_addr, adjusted_target_addr));
+ const pages = @bitCast(u21, Relocation.calcNumberOfPages(source_addr, adjusted_target_addr));
const code = atom_code[rel_offset..][0..4];
var inst = aarch64.Instruction{
.pc_relative_address = mem.bytesToValue(meta.TagPayload(
@@ -627,8 +628,8 @@ fn resolveRelocsArm64(
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
const code = atom_code[rel_offset..][0..4];
- if (isArithmeticOp(code)) {
- const off = try calcPageOffset(adjusted_target_addr, .arithmetic);
+ if (Relocation.isArithmeticOp(code)) {
+ const off = try Relocation.calcPageOffset(adjusted_target_addr, .arithmetic);
var inst = aarch64.Instruction{
.add_subtract_immediate = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
@@ -644,11 +645,11 @@ fn resolveRelocsArm64(
aarch64.Instruction.load_store_register,
), code),
};
- const off = try calcPageOffset(adjusted_target_addr, switch (inst.load_store_register.size) {
+ const off = try Relocation.calcPageOffset(adjusted_target_addr, switch (inst.load_store_register.size) {
0 => if (inst.load_store_register.v == 1)
- PageOffsetInstKind.load_store_128
+ Relocation.PageOffsetInstKind.load_store_128
else
- PageOffsetInstKind.load_store_8,
+ Relocation.PageOffsetInstKind.load_store_8,
1 => .load_store_16,
2 => .load_store_32,
3 => .load_store_64,
@@ -665,7 +666,7 @@ fn resolveRelocsArm64(
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
- const off = try calcPageOffset(adjusted_target_addr, .load_store_64);
+ const off = try Relocation.calcPageOffset(adjusted_target_addr, .load_store_64);
var inst: aarch64.Instruction = .{
.load_store_register = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
@@ -689,7 +690,7 @@ fn resolveRelocsArm64(
size: u2,
};
const reg_info: RegInfo = blk: {
- if (isArithmeticOp(code)) {
+ if (Relocation.isArithmeticOp(code)) {
const inst = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.add_subtract_immediate,
@@ -716,7 +717,7 @@ fn resolveRelocsArm64(
.load_store_register = .{
.rt = reg_info.rd,
.rn = reg_info.rn,
- .offset = try calcPageOffset(adjusted_target_addr, .load_store_64),
+ .offset = try Relocation.calcPageOffset(adjusted_target_addr, .load_store_64),
.opc = 0b01,
.op1 = 0b01,
.v = 0,
@@ -726,7 +727,7 @@ fn resolveRelocsArm64(
.add_subtract_immediate = .{
.rd = reg_info.rd,
.rn = reg_info.rn,
- .imm12 = try calcPageOffset(adjusted_target_addr, .arithmetic),
+ .imm12 = try Relocation.calcPageOffset(adjusted_target_addr, .arithmetic),
.sh = 0,
.s = 0,
.op = 0,
@@ -858,7 +859,7 @@ fn resolveRelocsX86(
const addend = mem.readIntLittle(i32, atom_code[rel_offset..][0..4]);
const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend);
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
- const disp = try calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
+ const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
mem.writeIntLittle(i32, atom_code[rel_offset..][0..4], disp);
},
@@ -868,7 +869,7 @@ fn resolveRelocsX86(
const addend = mem.readIntLittle(i32, atom_code[rel_offset..][0..4]);
const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend);
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
- const disp = try calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
+ const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
mem.writeIntLittle(i32, atom_code[rel_offset..][0..4], disp);
},
@@ -876,7 +877,7 @@ fn resolveRelocsX86(
const addend = mem.readIntLittle(i32, atom_code[rel_offset..][0..4]);
const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend);
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
- const disp = try calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
+ const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
if (zld.tlv_ptr_table.get(target) == null) {
// We need to rewrite the opcode from movq to leaq.
@@ -913,7 +914,7 @@ fn resolveRelocsX86(
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
- const disp = try calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, correction);
+ const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, correction);
mem.writeIntLittle(i32, atom_code[rel_offset..][0..4], disp);
},
@@ -955,11 +956,6 @@ fn resolveRelocsX86(
}
}
-inline fn isArithmeticOp(inst: *const [4]u8) bool {
- const group_decode = @truncate(u5, inst[3]);
- return ((group_decode >> 2) == 4);
-}
-
pub fn getAtomCode(zld: *Zld, atom_index: AtomIndex) []const u8 {
const atom = zld.getAtom(atom_index);
assert(atom.getFile() != null); // Synthetic atom shouldn't need to inquire for code.
@@ -1006,43 +1002,6 @@ pub fn getAtomRelocs(zld: *Zld, atom_index: AtomIndex) []const macho.relocation_
return relocs[cache.start..][0..cache.len];
}
-pub fn calcPcRelativeDisplacementX86(source_addr: u64, target_addr: u64, correction: u3) error{Overflow}!i32 {
- const disp = @intCast(i64, target_addr) - @intCast(i64, source_addr + 4 + correction);
- return math.cast(i32, disp) orelse error.Overflow;
-}
-
-pub fn calcPcRelativeDisplacementArm64(source_addr: u64, target_addr: u64) error{Overflow}!i28 {
- const disp = @intCast(i64, target_addr) - @intCast(i64, source_addr);
- return math.cast(i28, disp) orelse error.Overflow;
-}
-
-pub fn calcNumberOfPages(source_addr: u64, target_addr: u64) i21 {
- const source_page = @intCast(i32, source_addr >> 12);
- const target_page = @intCast(i32, target_addr >> 12);
- const pages = @intCast(i21, target_page - source_page);
- return pages;
-}
-
-const PageOffsetInstKind = enum {
- arithmetic,
- load_store_8,
- load_store_16,
- load_store_32,
- load_store_64,
- load_store_128,
-};
-
-pub fn calcPageOffset(target_addr: u64, kind: PageOffsetInstKind) !u12 {
- const narrowed = @truncate(u12, target_addr);
- return switch (kind) {
- .arithmetic, .load_store_8 => narrowed,
- .load_store_16 => try math.divExact(u12, narrowed, 2),
- .load_store_32 => try math.divExact(u12, narrowed, 4),
- .load_store_64 => try math.divExact(u12, narrowed, 8),
- .load_store_128 => try math.divExact(u12, narrowed, 16),
- };
-}
-
pub fn relocRequiresGot(zld: *Zld, rel: macho.relocation_info) bool {
switch (zld.options.target.cpu.arch) {
.aarch64 => switch (@intToEnum(macho.reloc_type_arm64, rel.r_type)) {
diff --git a/src/link/MachO/eh_frame.zig b/src/link/MachO/eh_frame.zig
index 7c5c5b7c25..ea8d4128c2 100644
--- a/src/link/MachO/eh_frame.zig
+++ b/src/link/MachO/eh_frame.zig
@@ -9,6 +9,7 @@ const log = std.log.scoped(.eh_frame);
const Allocator = mem.Allocator;
const AtomIndex = @import("zld.zig").AtomIndex;
const Atom = @import("ZldAtom.zig");
+const Relocation = @import("Relocation.zig");
const SymbolWithLoc = @import("zld.zig").SymbolWithLoc;
const UnwindInfo = @import("UnwindInfo.zig");
const Zld = @import("zld.zig").Zld;
@@ -368,7 +369,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
const target_addr = try Atom.getRelocTargetAddress(zld, target, true, false);
const addend = mem.readIntLittle(i32, rec.data[rel_offset..][0..4]);
const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend);
- const disp = try Atom.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
+ const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
mem.writeIntLittle(i32, rec.data[rel_offset..][0..4], disp);
},
else => unreachable,
diff --git a/src/link/MachO/stubs.zig b/src/link/MachO/stubs.zig
new file mode 100644
index 0000000000..236ba2cfb8
--- /dev/null
+++ b/src/link/MachO/stubs.zig
@@ -0,0 +1,161 @@
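+//! Sizes and code emitters for the synthetic stub machinery shared by the
+//! incremental MachO linker and zld: __stubs entries, the __stub_helper
+//! preamble, and per-import __stub_helper entries.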
+const std = @import("std");
+const aarch64 = @import("../../arch/aarch64/bits.zig");
+
+const Relocation = @import("Relocation.zig");
+
+pub inline fn calcStubHelperPreambleSize(cpu_arch: std.Target.Cpu.Arch) u5 {
+ return switch (cpu_arch) {
+ .x86_64 => 15,
+ .aarch64 => 6 * @sizeOf(u32),
+ else => unreachable, // unhandled architecture type
+ };
+}
+
+pub inline fn calcStubHelperEntrySize(cpu_arch: std.Target.Cpu.Arch) u4 {
+ return switch (cpu_arch) {
+ .x86_64 => 10,
+ .aarch64 => 3 * @sizeOf(u32),
+ else => unreachable, // unhandled architecture type
+ };
+}
+
+pub inline fn calcStubEntrySize(cpu_arch: std.Target.Cpu.Arch) u4 {
+ return switch (cpu_arch) {
+ .x86_64 => 6,
+ .aarch64 => 3 * @sizeOf(u32),
+ else => unreachable, // unhandled architecture type
+ };
+}
+
+pub inline fn calcStubOffsetInStubHelper(cpu_arch: std.Target.Cpu.Arch) u4 {
+ return switch (cpu_arch) {
+ .x86_64 => 1,
+ .aarch64 => 2 * @sizeOf(u32),
+ else => unreachable,
+ };
+}
+
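+/// Emits the __stub_helper preamble: loads the dyld_private context pointer,
+/// pushes it, then jumps to dyld_stub_binder through its GOT slot.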
+pub fn writeStubHelperPreambleCode(args: struct {
+ cpu_arch: std.Target.Cpu.Arch,
+ source_addr: u64,
+ dyld_private_addr: u64,
+ dyld_stub_binder_got_addr: u64,
+}, writer: anytype) !void {
+ switch (args.cpu_arch) {
+ .x86_64 => {
+ try writer.writeAll(&.{ 0x4c, 0x8d, 0x1d });
+ {
+ const disp = try Relocation.calcPcRelativeDisplacementX86(
+ args.source_addr + 3,
+ args.dyld_private_addr,
+ 0,
+ );
+ try writer.writeIntLittle(i32, disp);
+ }
+ try writer.writeAll(&.{ 0x41, 0x53, 0xff, 0x25 });
+ {
+ const disp = try Relocation.calcPcRelativeDisplacementX86(
+ args.source_addr + 11,
+ args.dyld_stub_binder_got_addr,
+ 0,
+ );
+ try writer.writeIntLittle(i32, disp);
+ }
+ },
+ .aarch64 => {
+ {
+ const pages = Relocation.calcNumberOfPages(args.source_addr, args.dyld_private_addr);
+ try writer.writeIntLittle(u32, aarch64.Instruction.adrp(.x17, pages).toU32());
+ }
+ {
+ const off = try Relocation.calcPageOffset(args.dyld_private_addr, .arithmetic);
+ try writer.writeIntLittle(u32, aarch64.Instruction.add(.x17, .x17, off, false).toU32());
+ }
+ try writer.writeIntLittle(u32, aarch64.Instruction.stp(
+ .x16,
+ .x17,
+ aarch64.Register.sp,
+ aarch64.Instruction.LoadStorePairOffset.pre_index(-16),
+ ).toU32());
+ {
+ const pages = Relocation.calcNumberOfPages(args.source_addr + 12, args.dyld_stub_binder_got_addr);
+ try writer.writeIntLittle(u32, aarch64.Instruction.adrp(.x16, pages).toU32());
+ }
+ {
+ const off = try Relocation.calcPageOffset(args.dyld_stub_binder_got_addr, .load_store_64);
+ try writer.writeIntLittle(u32, aarch64.Instruction.ldr(
+ .x16,
+ .x16,
+ aarch64.Instruction.LoadStoreOffset.imm(off),
+ ).toU32());
+ }
+ try writer.writeIntLittle(u32, aarch64.Instruction.br(.x16).toU32());
+ },
+ else => unreachable,
+ }
+}
+
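+/// Emits a per-import __stub_helper entry: pushes a placeholder lazy-bind
+/// offset (patched in populateLazyBindOffsetsInStubHelper) and branches back
+/// to the preamble.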
+pub fn writeStubHelperCode(args: struct {
+ cpu_arch: std.Target.Cpu.Arch,
+ source_addr: u64,
+ target_addr: u64,
+}, writer: anytype) !void {
+ switch (args.cpu_arch) {
+ .x86_64 => {
+ try writer.writeAll(&.{ 0x68, 0x0, 0x0, 0x0, 0x0, 0xe9 });
+ {
+ const disp = try Relocation.calcPcRelativeDisplacementX86(args.source_addr + 6, args.target_addr, 0);
+ try writer.writeIntLittle(i32, disp);
+ }
+ },
+ .aarch64 => {
+ const stub_size: u4 = 3 * @sizeOf(u32);
+ const literal = blk: {
+ const div_res = try std.math.divExact(u64, stub_size - @sizeOf(u32), 4);
+ break :blk std.math.cast(u18, div_res) orelse return error.Overflow;
+ };
+ try writer.writeIntLittle(u32, aarch64.Instruction.ldrLiteral(
+ .w16,
+ literal,
+ ).toU32());
+ {
+ const disp = try Relocation.calcPcRelativeDisplacementArm64(args.source_addr + 4, args.target_addr);
+ try writer.writeIntLittle(u32, aarch64.Instruction.b(disp).toU32());
+ }
+ try writer.writeAll(&.{ 0x0, 0x0, 0x0, 0x0 });
+ },
+ else => unreachable,
+ }
+}
+
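+/// Emits a __stubs entry: an indirect jump through the corresponding lazy
+/// pointer in __la_symbol_ptr.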
+pub fn writeStubCode(args: struct {
+ cpu_arch: std.Target.Cpu.Arch,
+ source_addr: u64,
+ target_addr: u64,
+}, writer: anytype) !void {
+ switch (args.cpu_arch) {
+ .x86_64 => {
+ try writer.writeAll(&.{ 0xff, 0x25 });
+ {
+ const disp = try Relocation.calcPcRelativeDisplacementX86(args.source_addr + 2, args.target_addr, 0);
+ try writer.writeIntLittle(i32, disp);
+ }
+ },
+ .aarch64 => {
+ {
+ const pages = Relocation.calcNumberOfPages(args.source_addr, args.target_addr);
+ try writer.writeIntLittle(u32, aarch64.Instruction.adrp(.x16, pages).toU32());
+ }
+ {
+ const off = try Relocation.calcPageOffset(args.target_addr, .load_store_64);
+ try writer.writeIntLittle(u32, aarch64.Instruction.ldr(
+ .x16,
+ .x16,
+ aarch64.Instruction.LoadStoreOffset.imm(off),
+ ).toU32());
+ }
+ try writer.writeIntLittle(u32, aarch64.Instruction.br(.x16).toU32());
+ },
+ else => unreachable,
+ }
+}
diff --git a/src/link/MachO/thunks.zig b/src/link/MachO/thunks.zig
index afea08750c..48d1faac6b 100644
--- a/src/link/MachO/thunks.zig
+++ b/src/link/MachO/thunks.zig
@@ -17,6 +17,7 @@ const aarch64 = @import("../../arch/aarch64/bits.zig");
const Allocator = mem.Allocator;
const Atom = @import("ZldAtom.zig");
const AtomIndex = @import("zld.zig").AtomIndex;
+const Relocation = @import("Relocation.zig");
const SymbolWithLoc = @import("zld.zig").SymbolWithLoc;
const Zld = @import("zld.zig").Zld;
@@ -317,7 +318,7 @@ fn isReachable(
const source_addr = source_sym.n_value + @intCast(u32, rel.r_address - base_offset);
const is_via_got = Atom.relocRequiresGot(zld, rel);
const target_addr = Atom.getRelocTargetAddress(zld, target, is_via_got, false) catch unreachable;
- _ = Atom.calcPcRelativeDisplacementArm64(source_addr, target_addr) catch
+ _ = Relocation.calcPcRelativeDisplacementArm64(source_addr, target_addr) catch
return false;
return true;
@@ -364,9 +365,9 @@ pub fn writeThunkCode(zld: *Zld, atom_index: AtomIndex, writer: anytype) !void {
if (atom_index == target_atom_index) break zld.getSymbol(target).n_value;
} else unreachable;
- const pages = Atom.calcNumberOfPages(source_addr, target_addr);
+ const pages = Relocation.calcNumberOfPages(source_addr, target_addr);
try writer.writeIntLittle(u32, aarch64.Instruction.adrp(.x16, pages).toU32());
- const off = try Atom.calcPageOffset(target_addr, .arithmetic);
+ const off = try Relocation.calcPageOffset(target_addr, .arithmetic);
try writer.writeIntLittle(u32, aarch64.Instruction.add(.x16, .x16, off, false).toU32());
try writer.writeIntLittle(u32, aarch64.Instruction.br(.x16).toU32());
}
diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig
index 475b3396fd..bc658fc8d2 100644
--- a/src/link/MachO/zld.zig
+++ b/src/link/MachO/zld.zig
@@ -16,6 +16,7 @@ const link = @import("../../link.zig");
const load_commands = @import("load_commands.zig");
const thunks = @import("thunks.zig");
const trace = @import("../../tracy.zig").trace;
+const stub_helpers = @import("stubs.zig");
const Allocator = mem.Allocator;
const Archive = @import("Archive.zig");
@@ -666,59 +667,17 @@ pub const Zld = struct {
const entry = self.got_entries.items[index];
break :blk entry.getAtomSymbol(self).n_value;
};
- switch (cpu_arch) {
- .x86_64 => {
- try writer.writeAll(&.{ 0x4c, 0x8d, 0x1d });
- {
- const disp = try Atom.calcPcRelativeDisplacementX86(source_addr + 3, dyld_private_addr, 0);
- try writer.writeIntLittle(i32, disp);
- }
- try writer.writeAll(&.{ 0x41, 0x53, 0xff, 0x25 });
- {
- const disp = try Atom.calcPcRelativeDisplacementX86(source_addr + 11, dyld_stub_binder_got_addr, 0);
- try writer.writeIntLittle(i32, disp);
- }
- },
- .aarch64 => {
- {
- const pages = Atom.calcNumberOfPages(source_addr, dyld_private_addr);
- try writer.writeIntLittle(u32, aarch64.Instruction.adrp(.x17, pages).toU32());
- }
- {
- const off = try Atom.calcPageOffset(dyld_private_addr, .arithmetic);
- try writer.writeIntLittle(u32, aarch64.Instruction.add(.x17, .x17, off, false).toU32());
- }
- try writer.writeIntLittle(u32, aarch64.Instruction.stp(
- .x16,
- .x17,
- aarch64.Register.sp,
- aarch64.Instruction.LoadStorePairOffset.pre_index(-16),
- ).toU32());
- {
- const pages = Atom.calcNumberOfPages(source_addr + 12, dyld_stub_binder_got_addr);
- try writer.writeIntLittle(u32, aarch64.Instruction.adrp(.x16, pages).toU32());
- }
- {
- const off = try Atom.calcPageOffset(dyld_stub_binder_got_addr, .load_store_64);
- try writer.writeIntLittle(u32, aarch64.Instruction.ldr(
- .x16,
- .x16,
- aarch64.Instruction.LoadStoreOffset.imm(off),
- ).toU32());
- }
- try writer.writeIntLittle(u32, aarch64.Instruction.br(.x16).toU32());
- },
- else => unreachable,
- }
+ try stub_helpers.writeStubHelperPreambleCode(.{
+ .cpu_arch = cpu_arch,
+ .source_addr = source_addr,
+ .dyld_private_addr = dyld_private_addr,
+ .dyld_stub_binder_got_addr = dyld_stub_binder_got_addr,
+ }, writer);
}
pub fn createStubHelperAtom(self: *Zld) !AtomIndex {
const cpu_arch = self.options.target.cpu.arch;
- const stub_size: u4 = switch (cpu_arch) {
- .x86_64 => 10,
- .aarch64 => 3 * @sizeOf(u32),
- else => unreachable,
- };
+ const stub_size = stub_helpers.calcStubHelperEntrySize(cpu_arch);
const alignment: u2 = switch (cpu_arch) {
.x86_64 => 0,
.aarch64 => 2,
@@ -749,32 +708,11 @@ pub const Zld = struct {
const sym = self.getSymbol(.{ .sym_index = self.stub_helper_preamble_sym_index.? });
break :blk sym.n_value;
};
- switch (cpu_arch) {
- .x86_64 => {
- try writer.writeAll(&.{ 0x68, 0x0, 0x0, 0x0, 0x0, 0xe9 });
- {
- const disp = try Atom.calcPcRelativeDisplacementX86(source_addr + 6, target_addr, 0);
- try writer.writeIntLittle(i32, disp);
- }
- },
- .aarch64 => {
- const stub_size: u4 = 3 * @sizeOf(u32);
- const literal = blk: {
- const div_res = try math.divExact(u64, stub_size - @sizeOf(u32), 4);
- break :blk math.cast(u18, div_res) orelse return error.Overflow;
- };
- try writer.writeIntLittle(u32, aarch64.Instruction.ldrLiteral(
- .w16,
- literal,
- ).toU32());
- {
- const disp = try Atom.calcPcRelativeDisplacementArm64(source_addr + 4, target_addr);
- try writer.writeIntLittle(u32, aarch64.Instruction.b(disp).toU32());
- }
- try writer.writeAll(&.{ 0x0, 0x0, 0x0, 0x0 });
- },
- else => unreachable,
- }
+ try stub_helpers.writeStubHelperCode(.{
+ .cpu_arch = cpu_arch,
+ .source_addr = source_addr,
+ .target_addr = target_addr,
+ }, writer);
}
pub fn createLazyPointerAtom(self: *Zld) !AtomIndex {
@@ -819,11 +757,7 @@ pub const Zld = struct {
.aarch64 => 2,
else => unreachable, // unhandled architecture type
};
- const stub_size: u4 = switch (cpu_arch) {
- .x86_64 => 6,
- .aarch64 => 3 * @sizeOf(u32),
- else => unreachable, // unhandled architecture type
- };
+ const stub_size = stub_helpers.calcStubEntrySize(cpu_arch);
const sym_index = try self.allocateSymbol();
const atom_index = try self.createEmptyAtom(sym_index, stub_size, alignment);
const sym = self.getSymbolPtr(.{ .sym_index = sym_index });
@@ -863,31 +797,11 @@ pub const Zld = struct {
const sym = self.getSymbol(atom.getSymbolWithLoc());
break :blk sym.n_value;
};
- switch (cpu_arch) {
- .x86_64 => {
- try writer.writeAll(&.{ 0xff, 0x25 });
- {
- const disp = try Atom.calcPcRelativeDisplacementX86(source_addr + 2, target_addr, 0);
- try writer.writeIntLittle(i32, disp);
- }
- },
- .aarch64 => {
- {
- const pages = Atom.calcNumberOfPages(source_addr, target_addr);
- try writer.writeIntLittle(u32, aarch64.Instruction.adrp(.x16, pages).toU32());
- }
- {
- const off = try Atom.calcPageOffset(target_addr, .load_store_64);
- try writer.writeIntLittle(u32, aarch64.Instruction.ldr(
- .x16,
- .x16,
- aarch64.Instruction.LoadStoreOffset.imm(off),
- ).toU32());
- }
- try writer.writeIntLittle(u32, aarch64.Instruction.br(.x16).toU32());
- },
- else => unreachable,
- }
+ try stub_helpers.writeStubCode(.{
+ .cpu_arch = cpu_arch,
+ .source_addr = source_addr,
+ .target_addr = target_addr,
+ }, writer);
}
fn createTentativeDefAtoms(self: *Zld) !void {
@@ -2267,11 +2181,7 @@ pub const Zld = struct {
assert(self.stub_helper_preamble_sym_index != null);
const section = self.sections.get(stub_helper_section_index);
- const stub_offset: u4 = switch (self.options.target.cpu.arch) {
- .x86_64 => 1,
- .aarch64 => 2 * @sizeOf(u32),
- else => unreachable,
- };
+ const stub_offset = stub_helpers.calcStubOffsetInStubHelper(self.options.target.cpu.arch);
const header = section.header;
var atom_index = section.first_atom_index;
atom_index = self.getAtom(atom_index).next_index.?; // skip preamble
diff --git a/src/link/table_section.zig b/src/link/table_section.zig
new file mode 100644
index 0000000000..891f3b1a50
--- /dev/null
+++ b/src/link/table_section.zig
@@ -0,0 +1,65 @@
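+/// Generic model of a table-like synthetic section (e.g. GOT or stubs):
+/// a flat list of entries with a free list for slot reuse and a reverse
+/// lookup from entry to index.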
+pub fn TableSection(comptime Entry: type) type {
+ return struct {
+ entries: std.ArrayListUnmanaged(Entry) = .{},
+ free_list: std.ArrayListUnmanaged(Index) = .{},
+ lookup: std.AutoHashMapUnmanaged(Entry, Index) = .{},
+
+ pub fn deinit(self: *Self, allocator: Allocator) void {
+ self.entries.deinit(allocator);
+ self.free_list.deinit(allocator);
+ self.lookup.deinit(allocator);
+ }
+
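+        /// Returns the index of a free slot, preferring indices recycled via
+        /// freeEntry over growing the table.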
+ pub fn allocateEntry(self: *Self, allocator: Allocator, entry: Entry) Allocator.Error!Index {
+ try self.entries.ensureUnusedCapacity(allocator, 1);
+ const index = blk: {
+ if (self.free_list.popOrNull()) |index| {
+ log.debug(" (reusing entry index {d})", .{index});
+ break :blk index;
+ } else {
+ log.debug(" (allocating entry at index {d})", .{self.entries.items.len});
+ const index = @intCast(u32, self.entries.items.len);
+ _ = self.entries.addOneAssumeCapacity();
+ break :blk index;
+ }
+ };
+ self.entries.items[index] = entry;
+ try self.lookup.putNoClobber(allocator, entry, index);
+ return index;
+ }
+
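+        /// Marks the slot as dead and recycles its index: the entry stays in
+        /// `entries` but is removed from `lookup`, so table walks can skip it.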
+ pub fn freeEntry(self: *Self, allocator: Allocator, entry: Entry) void {
+ const index = self.lookup.get(entry) orelse return;
+ self.free_list.append(allocator, index) catch {};
+ self.entries.items[index] = undefined;
+ _ = self.lookup.remove(entry);
+ }
+
+ pub fn count(self: Self) usize {
+ return self.entries.items.len;
+ }
+
+ pub fn format(
+ self: Self,
+ comptime unused_format_string: []const u8,
+ options: std.fmt.FormatOptions,
+ writer: anytype,
+ ) !void {
+ _ = options;
+ comptime assert(unused_format_string.len == 0);
+ try writer.writeAll("TableSection:\n");
+ for (self.entries.items, 0..) |entry, i| {
+ try writer.print(" {d} => {}\n", .{ i, entry });
+ }
+ }
+
+ const Self = @This();
+ pub const Index = u32;
+ };
+}
+
+const std = @import("std");
+const assert = std.debug.assert;
+const log = std.log.scoped(.link);
+
+const Allocator = std.mem.Allocator;