author    Jakub Konka <kubkon@jakubkonka.com>    2023-03-30 20:56:25 +0200
committer Jakub Konka <kubkon@jakubkonka.com>    2023-03-30 21:08:32 +0200
commit    ee0c4457657523e218c1e211c447d3e196575ddc (patch)
tree      51c14da38f52ac7a0170485747d8b4f7bab37d95
parent    349349fa01f77fe3bf2b57dc821f889e2e869004 (diff)
coff: due to ASLR we need to dupe the code for relocating
In addition, we need to be careful not to mark the relocations as resolved prematurely: the relocs have to be resolved twice, once for the in-memory writes and once for the in-file updates, so clearing them after the first pass would risk malforming the binary.
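
As a rough standalone sketch of the idea (the Reloc type, its resolve helper, and the addresses below are invented for illustration and do not match the real linker code): the in-memory pass patches a duplicate of the code buffer using the observed ASLR slide, the on-disk pass patches the original buffer using the default image base, and the dirty flag is cleared only after both passes have run, so the file never ends up containing slid addresses.

const std = @import("std");

// Hypothetical, simplified relocation record; the names and layout do not
// match the real src/link/Coff/Relocation.zig.
const Reloc = struct {
    offset: u32,
    target_vaddr: u32,
    dirty: bool = true,

    // Patch an absolute 64-bit address at `offset`, biased by `base`.
    fn resolve(self: Reloc, code: []u8, base: u64) void {
        std.mem.writeIntLittle(u64, code[self.offset..][0..8], base + self.target_vaddr);
    }
};

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var code = [_]u8{0} ** 16;
    var relocs = [_]Reloc{.{ .offset = 8, .target_vaddr = 0x1000 }};

    // In-memory pass: the loaded image is slid by ASLR, so patch a *copy*
    // of the code with the observed load address and keep `code` pristine
    // for the on-disk pass below.
    const slide: u64 = 0x7ff6_1234_0000; // pretend loaded base address
    const mem_code = try gpa.dupe(u8, &code);
    defer gpa.free(mem_code);
    for (relocs) |r| r.resolve(mem_code, slide);

    // On-disk pass: patch the original buffer with the default image base,
    // and only now consider the relocs resolved.
    const image_base: u64 = 0x1_4000_0000;
    for (&relocs) |*r| {
        r.resolve(&code, image_base);
        r.dirty = false;
    }

    std.debug.print("in file: {x}\n", .{std.fmt.fmtSliceHexLower(&code)});
}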
-rw-r--r--  src/link/Coff.zig             59
-rw-r--r--  src/link/Coff/Relocation.zig  14
2 files changed, 48 insertions, 25 deletions
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 87ad1085aa..a7ca47c151 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -781,24 +781,47 @@ fn writeAtom(self: *Coff, atom_index: Atom.Index, code: []u8) !void {
const sym = atom.getSymbol(self);
const section = self.sections.get(@enumToInt(sym.section_number) - 1);
const file_offset = section.header.pointer_to_raw_data + sym.value - section.header.virtual_address;
+
log.debug("writing atom for symbol {s} at file offset 0x{x} to 0x{x}", .{
atom.getName(self),
file_offset,
file_offset + code.len,
});
+ const gpa = self.base.allocator;
+
+ // Gather relocs which can be resolved.
+ // We need to do this as we will be applying different slide values depending
+ // if we are running in hot-code swapping mode or not.
+ // TODO: how crazy would it be to try and apply the actual image base of the loaded
+ // process for the in-file values rather than the Windows defaults?
+ var relocs = std.ArrayList(*Relocation).init(gpa);
+ defer relocs.deinit();
+
+ if (self.relocs.getPtr(atom_index)) |rels| {
+ try relocs.ensureTotalCapacityPrecise(rels.items.len);
+ for (rels.items) |*reloc| {
+ if (reloc.isResolvable(self)) relocs.appendAssumeCapacity(reloc);
+ }
+ }
+
if (self.base.child_pid) |handle| {
const slide = @ptrToInt(self.hot_state.loaded_base_address.?);
- const mem_code = try self.base.allocator.dupe(u8, code);
- defer self.base.allocator.free(mem_code);
- self.resolveRelocs(atom_index, mem_code, slide);
+ const mem_code = try gpa.dupe(u8, code);
+ defer gpa.free(mem_code);
+ self.resolveRelocs(atom_index, relocs.items, mem_code, slide);
const vaddr = sym.value + slide;
const pvaddr = @intToPtr(*anyopaque, vaddr);
+
log.debug("writing to memory at address {x}", .{vaddr});
+
+ if (build_options.enable_logging) {
+ try debugMem(gpa, handle, pvaddr, mem_code);
+ }
+
if (section.header.flags.MEM_WRITE == 0) {
- log.debug("page not mapped for write access; re-mapping...", .{});
writeMemProtected(handle, pvaddr, mem_code) catch |err| {
log.warn("writing to protected memory failed with error: {s}", .{@errorName(err)});
};
@@ -809,25 +832,29 @@ fn writeAtom(self: *Coff, atom_index: Atom.Index, code: []u8) !void {
}
}
- self.resolveRelocs(atom_index, code, self.getImageBase());
+ self.resolveRelocs(atom_index, relocs.items, code, self.getImageBase());
try self.base.file.?.pwriteAll(code, file_offset);
+
+ // Now we can mark the relocs as resolved.
+ while (relocs.popOrNull()) |reloc| {
+ reloc.dirty = false;
+ }
}
fn debugMem(allocator: Allocator, handle: std.ChildProcess.Id, pvaddr: std.os.windows.LPVOID, code: []const u8) !void {
var buffer = try allocator.alloc(u8, code.len);
defer allocator.free(buffer);
const memread = try std.os.windows.ReadProcessMemory(handle, pvaddr, buffer);
- log.debug("in memory: {x}", .{std.fmt.fmtSliceHexLower(memread)});
log.debug("to write: {x}", .{std.fmt.fmtSliceHexLower(code)});
+ log.debug("in memory: {x}", .{std.fmt.fmtSliceHexLower(memread)});
}
fn writeMemProtected(handle: std.ChildProcess.Id, pvaddr: std.os.windows.LPVOID, code: []const u8) !void {
- var old_prot: std.os.windows.DWORD = undefined;
- try std.os.windows.VirtualProtectEx(handle, pvaddr, code.len, std.os.windows.PAGE_EXECUTE_WRITECOPY, &old_prot);
+ const old_prot = try std.os.windows.VirtualProtectEx(handle, pvaddr, code.len, std.os.windows.PAGE_EXECUTE_WRITECOPY);
try writeMem(handle, pvaddr, code);
// TODO: We can probably just set the pages writeable and leave it at that without having to restore the attributes.
// For that though, we want to track which page has already been modified.
- try std.os.windows.VirtualProtectEx(handle, pvaddr, code.len, old_prot, null);
+ _ = try std.os.windows.VirtualProtectEx(handle, pvaddr, code.len, old_prot);
}
fn writeMem(handle: std.ChildProcess.Id, pvaddr: std.os.windows.LPVOID, code: []const u8) !void {
@@ -868,16 +895,10 @@ fn markRelocsDirtyByAddress(self: *Coff, addr: u32) void {
}
}
-fn resolveRelocs(self: *Coff, atom_index: Atom.Index, code: []u8, image_base: u64) void {
- const relocs = self.relocs.getPtr(atom_index) orelse return;
-
+fn resolveRelocs(self: *Coff, atom_index: Atom.Index, relocs: []*const Relocation, code: []u8, image_base: u64) void {
log.debug("relocating '{s}'", .{self.getAtom(atom_index).getName(self)});
-
- for (relocs.items) |*reloc| {
- if (!reloc.dirty) continue;
- if (reloc.resolve(atom_index, code, image_base, self)) {
- reloc.dirty = false;
- }
+ for (relocs) |reloc| {
+ reloc.resolve(atom_index, code, image_base, self);
}
}
@@ -1488,7 +1509,7 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
for (self.relocs.keys(), self.relocs.values()) |atom_index, relocs| {
const needs_update = for (relocs.items) |reloc| {
- if (reloc.dirty) break true;
+ if (reloc.isResolvable(self)) break true;
} else false;
if (!needs_update) continue;
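
The reshaped writeAtom above gathers the relocs to apply once, runs both passes over that same list, and only then clears the dirty flags. A minimal test-style sketch of just that bookkeeping (this Reloc stand-in tracks only a dirty flag; the real code additionally requires the target address to be known via isResolvable, see the Relocation.zig diff below):

const std = @import("std");

// Minimal stand-in: only the dirty flag matters for this sketch.
const Reloc = struct {
    dirty: bool = true,
};

test "dirty is cleared only after the relocs list has been applied" {
    const gpa = std.testing.allocator;

    var all = [_]Reloc{ .{}, .{ .dirty = false }, .{} };

    // Gather pointers to the relocs that still need work, exactly once,
    // so the in-memory pass and the in-file pass see the same set.
    var relocs = std.ArrayList(*Reloc).init(gpa);
    defer relocs.deinit();
    try relocs.ensureTotalCapacityPrecise(all.len);
    for (&all) |*reloc| {
        if (reloc.dirty) relocs.appendAssumeCapacity(reloc);
    }
    try std.testing.expectEqual(@as(usize, 2), relocs.items.len);

    // ... resolveRelocs(relocs.items, mem_code, slide) and
    //     resolveRelocs(relocs.items, code, image_base) would run here ...

    // Only afterwards is it safe to mark them resolved.
    while (relocs.popOrNull()) |reloc| {
        reloc.dirty = false;
    }
    for (all) |reloc| try std.testing.expect(!reloc.dirty);
}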
diff --git a/src/link/Coff/Relocation.zig b/src/link/Coff/Relocation.zig
index 6b35de93f4..2fafa0bbdc 100644
--- a/src/link/Coff/Relocation.zig
+++ b/src/link/Coff/Relocation.zig
@@ -72,14 +72,18 @@ pub fn getTargetAddress(self: Relocation, coff_file: *const Coff) ?u32 {
}
}
-/// Returns `false` if obtaining the target address has been deferred until `flushModule`.
-/// This can happen when trying to resolve address of an import table entry ahead of time.
-pub fn resolve(self: Relocation, atom_index: Atom.Index, code: []u8, image_base: u64, coff_file: *Coff) bool {
+/// Returns true if and only if the reloc is dirty AND the target address is available.
+pub fn isResolvable(self: Relocation, coff_file: *Coff) bool {
+ _ = self.getTargetAddress(coff_file) orelse return false;
+ return self.dirty;
+}
+
+pub fn resolve(self: Relocation, atom_index: Atom.Index, code: []u8, image_base: u64, coff_file: *Coff) void {
const atom = coff_file.getAtom(atom_index);
const source_sym = atom.getSymbol(coff_file);
const source_vaddr = source_sym.value + self.offset;
- const target_vaddr = self.getTargetAddress(coff_file) orelse return false;
+ const target_vaddr = self.getTargetAddress(coff_file).?; // Oops, you didn't check if the relocation can be resolved with isResolvable().
const target_vaddr_with_addend = target_vaddr + self.addend;
log.debug(" ({x}: [() => 0x{x} ({s})) ({s}) ", .{
@@ -102,8 +106,6 @@ pub fn resolve(self: Relocation, atom_index: Atom.Index, code: []u8, image_base:
.x86, .x86_64 => self.resolveX86(ctx),
else => unreachable, // unhandled target architecture
}
-
- return true;
}
const Context = struct {
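
A hedged sketch of the new isResolvable/resolve contract (this Relocation is a stand-in that models only the dirty flag and an optional target address; target_vaddr being null plays the role of getTargetAddress returning null for a not-yet-allocated import table entry): callers are expected to check isResolvable first, and resolve unconditionally unwraps the target, mirroring the .? in the real code.

const std = @import("std");

// Stand-in for the real Relocation struct, just enough to show the contract.
const Relocation = struct {
    dirty: bool = true,
    target_vaddr: ?u32 = null,

    fn isResolvable(self: Relocation) bool {
        _ = self.target_vaddr orelse return false;
        return self.dirty;
    }

    fn resolve(self: Relocation, code: []u8, image_base: u64) void {
        // The caller is expected to have checked isResolvable() first.
        const target = self.target_vaddr.?;
        std.mem.writeIntLittle(u64, code[0..8], image_base + target);
    }
};

test "resolve is only called for resolvable relocs" {
    var code = [_]u8{0} ** 8;

    const deferred = Relocation{}; // no target yet: wait for flushModule
    const ready = Relocation{ .target_vaddr = 0x2000 };

    try std.testing.expect(!deferred.isResolvable());
    try std.testing.expect(ready.isResolvable());

    if (ready.isResolvable()) ready.resolve(&code, 0x1_4000_0000);
    try std.testing.expectEqual(
        @as(u64, 0x1_4000_2000),
        std.mem.readIntLittle(u64, code[0..8]),
    );
}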