aboutsummaryrefslogtreecommitdiff
path: root/src/link
diff options
context:
space:
mode:
authorAndrew Kelley <andrew@ziglang.org>2021-07-21 03:18:39 -0400
committerGitHub <noreply@github.com>2021-07-21 03:18:39 -0400
commit26984852bdfdbe3564b19f3ff7b3ecfd606c9902 (patch)
treea64c806e3e9900c4eb0cd281a9d6946ce07421f5 /src/link
parentbfe20051673e285d3b1788cd637fab9ca84d1cb1 (diff)
parentc39c46c0d12b15874b1586ff47cf473b31867918 (diff)
downloadzig-26984852bdfdbe3564b19f3ff7b3ecfd606c9902.tar.gz
zig-26984852bdfdbe3564b19f3ff7b3ecfd606c9902.zip
Merge pull request #9353 from ziglang/stage2-air
stage2: rework AIR memory layout
Diffstat (limited to 'src/link')
-rw-r--r--src/link/C.zig28
-rw-r--r--src/link/Coff.zig61
-rw-r--r--src/link/Elf.zig562
-rw-r--r--src/link/MachO.zig295
-rw-r--r--src/link/Plan9.zig385
-rw-r--r--src/link/SpirV.zig38
-rw-r--r--src/link/Wasm.zig84
7 files changed, 924 insertions, 529 deletions
diff --git a/src/link/C.zig b/src/link/C.zig
index 53561d16cd..09f789f7d1 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -2,14 +2,17 @@ const std = @import("std");
const mem = std.mem;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
+const fs = std.fs;
+
+const C = @This();
const Module = @import("../Module.zig");
const Compilation = @import("../Compilation.zig");
-const fs = std.fs;
const codegen = @import("../codegen/c.zig");
const link = @import("../link.zig");
const trace = @import("../tracy.zig").trace;
-const C = @This();
const Type = @import("../type.zig").Type;
+const Air = @import("../Air.zig");
+const Liveness = @import("../Liveness.zig");
pub const base_tag: link.File.Tag = .c;
pub const zig_h = @embedFile("C/zig.h");
@@ -95,10 +98,7 @@ fn deinitDecl(gpa: *Allocator, decl: *Module.Decl) void {
decl.fn_link.c.typedefs.deinit(gpa);
}
-pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
- const tracy = trace(@src());
- defer tracy.end();
-
+pub fn finishUpdateDecl(self: *C, module: *Module, decl: *Module.Decl, air: Air, liveness: Liveness) !void {
// Keep track of all decls so we can iterate over them on flush().
_ = try self.decl_table.getOrPut(self.base.allocator, decl);
@@ -126,6 +126,8 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
.code = code.toManaged(module.gpa),
.value_map = codegen.CValueMap.init(module.gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code
+ .air = air,
+ .liveness = liveness,
};
object.indent_writer = .{ .underlying_writer = object.code.writer() };
defer {
@@ -157,6 +159,20 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
code.shrinkAndFree(module.gpa, code.items.len);
}
+pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ return self.finishUpdateDecl(module, func.owner_decl, air, liveness);
+}
+
+pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ return self.finishUpdateDecl(module, decl, undefined, undefined);
+}
+
pub fn updateDeclLineNumber(self: *C, module: *Module, decl: *Module.Decl) !void {
// The C backend does not have the ability to fix line numbers without re-generating
// the entire Decl.
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index b466cf9136..50ad6bc1a0 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1,6 +1,7 @@
const Coff = @This();
const std = @import("std");
+const builtin = @import("builtin");
const log = std.log.scoped(.link);
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
@@ -17,6 +18,8 @@ const build_options = @import("build_options");
const Cache = @import("../Cache.zig");
const mingw = @import("../mingw.zig");
const llvm_backend = @import("../codegen/llvm.zig");
+const Air = @import("../Air.zig");
+const Liveness = @import("../Liveness.zig");
const allocation_padding = 4 / 3;
const minimum_text_block_size = 64 * allocation_padding;
@@ -653,19 +656,63 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
}
}
-pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
- // TODO COFF/PE debug information
- // TODO Implement exports
+pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+ if (build_options.skip_non_native and
+ builtin.object_format != .coff and
+ builtin.object_format != .pe)
+ {
+ @panic("Attempted to compile for object format that was disabled by build configuration");
+ }
+ if (build_options.have_llvm) {
+ if (self.llvm_object) |llvm_object| {
+ return llvm_object.updateFunc(module, func, air, liveness);
+ }
+ }
const tracy = trace(@src());
defer tracy.end();
- if (build_options.have_llvm)
- if (self.llvm_object) |llvm_object| return try llvm_object.updateDecl(module, decl);
+ var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+ defer code_buffer.deinit();
+
+ const decl = func.owner_decl;
+ const res = try codegen.generateFunction(
+ &self.base,
+ decl.srcLoc(),
+ func,
+ air,
+ liveness,
+ &code_buffer,
+ .none,
+ );
+ const code = switch (res) {
+ .appended => code_buffer.items,
+ .fail => |em| {
+ decl.analysis = .codegen_failure;
+ try module.failed_decls.put(module.gpa, decl, em);
+ return;
+ },
+ };
+
+ return self.finishUpdateDecl(module, func.owner_decl, code);
+}
+
+pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
+ if (build_options.skip_non_native and builtin.object_format != .coff and builtin.object_format != .pe) {
+ @panic("Attempted to compile for object format that was disabled by build configuration");
+ }
+ if (build_options.have_llvm) {
+ if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl);
+ }
+ const tracy = trace(@src());
+ defer tracy.end();
if (decl.val.tag() == .extern_fn) {
return; // TODO Should we do more when front-end analyzed extern decl?
}
+ // TODO COFF/PE debug information
+ // TODO Implement exports
+
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -683,6 +730,10 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
},
};
+ return self.finishUpdateDecl(module, decl, code);
+}
+
+fn finishUpdateDecl(self: *Coff, module: *Module, decl: *Module.Decl, code: []const u8) !void {
const required_alignment = decl.ty.abiAlignment(self.base.options.target);
const curr_size = decl.link.coff.size;
if (curr_size != 0) {
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index d754b478b9..315dfb563b 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1,6 +1,7 @@
const Elf = @This();
const std = @import("std");
+const builtin = @import("builtin");
const mem = std.mem;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
@@ -10,7 +11,6 @@ const log = std.log.scoped(.link);
const DW = std.dwarf;
const leb128 = std.leb;
-const ir = @import("../air.zig");
const Module = @import("../Module.zig");
const Compilation = @import("../Compilation.zig");
const codegen = @import("../codegen.zig");
@@ -26,6 +26,8 @@ const glibc = @import("../glibc.zig");
const musl = @import("../musl.zig");
const Cache = @import("../Cache.zig");
const llvm_backend = @import("../codegen/llvm.zig");
+const Air = @import("../Air.zig");
+const Liveness = @import("../Liveness.zig");
const default_entry_addr = 0x8000000;
@@ -2155,138 +2157,17 @@ pub fn freeDecl(self: *Elf, decl: *Module.Decl) void {
}
}
-pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
- const tracy = trace(@src());
- defer tracy.end();
-
- if (build_options.have_llvm)
- if (self.llvm_object) |llvm_object| return try llvm_object.updateDecl(module, decl);
-
- if (decl.val.tag() == .extern_fn) {
- return; // TODO Should we do more when front-end analyzed extern decl?
- }
- if (decl.val.castTag(.variable)) |payload| {
- const variable = payload.data;
- if (variable.is_extern) {
- return; // TODO Should we do more when front-end analyzed extern decl?
- }
- }
-
- var code_buffer = std.ArrayList(u8).init(self.base.allocator);
- defer code_buffer.deinit();
-
- var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator);
- defer dbg_line_buffer.deinit();
-
- var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator);
- defer dbg_info_buffer.deinit();
-
- var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{};
- defer {
- var it = dbg_info_type_relocs.valueIterator();
- while (it.next()) |value| {
- value.relocs.deinit(self.base.allocator);
- }
- dbg_info_type_relocs.deinit(self.base.allocator);
- }
-
- const is_fn: bool = switch (decl.ty.zigTypeTag()) {
- .Fn => true,
- else => false,
- };
- if (is_fn) {
- // For functions we need to add a prologue to the debug line program.
- try dbg_line_buffer.ensureCapacity(26);
-
- const func = decl.val.castTag(.function).?.data;
- const line_off = @intCast(u28, decl.src_line + func.lbrace_line);
-
- const ptr_width_bytes = self.ptrWidthBytes();
- dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{
- DW.LNS_extended_op,
- ptr_width_bytes + 1,
- DW.LNE_set_address,
- });
- // This is the "relocatable" vaddr, corresponding to `code_buffer` index `0`.
- assert(dbg_line_vaddr_reloc_index == dbg_line_buffer.items.len);
- dbg_line_buffer.items.len += ptr_width_bytes;
-
- dbg_line_buffer.appendAssumeCapacity(DW.LNS_advance_line);
- // This is the "relocatable" relative line offset from the previous function's end curly
- // to this function's begin curly.
- assert(self.getRelocDbgLineOff() == dbg_line_buffer.items.len);
- // Here we use a ULEB128-fixed-4 to make sure this field can be overwritten later.
- leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), line_off);
-
- dbg_line_buffer.appendAssumeCapacity(DW.LNS_set_file);
- assert(self.getRelocDbgFileIndex() == dbg_line_buffer.items.len);
- // Once we support more than one source file, this will have the ability to be more
- // than one possible value.
- const file_index = 1;
- leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), file_index);
-
- // Emit a line for the begin curly with prologue_end=false. The codegen will
- // do the work of setting prologue_end=true and epilogue_begin=true.
- dbg_line_buffer.appendAssumeCapacity(DW.LNS_copy);
-
- // .debug_info subprogram
- const decl_name_with_null = decl.name[0 .. mem.lenZ(decl.name) + 1];
- try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 25 + decl_name_with_null.len);
-
- const fn_ret_type = decl.ty.fnReturnType();
- const fn_ret_has_bits = fn_ret_type.hasCodeGenBits();
- if (fn_ret_has_bits) {
- dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram);
- } else {
- dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram_retvoid);
- }
- // These get overwritten after generating the machine code. These values are
- // "relocations" and have to be in this fixed place so that functions can be
- // moved in virtual address space.
- assert(dbg_info_low_pc_reloc_index == dbg_info_buffer.items.len);
- dbg_info_buffer.items.len += ptr_width_bytes; // DW.AT_low_pc, DW.FORM_addr
- assert(self.getRelocDbgInfoSubprogramHighPC() == dbg_info_buffer.items.len);
- dbg_info_buffer.items.len += 4; // DW.AT_high_pc, DW.FORM_data4
- if (fn_ret_has_bits) {
- const gop = try dbg_info_type_relocs.getOrPut(self.base.allocator, fn_ret_type);
- if (!gop.found_existing) {
- gop.value_ptr.* = .{
- .off = undefined,
- .relocs = .{},
- };
- }
- try gop.value_ptr.relocs.append(self.base.allocator, @intCast(u32, dbg_info_buffer.items.len));
- dbg_info_buffer.items.len += 4; // DW.AT_type, DW.FORM_ref4
- }
- dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT_name, DW.FORM_string
- } else {
- // TODO implement .debug_info for global variables
+fn deinitRelocs(gpa: *Allocator, table: *File.DbgInfoTypeRelocsTable) void {
+ var it = table.valueIterator();
+ while (it.next()) |value| {
+ value.relocs.deinit(gpa);
}
- const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
- const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
- .ty = decl.ty,
- .val = decl_val,
- }, &code_buffer, .{
- .dwarf = .{
- .dbg_line = &dbg_line_buffer,
- .dbg_info = &dbg_info_buffer,
- .dbg_info_type_relocs = &dbg_info_type_relocs,
- },
- });
- const code = switch (res) {
- .externally_managed => |x| x,
- .appended => code_buffer.items,
- .fail => |em| {
- decl.analysis = .codegen_failure;
- try module.failed_decls.put(module.gpa, decl, em);
- return;
- },
- };
+ table.deinit(gpa);
+}
+fn updateDeclCode(self: *Elf, decl: *Module.Decl, code: []const u8, stt_bits: u8) !*elf.Elf64_Sym {
const required_alignment = decl.ty.abiAlignment(self.base.options.target);
- const stt_bits: u8 = if (is_fn) elf.STT_FUNC else elf.STT_OBJECT;
-
assert(decl.link.elf.local_sym_index != 0); // Caller forgot to allocateDeclIndexes()
const local_sym = &self.local_symbols.items[decl.link.elf.local_sym_index];
if (local_sym.st_size != 0) {
@@ -2338,128 +2219,16 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
const file_offset = self.sections.items[self.text_section_index.?].sh_offset + section_offset;
try self.base.file.?.pwriteAll(code, file_offset);
- const target_endian = self.base.options.target.cpu.arch.endian();
-
- const text_block = &decl.link.elf;
-
- // If the Decl is a function, we need to update the .debug_line program.
- if (is_fn) {
- // Perform the relocations based on vaddr.
- switch (self.ptr_width) {
- .p32 => {
- {
- const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..4];
- mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian);
- }
- {
- const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..4];
- mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian);
- }
- },
- .p64 => {
- {
- const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..8];
- mem.writeInt(u64, ptr, local_sym.st_value, target_endian);
- }
- {
- const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..8];
- mem.writeInt(u64, ptr, local_sym.st_value, target_endian);
- }
- },
- }
- {
- const ptr = dbg_info_buffer.items[self.getRelocDbgInfoSubprogramHighPC()..][0..4];
- mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_size), target_endian);
- }
-
- try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS_extended_op, 1, DW.LNE_end_sequence });
-
- // Now we have the full contents and may allocate a region to store it.
-
- // This logic is nearly identical to the logic below in `updateDeclDebugInfoAllocation` for
- // `TextBlock` and the .debug_info. If you are editing this logic, you
- // probably need to edit that logic too.
-
- const debug_line_sect = &self.sections.items[self.debug_line_section_index.?];
- const src_fn = &decl.fn_link.elf;
- src_fn.len = @intCast(u32, dbg_line_buffer.items.len);
- if (self.dbg_line_fn_last) |last| not_first: {
- if (src_fn.next) |next| {
- // Update existing function - non-last item.
- if (src_fn.off + src_fn.len + min_nop_size > next.off) {
- // It grew too big, so we move it to a new location.
- if (src_fn.prev) |prev| {
- self.dbg_line_fn_free_list.put(self.base.allocator, prev, {}) catch {};
- prev.next = src_fn.next;
- }
- assert(src_fn.prev != next);
- next.prev = src_fn.prev;
- src_fn.next = null;
- // Populate where it used to be with NOPs.
- const file_pos = debug_line_sect.sh_offset + src_fn.off;
- try self.pwriteDbgLineNops(0, &[0]u8{}, src_fn.len, file_pos);
- // TODO Look at the free list before appending at the end.
- src_fn.prev = last;
- last.next = src_fn;
- self.dbg_line_fn_last = src_fn;
-
- src_fn.off = last.off + padToIdeal(last.len);
- }
- } else if (src_fn.prev == null) {
- if (src_fn == last) {
- // Special case: there is only 1 function and it is being updated.
- // In this case there is nothing to do. The function's length has
- // already been updated, and the logic below takes care of
- // resizing the .debug_line section.
- break :not_first;
- }
- // Append new function.
- // TODO Look at the free list before appending at the end.
- src_fn.prev = last;
- last.next = src_fn;
- self.dbg_line_fn_last = src_fn;
-
- src_fn.off = last.off + padToIdeal(last.len);
- }
- } else {
- // This is the first function of the Line Number Program.
- self.dbg_line_fn_first = src_fn;
- self.dbg_line_fn_last = src_fn;
-
- src_fn.off = padToIdeal(self.dbgLineNeededHeaderBytes());
- }
-
- const last_src_fn = self.dbg_line_fn_last.?;
- const needed_size = last_src_fn.off + last_src_fn.len;
- if (needed_size != debug_line_sect.sh_size) {
- if (needed_size > self.allocatedSize(debug_line_sect.sh_offset)) {
- const new_offset = self.findFreeSpace(needed_size, 1);
- const existing_size = last_src_fn.off;
- log.debug("moving .debug_line section: {d} bytes from 0x{x} to 0x{x}", .{
- existing_size,
- debug_line_sect.sh_offset,
- new_offset,
- });
- const amt = try self.base.file.?.copyRangeAll(debug_line_sect.sh_offset, self.base.file.?, new_offset, existing_size);
- if (amt != existing_size) return error.InputOutput;
- debug_line_sect.sh_offset = new_offset;
- }
- debug_line_sect.sh_size = needed_size;
- self.shdr_table_dirty = true; // TODO look into making only the one section dirty
- self.debug_line_header_dirty = true;
- }
- const prev_padding_size: u32 = if (src_fn.prev) |prev| src_fn.off - (prev.off + prev.len) else 0;
- const next_padding_size: u32 = if (src_fn.next) |next| next.off - (src_fn.off + src_fn.len) else 0;
-
- // We only have support for one compilation unit so far, so the offsets are directly
- // from the .debug_line section.
- const file_pos = debug_line_sect.sh_offset + src_fn.off;
- try self.pwriteDbgLineNops(prev_padding_size, dbg_line_buffer.items, next_padding_size, file_pos);
-
- // .debug_info - End the TAG_subprogram children.
- try dbg_info_buffer.append(0);
- }
+ return local_sym;
+}
+fn finishUpdateDecl(
+ self: *Elf,
+ module: *Module,
+ decl: *Module.Decl,
+ dbg_info_type_relocs: *File.DbgInfoTypeRelocsTable,
+ dbg_info_buffer: *std.ArrayList(u8),
+) !void {
// Now we emit the .debug_info types of the Decl. These will count towards the size of
// the buffer, so we have to do it before computing the offset, and we can't perform the actual
// relocations yet.
@@ -2467,12 +2236,15 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
var it = dbg_info_type_relocs.iterator();
while (it.next()) |entry| {
entry.value_ptr.off = @intCast(u32, dbg_info_buffer.items.len);
- try self.addDbgInfoType(entry.key_ptr.*, &dbg_info_buffer);
+ try self.addDbgInfoType(entry.key_ptr.*, dbg_info_buffer);
}
}
+ const text_block = &decl.link.elf;
try self.updateDeclDebugInfoAllocation(text_block, @intCast(u32, dbg_info_buffer.items.len));
+ const target_endian = self.base.options.target.cpu.arch.endian();
+
{
// Now that we have the offset assigned we can finally perform type relocations.
var it = dbg_info_type_relocs.valueIterator();
@@ -2495,6 +2267,292 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
return self.updateDeclExports(module, decl, decl_exports);
}
+pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+ if (build_options.skip_non_native and builtin.object_format != .elf) {
+ @panic("Attempted to compile for object format that was disabled by build configuration");
+ }
+ if (build_options.have_llvm) {
+ if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness);
+ }
+
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+ defer code_buffer.deinit();
+
+ var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator);
+ defer dbg_line_buffer.deinit();
+
+ var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator);
+ defer dbg_info_buffer.deinit();
+
+ var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{};
+ defer deinitRelocs(self.base.allocator, &dbg_info_type_relocs);
+
+ // For functions we need to add a prologue to the debug line program.
+ try dbg_line_buffer.ensureCapacity(26);
+
+ const decl = func.owner_decl;
+ const line_off = @intCast(u28, decl.src_line + func.lbrace_line);
+
+ const ptr_width_bytes = self.ptrWidthBytes();
+ dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{
+ DW.LNS_extended_op,
+ ptr_width_bytes + 1,
+ DW.LNE_set_address,
+ });
+ // This is the "relocatable" vaddr, corresponding to `code_buffer` index `0`.
+ assert(dbg_line_vaddr_reloc_index == dbg_line_buffer.items.len);
+ dbg_line_buffer.items.len += ptr_width_bytes;
+
+ dbg_line_buffer.appendAssumeCapacity(DW.LNS_advance_line);
+ // This is the "relocatable" relative line offset from the previous function's end curly
+ // to this function's begin curly.
+ assert(self.getRelocDbgLineOff() == dbg_line_buffer.items.len);
+ // Here we use a ULEB128-fixed-4 to make sure this field can be overwritten later.
+ leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), line_off);
+
+ dbg_line_buffer.appendAssumeCapacity(DW.LNS_set_file);
+ assert(self.getRelocDbgFileIndex() == dbg_line_buffer.items.len);
+ // Once we support more than one source file, this will have the ability to be more
+ // than one possible value.
+ const file_index = 1;
+ leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), file_index);
+
+ // Emit a line for the begin curly with prologue_end=false. The codegen will
+ // do the work of setting prologue_end=true and epilogue_begin=true.
+ dbg_line_buffer.appendAssumeCapacity(DW.LNS_copy);
+
+ // .debug_info subprogram
+ const decl_name_with_null = decl.name[0 .. mem.lenZ(decl.name) + 1];
+ try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 25 + decl_name_with_null.len);
+
+ const fn_ret_type = decl.ty.fnReturnType();
+ const fn_ret_has_bits = fn_ret_type.hasCodeGenBits();
+ if (fn_ret_has_bits) {
+ dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram);
+ } else {
+ dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram_retvoid);
+ }
+ // These get overwritten after generating the machine code. These values are
+ // "relocations" and have to be in this fixed place so that functions can be
+ // moved in virtual address space.
+ assert(dbg_info_low_pc_reloc_index == dbg_info_buffer.items.len);
+ dbg_info_buffer.items.len += ptr_width_bytes; // DW.AT_low_pc, DW.FORM_addr
+ assert(self.getRelocDbgInfoSubprogramHighPC() == dbg_info_buffer.items.len);
+ dbg_info_buffer.items.len += 4; // DW.AT_high_pc, DW.FORM_data4
+ if (fn_ret_has_bits) {
+ const gop = try dbg_info_type_relocs.getOrPut(self.base.allocator, fn_ret_type);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{
+ .off = undefined,
+ .relocs = .{},
+ };
+ }
+ try gop.value_ptr.relocs.append(self.base.allocator, @intCast(u32, dbg_info_buffer.items.len));
+ dbg_info_buffer.items.len += 4; // DW.AT_type, DW.FORM_ref4
+ }
+ dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT_name, DW.FORM_string
+
+ const res = try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{
+ .dwarf = .{
+ .dbg_line = &dbg_line_buffer,
+ .dbg_info = &dbg_info_buffer,
+ .dbg_info_type_relocs = &dbg_info_type_relocs,
+ },
+ });
+ const code = switch (res) {
+ .appended => code_buffer.items,
+ .fail => |em| {
+ decl.analysis = .codegen_failure;
+ try module.failed_decls.put(module.gpa, decl, em);
+ return;
+ },
+ };
+
+ const local_sym = try self.updateDeclCode(decl, code, elf.STT_FUNC);
+
+ const target_endian = self.base.options.target.cpu.arch.endian();
+
+ // Since the Decl is a function, we need to update the .debug_line program.
+ // Perform the relocations based on vaddr.
+ switch (self.ptr_width) {
+ .p32 => {
+ {
+ const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..4];
+ mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian);
+ }
+ {
+ const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..4];
+ mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian);
+ }
+ },
+ .p64 => {
+ {
+ const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..8];
+ mem.writeInt(u64, ptr, local_sym.st_value, target_endian);
+ }
+ {
+ const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..8];
+ mem.writeInt(u64, ptr, local_sym.st_value, target_endian);
+ }
+ },
+ }
+ {
+ const ptr = dbg_info_buffer.items[self.getRelocDbgInfoSubprogramHighPC()..][0..4];
+ mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_size), target_endian);
+ }
+
+ try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS_extended_op, 1, DW.LNE_end_sequence });
+
+ // Now we have the full contents and may allocate a region to store it.
+
+ // This logic is nearly identical to the logic below in `updateDeclDebugInfoAllocation` for
+ // `TextBlock` and the .debug_info. If you are editing this logic, you
+ // probably need to edit that logic too.
+
+ const debug_line_sect = &self.sections.items[self.debug_line_section_index.?];
+ const src_fn = &decl.fn_link.elf;
+ src_fn.len = @intCast(u32, dbg_line_buffer.items.len);
+ if (self.dbg_line_fn_last) |last| not_first: {
+ if (src_fn.next) |next| {
+ // Update existing function - non-last item.
+ if (src_fn.off + src_fn.len + min_nop_size > next.off) {
+ // It grew too big, so we move it to a new location.
+ if (src_fn.prev) |prev| {
+ self.dbg_line_fn_free_list.put(self.base.allocator, prev, {}) catch {};
+ prev.next = src_fn.next;
+ }
+ assert(src_fn.prev != next);
+ next.prev = src_fn.prev;
+ src_fn.next = null;
+ // Populate where it used to be with NOPs.
+ const file_pos = debug_line_sect.sh_offset + src_fn.off;
+ try self.pwriteDbgLineNops(0, &[0]u8{}, src_fn.len, file_pos);
+ // TODO Look at the free list before appending at the end.
+ src_fn.prev = last;
+ last.next = src_fn;
+ self.dbg_line_fn_last = src_fn;
+
+ src_fn.off = last.off + padToIdeal(last.len);
+ }
+ } else if (src_fn.prev == null) {
+ if (src_fn == last) {
+ // Special case: there is only 1 function and it is being updated.
+ // In this case there is nothing to do. The function's length has
+ // already been updated, and the logic below takes care of
+ // resizing the .debug_line section.
+ break :not_first;
+ }
+ // Append new function.
+ // TODO Look at the free list before appending at the end.
+ src_fn.prev = last;
+ last.next = src_fn;
+ self.dbg_line_fn_last = src_fn;
+
+ src_fn.off = last.off + padToIdeal(last.len);
+ }
+ } else {
+ // This is the first function of the Line Number Program.
+ self.dbg_line_fn_first = src_fn;
+ self.dbg_line_fn_last = src_fn;
+
+ src_fn.off = padToIdeal(self.dbgLineNeededHeaderBytes());
+ }
+
+ const last_src_fn = self.dbg_line_fn_last.?;
+ const needed_size = last_src_fn.off + last_src_fn.len;
+ if (needed_size != debug_line_sect.sh_size) {
+ if (needed_size > self.allocatedSize(debug_line_sect.sh_offset)) {
+ const new_offset = self.findFreeSpace(needed_size, 1);
+ const existing_size = last_src_fn.off;
+ log.debug("moving .debug_line section: {d} bytes from 0x{x} to 0x{x}", .{
+ existing_size,
+ debug_line_sect.sh_offset,
+ new_offset,
+ });
+ const amt = try self.base.file.?.copyRangeAll(debug_line_sect.sh_offset, self.base.file.?, new_offset, existing_size);
+ if (amt != existing_size) return error.InputOutput;
+ debug_line_sect.sh_offset = new_offset;
+ }
+ debug_line_sect.sh_size = needed_size;
+ self.shdr_table_dirty = true; // TODO look into making only the one section dirty
+ self.debug_line_header_dirty = true;
+ }
+ const prev_padding_size: u32 = if (src_fn.prev) |prev| src_fn.off - (prev.off + prev.len) else 0;
+ const next_padding_size: u32 = if (src_fn.next) |next| next.off - (src_fn.off + src_fn.len) else 0;
+
+ // We only have support for one compilation unit so far, so the offsets are directly
+ // from the .debug_line section.
+ const file_pos = debug_line_sect.sh_offset + src_fn.off;
+ try self.pwriteDbgLineNops(prev_padding_size, dbg_line_buffer.items, next_padding_size, file_pos);
+
+ // .debug_info - End the TAG_subprogram children.
+ try dbg_info_buffer.append(0);
+
+ return self.finishUpdateDecl(module, decl, &dbg_info_type_relocs, &dbg_info_buffer);
+}
+
+pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
+ if (build_options.skip_non_native and builtin.object_format != .elf) {
+ @panic("Attempted to compile for object format that was disabled by build configuration");
+ }
+ if (build_options.have_llvm) {
+ if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl);
+ }
+
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ if (decl.val.tag() == .extern_fn) {
+ return; // TODO Should we do more when front-end analyzed extern decl?
+ }
+ if (decl.val.castTag(.variable)) |payload| {
+ const variable = payload.data;
+ if (variable.is_extern) {
+ return; // TODO Should we do more when front-end analyzed extern decl?
+ }
+ }
+
+ var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+ defer code_buffer.deinit();
+
+ var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator);
+ defer dbg_line_buffer.deinit();
+
+ var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator);
+ defer dbg_info_buffer.deinit();
+
+ var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{};
+ defer deinitRelocs(self.base.allocator, &dbg_info_type_relocs);
+
+ // TODO implement .debug_info for global variables
+ const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
+ const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+ .ty = decl.ty,
+ .val = decl_val,
+ }, &code_buffer, .{
+ .dwarf = .{
+ .dbg_line = &dbg_line_buffer,
+ .dbg_info = &dbg_info_buffer,
+ .dbg_info_type_relocs = &dbg_info_type_relocs,
+ },
+ });
+ const code = switch (res) {
+ .externally_managed => |x| x,
+ .appended => code_buffer.items,
+ .fail => |em| {
+ decl.analysis = .codegen_failure;
+ try module.failed_decls.put(module.gpa, decl, em);
+ return;
+ },
+ };
+
+ _ = try self.updateDeclCode(decl, code, elf.STT_OBJECT);
+ return self.finishUpdateDecl(module, decl, &dbg_info_type_relocs, &dbg_info_buffer);
+}
+
/// Asserts the type has codegen bits.
fn addDbgInfoType(self: *Elf, ty: Type, dbg_info_buffer: *std.ArrayList(u8)) !void {
switch (ty.zigTypeTag()) {
@@ -3022,7 +3080,7 @@ fn pwriteDbgLineNops(
const page_of_nops = [1]u8{DW.LNS_negate_stmt} ** 4096;
const three_byte_nop = [3]u8{ DW.LNS_advance_pc, 0b1000_0000, 0 };
- var vecs: [256]std.os.iovec_const = undefined;
+ var vecs: [512]std.os.iovec_const = undefined;
var vec_index: usize = 0;
{
var padding_left = prev_padding_size;
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index df2e0134e4..02ea5856f4 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -1,6 +1,7 @@
const MachO = @This();
const std = @import("std");
+const builtin = @import("builtin");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const fmt = std.fmt;
@@ -22,11 +23,14 @@ const link = @import("../link.zig");
const File = link.File;
const Cache = @import("../Cache.zig");
const target_util = @import("../target.zig");
+const Air = @import("../Air.zig");
+const Liveness = @import("../Liveness.zig");
const DebugSymbols = @import("MachO/DebugSymbols.zig");
const Trie = @import("MachO/Trie.zig");
const CodeSignature = @import("MachO/CodeSignature.zig");
const Zld = @import("MachO/Zld.zig");
+const llvm_backend = @import("../codegen/llvm.zig");
usingnamespace @import("MachO/commands.zig");
@@ -34,6 +38,9 @@ pub const base_tag: File.Tag = File.Tag.macho;
base: File,
+/// If this is not null, an object file is created by LLVM and linked with LLD afterwards.
+llvm_object: ?*llvm_backend.Object = null,
+
/// Debug symbols bundle (or dSym).
d_sym: ?DebugSymbols = null,
@@ -344,8 +351,13 @@ pub const SrcFn = struct {
pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*MachO {
assert(options.object_format == .macho);
- if (options.use_llvm) return error.LLVM_BackendIsTODO_ForMachO; // TODO
- if (options.use_lld) return error.LLD_LinkingIsTODO_ForMachO; // TODO
+ if (build_options.have_llvm and options.use_llvm) {
+ const self = try createEmpty(allocator, options);
+ errdefer self.base.destroy();
+
+ self.llvm_object = try llvm_backend.Object.create(allocator, sub_path, options);
+ return self;
+ }
const file = try options.emit.?.directory.handle.createFile(sub_path, .{
.truncate = false,
@@ -1132,20 +1144,28 @@ pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {
};
}
-pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
+pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+ if (build_options.skip_non_native and builtin.object_format != .macho) {
+ @panic("Attempted to compile for object format that was disabled by build configuration");
+ }
+ if (build_options.have_llvm) {
+ if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness);
+ }
const tracy = trace(@src());
defer tracy.end();
- if (decl.val.tag() == .extern_fn) {
- return; // TODO Should we do more when front-end analyzed extern decl?
- }
+ const decl = func.owner_decl;
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
- var debug_buffers = if (self.d_sym) |*ds| try ds.initDeclDebugBuffers(self.base.allocator, module, decl) else null;
+ var debug_buffers_buf: DebugSymbols.DeclDebugBuffers = undefined;
+ const debug_buffers = if (self.d_sym) |*ds| blk: {
+ debug_buffers_buf = try ds.initDeclDebugBuffers(self.base.allocator, module, decl);
+ break :blk &debug_buffers_buf;
+ } else null;
defer {
- if (debug_buffers) |*dbg| {
+ if (debug_buffers) |dbg| {
dbg.dbg_line_buffer.deinit();
dbg.dbg_info_buffer.deinit();
var it = dbg.dbg_info_type_relocs.valueIterator();
@@ -1156,11 +1176,8 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
}
}
- const res = if (debug_buffers) |*dbg|
- try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
- .ty = decl.ty,
- .val = decl.val,
- }, &code_buffer, .{
+ const res = if (debug_buffers) |dbg|
+ try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{
.dwarf = .{
.dbg_line = &dbg.dbg_line_buffer,
.dbg_info = &dbg.dbg_info_buffer,
@@ -1168,14 +1185,9 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
},
})
else
- try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
- .ty = decl.ty,
- .val = decl.val,
- }, &code_buffer, .none);
-
- const code = switch (res) {
- .externally_managed => |x| x,
- .appended => code_buffer.items,
+ try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none);
+ switch (res) {
+ .appended => {},
.fail => |em| {
// Clear any PIE fixups for this decl.
self.pie_fixups.shrinkRetainingCapacity(0);
@@ -1185,76 +1197,8 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
try module.failed_decls.put(module.gpa, decl, em);
return;
},
- };
-
- const required_alignment = decl.ty.abiAlignment(self.base.options.target);
- assert(decl.link.macho.local_sym_index != 0); // Caller forgot to call allocateDeclIndexes()
- const symbol = &self.locals.items[decl.link.macho.local_sym_index];
-
- if (decl.link.macho.size != 0) {
- const capacity = decl.link.macho.capacity(self.*);
- const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, symbol.n_value, required_alignment);
- if (need_realloc) {
- const vaddr = try self.growTextBlock(&decl.link.macho, code.len, required_alignment);
-
- log.debug("growing {s} and moving from 0x{x} to 0x{x}", .{ decl.name, symbol.n_value, vaddr });
-
- if (vaddr != symbol.n_value) {
- log.debug(" (writing new offset table entry)", .{});
- self.offset_table.items[decl.link.macho.offset_table_index] = .{
- .kind = .Local,
- .symbol = decl.link.macho.local_sym_index,
- .index = decl.link.macho.offset_table_index,
- };
- try self.writeOffsetTableEntry(decl.link.macho.offset_table_index);
- }
-
- symbol.n_value = vaddr;
- } else if (code.len < decl.link.macho.size) {
- self.shrinkTextBlock(&decl.link.macho, code.len);
- }
- decl.link.macho.size = code.len;
-
- const new_name = try std.fmt.allocPrint(self.base.allocator, "_{s}", .{mem.spanZ(decl.name)});
- defer self.base.allocator.free(new_name);
-
- symbol.n_strx = try self.updateString(symbol.n_strx, new_name);
- symbol.n_type = macho.N_SECT;
- symbol.n_sect = @intCast(u8, self.text_section_index.?) + 1;
- symbol.n_desc = 0;
-
- try self.writeLocalSymbol(decl.link.macho.local_sym_index);
- if (self.d_sym) |*ds|
- try ds.writeLocalSymbol(decl.link.macho.local_sym_index);
- } else {
- const decl_name = try std.fmt.allocPrint(self.base.allocator, "_{s}", .{mem.spanZ(decl.name)});
- defer self.base.allocator.free(decl_name);
-
- const name_str_index = try self.makeString(decl_name);
- const addr = try self.allocateTextBlock(&decl.link.macho, code.len, required_alignment);
-
- log.debug("allocated text block for {s} at 0x{x}", .{ decl_name, addr });
-
- errdefer self.freeTextBlock(&decl.link.macho);
-
- symbol.* = .{
- .n_strx = name_str_index,
- .n_type = macho.N_SECT,
- .n_sect = @intCast(u8, self.text_section_index.?) + 1,
- .n_desc = 0,
- .n_value = addr,
- };
- self.offset_table.items[decl.link.macho.offset_table_index] = .{
- .kind = .Local,
- .symbol = decl.link.macho.local_sym_index,
- .index = decl.link.macho.offset_table_index,
- };
-
- try self.writeLocalSymbol(decl.link.macho.local_sym_index);
- if (self.d_sym) |*ds|
- try ds.writeLocalSymbol(decl.link.macho.local_sym_index);
- try self.writeOffsetTableEntry(decl.link.macho.offset_table_index);
}
+ const symbol = try self.placeDecl(decl, code_buffer.items.len);
// Calculate displacements to target addr (if any).
while (self.pie_fixups.popOrNull()) |fixup| {
@@ -1271,7 +1215,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
// TODO optimize instruction based on jump length (use ldr(literal) + nop if possible).
{
const inst = code_buffer.items[fixup.offset..][0..4];
- var parsed = mem.bytesAsValue(meta.TagPayload(
+ const parsed = mem.bytesAsValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.pc_relative_address,
), inst);
@@ -1283,7 +1227,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
}
{
const inst = code_buffer.items[fixup.offset + 4 ..][0..4];
- var parsed = mem.bytesAsValue(meta.TagPayload(
+ const parsed = mem.bytesAsValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.load_store_register,
), inst);
@@ -1306,13 +1250,13 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
.x86_64 => {
assert(stub_addr >= text_addr + fixup.len);
const displacement = try math.cast(u32, stub_addr - text_addr - fixup.len);
- var placeholder = code_buffer.items[fixup.start + fixup.len - @sizeOf(u32) ..][0..@sizeOf(u32)];
+ const placeholder = code_buffer.items[fixup.start + fixup.len - @sizeOf(u32) ..][0..@sizeOf(u32)];
mem.writeIntSliceLittle(u32, placeholder, displacement);
},
.aarch64 => {
assert(stub_addr >= text_addr);
const displacement = try math.cast(i28, stub_addr - text_addr);
- var placeholder = code_buffer.items[fixup.start..][0..fixup.len];
+ const placeholder = code_buffer.items[fixup.start..][0..fixup.len];
mem.writeIntSliceLittle(u32, placeholder, aarch64.Instruction.bl(displacement).toU32());
},
else => unreachable, // unsupported target architecture
@@ -1328,12 +1272,9 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
}
self.stub_fixups.shrinkRetainingCapacity(0);
- const text_section = text_segment.sections.items[self.text_section_index.?];
- const section_offset = symbol.n_value - text_section.addr;
- const file_offset = text_section.offset + section_offset;
- try self.base.file.?.pwriteAll(code, file_offset);
+ try self.writeCode(symbol, code_buffer.items);
- if (debug_buffers) |*db| {
+ if (debug_buffers) |db| {
try self.d_sym.?.commitDeclDebugInfo(
self.base.allocator,
module,
@@ -1343,11 +1284,165 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
);
}
- // Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
+ // Since we updated the vaddr and the size, each corresponding export symbol also
+ // needs to be updated.
const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
try self.updateDeclExports(module, decl, decl_exports);
}
+pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
+ if (build_options.skip_non_native and builtin.object_format != .macho) {
+ @panic("Attempted to compile for object format that was disabled by build configuration");
+ }
+ if (build_options.have_llvm) {
+ if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl);
+ }
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ if (decl.val.tag() == .extern_fn) {
+ return; // TODO Should we do more when front-end analyzed extern decl?
+ }
+
+ var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+ defer code_buffer.deinit();
+
+ var debug_buffers_buf: DebugSymbols.DeclDebugBuffers = undefined;
+ const debug_buffers = if (self.d_sym) |*ds| blk: {
+ debug_buffers_buf = try ds.initDeclDebugBuffers(self.base.allocator, module, decl);
+ break :blk &debug_buffers_buf;
+ } else null;
+ defer {
+ if (debug_buffers) |dbg| {
+ dbg.dbg_line_buffer.deinit();
+ dbg.dbg_info_buffer.deinit();
+ var it = dbg.dbg_info_type_relocs.valueIterator();
+ while (it.next()) |value| {
+ value.relocs.deinit(self.base.allocator);
+ }
+ dbg.dbg_info_type_relocs.deinit(self.base.allocator);
+ }
+ }
+
+ const res = if (debug_buffers) |dbg|
+ try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+ .ty = decl.ty,
+ .val = decl.val,
+ }, &code_buffer, .{
+ .dwarf = .{
+ .dbg_line = &dbg.dbg_line_buffer,
+ .dbg_info = &dbg.dbg_info_buffer,
+ .dbg_info_type_relocs = &dbg.dbg_info_type_relocs,
+ },
+ })
+ else
+ try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+ .ty = decl.ty,
+ .val = decl.val,
+ }, &code_buffer, .none);
+
+ const code = switch (res) {
+ .externally_managed => |x| x,
+ .appended => code_buffer.items,
+ .fail => |em| {
+ decl.analysis = .codegen_failure;
+ try module.failed_decls.put(module.gpa, decl, em);
+ return;
+ },
+ };
+ const symbol = try self.placeDecl(decl, code.len);
+ assert(self.pie_fixups.items.len == 0);
+ assert(self.stub_fixups.items.len == 0);
+
+ try self.writeCode(symbol, code);
+
+ // Since we updated the vaddr and the size, each corresponding export symbol also
+ // needs to be updated.
+ const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
+ try self.updateDeclExports(module, decl, decl_exports);
+}
+
+fn placeDecl(self: *MachO, decl: *Module.Decl, code_len: usize) !*macho.nlist_64 {
+ const required_alignment = decl.ty.abiAlignment(self.base.options.target);
+ assert(decl.link.macho.local_sym_index != 0); // Caller forgot to call allocateDeclIndexes()
+ const symbol = &self.locals.items[decl.link.macho.local_sym_index];
+
+ if (decl.link.macho.size != 0) {
+ const capacity = decl.link.macho.capacity(self.*);
+ const need_realloc = code_len > capacity or !mem.isAlignedGeneric(u64, symbol.n_value, required_alignment);
+ if (need_realloc) {
+ const vaddr = try self.growTextBlock(&decl.link.macho, code_len, required_alignment);
+
+ log.debug("growing {s} and moving from 0x{x} to 0x{x}", .{ decl.name, symbol.n_value, vaddr });
+
+ if (vaddr != symbol.n_value) {
+ log.debug(" (writing new offset table entry)", .{});
+ self.offset_table.items[decl.link.macho.offset_table_index] = .{
+ .kind = .Local,
+ .symbol = decl.link.macho.local_sym_index,
+ .index = decl.link.macho.offset_table_index,
+ };
+ try self.writeOffsetTableEntry(decl.link.macho.offset_table_index);
+ }
+
+ symbol.n_value = vaddr;
+ } else if (code_len < decl.link.macho.size) {
+ self.shrinkTextBlock(&decl.link.macho, code_len);
+ }
+ decl.link.macho.size = code_len;
+
+ const new_name = try std.fmt.allocPrint(self.base.allocator, "_{s}", .{mem.spanZ(decl.name)});
+ defer self.base.allocator.free(new_name);
+
+ symbol.n_strx = try self.updateString(symbol.n_strx, new_name);
+ symbol.n_type = macho.N_SECT;
+ symbol.n_sect = @intCast(u8, self.text_section_index.?) + 1;
+ symbol.n_desc = 0;
+
+ try self.writeLocalSymbol(decl.link.macho.local_sym_index);
+ if (self.d_sym) |*ds|
+ try ds.writeLocalSymbol(decl.link.macho.local_sym_index);
+ } else {
+ const decl_name = try std.fmt.allocPrint(self.base.allocator, "_{s}", .{mem.spanZ(decl.name)});
+ defer self.base.allocator.free(decl_name);
+
+ const name_str_index = try self.makeString(decl_name);
+ const addr = try self.allocateTextBlock(&decl.link.macho, code_len, required_alignment);
+
+ log.debug("allocated text block for {s} at 0x{x}", .{ decl_name, addr });
+
+ errdefer self.freeTextBlock(&decl.link.macho);
+
+ symbol.* = .{
+ .n_strx = name_str_index,
+ .n_type = macho.N_SECT,
+ .n_sect = @intCast(u8, self.text_section_index.?) + 1,
+ .n_desc = 0,
+ .n_value = addr,
+ };
+ self.offset_table.items[decl.link.macho.offset_table_index] = .{
+ .kind = .Local,
+ .symbol = decl.link.macho.local_sym_index,
+ .index = decl.link.macho.offset_table_index,
+ };
+
+ try self.writeLocalSymbol(decl.link.macho.local_sym_index);
+ if (self.d_sym) |*ds|
+ try ds.writeLocalSymbol(decl.link.macho.local_sym_index);
+ try self.writeOffsetTableEntry(decl.link.macho.offset_table_index);
+ }
+
+ return symbol;
+}
+
+fn writeCode(self: *MachO, symbol: *macho.nlist_64, code: []const u8) !void {
+ const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment;
+ const text_section = text_segment.sections.items[self.text_section_index.?];
+ const section_offset = symbol.n_value - text_section.addr;
+ const file_offset = text_section.offset + section_offset;
+ try self.base.file.?.pwriteAll(code, file_offset);
+}
+
pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void {
if (self.d_sym) |*ds| {
try ds.updateDeclLineNumber(module, decl);
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index 80a92f9cdb..135b59f82b 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -2,18 +2,21 @@
//! would be to add incremental linking in a similar way as ELF does.
const Plan9 = @This();
-
-const std = @import("std");
const link = @import("../link.zig");
const Module = @import("../Module.zig");
const Compilation = @import("../Compilation.zig");
const aout = @import("Plan9/aout.zig");
const codegen = @import("../codegen.zig");
const trace = @import("../tracy.zig").trace;
-const mem = std.mem;
const File = link.File;
-const Allocator = std.mem.Allocator;
+const build_options = @import("build_options");
+const Air = @import("../Air.zig");
+const Liveness = @import("../Liveness.zig");
+const std = @import("std");
+const builtin = @import("builtin");
+const mem = std.mem;
+const Allocator = std.mem.Allocator;
const log = std.log.scoped(.link);
const assert = std.debug.assert;
@@ -22,20 +25,22 @@ sixtyfour_bit: bool,
error_flags: File.ErrorFlags = File.ErrorFlags{},
bases: Bases,
-decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, void) = .{},
-/// is just casted down when 32 bit
+/// A symbol's value is just casted down when compiling
+/// for a 32 bit target.
syms: std.ArrayListUnmanaged(aout.Sym) = .{},
-text_buf: std.ArrayListUnmanaged(u8) = .{},
-data_buf: std.ArrayListUnmanaged(u8) = .{},
+
+fn_decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, []const u8) = .{},
+data_decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, []const u8) = .{},
hdr: aout.ExecHdr = undefined,
-entry_decl: ?*Module.Decl = null,
+entry_val: ?u64 = null,
+
+got_len: u64 = 0,
-got: std.ArrayListUnmanaged(u64) = .{},
const Bases = struct {
text: u64,
- /// the addr of the got
+ /// the Global Offset Table starts at the beginning of the data section
data: u64,
};
@@ -46,14 +51,6 @@ fn getAddr(self: Plan9, addr: u64, t: aout.Sym.Type) u64 {
else => unreachable,
};
}
-/// opposite of getAddr
-fn takeAddr(self: Plan9, addr: u64, t: aout.Sym.Type) u64 {
- return addr - switch (t) {
- .T, .t, .l, .L => self.bases.text,
- .D, .d, .B, .b => self.bases.data,
- else => unreachable,
- };
-}
fn getSymAddr(self: Plan9, s: aout.Sym) u64 {
return self.getAddr(s.value, s.type);
@@ -120,9 +117,84 @@ pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Plan9 {
return self;
}
+pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+ if (build_options.skip_non_native and builtin.object_format != .plan9) {
+ @panic("Attempted to compile for object format that was disabled by build configuration");
+ }
+
+ const decl = func.owner_decl;
+ log.debug("codegen decl {*} ({s})", .{ decl, decl.name });
+
+ var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+ defer code_buffer.deinit();
+ const res = try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ .none = .{} });
+ const code = switch (res) {
+ .appended => code_buffer.toOwnedSlice(),
+ .fail => |em| {
+ decl.analysis = .codegen_failure;
+ try module.failed_decls.put(module.gpa, decl, em);
+ return;
+ },
+ };
+ try self.fn_decl_table.put(self.base.allocator, decl, code);
+ return self.updateFinish(decl);
+}
+
pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void {
- _ = module;
- _ = try self.decl_table.getOrPut(self.base.allocator, decl);
+ if (decl.val.tag() == .extern_fn) {
+ return; // TODO Should we do more when front-end analyzed extern decl?
+ }
+ if (decl.val.castTag(.variable)) |payload| {
+ const variable = payload.data;
+ if (variable.is_extern) {
+ return; // TODO Should we do more when front-end analyzed extern decl?
+ }
+ }
+
+ log.debug("codegen decl {*} ({s})", .{ decl, decl.name });
+
+ var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+ defer code_buffer.deinit();
+ const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
+ const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+ .ty = decl.ty,
+ .val = decl_val,
+ }, &code_buffer, .{ .none = .{} });
+ const code = switch (res) {
+ .externally_managed => |x| x,
+ .appended => code_buffer.items,
+ .fail => |em| {
+ decl.analysis = .codegen_failure;
+ try module.failed_decls.put(module.gpa, decl, em);
+ return;
+ },
+ };
+ var duped_code = try std.mem.dupe(self.base.allocator, u8, code);
+ errdefer self.base.allocator.free(duped_code);
+ try self.data_decl_table.put(self.base.allocator, decl, duped_code);
+ return self.updateFinish(decl);
+}
+/// called at the end of update{Decl,Func}
+fn updateFinish(self: *Plan9, decl: *Module.Decl) !void {
+ const is_fn = (decl.ty.zigTypeTag() == .Fn);
+ log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name });
+ const sym_t: aout.Sym.Type = if (is_fn) .t else .d;
+ // write the internal linker metadata
+ decl.link.plan9.type = sym_t;
+ // write the symbol
+ // we already have the got index because that got allocated in allocateDeclIndexes
+ const sym: aout.Sym = .{
+ .value = undefined, // the value of stuff gets filled in in flushModule
+ .type = decl.link.plan9.type,
+ .name = mem.span(decl.name),
+ };
+
+ if (decl.link.plan9.sym_index) |s| {
+ self.syms.items[s] = sym;
+ } else {
+ try self.syms.append(self.base.allocator, sym);
+ decl.link.plan9.sym_index = self.syms.items.len - 1;
+ }
}
pub fn flush(self: *Plan9, comp: *Compilation) !void {
@@ -138,6 +210,10 @@ pub fn flush(self: *Plan9, comp: *Compilation) !void {
}
pub fn flushModule(self: *Plan9, comp: *Compilation) !void {
+ if (build_options.skip_non_native and builtin.object_format != .plan9) {
+ @panic("Attempted to compile for object format that was disabled by build configuration");
+ }
+
_ = comp;
const tracy = trace(@src());
defer tracy.end();
@@ -146,160 +222,147 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void {
defer assert(self.hdr.entry != 0x0);
- const module = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented;
+ const mod = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented;
- self.text_buf.items.len = 0;
- self.data_buf.items.len = 0;
- // ensure space to write the got later
- assert(self.got.items.len == self.decl_table.count());
- try self.data_buf.appendNTimes(self.base.allocator, 0x69, self.got.items.len * if (!self.sixtyfour_bit) @as(u32, 4) else 8);
- // temporary buffer
- var code_buffer = std.ArrayList(u8).init(self.base.allocator);
- defer code_buffer.deinit();
- {
- for (self.decl_table.keys()) |decl| {
- if (!decl.has_tv) continue;
- const is_fn = (decl.ty.zigTypeTag() == .Fn);
-
- log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name });
- decl.link.plan9 = if (is_fn) .{
- .offset = self.getAddr(self.text_buf.items.len, .t),
- .type = .t,
- .sym_index = decl.link.plan9.sym_index,
- .got_index = decl.link.plan9.got_index,
- } else .{
- .offset = self.getAddr(self.data_buf.items.len, .d),
- .type = .d,
- .sym_index = decl.link.plan9.sym_index,
- .got_index = decl.link.plan9.got_index,
- };
- self.got.items[decl.link.plan9.got_index.?] = decl.link.plan9.offset.?;
- if (decl.link.plan9.sym_index) |s| {
- self.syms.items[s] = .{
- .value = decl.link.plan9.offset.?,
- .type = decl.link.plan9.type,
- .name = mem.span(decl.name),
- };
- } else {
- try self.syms.append(self.base.allocator, .{
- .value = decl.link.plan9.offset.?,
- .type = decl.link.plan9.type,
- .name = mem.span(decl.name),
- });
- decl.link.plan9.sym_index = self.syms.items.len - 1;
- }
+ assert(self.got_len == self.fn_decl_table.count() + self.data_decl_table.count());
+ const got_size = self.got_len * if (!self.sixtyfour_bit) @as(u32, 4) else 8;
+ var got_table = try self.base.allocator.alloc(u8, got_size);
+ defer self.base.allocator.free(got_table);
- if (module.decl_exports.get(decl)) |exports| {
- for (exports) |exp| {
- // plan9 does not support custom sections
- if (exp.options.section) |section_name| {
- if (!mem.eql(u8, section_name, ".text") or !mem.eql(u8, section_name, ".data")) {
- try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "plan9 does not support extra sections", .{}));
- break;
- }
- }
- if (std.mem.eql(u8, exp.options.name, "_start")) {
- std.debug.assert(decl.link.plan9.type == .t); // we tried to link a non-function as the entry
- self.entry_decl = decl;
- }
- if (exp.link.plan9) |i| {
- self.syms.items[i] = .{
- .value = decl.link.plan9.offset.?,
- .type = decl.link.plan9.type.toGlobal(),
- .name = exp.options.name,
- };
- } else {
- try self.syms.append(self.base.allocator, .{
- .value = decl.link.plan9.offset.?,
- .type = decl.link.plan9.type.toGlobal(),
- .name = exp.options.name,
- });
- exp.link.plan9 = self.syms.items.len - 1;
- }
- }
- }
+    // + 3 for the header, the got table, and the symbol table
+ var iovecs = try self.base.allocator.alloc(std.os.iovec_const, self.fn_decl_table.count() + self.data_decl_table.count() + 3);
+ defer self.base.allocator.free(iovecs);
- log.debug("codegen decl {*} ({s})", .{ decl, decl.name });
- const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
- .ty = decl.ty,
- .val = decl.val,
- }, &code_buffer, .{ .none = {} });
- const code = switch (res) {
- .externally_managed => |x| x,
- .appended => code_buffer.items,
- .fail => |em| {
- decl.analysis = .codegen_failure;
- try module.failed_decls.put(module.gpa, decl, em);
- // TODO try to do more decls
- return;
- },
- };
- if (is_fn) {
- try self.text_buf.appendSlice(self.base.allocator, code);
- code_buffer.items.len = 0;
+ const file = self.base.file.?;
+
+ var hdr_buf: [40]u8 = undefined;
+ // account for the fat header
+ const hdr_size = if (self.sixtyfour_bit) @as(usize, 40) else 32;
+ const hdr_slice: []u8 = hdr_buf[0..hdr_size];
+ var foff = hdr_size;
+ iovecs[0] = .{ .iov_base = hdr_slice.ptr, .iov_len = hdr_slice.len };
+ var iovecs_i: u64 = 1;
+ var text_i: u64 = 0;
+ // text
+ {
+ var it = self.fn_decl_table.iterator();
+ while (it.next()) |entry| {
+ const decl = entry.key_ptr.*;
+ const code = entry.value_ptr.*;
+ log.debug("write text decl {*} ({s})", .{ decl, decl.name });
+ foff += code.len;
+ iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len };
+ iovecs_i += 1;
+ const off = self.getAddr(text_i, .t);
+ text_i += code.len;
+ decl.link.plan9.offset = off;
+ if (!self.sixtyfour_bit) {
+ mem.writeIntNative(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off));
+ mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
} else {
- try self.data_buf.appendSlice(self.base.allocator, code);
- code_buffer.items.len = 0;
+ mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
+ }
+ self.syms.items[decl.link.plan9.sym_index.?].value = off;
+ if (mod.decl_exports.get(decl)) |exports| {
+ try self.addDeclExports(mod, decl, exports);
}
}
+ // etext symbol
+ self.syms.items[2].value = self.getAddr(text_i, .t);
}
-
- // write the got
- if (!self.sixtyfour_bit) {
- for (self.got.items) |p, i| {
- mem.writeInt(u32, self.data_buf.items[i * 4 ..][0..4], @intCast(u32, p), self.base.options.target.cpu.arch.endian());
- }
- } else {
- for (self.got.items) |p, i| {
- mem.writeInt(u64, self.data_buf.items[i * 8 ..][0..8], p, self.base.options.target.cpu.arch.endian());
+ // global offset table is in data
+ iovecs[iovecs_i] = .{ .iov_base = got_table.ptr, .iov_len = got_table.len };
+ iovecs_i += 1;
+ // data
+ var data_i: u64 = got_size;
+ {
+ var it = self.data_decl_table.iterator();
+ while (it.next()) |entry| {
+ const decl = entry.key_ptr.*;
+ const code = entry.value_ptr.*;
+ log.debug("write data decl {*} ({s})", .{ decl, decl.name });
+
+ foff += code.len;
+ iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len };
+ iovecs_i += 1;
+ const off = self.getAddr(data_i, .d);
+ data_i += code.len;
+ decl.link.plan9.offset = off;
+ if (!self.sixtyfour_bit) {
+ mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
+ } else {
+ mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
+ }
+ self.syms.items[decl.link.plan9.sym_index.?].value = off;
+ if (mod.decl_exports.get(decl)) |exports| {
+ try self.addDeclExports(mod, decl, exports);
+ }
}
+ // edata symbol
+ self.syms.items[0].value = self.getAddr(data_i, .b);
}
-
- self.hdr.entry = @truncate(u32, self.entry_decl.?.link.plan9.offset.?);
-
- // edata, end, etext
- self.syms.items[0].value = self.getAddr(0x0, .b);
+    // end symbol
self.syms.items[1].value = self.getAddr(0x0, .b);
- self.syms.items[2].value = self.getAddr(self.text_buf.items.len, .t);
-
var sym_buf = std.ArrayList(u8).init(self.base.allocator);
defer sym_buf.deinit();
try self.writeSyms(&sym_buf);
-
+ assert(2 + self.fn_decl_table.count() + self.data_decl_table.count() == iovecs_i); // we didn't write all the decls
+ iovecs[iovecs_i] = .{ .iov_base = sym_buf.items.ptr, .iov_len = sym_buf.items.len };
+ iovecs_i += 1;
// generate the header
self.hdr = .{
.magic = try aout.magicFromArch(self.base.options.target.cpu.arch),
- .text = @intCast(u32, self.text_buf.items.len),
- .data = @intCast(u32, self.data_buf.items.len),
+ .text = @intCast(u32, text_i),
+ .data = @intCast(u32, data_i),
.syms = @intCast(u32, sym_buf.items.len),
.bss = 0,
.pcsz = 0,
.spsz = 0,
- .entry = self.hdr.entry,
+ .entry = @intCast(u32, self.entry_val.?),
};
-
- const file = self.base.file.?;
-
- var hdr_buf = self.hdr.toU8s();
- const hdr_slice: []const u8 = &hdr_buf;
- // account for the fat header
- const hdr_size: u8 = if (!self.sixtyfour_bit) 32 else 40;
+ std.mem.copy(u8, hdr_slice, self.hdr.toU8s()[0..hdr_size]);
// write the fat header for 64 bit entry points
if (self.sixtyfour_bit) {
- mem.writeIntSliceBig(u64, hdr_buf[32..40], self.hdr.entry);
+ mem.writeIntSliceBig(u64, hdr_buf[32..40], self.entry_val.?);
}
// write it all!
- var vectors: [4]std.os.iovec_const = .{
- .{ .iov_base = hdr_slice.ptr, .iov_len = hdr_size },
- .{ .iov_base = self.text_buf.items.ptr, .iov_len = self.text_buf.items.len },
- .{ .iov_base = self.data_buf.items.ptr, .iov_len = self.data_buf.items.len },
- .{ .iov_base = sym_buf.items.ptr, .iov_len = sym_buf.items.len },
- // TODO spsz, pcsz
- };
- try file.pwritevAll(&vectors, 0);
+ try file.pwritevAll(iovecs, 0);
+}
+fn addDeclExports(
+ self: *Plan9,
+ module: *Module,
+ decl: *Module.Decl,
+ exports: []const *Module.Export,
+) !void {
+ for (exports) |exp| {
+ // plan9 does not support custom sections
+ if (exp.options.section) |section_name| {
+ if (!mem.eql(u8, section_name, ".text") or !mem.eql(u8, section_name, ".data")) {
+ try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "plan9 does not support extra sections", .{}));
+ break;
+ }
+ }
+ const sym = .{
+ .value = decl.link.plan9.offset.?,
+ .type = decl.link.plan9.type.toGlobal(),
+ .name = exp.options.name,
+ };
+
+ if (exp.link.plan9) |i| {
+ self.syms.items[i] = sym;
+ } else {
+ try self.syms.append(self.base.allocator, sym);
+ exp.link.plan9 = self.syms.items.len - 1;
+ }
+ }
}
+
pub fn freeDecl(self: *Plan9, decl: *Module.Decl) void {
- assert(self.decl_table.swapRemove(decl));
+ const is_fn = (decl.ty.zigTypeTag() == .Fn);
+ if (is_fn)
+ assert(self.fn_decl_table.swapRemove(decl))
+ else
+ assert(self.data_decl_table.swapRemove(decl));
}
pub fn updateDeclExports(
@@ -315,11 +378,17 @@ pub fn updateDeclExports(
_ = exports;
}
pub fn deinit(self: *Plan9) void {
- self.decl_table.deinit(self.base.allocator);
+ var itf = self.fn_decl_table.iterator();
+ while (itf.next()) |entry| {
+ self.base.allocator.free(entry.value_ptr.*);
+ }
+ self.fn_decl_table.deinit(self.base.allocator);
+ var itd = self.data_decl_table.iterator();
+ while (itd.next()) |entry| {
+ self.base.allocator.free(entry.value_ptr.*);
+ }
+ self.data_decl_table.deinit(self.base.allocator);
self.syms.deinit(self.base.allocator);
- self.text_buf.deinit(self.base.allocator);
- self.data_buf.deinit(self.base.allocator);
- self.got.deinit(self.base.allocator);
}
pub const Export = ?usize;
@@ -366,18 +435,24 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
const writer = buf.writer();
for (self.syms.items) |sym| {
+ log.debug("sym.name: {s}", .{sym.name});
+ log.debug("sym.value: {x}", .{sym.value});
+ if (mem.eql(u8, sym.name, "_start"))
+ self.entry_val = sym.value;
if (!self.sixtyfour_bit) {
try writer.writeIntBig(u32, @intCast(u32, sym.value));
} else {
try writer.writeIntBig(u64, sym.value);
}
try writer.writeByte(@enumToInt(sym.type));
- try writer.writeAll(std.mem.span(sym.name));
+ try writer.writeAll(sym.name);
try writer.writeByte(0);
}
}
pub fn allocateDeclIndexes(self: *Plan9, decl: *Module.Decl) !void {
- try self.got.append(self.base.allocator, 0xdeadbeef);
- decl.link.plan9.got_index = self.got.items.len - 1;
+ if (decl.link.plan9.got_index == null) {
+ self.got_len += 1;
+ decl.link.plan9.got_index = self.got_len - 1;
+ }
}
diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig
index bfae799462..17b656a06c 100644
--- a/src/link/SpirV.zig
+++ b/src/link/SpirV.zig
@@ -36,6 +36,8 @@ const ResultId = codegen.ResultId;
const trace = @import("../tracy.zig").trace;
const build_options = @import("build_options");
const spec = @import("../codegen/spirv/spec.zig");
+const Air = @import("../Air.zig");
+const Liveness = @import("../Liveness.zig");
// TODO: Should this struct be used at all rather than just a hashmap of aux data for every decl?
pub const FnData = struct {
@@ -49,7 +51,12 @@ base: link.File,
/// This linker backend does not try to incrementally link output SPIR-V code.
/// Instead, it tracks all declarations in this table, and iterates over it
/// in the flush function.
-decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, void) = .{},
+decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, DeclGenContext) = .{},
+
+const DeclGenContext = struct {
+ air: Air,
+ liveness: Liveness,
+};
pub fn createEmpty(gpa: *Allocator, options: link.Options) !*SpirV {
const spirv = try gpa.create(SpirV);
@@ -101,7 +108,23 @@ pub fn deinit(self: *SpirV) void {
self.decl_table.deinit(self.base.allocator);
}
+pub fn updateFunc(self: *SpirV, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+ if (build_options.skip_non_native) {
+ @panic("Attempted to compile for architecture that was disabled by build configuration");
+ }
+ _ = module;
+ // Keep track of all decls so we can iterate over them on flush().
+ _ = try self.decl_table.getOrPut(self.base.allocator, func.owner_decl);
+
+ _ = air;
+ _ = liveness;
+ @panic("TODO SPIR-V needs to keep track of Air and Liveness so it can use them later");
+}
+
pub fn updateDecl(self: *SpirV, module: *Module, decl: *Module.Decl) !void {
+ if (build_options.skip_non_native) {
+ @panic("Attempted to compile for architecture that was disabled by build configuration");
+ }
_ = module;
// Keep track of all decls so we can iterate over them on flush().
_ = try self.decl_table.getOrPut(self.base.allocator, decl);
@@ -132,6 +155,10 @@ pub fn flush(self: *SpirV, comp: *Compilation) !void {
}
pub fn flushModule(self: *SpirV, comp: *Compilation) !void {
+ if (build_options.skip_non_native) {
+ @panic("Attempted to compile for architecture that was disabled by build configuration");
+ }
+
const tracy = trace(@src());
defer tracy.end();
@@ -159,10 +186,15 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void {
var decl_gen = codegen.DeclGen.init(&spv);
defer decl_gen.deinit();
- for (self.decl_table.keys()) |decl| {
+ var it = self.decl_table.iterator();
+ while (it.next()) |entry| {
+ const decl = entry.key_ptr.*;
if (!decl.has_tv) continue;
- if (try decl_gen.gen(decl)) |msg| {
+ const air = entry.value_ptr.air;
+ const liveness = entry.value_ptr.liveness;
+
+ if (try decl_gen.gen(decl, air, liveness)) |msg| {
try module.failed_decls.put(module.gpa, decl, msg);
return; // TODO: Attempt to generate more decls?
}
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 15a36a4bcc..f478d2ee47 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -1,6 +1,7 @@
const Wasm = @This();
const std = @import("std");
+const builtin = @import("builtin");
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
@@ -18,10 +19,15 @@ const build_options = @import("build_options");
const wasi_libc = @import("../wasi_libc.zig");
const Cache = @import("../Cache.zig");
const TypedValue = @import("../TypedValue.zig");
+const llvm_backend = @import("../codegen/llvm.zig");
+const Air = @import("../Air.zig");
+const Liveness = @import("../Liveness.zig");
pub const base_tag = link.File.Tag.wasm;
base: link.File,
+/// If this is not null, an object file is created by LLVM and linked with LLD afterwards.
+llvm_object: ?*llvm_backend.Object = null,
/// List of all function Decls to be written to the output file. The index of
/// each Decl in this list at the time of writing the binary is used as the
/// function index. In the event where ext_funcs' size is not 0, the index of
@@ -111,8 +117,13 @@ pub const DeclBlock = struct {
pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*Wasm {
assert(options.object_format == .wasm);
- if (options.use_llvm) return error.LLVM_BackendIsTODO_ForWasm; // TODO
- if (options.use_lld) return error.LLD_LinkingIsTODO_ForWasm; // TODO
+ if (build_options.have_llvm and options.use_llvm) {
+ const self = try createEmpty(allocator, options);
+ errdefer self.base.destroy();
+
+ self.llvm_object = try llvm_backend.Object.create(allocator, sub_path, options);
+ return self;
+ }
// TODO: read the file and keep valid parts instead of truncating
const file = try options.emit.?.directory.handle.createFile(sub_path, .{ .truncate = true, .read = true });
@@ -186,11 +197,60 @@ pub fn allocateDeclIndexes(self: *Wasm, decl: *Module.Decl) !void {
}
}
+pub fn updateFunc(self: *Wasm, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+ if (build_options.skip_non_native and builtin.object_format != .wasm) {
+ @panic("Attempted to compile for object format that was disabled by build configuration");
+ }
+ if (build_options.have_llvm) {
+ if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness);
+ }
+ const decl = func.owner_decl;
+ assert(decl.link.wasm.init); // Must call allocateDeclIndexes()
+
+ const fn_data = &decl.fn_link.wasm;
+ fn_data.functype.items.len = 0;
+ fn_data.code.items.len = 0;
+ fn_data.idx_refs.items.len = 0;
+
+ var context = codegen.Context{
+ .gpa = self.base.allocator,
+ .air = air,
+ .liveness = liveness,
+ .values = .{},
+ .code = fn_data.code.toManaged(self.base.allocator),
+ .func_type_data = fn_data.functype.toManaged(self.base.allocator),
+ .decl = decl,
+ .err_msg = undefined,
+ .locals = .{},
+ .target = self.base.options.target,
+ .global_error_set = self.base.options.module.?.global_error_set,
+ };
+ defer context.deinit();
+
+ // generate the 'code' section for the function declaration
+ const result = context.genFunc() catch |err| switch (err) {
+ error.CodegenFail => {
+ decl.analysis = .codegen_failure;
+ try module.failed_decls.put(module.gpa, decl, context.err_msg);
+ return;
+ },
+ else => |e| return e,
+ };
+ return self.finishUpdateDecl(decl, result, &context);
+}
+
// Generate code for the Decl, storing it in memory to be later written to
// the file on flush().
pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
- std.debug.assert(decl.link.wasm.init); // Must call allocateDeclIndexes()
+ if (build_options.skip_non_native and builtin.object_format != .wasm) {
+ @panic("Attempted to compile for object format that was disabled by build configuration");
+ }
+ if (build_options.have_llvm) {
+ if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl);
+ }
+ assert(decl.link.wasm.init); // Must call allocateDeclIndexes()
+ // TODO don't use this for non-functions
const fn_data = &decl.fn_link.wasm;
fn_data.functype.items.len = 0;
fn_data.code.items.len = 0;
@@ -198,6 +258,8 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
var context = codegen.Context{
.gpa = self.base.allocator,
+ .air = undefined,
+ .liveness = undefined,
.values = .{},
.code = fn_data.code.toManaged(self.base.allocator),
.func_type_data = fn_data.functype.toManaged(self.base.allocator),
@@ -219,14 +281,20 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
else => |e| return e,
};
- const code: []const u8 = switch (result) {
- .appended => @as([]const u8, context.code.items),
- .externally_managed => |payload| payload,
- };
+ return self.finishUpdateDecl(decl, result, &context);
+}
+
+fn finishUpdateDecl(self: *Wasm, decl: *Module.Decl, result: codegen.Result, context: *codegen.Context) !void {
+ const fn_data: *FnData = &decl.fn_link.wasm;
fn_data.code = context.code.toUnmanaged();
fn_data.functype = context.func_type_data.toUnmanaged();
+ const code: []const u8 = switch (result) {
+ .appended => @as([]const u8, fn_data.code.items),
+ .externally_managed => |payload| payload,
+ };
+
const block = &decl.link.wasm;
if (decl.ty.zigTypeTag() == .Fn) {
// as locals are patched afterwards, the offsets of funcidx's are off,
@@ -521,7 +589,7 @@ pub fn flushModule(self: *Wasm, comp: *Compilation) !void {
var data_offset = offset_table_size;
while (cur) |cur_block| : (cur = cur_block.next) {
if (cur_block.size == 0) continue;
- std.debug.assert(cur_block.init);
+ assert(cur_block.init);
const offset = (cur_block.offset_index) * ptr_width;
var buf: [4]u8 = undefined;