author     Jakub Konka <kubkon@jakubkonka.com>    2022-12-09 14:11:28 +0100
committer  GitHub <noreply@github.com>            2022-12-09 14:11:28 +0100
commit     d88eb75a69d27ef50635f9aa1caf2a7177d99ead (patch)
tree       85d25fdddfda28dd2b65b49ac0d91284e043cfca /src/link
parent     71c82393ebba8b40266551781d4b2c09b1fca1dc (diff)
parent     9735953ae2ed04117181cf2cb6cde5cbfb8ca76a (diff)
Merge pull request #13820 from ziglang/dwarf-multiple-files
dwarf: add support for multiple source files
Diffstat (limited to 'src/link')
-rw-r--r--  src/link/Dwarf.zig               643
-rw-r--r--  src/link/Elf.zig                 231
-rw-r--r--  src/link/MachO.zig                13
-rw-r--r--  src/link/MachO/DebugSymbols.zig   94
-rw-r--r--  src/link/Wasm.zig                 15
5 files changed, 554 insertions, 442 deletions
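
For orientation before the diff: the DWARF v4 .debug_line header that this merge starts populating per source file carries two tables, a null-terminated list of include directories followed by a list of file entries, each naming its directory by a 1-based index (index 0 meaning the compilation directory, DW_AT_comp_dir). The Zig sketch below is illustrative only, not code from this commit, and writeDirAndFileLists is a made-up helper name; it mirrors the two loops added to writeDbgLineHeader in src/link/Dwarf.zig further down.

const std = @import("std");

// Illustrative helper (not part of this commit): encodes the two DWARF v4
// .debug_line header tables, one file entry per source file referenced by
// at least one Decl.
fn writeDirAndFileLists(
    buf: *std.ArrayList(u8),
    dirs: []const []const u8,
    files: []const []const u8,
    file_dir_indexes: []const u8,
) !void {
    for (dirs) |dir| {
        try buf.appendSlice(dir);
        try buf.append(0); // null-terminated directory path
    }
    try buf.append(0); // include_directories sentinel

    for (files) |file, i| {
        try buf.appendSlice(file);
        try buf.append(0); // null-terminated file name
        try buf.append(file_dir_indexes[i]); // directory_index, 1-based (one ULEB128 byte here)
        try buf.append(0); // mtime (TODO in the real code)
        try buf.append(0); // file size in bytes (TODO in the real code)
    }
    try buf.append(0); // file_names sentinel
}

test "debug_line directory and file tables" {
    var buf = std.ArrayList(u8).init(std.testing.allocator);
    defer buf.deinit();
    try writeDirAndFileLists(
        &buf,
        &[_][]const u8{"std"},
        &[_][]const u8{ "main.zig", "start.zig" },
        &[_]u8{ 0, 1 },
    );
    try std.testing.expect(buf.items.len > 0);
}

As in the real code, mtime and file size are written as 0 for now; the diff marks both with TODOs.
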
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 730a2562c6..d02f994eb7 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -22,7 +22,7 @@ const Value = @import("../value.zig").Value;
const Type = @import("../type.zig").Type;
allocator: Allocator,
-tag: File.Tag,
+bin_file: *File,
ptr_width: PtrWidth,
target: std.Target,
@@ -44,6 +44,12 @@ abbrev_table_offset: ?u64 = null,
/// Table of debug symbol names.
strtab: std.ArrayListUnmanaged(u8) = .{},
+/// Quick lookup array of all defined source files referenced by at least one Decl.
+/// They will end up in the DWARF debug_line header as two lists:
+/// * []include_directory
+/// * []file_names
+di_files: std.AutoArrayHashMapUnmanaged(*const Module.File, void) = .{},
+
/// List of atoms that are owned directly by the DWARF module.
/// TODO convert links in DebugInfoAtom into indices and make
/// sure every atom is owned by this module.
@@ -887,7 +893,7 @@ const min_nop_size = 2;
/// actual_capacity + (actual_capacity / ideal_factor)
const ideal_factor = 3;
-pub fn init(allocator: Allocator, tag: File.Tag, target: std.Target) Dwarf {
+pub fn init(allocator: Allocator, bin_file: *File, target: std.Target) Dwarf {
const ptr_width: PtrWidth = switch (target.cpu.arch.ptrBitWidth()) {
0...32 => .p32,
33...64 => .p64,
@@ -895,7 +901,7 @@ pub fn init(allocator: Allocator, tag: File.Tag, target: std.Target) Dwarf {
};
return Dwarf{
.allocator = allocator,
- .tag = tag,
+ .bin_file = bin_file,
.ptr_width = ptr_width,
.target = target,
};
@@ -906,6 +912,7 @@ pub fn deinit(self: *Dwarf) void {
self.dbg_line_fn_free_list.deinit(gpa);
self.atom_free_list.deinit(gpa);
self.strtab.deinit(gpa);
+ self.di_files.deinit(gpa);
self.global_abbrev_relocs.deinit(gpa);
for (self.managed_atoms.items) |atom| {
@@ -968,7 +975,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
assert(self.getRelocDbgFileIndex() == dbg_line_buffer.items.len);
// Once we support more than one source file, this will have the ability to be more
// than one possible value.
- const file_index = 1;
+ const file_index = try self.addDIFile(mod, decl_index);
leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), file_index);
// Emit a line for the begin curly with prologue_end=false. The codegen will
@@ -995,7 +1002,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
dbg_info_buffer.items.len += 4; // DW.AT.high_pc, DW.FORM.data4
//
if (fn_ret_has_bits) {
- const atom = getDbgInfoAtom(self.tag, mod, decl_index);
+ const atom = getDbgInfoAtom(self.bin_file.tag, mod, decl_index);
try decl_state.addTypeRelocGlobal(atom, fn_ret_type, @intCast(u32, dbg_info_buffer.items.len));
dbg_info_buffer.items.len += 4; // DW.AT.type, DW.FORM.ref4
}
@@ -1013,7 +1020,6 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
pub fn commitDeclState(
self: *Dwarf,
- file: *File,
module: *Module,
decl_index: Module.Decl.Index,
sym_addr: u64,
@@ -1069,7 +1075,7 @@ pub fn commitDeclState(
// This logic is nearly identical to the logic below in `updateDeclDebugInfo` for
// `TextBlock` and the .debug_info. If you are editing this logic, you
// probably need to edit that logic too.
- const src_fn = switch (self.tag) {
+ const src_fn = switch (self.bin_file.tag) {
.elf => &decl.fn_link.elf,
.macho => &decl.fn_link.macho,
.wasm => &decl.fn_link.wasm.src_fn,
@@ -1090,22 +1096,21 @@ pub fn commitDeclState(
next.prev = src_fn.prev;
src_fn.next = null;
// Populate where it used to be with NOPs.
- switch (self.tag) {
+ switch (self.bin_file.tag) {
.elf => {
- const elf_file = file.cast(File.Elf).?;
+ const elf_file = self.bin_file.cast(File.Elf).?;
const debug_line_sect = &elf_file.sections.items[elf_file.debug_line_section_index.?];
const file_pos = debug_line_sect.sh_offset + src_fn.off;
try pwriteDbgLineNops(elf_file.base.file.?, file_pos, 0, &[0]u8{}, src_fn.len);
},
.macho => {
- const macho_file = file.cast(File.MachO).?;
- const d_sym = &macho_file.d_sym.?;
- const debug_line_sect = &d_sym.sections.items[d_sym.debug_line_section_index.?];
+ const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
+ const debug_line_sect = d_sym.getSectionPtr(d_sym.debug_line_section_index.?);
const file_pos = debug_line_sect.offset + src_fn.off;
try pwriteDbgLineNops(d_sym.file, file_pos, 0, &[0]u8{}, src_fn.len);
},
.wasm => {
- const wasm_file = file.cast(File.Wasm).?;
+ const wasm_file = self.bin_file.cast(File.Wasm).?;
const debug_line = wasm_file.debug_line_atom.?.code;
writeDbgLineNopsBuffered(debug_line.items, src_fn.off, 0, &.{}, src_fn.len);
},
@@ -1132,7 +1137,7 @@ pub fn commitDeclState(
self.dbg_line_fn_first = src_fn;
self.dbg_line_fn_last = src_fn;
- src_fn.off = padToIdeal(self.dbgLineNeededHeaderBytes(module));
+ src_fn.off = padToIdeal(self.dbgLineNeededHeaderBytes(&[0][]u8{}, &[0][]u8{}));
}
const last_src_fn = self.dbg_line_fn_last.?;
@@ -1142,32 +1147,12 @@ pub fn commitDeclState(
// We only have support for one compilation unit so far, so the offsets are directly
// from the .debug_line section.
- switch (self.tag) {
+ switch (self.bin_file.tag) {
.elf => {
- const elf_file = file.cast(File.Elf).?;
- const debug_line_sect = &elf_file.sections.items[elf_file.debug_line_section_index.?];
- if (needed_size != debug_line_sect.sh_size) {
- if (needed_size > elf_file.allocatedSize(debug_line_sect.sh_offset)) {
- const new_offset = elf_file.findFreeSpace(needed_size, 1);
- const existing_size = last_src_fn.off;
- log.debug("moving .debug_line section: {d} bytes from 0x{x} to 0x{x}", .{
- existing_size,
- debug_line_sect.sh_offset,
- new_offset,
- });
- const amt = try elf_file.base.file.?.copyRangeAll(
- debug_line_sect.sh_offset,
- elf_file.base.file.?,
- new_offset,
- existing_size,
- );
- if (amt != existing_size) return error.InputOutput;
- debug_line_sect.sh_offset = new_offset;
- }
- debug_line_sect.sh_size = needed_size;
- elf_file.shdr_table_dirty = true; // TODO look into making only the one section dirty
- elf_file.debug_line_header_dirty = true;
- }
+ const elf_file = self.bin_file.cast(File.Elf).?;
+ const shdr_index = elf_file.debug_line_section_index.?;
+ try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true);
+ const debug_line_sect = elf_file.sections.items[shdr_index];
const file_pos = debug_line_sect.sh_offset + src_fn.off;
try pwriteDbgLineNops(
elf_file.base.file.?,
@@ -1179,31 +1164,11 @@ pub fn commitDeclState(
},
.macho => {
- const macho_file = file.cast(File.MachO).?;
- const d_sym = &macho_file.d_sym.?;
- const debug_line_sect = &d_sym.sections.items[d_sym.debug_line_section_index.?];
- if (needed_size != debug_line_sect.size) {
- if (needed_size > d_sym.allocatedSize(debug_line_sect.offset)) {
- const new_offset = d_sym.findFreeSpace(needed_size, 1);
- const existing_size = last_src_fn.off;
- std.log.scoped(.dsym).debug("moving __debug_line section: {} bytes from 0x{x} to 0x{x}", .{
- existing_size,
- debug_line_sect.offset,
- new_offset,
- });
- const amt = try d_sym.file.copyRangeAll(
- debug_line_sect.offset,
- d_sym.file,
- new_offset,
- existing_size,
- );
- if (amt != existing_size) return error.InputOutput;
- debug_line_sect.offset = @intCast(u32, new_offset);
- }
- debug_line_sect.size = needed_size;
- d_sym.debug_line_header_dirty = true;
- }
- const file_pos = debug_line_sect.offset + src_fn.off;
+ const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
+ const sect_index = d_sym.debug_line_section_index.?;
+ try d_sym.growSection(sect_index, needed_size, true);
+ const sect = d_sym.getSection(sect_index);
+ const file_pos = sect.offset + src_fn.off;
try pwriteDbgLineNops(
d_sym.file,
file_pos,
@@ -1214,7 +1179,7 @@ pub fn commitDeclState(
},
.wasm => {
- const wasm_file = file.cast(File.Wasm).?;
+ const wasm_file = self.bin_file.cast(File.Wasm).?;
const atom = wasm_file.debug_line_atom.?;
const debug_line = &atom.code;
const segment_size = debug_line.items.len;
@@ -1247,7 +1212,7 @@ pub fn commitDeclState(
if (dbg_info_buffer.items.len == 0)
return;
- const atom = getDbgInfoAtom(self.tag, module, decl_index);
+ const atom = getDbgInfoAtom(self.bin_file.tag, module, decl_index);
if (decl_state.abbrev_table.items.len > 0) {
// Now we emit the .debug_info types of the Decl. These will count towards the size of
// the buffer, so we have to do it before computing the offset, and we can't perform the actual
@@ -1274,7 +1239,7 @@ pub fn commitDeclState(
}
log.debug("updateDeclDebugInfoAllocation for '{s}'", .{decl.name});
- try self.updateDeclDebugInfoAllocation(file, atom, @intCast(u32, dbg_info_buffer.items.len));
+ try self.updateDeclDebugInfoAllocation(atom, @intCast(u32, dbg_info_buffer.items.len));
while (decl_state.abbrev_relocs.popOrNull()) |reloc| {
if (reloc.target) |target| {
@@ -1319,10 +1284,9 @@ pub fn commitDeclState(
}
while (decl_state.exprloc_relocs.popOrNull()) |reloc| {
- switch (self.tag) {
+ switch (self.bin_file.tag) {
.macho => {
- const macho_file = file.cast(File.MachO).?;
- const d_sym = &macho_file.d_sym.?;
+ const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
try d_sym.relocs.append(d_sym.allocator, .{
.type = switch (reloc.type) {
.direct_load => .direct_load,
@@ -1339,10 +1303,10 @@ pub fn commitDeclState(
}
log.debug("writeDeclDebugInfo for '{s}", .{decl.name});
- try self.writeDeclDebugInfo(file, atom, dbg_info_buffer.items);
+ try self.writeDeclDebugInfo(atom, dbg_info_buffer.items);
}
-fn updateDeclDebugInfoAllocation(self: *Dwarf, file: *File, atom: *Atom, len: u32) !void {
+fn updateDeclDebugInfoAllocation(self: *Dwarf, atom: *Atom, len: u32) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -1365,22 +1329,21 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, file: *File, atom: *Atom, len: u3
next.prev = atom.prev;
atom.next = null;
// Populate where it used to be with NOPs.
- switch (self.tag) {
+ switch (self.bin_file.tag) {
.elf => {
- const elf_file = file.cast(File.Elf).?;
+ const elf_file = self.bin_file.cast(File.Elf).?;
const debug_info_sect = &elf_file.sections.items[elf_file.debug_info_section_index.?];
const file_pos = debug_info_sect.sh_offset + atom.off;
try pwriteDbgInfoNops(elf_file.base.file.?, file_pos, 0, &[0]u8{}, atom.len, false);
},
.macho => {
- const macho_file = file.cast(File.MachO).?;
- const d_sym = &macho_file.d_sym.?;
- const debug_info_sect = &d_sym.sections.items[d_sym.debug_info_section_index.?];
+ const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
+ const debug_info_sect = d_sym.getSectionPtr(d_sym.debug_info_section_index.?);
const file_pos = debug_info_sect.offset + atom.off;
try pwriteDbgInfoNops(d_sym.file, file_pos, 0, &[0]u8{}, atom.len, false);
},
.wasm => {
- const wasm_file = file.cast(File.Wasm).?;
+ const wasm_file = self.bin_file.cast(File.Wasm).?;
const debug_info = &wasm_file.debug_info_atom.?.code;
try writeDbgInfoNopsToArrayList(gpa, debug_info, atom.off, 0, &.{0}, atom.len, false);
},
@@ -1411,7 +1374,7 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, file: *File, atom: *Atom, len: u3
}
}
-fn writeDeclDebugInfo(self: *Dwarf, file: *File, atom: *Atom, dbg_info_buf: []const u8) !void {
+fn writeDeclDebugInfo(self: *Dwarf, atom: *Atom, dbg_info_buf: []const u8) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -1431,32 +1394,12 @@ fn writeDeclDebugInfo(self: *Dwarf, file: *File, atom: *Atom, dbg_info_buf: []co
// We only have support for one compilation unit so far, so the offsets are directly
// from the .debug_info section.
- switch (self.tag) {
+ switch (self.bin_file.tag) {
.elf => {
- const elf_file = file.cast(File.Elf).?;
- const debug_info_sect = &elf_file.sections.items[elf_file.debug_info_section_index.?];
- if (needed_size != debug_info_sect.sh_size) {
- if (needed_size > elf_file.allocatedSize(debug_info_sect.sh_offset)) {
- const new_offset = elf_file.findFreeSpace(needed_size, 1);
- const existing_size = last_decl.off;
- log.debug("moving .debug_info section: {d} bytes from 0x{x} to 0x{x}", .{
- existing_size,
- debug_info_sect.sh_offset,
- new_offset,
- });
- const amt = try elf_file.base.file.?.copyRangeAll(
- debug_info_sect.sh_offset,
- elf_file.base.file.?,
- new_offset,
- existing_size,
- );
- if (amt != existing_size) return error.InputOutput;
- debug_info_sect.sh_offset = new_offset;
- }
- debug_info_sect.sh_size = needed_size;
- elf_file.shdr_table_dirty = true; // TODO look into making only the one section dirty
- elf_file.debug_info_header_dirty = true;
- }
+ const elf_file = self.bin_file.cast(File.Elf).?;
+ const shdr_index = elf_file.debug_info_section_index.?;
+ try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true);
+ const debug_info_sect = elf_file.sections.items[shdr_index];
const file_pos = debug_info_sect.sh_offset + atom.off;
try pwriteDbgInfoNops(
elf_file.base.file.?,
@@ -1469,31 +1412,11 @@ fn writeDeclDebugInfo(self: *Dwarf, file: *File, atom: *Atom, dbg_info_buf: []co
},
.macho => {
- const macho_file = file.cast(File.MachO).?;
- const d_sym = &macho_file.d_sym.?;
- const debug_info_sect = &d_sym.sections.items[d_sym.debug_info_section_index.?];
- if (needed_size != debug_info_sect.size) {
- if (needed_size > d_sym.allocatedSize(debug_info_sect.offset)) {
- const new_offset = d_sym.findFreeSpace(needed_size, 1);
- const existing_size = last_decl.off;
- std.log.scoped(.dsym).debug("moving __debug_info section: {} bytes from 0x{x} to 0x{x}", .{
- existing_size,
- debug_info_sect.offset,
- new_offset,
- });
- const amt = try d_sym.file.copyRangeAll(
- debug_info_sect.offset,
- d_sym.file,
- new_offset,
- existing_size,
- );
- if (amt != existing_size) return error.InputOutput;
- debug_info_sect.offset = @intCast(u32, new_offset);
- }
- debug_info_sect.size = needed_size;
- d_sym.debug_info_header_dirty = true;
- }
- const file_pos = debug_info_sect.offset + atom.off;
+ const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
+ const sect_index = d_sym.debug_info_section_index.?;
+ try d_sym.growSection(sect_index, needed_size, true);
+ const sect = d_sym.getSection(sect_index);
+ const file_pos = sect.offset + atom.off;
try pwriteDbgInfoNops(
d_sym.file,
file_pos,
@@ -1505,7 +1428,7 @@ fn writeDeclDebugInfo(self: *Dwarf, file: *File, atom: *Atom, dbg_info_buf: []co
},
.wasm => {
- const wasm_file = file.cast(File.Wasm).?;
+ const wasm_file = self.bin_file.cast(File.Wasm).?;
const info_atom = wasm_file.debug_info_atom.?;
const debug_info = &info_atom.code;
const segment_size = debug_info.items.len;
@@ -1535,7 +1458,7 @@ fn writeDeclDebugInfo(self: *Dwarf, file: *File, atom: *Atom, dbg_info_buf: []co
}
}
-pub fn updateDeclLineNumber(self: *Dwarf, file: *File, decl: *const Module.Decl) !void {
+pub fn updateDeclLineNumber(self: *Dwarf, decl: *const Module.Decl) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -1549,22 +1472,21 @@ pub fn updateDeclLineNumber(self: *Dwarf, file: *File, decl: *const Module.Decl)
var data: [4]u8 = undefined;
leb128.writeUnsignedFixed(4, &data, line);
- switch (self.tag) {
+ switch (self.bin_file.tag) {
.elf => {
- const elf_file = file.cast(File.Elf).?;
+ const elf_file = self.bin_file.cast(File.Elf).?;
const shdr = elf_file.sections.items[elf_file.debug_line_section_index.?];
const file_pos = shdr.sh_offset + decl.fn_link.elf.off + self.getRelocDbgLineOff();
try elf_file.base.file.?.pwriteAll(&data, file_pos);
},
.macho => {
- const macho_file = file.cast(File.MachO).?;
- const d_sym = macho_file.d_sym.?;
- const sect = d_sym.sections.items[d_sym.debug_line_section_index.?];
+ const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
+ const sect = d_sym.getSection(d_sym.debug_line_section_index.?);
const file_pos = sect.offset + decl.fn_link.macho.off + self.getRelocDbgLineOff();
try d_sym.file.pwriteAll(&data, file_pos);
},
.wasm => {
- const wasm_file = file.cast(File.Wasm).?;
+ const wasm_file = self.bin_file.cast(File.Wasm).?;
const offset = decl.fn_link.wasm.src_fn.off + self.getRelocDbgLineOff();
const atom = wasm_file.debug_line_atom.?;
mem.copy(u8, atom.code.items[offset..], &data);
@@ -1601,7 +1523,7 @@ pub fn freeDecl(self: *Dwarf, decl: *Module.Decl) void {
// TODO make this logic match freeTextBlock. Maybe abstract the logic out since the same thing
// is desired for both.
const gpa = self.allocator;
- const fn_link = switch (self.tag) {
+ const fn_link = switch (self.bin_file.tag) {
.elf => &decl.fn_link.elf,
.macho => &decl.fn_link.macho,
.wasm => &decl.fn_link.wasm.src_fn,
@@ -1629,7 +1551,7 @@ pub fn freeDecl(self: *Dwarf, decl: *Module.Decl) void {
}
}
-pub fn writeDbgAbbrev(self: *Dwarf, file: *File) !void {
+pub fn writeDbgAbbrev(self: *Dwarf) !void {
// These are LEB encoded but since the values are all less than 127
// we can simply append these bytes.
const abbrev_buf = [_]u8{
@@ -1763,46 +1685,25 @@ pub fn writeDbgAbbrev(self: *Dwarf, file: *File) !void {
self.abbrev_table_offset = abbrev_offset;
const needed_size = abbrev_buf.len;
- switch (self.tag) {
+ switch (self.bin_file.tag) {
.elf => {
- const elf_file = file.cast(File.Elf).?;
- const debug_abbrev_sect = &elf_file.sections.items[elf_file.debug_abbrev_section_index.?];
- const allocated_size = elf_file.allocatedSize(debug_abbrev_sect.sh_offset);
- if (needed_size > allocated_size) {
- debug_abbrev_sect.sh_size = 0; // free the space
- debug_abbrev_sect.sh_offset = elf_file.findFreeSpace(needed_size, 1);
- }
- debug_abbrev_sect.sh_size = needed_size;
- log.debug(".debug_abbrev start=0x{x} end=0x{x}", .{
- debug_abbrev_sect.sh_offset,
- debug_abbrev_sect.sh_offset + needed_size,
- });
-
+ const elf_file = self.bin_file.cast(File.Elf).?;
+ const shdr_index = elf_file.debug_abbrev_section_index.?;
+ try elf_file.growNonAllocSection(shdr_index, needed_size, 1, false);
+ const debug_abbrev_sect = elf_file.sections.items[shdr_index];
const file_pos = debug_abbrev_sect.sh_offset + abbrev_offset;
try elf_file.base.file.?.pwriteAll(&abbrev_buf, file_pos);
},
.macho => {
- const macho_file = file.cast(File.MachO).?;
- const d_sym = &macho_file.d_sym.?;
- const dwarf_segment = d_sym.segments.items[d_sym.dwarf_segment_cmd_index.?];
- const debug_abbrev_sect = &d_sym.sections.items[d_sym.debug_abbrev_section_index.?];
- const allocated_size = d_sym.allocatedSize(debug_abbrev_sect.offset);
- if (needed_size > allocated_size) {
- debug_abbrev_sect.size = 0; // free the space
- const offset = d_sym.findFreeSpace(needed_size, 1);
- debug_abbrev_sect.offset = @intCast(u32, offset);
- debug_abbrev_sect.addr = dwarf_segment.vmaddr + offset - dwarf_segment.fileoff;
- }
- debug_abbrev_sect.size = needed_size;
- log.debug("__debug_abbrev start=0x{x} end=0x{x}", .{
- debug_abbrev_sect.offset,
- debug_abbrev_sect.offset + needed_size,
- });
- const file_pos = debug_abbrev_sect.offset + abbrev_offset;
+ const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
+ const sect_index = d_sym.debug_abbrev_section_index.?;
+ try d_sym.growSection(sect_index, needed_size, false);
+ const sect = d_sym.getSection(sect_index);
+ const file_pos = sect.offset + abbrev_offset;
try d_sym.file.pwriteAll(&abbrev_buf, file_pos);
},
.wasm => {
- const wasm_file = file.cast(File.Wasm).?;
+ const wasm_file = self.bin_file.cast(File.Wasm).?;
const debug_abbrev = &wasm_file.debug_abbrev_atom.?.code;
try debug_abbrev.resize(wasm_file.base.allocator, needed_size);
mem.copy(u8, debug_abbrev.items, &abbrev_buf);
@@ -1816,7 +1717,7 @@ fn dbgInfoHeaderBytes(self: *Dwarf) usize {
return 120;
}
-pub fn writeDbgInfoHeader(self: *Dwarf, file: *File, module: *Module, low_pc: u64, high_pc: u64) !void {
+pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u64) !void {
// If this value is null it means there is an error in the module;
// leave debug_info_header_dirty=true.
const first_dbg_info_off = self.getDebugInfoOff() orelse return;
@@ -1828,7 +1729,7 @@ pub fn writeDbgInfoHeader(self: *Dwarf, file: *File, module: *Module, low_pc: u6
defer di_buf.deinit();
const target_endian = self.target.cpu.arch.endian();
- const init_len_size: usize = if (self.tag == .macho)
+ const init_len_size: usize = if (self.bin_file.tag == .macho)
4
else switch (self.ptr_width) {
.p32 => @as(usize, 4),
@@ -1842,7 +1743,7 @@ pub fn writeDbgInfoHeader(self: *Dwarf, file: *File, module: *Module, low_pc: u6
// +1 for the final 0 that ends the compilation unit children.
const dbg_info_end = self.getDebugInfoEnd().? + 1;
const init_len = dbg_info_end - after_init_len;
- if (self.tag == .macho) {
+ if (self.bin_file.tag == .macho) {
mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, init_len));
} else switch (self.ptr_width) {
.p32 => {
@@ -1855,7 +1756,7 @@ pub fn writeDbgInfoHeader(self: *Dwarf, file: *File, module: *Module, low_pc: u6
}
mem.writeInt(u16, di_buf.addManyAsArrayAssumeCapacity(2), 4, target_endian); // DWARF version
const abbrev_offset = self.abbrev_table_offset.?;
- if (self.tag == .macho) {
+ if (self.bin_file.tag == .macho) {
mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, abbrev_offset));
di_buf.appendAssumeCapacity(8); // address size
} else switch (self.ptr_width) {
@@ -1870,11 +1771,12 @@ pub fn writeDbgInfoHeader(self: *Dwarf, file: *File, module: *Module, low_pc: u6
}
// Write the form for the compile unit, which must match the abbrev table above.
const name_strp = try self.makeString(module.root_pkg.root_src_path);
- const comp_dir_strp = try self.makeString(module.root_pkg.root_src_directory.path orelse ".");
+ const compile_unit_dir = self.getCompDir(module);
+ const comp_dir_strp = try self.makeString(compile_unit_dir);
const producer_strp = try self.makeString(link.producer_string);
di_buf.appendAssumeCapacity(@enumToInt(AbbrevKind.compile_unit));
- if (self.tag == .macho) {
+ if (self.bin_file.tag == .macho) {
mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), 0); // DW.AT.stmt_list, DW.FORM.sec_offset
mem.writeIntLittle(u64, di_buf.addManyAsArrayAssumeCapacity(8), low_pc);
mem.writeIntLittle(u64, di_buf.addManyAsArrayAssumeCapacity(8), high_pc);
@@ -1899,22 +1801,21 @@ pub fn writeDbgInfoHeader(self: *Dwarf, file: *File, module: *Module, low_pc: u6
@panic("TODO: handle .debug_info header exceeding its padding");
}
const jmp_amt = first_dbg_info_off - di_buf.items.len;
- switch (self.tag) {
+ switch (self.bin_file.tag) {
.elf => {
- const elf_file = file.cast(File.Elf).?;
+ const elf_file = self.bin_file.cast(File.Elf).?;
const debug_info_sect = elf_file.sections.items[elf_file.debug_info_section_index.?];
const file_pos = debug_info_sect.sh_offset;
try pwriteDbgInfoNops(elf_file.base.file.?, file_pos, 0, di_buf.items, jmp_amt, false);
},
.macho => {
- const macho_file = file.cast(File.MachO).?;
- const d_sym = &macho_file.d_sym.?;
- const debug_info_sect = d_sym.sections.items[d_sym.debug_info_section_index.?];
+ const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
+ const debug_info_sect = d_sym.getSection(d_sym.debug_info_section_index.?);
const file_pos = debug_info_sect.offset;
try pwriteDbgInfoNops(d_sym.file, file_pos, 0, di_buf.items, jmp_amt, false);
},
.wasm => {
- const wasm_file = file.cast(File.Wasm).?;
+ const wasm_file = self.bin_file.cast(File.Wasm).?;
const debug_info = &wasm_file.debug_info_atom.?.code;
try writeDbgInfoNopsToArrayList(self.allocator, debug_info, 0, 0, di_buf.items, jmp_amt, false);
},
@@ -1922,6 +1823,21 @@ pub fn writeDbgInfoHeader(self: *Dwarf, file: *File, module: *Module, low_pc: u6
}
}
+fn getCompDir(self: Dwarf, module: *Module) []const u8 {
+ // For macOS stack traces, we want to avoid having to parse the compilation unit debug
+ // info. As long as each debug info file has a path independent of the compilation unit
+ // directory (DW_AT_comp_dir), then we never have to look at the compilation unit debug
+ // info. If we provide an absolute path to LLVM here for the compilation unit debug
+ // info, LLVM will emit DWARF info that depends on DW_AT_comp_dir. To avoid this, we
+ // pass "." for the compilation unit directory. This forces each debug file to have a
+ // directory rather than be relative to DW_AT_comp_dir. According to DWARF 5, debug
+ // files will no longer reference DW_AT_comp_dir, for the purpose of being able to
+ // support the common practice of stripping all but the line number sections from an
+ // executable.
+ if (self.bin_file.tag == .macho) return ".";
+ return module.root_pkg.root_src_directory.path orelse ".";
+}
+
fn writeAddrAssumeCapacity(self: *Dwarf, buf: *std.ArrayList(u8), addr: u64) void {
const target_endian = self.target.cpu.arch.endian();
switch (self.ptr_width) {
@@ -2144,9 +2060,9 @@ fn writeDbgInfoNopsToArrayList(
}
}
-pub fn writeDbgAranges(self: *Dwarf, file: *File, addr: u64, size: u64) !void {
+pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void {
const target_endian = self.target.cpu.arch.endian();
- const init_len_size: usize = if (self.tag == .macho)
+ const init_len_size: usize = if (self.bin_file.tag == .macho)
4
else switch (self.ptr_width) {
.p32 => @as(usize, 4),
@@ -2168,7 +2084,7 @@ pub fn writeDbgAranges(self: *Dwarf, file: *File, addr: u64, size: u64) !void {
mem.writeInt(u16, di_buf.addManyAsArrayAssumeCapacity(2), 2, target_endian); // version
// When more than one compilation unit is supported, this will be the offset to it.
// For now it is always at offset 0 in .debug_info.
- if (self.tag == .macho) {
+ if (self.bin_file.tag == .macho) {
mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), 0); // __debug_info offset
} else {
self.writeAddrAssumeCapacity(&di_buf, 0); // .debug_info offset
@@ -2191,7 +2107,7 @@ pub fn writeDbgAranges(self: *Dwarf, file: *File, addr: u64, size: u64) !void {
// Go back and populate the initial length.
const init_len = di_buf.items.len - after_init_len;
- if (self.tag == .macho) {
+ if (self.bin_file.tag == .macho) {
mem.writeIntLittle(u32, di_buf.items[init_len_index..][0..4], @intCast(u32, init_len));
} else switch (self.ptr_width) {
.p32 => {
@@ -2205,46 +2121,26 @@ pub fn writeDbgAranges(self: *Dwarf, file: *File, addr: u64, size: u64) !void {
},
}
- const needed_size = di_buf.items.len;
- switch (self.tag) {
+ const needed_size = @intCast(u32, di_buf.items.len);
+ switch (self.bin_file.tag) {
.elf => {
- const elf_file = file.cast(File.Elf).?;
- const debug_aranges_sect = &elf_file.sections.items[elf_file.debug_aranges_section_index.?];
- const allocated_size = elf_file.allocatedSize(debug_aranges_sect.sh_offset);
- if (needed_size > allocated_size) {
- debug_aranges_sect.sh_size = 0; // free the space
- debug_aranges_sect.sh_offset = elf_file.findFreeSpace(needed_size, 16);
- }
- debug_aranges_sect.sh_size = needed_size;
- log.debug(".debug_aranges start=0x{x} end=0x{x}", .{
- debug_aranges_sect.sh_offset,
- debug_aranges_sect.sh_offset + needed_size,
- });
+ const elf_file = self.bin_file.cast(File.Elf).?;
+ const shdr_index = elf_file.debug_aranges_section_index.?;
+ try elf_file.growNonAllocSection(shdr_index, needed_size, 16, false);
+ const debug_aranges_sect = elf_file.sections.items[shdr_index];
const file_pos = debug_aranges_sect.sh_offset;
try elf_file.base.file.?.pwriteAll(di_buf.items, file_pos);
},
.macho => {
- const macho_file = file.cast(File.MachO).?;
- const d_sym = &macho_file.d_sym.?;
- const dwarf_seg = d_sym.segments.items[d_sym.dwarf_segment_cmd_index.?];
- const debug_aranges_sect = &d_sym.sections.items[d_sym.debug_aranges_section_index.?];
- const allocated_size = d_sym.allocatedSize(debug_aranges_sect.offset);
- if (needed_size > allocated_size) {
- debug_aranges_sect.size = 0; // free the space
- const new_offset = d_sym.findFreeSpace(needed_size, 16);
- debug_aranges_sect.addr = dwarf_seg.vmaddr + new_offset - dwarf_seg.fileoff;
- debug_aranges_sect.offset = @intCast(u32, new_offset);
- }
- debug_aranges_sect.size = needed_size;
- log.debug("__debug_aranges start=0x{x} end=0x{x}", .{
- debug_aranges_sect.offset,
- debug_aranges_sect.offset + needed_size,
- });
- const file_pos = debug_aranges_sect.offset;
+ const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
+ const sect_index = d_sym.debug_aranges_section_index.?;
+ try d_sym.growSection(sect_index, needed_size, false);
+ const sect = d_sym.getSection(sect_index);
+ const file_pos = sect.offset;
try d_sym.file.pwriteAll(di_buf.items, file_pos);
},
.wasm => {
- const wasm_file = file.cast(File.Wasm).?;
+ const wasm_file = self.bin_file.cast(File.Wasm).?;
const debug_ranges = &wasm_file.debug_ranges_atom.?.code;
try debug_ranges.resize(wasm_file.base.allocator, needed_size);
mem.copy(u8, debug_ranges.items, di_buf.items);
@@ -2253,10 +2149,12 @@ pub fn writeDbgAranges(self: *Dwarf, file: *File, addr: u64, size: u64) !void {
}
}
-pub fn writeDbgLineHeader(self: *Dwarf, file: *File, module: *Module) !void {
+pub fn writeDbgLineHeader(self: *Dwarf, module: *Module) !void {
+ const gpa = self.allocator;
+
const ptr_width_bytes: u8 = self.ptrWidthBytes();
const target_endian = self.target.cpu.arch.endian();
- const init_len_size: usize = if (self.tag == .macho)
+ const init_len_size: usize = if (self.bin_file.tag == .macho)
4
else switch (self.ptr_width) {
.p32 => @as(usize, 4),
@@ -2264,29 +2162,37 @@ pub fn writeDbgLineHeader(self: *Dwarf, file: *File, module: *Module) !void {
};
const dbg_line_prg_off = self.getDebugLineProgramOff() orelse return;
- const dbg_line_prg_end = self.getDebugLineProgramEnd().?;
- assert(dbg_line_prg_end != 0);
+ assert(self.getDebugLineProgramEnd().? != 0);
+
+ // Convert all input DI files into a set of include dirs and file names.
+ var arena = std.heap.ArenaAllocator.init(gpa);
+ defer arena.deinit();
+ const paths = try self.genIncludeDirsAndFileNames(arena.allocator(), module);
// The size of this header is variable, depending on the number of directories,
// files, and padding. We have a function to compute the upper bound size, however,
// because it's needed for determining where to put the offset of the first `SrcFn`.
- const needed_bytes = self.dbgLineNeededHeaderBytes(module);
- var di_buf = try std.ArrayList(u8).initCapacity(self.allocator, needed_bytes);
+ const needed_bytes = self.dbgLineNeededHeaderBytes(paths.dirs, paths.files);
+ var di_buf = try std.ArrayList(u8).initCapacity(gpa, needed_bytes);
defer di_buf.deinit();
// initial length - length of the .debug_line contribution for this compilation unit,
// not including the initial length itself.
- const after_init_len = di_buf.items.len + init_len_size;
- const init_len = dbg_line_prg_end - after_init_len;
- if (self.tag == .macho) {
- mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, init_len));
- } else switch (self.ptr_width) {
- .p32 => {
- mem.writeInt(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, init_len), target_endian);
+ // We will backpatch this value later so just remember where we need to write it.
+ const before_init_len = di_buf.items.len;
+
+ switch (self.bin_file.tag) {
+ .macho => {
+ mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, 0));
},
- .p64 => {
- di_buf.appendNTimesAssumeCapacity(0xff, 4);
- mem.writeInt(u64, di_buf.addManyAsArrayAssumeCapacity(8), init_len, target_endian);
+ else => switch (self.ptr_width) {
+ .p32 => {
+ mem.writeInt(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, 0), target_endian);
+ },
+ .p64 => {
+ di_buf.appendNTimesAssumeCapacity(0xff, 4);
+ mem.writeInt(u64, di_buf.addManyAsArrayAssumeCapacity(8), @as(u64, 0), target_endian);
+ },
},
}
@@ -2297,7 +2203,12 @@ pub fn writeDbgLineHeader(self: *Dwarf, file: *File, module: *Module) !void {
// Therefore we rely on the NOP jump at the beginning of the Line Number Program for
// padding rather than this field.
const before_header_len = di_buf.items.len;
- di_buf.items.len += if (self.tag == .macho) @sizeOf(u32) else ptr_width_bytes; // We will come back and write this.
+
+ di_buf.items.len += switch (self.bin_file.tag) { // We will come back and write this.
+ .macho => @sizeOf(u32),
+ else => ptr_width_bytes,
+ };
+
const after_header_len = di_buf.items.len;
const opcode_base = DW.LNS.set_isa + 1;
@@ -2323,52 +2234,133 @@ pub fn writeDbgLineHeader(self: *Dwarf, file: *File, module: *Module) !void {
0, // `DW.LNS.set_prologue_end`
0, // `DW.LNS.set_epilogue_begin`
1, // `DW.LNS.set_isa`
- 0, // include_directories (none except the compilation unit cwd)
- });
- // file_names[0]
- di_buf.appendSliceAssumeCapacity(module.root_pkg.root_src_path); // relative path name
- di_buf.appendSliceAssumeCapacity(&[_]u8{
- 0, // null byte for the relative path name
- 0, // directory_index
- 0, // mtime (TODO supply this)
- 0, // file size bytes (TODO supply this)
- 0, // file_names sentinel
});
+ for (paths.dirs) |dir, i| {
+ log.debug("adding new include dir at {d} of '{s}'", .{ i + 1, dir });
+ di_buf.appendSliceAssumeCapacity(dir);
+ di_buf.appendAssumeCapacity(0);
+ }
+ di_buf.appendAssumeCapacity(0); // include directories sentinel
+
+ for (paths.files) |file, i| {
+ const dir_index = paths.files_dirs_indexes[i];
+ log.debug("adding new file name at {d} of '{s}' referencing directory {d}", .{ i + 1, file, dir_index + 1 });
+ di_buf.appendSliceAssumeCapacity(file);
+ di_buf.appendSliceAssumeCapacity(&[_]u8{
+ 0, // null byte for the relative path name
+ @intCast(u8, dir_index), // directory_index
+ 0, // mtime (TODO supply this)
+ 0, // file size bytes (TODO supply this)
+ });
+ }
+ di_buf.appendAssumeCapacity(0); // file names sentinel
+
const header_len = di_buf.items.len - after_header_len;
- if (self.tag == .macho) {
- mem.writeIntLittle(u32, di_buf.items[before_header_len..][0..4], @intCast(u32, header_len));
- } else switch (self.ptr_width) {
- .p32 => {
- mem.writeInt(u32, di_buf.items[before_header_len..][0..4], @intCast(u32, header_len), target_endian);
+
+ switch (self.bin_file.tag) {
+ .macho => {
+ mem.writeIntLittle(u32, di_buf.items[before_header_len..][0..4], @intCast(u32, header_len));
},
- .p64 => {
- mem.writeInt(u64, di_buf.items[before_header_len..][0..8], header_len, target_endian);
+ else => switch (self.ptr_width) {
+ .p32 => {
+ mem.writeInt(u32, di_buf.items[before_header_len..][0..4], @intCast(u32, header_len), target_endian);
+ },
+ .p64 => {
+ mem.writeInt(u64, di_buf.items[before_header_len..][0..8], header_len, target_endian);
+ },
},
}
- // We use NOPs because consumers empirically do not respect the header length field.
+ assert(needed_bytes == di_buf.items.len);
+
if (di_buf.items.len > dbg_line_prg_off) {
- // Move the first N files to the end to make more padding for the header.
- @panic("TODO: handle .debug_line header exceeding its padding");
+ const needed_with_padding = padToIdeal(needed_bytes);
+ const delta = needed_with_padding - dbg_line_prg_off;
+
+ var src_fn = self.dbg_line_fn_first.?;
+ const last_fn = self.dbg_line_fn_last.?;
+
+ var buffer = try gpa.alloc(u8, last_fn.off + last_fn.len - src_fn.off);
+ defer gpa.free(buffer);
+
+ switch (self.bin_file.tag) {
+ .elf => {
+ const elf_file = self.bin_file.cast(File.Elf).?;
+ const shdr_index = elf_file.debug_line_section_index.?;
+ const needed_size = elf_file.sections.items[shdr_index].sh_size + delta;
+ try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true);
+ const file_pos = elf_file.sections.items[shdr_index].sh_offset + src_fn.off;
+
+ const amt = try elf_file.base.file.?.preadAll(buffer, file_pos);
+ if (amt != buffer.len) return error.InputOutput;
+
+ try elf_file.base.file.?.pwriteAll(buffer, file_pos + delta);
+ },
+ .macho => {
+ const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
+ const sect_index = d_sym.debug_line_section_index.?;
+ const needed_size = @intCast(u32, d_sym.getSection(sect_index).size + delta);
+ try d_sym.growSection(sect_index, needed_size, true);
+ const file_pos = d_sym.getSection(sect_index).offset + src_fn.off;
+
+ const amt = try d_sym.file.preadAll(buffer, file_pos);
+ if (amt != buffer.len) return error.InputOutput;
+
+ try d_sym.file.pwriteAll(buffer, file_pos + delta);
+ },
+ .wasm => {
+ const wasm_file = self.bin_file.cast(File.Wasm).?;
+ const debug_line = &wasm_file.debug_line_atom.?.code;
+ mem.copy(u8, buffer, debug_line.items[src_fn.off..]);
+ try debug_line.resize(self.allocator, debug_line.items.len + delta);
+ mem.copy(u8, debug_line.items[src_fn.off + delta ..], buffer);
+ },
+ else => unreachable,
+ }
+
+ while (true) {
+ src_fn.off += delta;
+
+ if (src_fn.next) |next| {
+ src_fn = next;
+ } else break;
+ }
+ }
+
+ // Backpatch actual length of the debug line program
+ const init_len = self.getDebugLineProgramEnd().? - before_init_len - init_len_size;
+ switch (self.bin_file.tag) {
+ .macho => {
+ mem.writeIntLittle(u32, di_buf.items[before_init_len..][0..4], @intCast(u32, init_len));
+ },
+ else => switch (self.ptr_width) {
+ .p32 => {
+ mem.writeInt(u32, di_buf.items[before_init_len..][0..4], @intCast(u32, init_len), target_endian);
+ },
+ .p64 => {
+ mem.writeInt(u64, di_buf.items[before_init_len + 4 ..][0..8], init_len, target_endian);
+ },
+ },
}
- const jmp_amt = dbg_line_prg_off - di_buf.items.len;
- switch (self.tag) {
+
+ // We use NOPs because consumers empirically do not respect the header length field.
+ const jmp_amt = self.getDebugLineProgramOff().? - di_buf.items.len;
+ switch (self.bin_file.tag) {
.elf => {
- const elf_file = file.cast(File.Elf).?;
+ const elf_file = self.bin_file.cast(File.Elf).?;
const debug_line_sect = elf_file.sections.items[elf_file.debug_line_section_index.?];
const file_pos = debug_line_sect.sh_offset;
try pwriteDbgLineNops(elf_file.base.file.?, file_pos, 0, di_buf.items, jmp_amt);
},
.macho => {
- const macho_file = file.cast(File.MachO).?;
- const d_sym = &macho_file.d_sym.?;
- const debug_line_sect = d_sym.sections.items[d_sym.debug_line_section_index.?];
+ const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
+ const debug_line_sect = d_sym.getSection(d_sym.debug_line_section_index.?);
const file_pos = debug_line_sect.offset;
try pwriteDbgLineNops(d_sym.file, file_pos, 0, di_buf.items, jmp_amt);
},
.wasm => {
- const wasm_file = file.cast(File.Wasm).?;
+ const wasm_file = self.bin_file.cast(File.Wasm).?;
const debug_line = wasm_file.debug_line_atom.?.code;
writeDbgLineNopsBuffered(debug_line.items, 0, 0, di_buf.items, jmp_amt);
},
@@ -2404,19 +2396,32 @@ fn ptrWidthBytes(self: Dwarf) u8 {
};
}
-fn dbgLineNeededHeaderBytes(self: Dwarf, module: *Module) u32 {
- _ = self;
- const directory_entry_format_count = 1;
- const file_name_entry_format_count = 1;
- const directory_count = 1;
- const file_name_count = 1;
- const root_src_dir_path_len = if (module.root_pkg.root_src_directory.path) |p| p.len else 1; // "."
- return @intCast(u32, 53 + directory_entry_format_count * 2 + file_name_entry_format_count * 2 +
- directory_count * 8 + file_name_count * 8 +
- // These are encoded as DW.FORM.string rather than DW.FORM.strp as we would like
- // because of a workaround for readelf and gdb failing to understand DWARFv5 correctly.
- root_src_dir_path_len +
- module.root_pkg.root_src_path.len);
+fn dbgLineNeededHeaderBytes(self: Dwarf, dirs: []const []const u8, files: []const []const u8) u32 {
+ var size = switch (self.bin_file.tag) { // length field
+ .macho => @sizeOf(u32),
+ else => switch (self.ptr_width) {
+ .p32 => @as(usize, @sizeOf(u32)),
+ .p64 => @sizeOf(u32) + @sizeOf(u64),
+ },
+ };
+ size += @sizeOf(u16); // version field
+ size += switch (self.bin_file.tag) { // offset to end-of-header
+ .macho => @sizeOf(u32),
+ else => self.ptrWidthBytes(),
+ };
+ size += 18; // opcodes
+
+ for (dirs) |dir| { // include dirs
+ size += dir.len + 1;
+ }
+ size += 1; // include dirs sentinel
+
+ for (files) |file| { // file names
+ size += file.len + 1 + 1 + 1 + 1;
+ }
+ size += 1; // file names sentinel
+
+ return @intCast(u32, size);
}
/// The reloc offset for the line offset of a function from the previous function's line.
@@ -2448,7 +2453,7 @@ fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
std.math.maxInt(@TypeOf(actual_size));
}
-pub fn flushModule(self: *Dwarf, file: *File, module: *Module) !void {
+pub fn flushModule(self: *Dwarf, module: *Module) !void {
if (self.global_abbrev_relocs.items.len > 0) {
const gpa = self.allocator;
var arena_alloc = std.heap.ArenaAllocator.init(gpa);
@@ -2479,21 +2484,20 @@ pub fn flushModule(self: *Dwarf, file: *File, module: *Module) !void {
try self.managed_atoms.append(gpa, atom);
log.debug("updateDeclDebugInfoAllocation in flushModule", .{});
- try self.updateDeclDebugInfoAllocation(file, atom, @intCast(u32, dbg_info_buffer.items.len));
+ try self.updateDeclDebugInfoAllocation(atom, @intCast(u32, dbg_info_buffer.items.len));
log.debug("writeDeclDebugInfo in flushModule", .{});
- try self.writeDeclDebugInfo(file, atom, dbg_info_buffer.items);
+ try self.writeDeclDebugInfo(atom, dbg_info_buffer.items);
const file_pos = blk: {
- switch (self.tag) {
+ switch (self.bin_file.tag) {
.elf => {
- const elf_file = file.cast(File.Elf).?;
+ const elf_file = self.bin_file.cast(File.Elf).?;
const debug_info_sect = &elf_file.sections.items[elf_file.debug_info_section_index.?];
break :blk debug_info_sect.sh_offset;
},
.macho => {
- const macho_file = file.cast(File.MachO).?;
- const d_sym = &macho_file.d_sym.?;
- const debug_info_sect = &d_sym.sections.items[d_sym.debug_info_section_index.?];
+ const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
+ const debug_info_sect = d_sym.getSectionPtr(d_sym.debug_info_section_index.?);
break :blk debug_info_sect.offset;
},
// for wasm, the offset is always 0 as we write to memory first
@@ -2506,18 +2510,17 @@ pub fn flushModule(self: *Dwarf, file: *File, module: *Module) !void {
mem.writeInt(u32, &buf, atom.off, self.target.cpu.arch.endian());
while (self.global_abbrev_relocs.popOrNull()) |reloc| {
- switch (self.tag) {
+ switch (self.bin_file.tag) {
.elf => {
- const elf_file = file.cast(File.Elf).?;
+ const elf_file = self.bin_file.cast(File.Elf).?;
try elf_file.base.file.?.pwriteAll(&buf, file_pos + reloc.atom.off + reloc.offset);
},
.macho => {
- const macho_file = file.cast(File.MachO).?;
- const d_sym = &macho_file.d_sym.?;
+ const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
try d_sym.file.pwriteAll(&buf, file_pos + reloc.atom.off + reloc.offset);
},
.wasm => {
- const wasm_file = file.cast(File.Wasm).?;
+ const wasm_file = self.bin_file.cast(File.Wasm).?;
const debug_info = wasm_file.debug_info_atom.?.code;
mem.copy(u8, debug_info.items[reloc.atom.off + reloc.offset ..], &buf);
},
@@ -2527,6 +2530,68 @@ pub fn flushModule(self: *Dwarf, file: *File, module: *Module) !void {
}
}
+fn addDIFile(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) !u28 {
+ const decl = mod.declPtr(decl_index);
+ const file_scope = decl.getFileScope();
+ const gop = try self.di_files.getOrPut(self.allocator, file_scope);
+ if (!gop.found_existing) {
+ switch (self.bin_file.tag) {
+ .elf => {
+ const elf_file = self.bin_file.cast(File.Elf).?;
+ elf_file.markDirty(elf_file.debug_line_section_index.?, null);
+ },
+ .macho => {
+ const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
+ d_sym.markDirty(d_sym.debug_line_section_index.?);
+ },
+ .wasm => {},
+ else => unreachable,
+ }
+ }
+ return @intCast(u28, gop.index + 1);
+}
+
+fn genIncludeDirsAndFileNames(self: *Dwarf, arena: Allocator, module: *Module) !struct {
+ dirs: []const []const u8,
+ files: []const []const u8,
+ files_dirs_indexes: []u28,
+} {
+ var dirs = std.StringArrayHashMap(void).init(arena);
+ try dirs.ensureTotalCapacity(self.di_files.count());
+
+ var files = std.ArrayList([]const u8).init(arena);
+ try files.ensureTotalCapacityPrecise(self.di_files.count());
+
+ var files_dir_indexes = std.ArrayList(u28).init(arena);
+ try files_dir_indexes.ensureTotalCapacity(self.di_files.count());
+
+ const comp_dir = self.getCompDir(module);
+
+ for (self.di_files.keys()) |dif| {
+ const full_path = try dif.fullPath(arena);
+ const dir_path = std.fs.path.dirname(full_path) orelse ".";
+ const sub_file_path = std.fs.path.basename(full_path);
+
+ const dir_index: u28 = blk: {
+ const actual_dir_path = if (mem.indexOf(u8, dir_path, comp_dir)) |_| inner: {
+ if (comp_dir.len == dir_path.len) break :blk 0;
+ break :inner dir_path[comp_dir.len + 1 ..];
+ } else dir_path;
+ const dirs_gop = dirs.getOrPutAssumeCapacity(actual_dir_path);
+ break :blk @intCast(u28, dirs_gop.index + 1);
+ };
+
+ files_dir_indexes.appendAssumeCapacity(dir_index);
+ files.appendAssumeCapacity(sub_file_path);
+ }
+
+ return .{
+ .dirs = dirs.keys(),
+ .files = files.items,
+ .files_dirs_indexes = files_dir_indexes.items,
+ };
+}
+
fn addDbgInfoErrorSet(
arena: Allocator,
module: *Module,
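
Before moving on to src/link/Elf.zig, a note on the path handling added above: genIncludeDirsAndFileNames deduplicates directories with a StringArrayHashMap and hands out 1-based indices, reserving index 0 for the compilation directory, which getCompDir pins to "." on macOS so stack traces never have to consult DW_AT_comp_dir. The sketch below is a compressed approximation of that scheme, not code from this commit: splitPath is a hypothetical name, and it uses mem.startsWith where the commit uses mem.indexOf.

const std = @import("std");
const mem = std.mem;

// Hypothetical sketch of the splitting performed by genIncludeDirsAndFileNames:
// every source file becomes a basename plus a 1-based index into a
// deduplicated directory table; a directory equal to the compilation
// directory maps to index 0 and is therefore never stored.
fn splitPath(
    dirs: *std.StringArrayHashMap(void),
    comp_dir: []const u8,
    full_path: []const u8,
) !struct { dir_index: u28, file_name: []const u8 } {
    const dir_path = std.fs.path.dirname(full_path) orelse ".";
    const file_name = std.fs.path.basename(full_path);

    const dir_index: u28 = blk: {
        if (mem.eql(u8, dir_path, comp_dir)) break :blk 0;
        // Store directories nested inside comp_dir relative to it.
        const stored = if (mem.startsWith(u8, dir_path, comp_dir) and dir_path.len > comp_dir.len)
            dir_path[comp_dir.len + 1 ..]
        else
            dir_path;
        const gop = try dirs.getOrPut(stored);
        break :blk @intCast(u28, gop.index + 1);
    };

    return .{ .dir_index = dir_index, .file_name = file_name };
}

A caller would invoke this once per entry in di_files, collecting file names and directory indices to feed dbgLineNeededHeaderBytes and writeDbgLineHeader.
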
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index b7968b3f1c..601b1a3e93 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -312,7 +312,7 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Elf {
};
var dwarf: ?Dwarf = if (!options.strip and options.module != null)
- Dwarf.init(gpa, .elf, options.target)
+ Dwarf.init(gpa, &self.base, options.target)
else
null;
@@ -931,6 +931,104 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
}
+fn growAllocSection(self: *Elf, shdr_index: u16, phdr_index: u16, needed_size: u64) !void {
+ // TODO Also detect virtual address collisions.
+ const shdr = &self.sections.items[shdr_index];
+ const phdr = &self.program_headers.items[phdr_index];
+
+ if (needed_size > self.allocatedSize(shdr.sh_offset)) {
+ // Must move the entire section.
+ const new_offset = self.findFreeSpace(needed_size, self.page_size);
+ const existing_size = if (self.atoms.get(phdr_index)) |last| blk: {
+ const sym = self.local_symbols.items[last.local_sym_index];
+ break :blk (sym.st_value + sym.st_size) - phdr.p_vaddr;
+ } else if (shdr_index == self.got_section_index.?) blk: {
+ break :blk shdr.sh_size;
+ } else 0;
+ shdr.sh_size = 0;
+
+ log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{
+ self.getString(shdr.sh_name),
+ new_offset,
+ new_offset + existing_size,
+ });
+
+ const amt = try self.base.file.?.copyRangeAll(shdr.sh_offset, self.base.file.?, new_offset, existing_size);
+ if (amt != existing_size) return error.InputOutput;
+
+ shdr.sh_offset = new_offset;
+ phdr.p_offset = new_offset;
+ }
+
+ shdr.sh_size = needed_size;
+ phdr.p_memsz = needed_size;
+ phdr.p_filesz = needed_size;
+
+ self.markDirty(shdr_index, phdr_index);
+}
+
+pub fn growNonAllocSection(
+ self: *Elf,
+ shdr_index: u16,
+ needed_size: u64,
+ min_alignment: u32,
+ requires_file_copy: bool,
+) !void {
+ const shdr = &self.sections.items[shdr_index];
+
+ if (needed_size > self.allocatedSize(shdr.sh_offset)) {
+ const existing_size = if (self.symtab_section_index.? == shdr_index) blk: {
+ const sym_size: u64 = switch (self.ptr_width) {
+ .p32 => @sizeOf(elf.Elf32_Sym),
+ .p64 => @sizeOf(elf.Elf64_Sym),
+ };
+ break :blk @as(u64, shdr.sh_info) * sym_size;
+ } else shdr.sh_size;
+ shdr.sh_size = 0;
+ // Move all the symbols to a new file location.
+ const new_offset = self.findFreeSpace(needed_size, min_alignment);
+ log.debug("moving '{s}' from 0x{x} to 0x{x}", .{ self.getString(shdr.sh_name), shdr.sh_offset, new_offset });
+
+ if (requires_file_copy) {
+ const amt = try self.base.file.?.copyRangeAll(
+ shdr.sh_offset,
+ self.base.file.?,
+ new_offset,
+ existing_size,
+ );
+ if (amt != existing_size) return error.InputOutput;
+ }
+
+ shdr.sh_offset = new_offset;
+ }
+
+ shdr.sh_size = needed_size; // anticipating adding the global symbols later
+
+ self.markDirty(shdr_index, null);
+}
+
+pub fn markDirty(self: *Elf, shdr_index: u16, phdr_index: ?u16) void {
+ self.shdr_table_dirty = true; // TODO look into only writing one section
+
+ if (phdr_index) |_| {
+ self.phdr_table_dirty = true; // TODO look into making only the one program header dirty
+ }
+
+ if (self.dwarf) |_| {
+ if (self.debug_info_section_index.? == shdr_index) {
+ self.debug_info_header_dirty = true;
+ } else if (self.debug_line_section_index.? == shdr_index) {
+ self.debug_line_header_dirty = true;
+ } else if (self.debug_abbrev_section_index.? == shdr_index) {
+ self.debug_abbrev_section_dirty = true;
+ } else if (self.debug_str_section_index.? == shdr_index) {
+ self.debug_strtab_dirty = true;
+ } else if (self.debug_aranges_section_index.? == shdr_index) {
+ self.debug_aranges_section_dirty = true;
+ }
+ }
+}
+
pub fn flush(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) link.File.FlushError!void {
if (self.base.options.emit == null) {
if (build_options.have_llvm) {
@@ -972,7 +1070,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
const foreign_endian = target_endian != builtin.cpu.arch.endian();
if (self.dwarf) |*dw| {
- try dw.flushModule(&self.base, module);
+ try dw.flushModule(module);
}
{
@@ -1020,7 +1118,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
if (self.dwarf) |*dw| {
if (self.debug_abbrev_section_dirty) {
- try dw.writeDbgAbbrev(&self.base);
+ try dw.writeDbgAbbrev();
if (!self.shdr_table_dirty) {
// Then it won't get written with the others and we need to do it.
try self.writeSectHeader(self.debug_abbrev_section_index.?);
@@ -1034,7 +1132,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
const text_phdr = &self.program_headers.items[self.phdr_load_re_index.?];
const low_pc = text_phdr.p_vaddr;
const high_pc = text_phdr.p_vaddr + text_phdr.p_memsz;
- try dw.writeDbgInfoHeader(&self.base, module, low_pc, high_pc);
+ try dw.writeDbgInfoHeader(module, low_pc, high_pc);
self.debug_info_header_dirty = false;
}
@@ -1042,7 +1140,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
// Currently only one compilation unit is supported, so the address range is simply
// identical to the main program header virtual address and memory size.
const text_phdr = &self.program_headers.items[self.phdr_load_re_index.?];
- try dw.writeDbgAranges(&self.base, text_phdr.p_vaddr, text_phdr.p_memsz);
+ try dw.writeDbgAranges(text_phdr.p_vaddr, text_phdr.p_memsz);
if (!self.shdr_table_dirty) {
// Then it won't get written with the others and we need to do it.
try self.writeSectHeader(self.debug_aranges_section_index.?);
@@ -1051,7 +1149,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
}
if (self.debug_line_header_dirty) {
- try dw.writeDbgLineHeader(&self.base, module);
+ try dw.writeDbgLineHeader(module);
self.debug_line_header_dirty = false;
}
}
@@ -1103,45 +1201,21 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
}
{
- const shstrtab_sect = &self.sections.items[self.shstrtab_index.?];
- if (self.shstrtab_dirty or self.shstrtab.items.len != shstrtab_sect.sh_size) {
- const allocated_size = self.allocatedSize(shstrtab_sect.sh_offset);
- const needed_size = self.shstrtab.items.len;
-
- if (needed_size > allocated_size) {
- shstrtab_sect.sh_size = 0; // free the space
- shstrtab_sect.sh_offset = self.findFreeSpace(needed_size, 1);
- }
- shstrtab_sect.sh_size = needed_size;
- log.debug("writing shstrtab start=0x{x} end=0x{x}", .{ shstrtab_sect.sh_offset, shstrtab_sect.sh_offset + needed_size });
-
+ const shdr_index = self.shstrtab_index.?;
+ if (self.shstrtab_dirty or self.shstrtab.items.len != self.sections.items[shdr_index].sh_size) {
+ try self.growNonAllocSection(shdr_index, self.shstrtab.items.len, 1, false);
+ const shstrtab_sect = self.sections.items[shdr_index];
try self.base.file.?.pwriteAll(self.shstrtab.items, shstrtab_sect.sh_offset);
- if (!self.shdr_table_dirty) {
- // Then it won't get written with the others and we need to do it.
- try self.writeSectHeader(self.shstrtab_index.?);
- }
self.shstrtab_dirty = false;
}
}
if (self.dwarf) |dwarf| {
- const debug_strtab_sect = &self.sections.items[self.debug_str_section_index.?];
- if (self.debug_strtab_dirty or dwarf.strtab.items.len != debug_strtab_sect.sh_size) {
- const allocated_size = self.allocatedSize(debug_strtab_sect.sh_offset);
- const needed_size = dwarf.strtab.items.len;
-
- if (needed_size > allocated_size) {
- debug_strtab_sect.sh_size = 0; // free the space
- debug_strtab_sect.sh_offset = self.findFreeSpace(needed_size, 1);
- }
- debug_strtab_sect.sh_size = needed_size;
- log.debug("debug_strtab start=0x{x} end=0x{x}", .{ debug_strtab_sect.sh_offset, debug_strtab_sect.sh_offset + needed_size });
-
+ const shdr_index = self.debug_str_section_index.?;
+ if (self.debug_strtab_dirty or dwarf.strtab.items.len != self.sections.items[shdr_index].sh_size) {
+ try self.growNonAllocSection(shdr_index, dwarf.strtab.items.len, 1, false);
+ const debug_strtab_sect = self.sections.items[shdr_index];
try self.base.file.?.pwriteAll(dwarf.strtab.items, debug_strtab_sect.sh_offset);
- if (!self.shdr_table_dirty) {
- // Then it won't get written with the others and we need to do it.
- try self.writeSectHeader(self.debug_str_section_index.?);
- }
self.debug_strtab_dirty = false;
}
}
@@ -2134,27 +2208,10 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
const expand_text_section = block_placement == null or block_placement.?.next == null;
if (expand_text_section) {
- const text_capacity = self.allocatedSize(shdr.sh_offset);
const needed_size = (vaddr + new_block_size) - phdr.p_vaddr;
- if (needed_size > text_capacity) {
- // Must move the entire section.
- const new_offset = self.findFreeSpace(needed_size, self.page_size);
- const text_size = if (self.atoms.get(phdr_index)) |last| blk: {
- const sym = self.local_symbols.items[last.local_sym_index];
- break :blk (sym.st_value + sym.st_size) - phdr.p_vaddr;
- } else 0;
- log.debug("new PT_LOAD file offset 0x{x} to 0x{x}", .{ new_offset, new_offset + text_size });
- const amt = try self.base.file.?.copyRangeAll(shdr.sh_offset, self.base.file.?, new_offset, text_size);
- if (amt != text_size) return error.InputOutput;
- shdr.sh_offset = new_offset;
- phdr.p_offset = new_offset;
- }
+ try self.growAllocSection(shdr_index, phdr_index, needed_size);
_ = try self.atoms.put(self.base.allocator, phdr_index, text_block);
- shdr.sh_size = needed_size;
- phdr.p_memsz = needed_size;
- phdr.p_filesz = needed_size;
-
if (self.dwarf) |_| {
// The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
// range of the compilation unit. When we expand the text section, this range changes,
@@ -2165,9 +2222,6 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
// model each package as a different compilation unit.
self.debug_aranges_section_dirty = true;
}
-
- self.phdr_table_dirty = true; // TODO look into making only the one program header dirty
- self.shdr_table_dirty = true; // TODO look into making only the one section dirty
}
shdr.sh_addralign = math.max(shdr.sh_addralign, alignment);
@@ -2422,7 +2476,6 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
const local_sym = try self.updateDeclCode(decl_index, code, elf.STT_FUNC);
if (decl_state) |*ds| {
try self.dwarf.?.commitDeclState(
- &self.base,
module,
decl_index,
local_sym.st_value,
@@ -2499,7 +2552,6 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v
const local_sym = try self.updateDeclCode(decl_index, code, elf.STT_OBJECT);
if (decl_state) |*ds| {
try self.dwarf.?.commitDeclState(
- &self.base,
module,
decl_index,
local_sym.st_value,
@@ -2692,7 +2744,7 @@ pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl: *const Module.Decl)
if (self.llvm_object) |_| return;
if (self.dwarf) |*dw| {
- try dw.updateDeclLineNumber(&self.base, decl);
+ try dw.updateDeclLineNumber(decl);
}
}
@@ -2749,31 +2801,14 @@ fn writeSectHeader(self: *Elf, index: usize) !void {
}
fn writeOffsetTableEntry(self: *Elf, index: usize) !void {
- const shdr = &self.sections.items[self.got_section_index.?];
- const phdr = &self.program_headers.items[self.phdr_got_index.?];
const entry_size: u16 = self.archPtrWidthBytes();
if (self.offset_table_count_dirty) {
- // TODO Also detect virtual address collisions.
- const allocated_size = self.allocatedSize(shdr.sh_offset);
const needed_size = self.offset_table.items.len * entry_size;
- if (needed_size > allocated_size) {
- // Must move the entire got section.
- const new_offset = self.findFreeSpace(needed_size, self.page_size);
- const amt = try self.base.file.?.copyRangeAll(shdr.sh_offset, self.base.file.?, new_offset, shdr.sh_size);
- if (amt != shdr.sh_size) return error.InputOutput;
- shdr.sh_offset = new_offset;
- phdr.p_offset = new_offset;
- }
- shdr.sh_size = needed_size;
- phdr.p_memsz = needed_size;
- phdr.p_filesz = needed_size;
-
- self.shdr_table_dirty = true; // TODO look into making only the one section dirty
- self.phdr_table_dirty = true; // TODO look into making only the one program header dirty
-
+ try self.growAllocSection(self.got_section_index.?, self.phdr_got_index.?, needed_size);
self.offset_table_count_dirty = false;
}
const endian = self.base.options.target.cpu.arch.endian();
+ const shdr = &self.sections.items[self.got_section_index.?];
const off = shdr.sh_offset + @as(u64, entry_size) * index;
switch (entry_size) {
2 => {
@@ -2812,23 +2847,8 @@ fn writeSymbol(self: *Elf, index: usize) !void {
.p64 => @alignOf(elf.Elf64_Sym),
};
const needed_size = (self.local_symbols.items.len + self.global_symbols.items.len) * sym_size;
- if (needed_size > self.allocatedSize(syms_sect.sh_offset)) {
- // Move all the symbols to a new file location.
- const new_offset = self.findFreeSpace(needed_size, sym_align);
- log.debug("moving '.symtab' from 0x{x} to 0x{x}", .{ syms_sect.sh_offset, new_offset });
- const existing_size = @as(u64, syms_sect.sh_info) * sym_size;
- const amt = try self.base.file.?.copyRangeAll(
- syms_sect.sh_offset,
- self.base.file.?,
- new_offset,
- existing_size,
- );
- if (amt != existing_size) return error.InputOutput;
- syms_sect.sh_offset = new_offset;
- }
+ try self.growNonAllocSection(self.symtab_section_index.?, needed_size, sym_align, true);
syms_sect.sh_info = @intCast(u32, self.local_symbols.items.len);
- syms_sect.sh_size = needed_size; // anticipating adding the global symbols later
- self.shdr_table_dirty = true; // TODO look into only writing one section
}
const foreign_endian = self.base.options.target.cpu.arch.endian() != builtin.cpu.arch.endian();
const off = switch (self.ptr_width) {
@@ -2876,22 +2896,7 @@ fn writeAllGlobalSymbols(self: *Elf) !void {
.p64 => @alignOf(elf.Elf64_Sym),
};
const needed_size = (self.local_symbols.items.len + self.global_symbols.items.len) * sym_size;
- if (needed_size > self.allocatedSize(syms_sect.sh_offset)) {
- // Move all the symbols to a new file location.
- const new_offset = self.findFreeSpace(needed_size, sym_align);
- log.debug("moving '.symtab' from 0x{x} to 0x{x}", .{ syms_sect.sh_offset, new_offset });
- const existing_size = @as(u64, syms_sect.sh_info) * sym_size;
- const amt = try self.base.file.?.copyRangeAll(
- syms_sect.sh_offset,
- self.base.file.?,
- new_offset,
- existing_size,
- );
- if (amt != existing_size) return error.InputOutput;
- syms_sect.sh_offset = new_offset;
- }
- syms_sect.sh_size = needed_size; // anticipating adding the global symbols later
- self.shdr_table_dirty = true; // TODO look into only writing one section
+ try self.growNonAllocSection(self.symtab_section_index.?, needed_size, sym_align, true);
const foreign_endian = self.base.options.target.cpu.arch.endian() != builtin.cpu.arch.endian();
const global_syms_off = syms_sect.sh_offset + self.local_symbols.items.len * sym_size;
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 8c9e5329f2..4a1ca9a357 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -347,7 +347,7 @@ pub fn openPath(allocator: Allocator, options: link.Options) !*MachO {
self.d_sym = .{
.allocator = allocator,
- .dwarf = link.File.Dwarf.init(allocator, .macho, options.target),
+ .dwarf = link.File.Dwarf.init(allocator, &self.base, options.target),
.file = d_sym_file,
.page_size = self.page_size,
};
@@ -449,7 +449,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
const module = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented;
if (self.d_sym) |*d_sym| {
- try d_sym.dwarf.flushModule(&self.base, module);
+ try d_sym.dwarf.flushModule(module);
}
var libs = std.StringArrayHashMap(link.SystemLib).init(arena);
@@ -2213,7 +2213,6 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
if (decl_state) |*ds| {
try self.d_sym.?.dwarf.commitDeclState(
- &self.base,
module,
decl_index,
addr,
@@ -2364,7 +2363,6 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
if (decl_state) |*ds| {
try self.d_sym.?.dwarf.commitDeclState(
- &self.base,
module,
decl_index,
addr,
@@ -2603,7 +2601,7 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void {
_ = module;
if (self.d_sym) |*d_sym| {
- try d_sym.dwarf.updateDeclLineNumber(&self.base, decl);
+ try d_sym.dwarf.updateDeclLineNumber(decl);
}
}
@@ -4302,6 +4300,11 @@ pub fn getEntryPoint(self: MachO) error{MissingMainEntrypoint}!SymbolWithLoc {
return global;
}
+pub fn getDebugSymbols(self: *MachO) ?*DebugSymbols {
+ if (self.d_sym == null) return null;
+ return &self.d_sym.?;
+}
+
pub fn findFirst(comptime T: type, haystack: []align(1) const T, start: usize, predicate: anytype) usize {
if (!@hasDecl(@TypeOf(predicate), "predicate"))
@compileError("Predicate is required to define fn predicate(@This(), T) bool");
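
getDebugSymbols lets code outside MachO.zig reach the optional dSYM state without poking at d_sym directly. A hypothetical caller, using only declarations that appear elsewhere in this diff:

    // Hypothetical call site; `macho_file` is a stand-in for a *MachO value.
    if (macho_file.getDebugSymbols()) |d_sym| {
        // markDirty and the section indices appear in the DebugSymbols.zig hunks below.
        d_sym.markDirty(d_sym.debug_line_section_index.?);
    }
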
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index c4ce8e4223..7fa4a8f4dd 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -137,7 +137,6 @@ fn allocateSection(self: *DebugSymbols, sectname: []const u8, size: u64, alignme
off + size,
});
- sect.addr = segment.vmaddr + off - segment.fileoff;
sect.offset = @intCast(u32, off);
const index = @intCast(u8, self.sections.items.len);
@@ -148,6 +147,52 @@ fn allocateSection(self: *DebugSymbols, sectname: []const u8, size: u64, alignme
return index;
}
+pub fn growSection(self: *DebugSymbols, sect_index: u8, needed_size: u32, requires_file_copy: bool) !void {
+ const sect = self.getSectionPtr(sect_index);
+
+ if (needed_size > self.allocatedSize(sect.offset)) {
+ const existing_size = sect.size;
+ sect.size = 0; // free the space
+ const new_offset = self.findFreeSpace(needed_size, 1);
+
+ log.debug("moving {s} section: {} bytes from 0x{x} to 0x{x}", .{
+ sect.sectName(),
+ existing_size,
+ sect.offset,
+ new_offset,
+ });
+
+ if (requires_file_copy) {
+ const amt = try self.file.copyRangeAll(
+ sect.offset,
+ self.file,
+ new_offset,
+ existing_size,
+ );
+ if (amt != existing_size) return error.InputOutput;
+ }
+
+ sect.offset = @intCast(u32, new_offset);
+ }
+
+ sect.size = needed_size;
+ self.markDirty(sect_index);
+}
+
+pub fn markDirty(self: *DebugSymbols, sect_index: u8) void {
+ if (self.debug_info_section_index.? == sect_index) {
+ self.debug_info_header_dirty = true;
+ } else if (self.debug_line_section_index.? == sect_index) {
+ self.debug_line_header_dirty = true;
+ } else if (self.debug_abbrev_section_index.? == sect_index) {
+ self.debug_abbrev_section_dirty = true;
+ } else if (self.debug_str_section_index.? == sect_index) {
+ self.debug_string_table_dirty = true;
+ } else if (self.debug_aranges_section_index.? == sect_index) {
+ self.debug_aranges_section_dirty = true;
+ }
+}
+
fn detectAllocCollision(self: *DebugSymbols, start: u64, size: u64) ?u64 {
const end = start + padToIdeal(size);
for (self.sections.items) |section| {
@@ -160,7 +205,7 @@ fn detectAllocCollision(self: *DebugSymbols, start: u64, size: u64) ?u64 {
return null;
}
-pub fn findFreeSpace(self: *DebugSymbols, object_size: u64, min_alignment: u64) u64 {
+fn findFreeSpace(self: *DebugSymbols, object_size: u64, min_alignment: u64) u64 {
const segment = self.getDwarfSegmentPtr();
var offset: u64 = segment.fileoff;
while (self.detectAllocCollision(offset, object_size)) |item_end| {
@@ -213,7 +258,7 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {
}
if (self.debug_abbrev_section_dirty) {
- try self.dwarf.writeDbgAbbrev(&macho_file.base);
+ try self.dwarf.writeDbgAbbrev();
self.debug_abbrev_section_dirty = false;
}
@@ -223,7 +268,7 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {
const text_section = macho_file.sections.items(.header)[macho_file.text_section_index.?];
const low_pc = text_section.addr;
const high_pc = text_section.addr + text_section.size;
- try self.dwarf.writeDbgInfoHeader(&macho_file.base, module, low_pc, high_pc);
+ try self.dwarf.writeDbgInfoHeader(module, low_pc, high_pc);
self.debug_info_header_dirty = false;
}
@@ -231,36 +276,21 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {
// Currently only one compilation unit is supported, so the address range is simply
// identical to the main program header virtual address and memory size.
const text_section = macho_file.sections.items(.header)[macho_file.text_section_index.?];
- try self.dwarf.writeDbgAranges(&macho_file.base, text_section.addr, text_section.size);
+ try self.dwarf.writeDbgAranges(text_section.addr, text_section.size);
self.debug_aranges_section_dirty = false;
}
if (self.debug_line_header_dirty) {
- try self.dwarf.writeDbgLineHeader(&macho_file.base, module);
+ try self.dwarf.writeDbgLineHeader(module);
self.debug_line_header_dirty = false;
}
{
- const dwarf_segment = self.getDwarfSegmentPtr();
- const debug_strtab_sect = &self.sections.items[self.debug_str_section_index.?];
- if (self.debug_string_table_dirty or self.dwarf.strtab.items.len != debug_strtab_sect.size) {
- const allocated_size = self.allocatedSize(debug_strtab_sect.offset);
- const needed_size = self.dwarf.strtab.items.len;
-
- if (needed_size > allocated_size) {
- debug_strtab_sect.size = 0; // free the space
- const new_offset = self.findFreeSpace(needed_size, 1);
- debug_strtab_sect.addr = dwarf_segment.vmaddr + new_offset - dwarf_segment.fileoff;
- debug_strtab_sect.offset = @intCast(u32, new_offset);
- }
- debug_strtab_sect.size = @intCast(u32, needed_size);
-
- log.debug("__debug_strtab start=0x{x} end=0x{x}", .{
- debug_strtab_sect.offset,
- debug_strtab_sect.offset + needed_size,
- });
-
- try self.file.pwriteAll(self.dwarf.strtab.items, debug_strtab_sect.offset);
+ const sect_index = self.debug_str_section_index.?;
+ if (self.debug_string_table_dirty or self.dwarf.strtab.items.len != self.getSection(sect_index).size) {
+ const needed_size = @intCast(u32, self.dwarf.strtab.items.len);
+ try self.growSection(sect_index, needed_size, false);
+ try self.file.pwriteAll(self.dwarf.strtab.items, self.getSection(sect_index).offset);
self.debug_string_table_dirty = false;
}
}
@@ -424,7 +454,7 @@ fn writeHeader(self: *DebugSymbols, macho_file: *MachO, ncmds: u32, sizeofcmds:
try self.file.pwriteAll(mem.asBytes(&header), 0);
}
-pub fn allocatedSize(self: *DebugSymbols, start: u64) u64 {
+fn allocatedSize(self: *DebugSymbols, start: u64) u64 {
const seg = self.getDwarfSegmentPtr();
assert(start >= seg.fileoff);
var min_pos: u64 = std.math.maxInt(u64);
@@ -556,3 +586,13 @@ fn getLinkeditSegmentPtr(self: *DebugSymbols) *macho.segment_command_64 {
const index = self.linkedit_segment_cmd_index.?;
return &self.segments.items[index];
}
+
+pub fn getSectionPtr(self: *DebugSymbols, sect: u8) *macho.section_64 {
+ assert(sect < self.sections.items.len);
+ return &self.sections.items[sect];
+}
+
+pub fn getSection(self: DebugSymbols, sect: u8) macho.section_64 {
+ assert(sect < self.sections.items.len);
+ return self.sections.items[sect];
+}
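
Taken together, the new helpers give each debug section a uniform grow-then-write cycle, as the __debug_str hunk above shows. A hedged sketch of that cycle for an arbitrary section; the wrapper function itself is hypothetical:

    // Hypothetical wrapper; growSection, getSection, and the `file` field come
    // from this diff and the existing DebugSymbols struct. growSection calls
    // markDirty, so the matching header is rewritten on the next flush.
    fn writeDebugSection(d_sym: *DebugSymbols, sect_index: u8, bytes: []const u8) !void {
        try d_sym.growSection(sect_index, @intCast(u32, bytes.len), false);
        const sect = d_sym.getSection(sect_index);
        try d_sym.file.pwriteAll(bytes, sect.offset);
    }
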
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 4a9db75c98..f8821d796f 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -361,7 +361,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
}
if (!options.strip and options.module != null) {
- wasm_bin.dwarf = Dwarf.init(allocator, .wasm, options.target);
+ wasm_bin.dwarf = Dwarf.init(allocator, &wasm_bin.base, options.target);
try wasm_bin.initDebugSections();
}
@@ -910,7 +910,6 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes
if (wasm.dwarf) |*dwarf| {
try dwarf.commitDeclState(
- &wasm.base,
mod,
decl_index,
// Actual value will be written after relocation.
@@ -990,7 +989,7 @@ pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Module, decl: *const Module.Decl)
defer wasm.base.allocator.free(decl_name);
log.debug("updateDeclLineNumber {s}{*}", .{ decl_name, decl });
- try dw.updateDeclLineNumber(&wasm.base, decl);
+ try dw.updateDeclLineNumber(decl);
}
}
@@ -2299,7 +2298,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
}
if (wasm.dwarf) |*dwarf| {
- try dwarf.flushModule(&wasm.base, wasm.base.options.module.?);
+ try dwarf.flushModule(wasm.base.options.module.?);
}
}
@@ -2668,12 +2667,12 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
if (!wasm.base.options.strip) {
if (wasm.dwarf) |*dwarf| {
const mod = wasm.base.options.module.?;
- try dwarf.writeDbgAbbrev(&wasm.base);
+ try dwarf.writeDbgAbbrev();
// for debug info and ranges, the address is always 0,
// as locations are always offsets relative to 'code' section.
- try dwarf.writeDbgInfoHeader(&wasm.base, mod, 0, code_section_size);
- try dwarf.writeDbgAranges(&wasm.base, 0, code_section_size);
- try dwarf.writeDbgLineHeader(&wasm.base, mod);
+ try dwarf.writeDbgInfoHeader(mod, 0, code_section_size);
+ try dwarf.writeDbgAranges(0, code_section_size);
+ try dwarf.writeDbgLineHeader(mod);
}
var debug_bytes = std.ArrayList(u8).init(wasm.base.allocator);