Diffstat (limited to 'src')
-rw-r--r--  src/link/Dwarf.zig                  |   8
-rw-r--r--  src/link/Elf.zig                    | 836
-rw-r--r--  src/link/Elf/Atom.zig               | 175
-rw-r--r--  src/link/Elf/LinkerDefined.zig      |  32
-rw-r--r--  src/link/Elf/Object.zig             |  20
-rw-r--r--  src/link/Elf/Symbol.zig             |  14
-rw-r--r--  src/link/Elf/ZigObject.zig          | 348
-rw-r--r--  src/link/Elf/eh_frame.zig           |  19
-rw-r--r--  src/link/Elf/merge_section.zig      |   6
-rw-r--r--  src/link/Elf/relocatable.zig        |  75
-rw-r--r--  src/link/Elf/synthetic_sections.zig |  52
-rw-r--r--  src/link/Elf/thunks.zig             |   7
12 files changed, 738 insertions(+), 854 deletions(-)
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 40c036df9c..68579e3282 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -317,7 +317,7 @@ pub const Section = struct {
fn resize(sec: *Section, dwarf: *Dwarf, len: u64) UpdateError!void {
if (dwarf.bin_file.cast(.elf)) |elf_file| {
try elf_file.growNonAllocSection(sec.index, len, @intCast(sec.alignment.toByteUnits().?), true);
- const shdr = &elf_file.shdrs.items[sec.index];
+ const shdr = &elf_file.sections.items(.shdr)[sec.index];
sec.off = shdr.sh_offset;
sec.len = shdr.sh_size;
} else if (dwarf.bin_file.cast(.macho)) |macho_file| {
@@ -340,7 +340,7 @@ pub const Section = struct {
sec.off += len;
sec.len -= len;
if (dwarf.bin_file.cast(.elf)) |elf_file| {
- const shdr = &elf_file.shdrs.items[sec.index];
+ const shdr = &elf_file.sections.items(.shdr)[sec.index];
shdr.sh_offset = sec.off;
shdr.sh_size = sec.len;
} else if (dwarf.bin_file.cast(.macho)) |macho_file| {
@@ -771,7 +771,7 @@ const Entry = struct {
log.err("missing {} from {s}", .{
@as(Entry.Index, @enumFromInt(entry - unit.entries.items.ptr)),
std.mem.sliceTo(if (dwarf.bin_file.cast(.elf)) |elf_file|
- elf_file.shstrtab.items[elf_file.shdrs.items[sec.index].sh_name..]
+ elf_file.shstrtab.items[elf_file.sections.items(.shdr)[sec.index].sh_name..]
else if (dwarf.bin_file.cast(.macho)) |macho_file|
if (macho_file.d_sym) |*d_sym|
&d_sym.sections.items[sec.index].segname
@@ -1529,7 +1529,7 @@ pub fn reloadSectionMetadata(dwarf: *Dwarf) void {
elf_file.debug_rnglists_section_index.?,
elf_file.debug_str_section_index.?,
}) |sec, section_index| {
- const shdr = &elf_file.shdrs.items[section_index];
+ const shdr = &elf_file.sections.items(.shdr)[section_index];
sec.index = section_index;
sec.off = shdr.sh_offset;
sec.len = shdr.sh_size;
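
Note: throughout these hunks, `elf_file.shdrs.items[i]` becomes `elf_file.sections.items(.shdr)[i]`, i.e. the flat header array is now one column of a `std.MultiArrayList(Section)`. Below is a minimal, standalone sketch of that access pattern; the two-field `Section` (just `size` and `phndx`) is a trimmed-down stand-in for the linker's real struct, not its actual definition.

const std = @import("std");

// Trimmed-down stand-in for the linker's per-output-section metadata.
const Section = struct {
    size: u64,
    phndx: ?u32 = null,
};

test "MultiArrayList access pattern" {
    const gpa = std.testing.allocator;

    var sections: std.MultiArrayList(Section) = .{};
    defer sections.deinit(gpa);

    try sections.append(gpa, .{ .size = 0x100 });
    try sections.append(gpa, .{ .size = 0x40, .phndx = 1 });

    // Column-wise view: `items(.size)` is a contiguous []u64, much like the
    // old `shdrs.items` slice, but every other per-section field lives in
    // the same list instead of a parallel hash map.
    var mapped_total: u64 = 0;
    for (sections.items(.size), sections.items(.phndx)) |size, phndx| {
        if (phndx != null) mapped_total += size; // only sections assigned to a segment
    }
    try std.testing.expectEqual(@as(u64, 0x40), mapped_total);

    // `slice()` resolves the field pointers once when several columns are
    // needed together; the returned slices alias the list's storage.
    const slice = sections.slice();
    slice.items(.size)[0] = 0x200;
    try std.testing.expectEqual(@as(u64, 0x200), sections.items(.size)[0]);
}

The payoff, visible in the `deinit` and `sortShdrs` hunks below, is that sorting or growing one list keeps every per-section column in sync without cloning several side tables.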
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index e89c475d10..b695329450 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -45,17 +45,10 @@ linker_defined_index: ?File.Index = null,
objects: std.ArrayListUnmanaged(File.Index) = .{},
shared_objects: std.ArrayListUnmanaged(File.Index) = .{},
-/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
-/// Same order as in the file.
-shdrs: std.ArrayListUnmanaged(elf.Elf64_Shdr) = .{},
-/// Given index to a section, pulls index of containing phdr if any.
-phdr_to_shdr_table: std.AutoHashMapUnmanaged(u32, u32) = .{},
+/// List of all output sections and their associated metadata.
+sections: std.MultiArrayList(Section) = .{},
/// File offset into the shdr table.
shdr_table_offset: ?u64 = null,
-/// Table of lists of atoms per output section.
-/// This table is not used to track incrementally generated atoms.
-output_sections: std.AutoArrayHashMapUnmanaged(u32, std.ArrayListUnmanaged(Ref)) = .{},
-output_rela_sections: std.AutoArrayHashMapUnmanaged(u32, RelaSection) = .{},
/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
/// Same order as in the file.
@@ -181,9 +174,6 @@ thunks: std.ArrayListUnmanaged(Thunk) = .{},
/// List of output merge sections with deduped contents.
merge_sections: std.ArrayListUnmanaged(MergeSection) = .{},
-/// Table of last atom index in a section and matching atom free list if any.
-last_atom_and_free_list_table: LastAtomAndFreeListTable = .{},
-
first_eflags: ?elf.Elf64_Word = null,
/// When allocating, the ideal_capacity is calculated by
@@ -387,8 +377,7 @@ pub fn createEmpty(
)}),
} });
self.zig_object_index = index;
- try self.zigObjectPtr().?.init(self);
- try self.initMetadata(.{
+ try self.zigObjectPtr().?.init(self, .{
.symbol_count_hint = options.symbol_count_hint,
.program_code_size_hint = options.program_code_size_hint,
});
@@ -430,17 +419,12 @@ pub fn deinit(self: *Elf) void {
self.objects.deinit(gpa);
self.shared_objects.deinit(gpa);
- self.shdrs.deinit(gpa);
- self.phdr_to_shdr_table.deinit(gpa);
- self.phdrs.deinit(gpa);
- for (self.output_sections.values()) |*list| {
- list.deinit(gpa);
+ for (self.sections.items(.atom_list), self.sections.items(.free_list)) |*atoms, *free_list| {
+ atoms.deinit(gpa);
+ free_list.deinit(gpa);
}
- self.output_sections.deinit(gpa);
- for (self.output_rela_sections.values()) |*sec| {
- sec.atom_list.deinit(gpa);
- }
- self.output_rela_sections.deinit(gpa);
+ self.sections.deinit(gpa);
+ self.phdrs.deinit(gpa);
self.shstrtab.deinit(gpa);
self.symtab.deinit(gpa);
self.strtab.deinit(gpa);
@@ -454,10 +438,6 @@ pub fn deinit(self: *Elf) void {
sect.deinit(gpa);
}
self.merge_sections.deinit(gpa);
- for (self.last_atom_and_free_list_table.values()) |*value| {
- value.free_list.deinit(gpa);
- }
- self.last_atom_and_free_list_table.deinit(gpa);
self.got.deinit(gpa);
self.plt.deinit(gpa);
@@ -506,7 +486,7 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) !?u64 {
if (self.shdr_table_offset) |off| {
const shdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Shdr) else @sizeOf(elf.Elf64_Shdr);
- const tight_size = self.shdrs.items.len * shdr_size;
+ const tight_size = self.sections.items(.shdr).len * shdr_size;
const increased_size = padToIdeal(tight_size);
const test_end = off +| increased_size;
if (start < test_end) {
@@ -515,7 +495,7 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) !?u64 {
}
}
- for (self.shdrs.items) |shdr| {
+ for (self.sections.items(.shdr)) |shdr| {
if (shdr.sh_type == elf.SHT_NOBITS) continue;
const increased_size = padToIdeal(shdr.sh_size);
const test_end = shdr.sh_offset +| increased_size;
@@ -545,7 +525,7 @@ pub fn allocatedSize(self: *Elf, start: u64) u64 {
if (self.shdr_table_offset) |off| {
if (off > start and off < min_pos) min_pos = off;
}
- for (self.shdrs.items) |section| {
+ for (self.sections.items(.shdr)) |section| {
if (section.sh_offset <= start) continue;
if (section.sh_offset < min_pos) min_pos = section.sh_offset;
}
@@ -574,318 +554,13 @@ pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u64) !u64 {
return start;
}
-pub const InitMetadataOptions = struct {
- symbol_count_hint: u64,
- program_code_size_hint: u64,
-};
-
-/// TODO move to ZigObject
-pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
- const gpa = self.base.comp.gpa;
- const ptr_size = self.ptrWidthBytes();
- const target = self.base.comp.root_mod.resolved_target.result;
- const ptr_bit_width = target.ptrBitWidth();
- const zo = self.zigObjectPtr().?;
-
- const fillSection = struct {
- fn fillSection(elf_file: *Elf, shdr: *elf.Elf64_Shdr, size: u64, phndx: ?u16) !void {
- if (elf_file.base.isRelocatable()) {
- const off = try elf_file.findFreeSpace(size, shdr.sh_addralign);
- shdr.sh_offset = off;
- shdr.sh_size = size;
- } else {
- const phdr = elf_file.phdrs.items[phndx.?];
- shdr.sh_addr = phdr.p_vaddr;
- shdr.sh_offset = phdr.p_offset;
- shdr.sh_size = phdr.p_memsz;
- }
- }
- }.fillSection;
-
- comptime assert(number_of_zig_segments == 5);
-
- if (!self.base.isRelocatable()) {
- if (self.phdr_zig_load_re_index == null) {
- const filesz = options.program_code_size_hint;
- const off = try self.findFreeSpace(filesz, self.page_size);
- self.phdr_zig_load_re_index = try self.addPhdr(.{
- .type = elf.PT_LOAD,
- .offset = off,
- .filesz = filesz,
- .addr = if (ptr_bit_width >= 32) 0x4000000 else 0x4000,
- .memsz = filesz,
- .@"align" = self.page_size,
- .flags = elf.PF_X | elf.PF_R | elf.PF_W,
- });
- }
-
- if (self.phdr_zig_load_ro_index == null) {
- const alignment = self.page_size;
- const filesz: u64 = 1024;
- const off = try self.findFreeSpace(filesz, alignment);
- self.phdr_zig_load_ro_index = try self.addPhdr(.{
- .type = elf.PT_LOAD,
- .offset = off,
- .filesz = filesz,
- .addr = if (ptr_bit_width >= 32) 0xc000000 else 0xa000,
- .memsz = filesz,
- .@"align" = alignment,
- .flags = elf.PF_R | elf.PF_W,
- });
- }
-
- if (self.phdr_zig_load_rw_index == null) {
- const alignment = self.page_size;
- const filesz: u64 = 1024;
- const off = try self.findFreeSpace(filesz, alignment);
- self.phdr_zig_load_rw_index = try self.addPhdr(.{
- .type = elf.PT_LOAD,
- .offset = off,
- .filesz = filesz,
- .addr = if (ptr_bit_width >= 32) 0x10000000 else 0xc000,
- .memsz = filesz,
- .@"align" = alignment,
- .flags = elf.PF_R | elf.PF_W,
- });
- }
-
- if (self.phdr_zig_load_zerofill_index == null) {
- const alignment = self.page_size;
- self.phdr_zig_load_zerofill_index = try self.addPhdr(.{
- .type = elf.PT_LOAD,
- .addr = if (ptr_bit_width >= 32) 0x14000000 else 0xf000,
- .memsz = 1024,
- .@"align" = alignment,
- .flags = elf.PF_R | elf.PF_W,
- });
- }
- }
-
- if (self.zig_text_section_index == null) {
- self.zig_text_section_index = try self.addSection(.{
- .name = try self.insertShString(".text.zig"),
- .type = elf.SHT_PROGBITS,
- .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
- .addralign = 1,
- .offset = std.math.maxInt(u64),
- });
- const shdr = &self.shdrs.items[self.zig_text_section_index.?];
- try fillSection(self, shdr, options.program_code_size_hint, self.phdr_zig_load_re_index);
- if (self.base.isRelocatable()) {
- const rela_shndx = try self.addRelaShdr(try self.insertShString(".rela.text.zig"), self.zig_text_section_index.?);
- try self.output_rela_sections.putNoClobber(gpa, self.zig_text_section_index.?, .{
- .shndx = rela_shndx,
- });
- } else {
- try self.phdr_to_shdr_table.putNoClobber(
- gpa,
- self.zig_text_section_index.?,
- self.phdr_zig_load_re_index.?,
- );
- }
- try self.output_sections.putNoClobber(gpa, self.zig_text_section_index.?, .{});
- try self.last_atom_and_free_list_table.putNoClobber(gpa, self.zig_text_section_index.?, .{});
- }
-
- if (self.zig_data_rel_ro_section_index == null) {
- self.zig_data_rel_ro_section_index = try self.addSection(.{
- .name = try self.insertShString(".data.rel.ro.zig"),
- .type = elf.SHT_PROGBITS,
- .addralign = 1,
- .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
- .offset = std.math.maxInt(u64),
- });
- const shdr = &self.shdrs.items[self.zig_data_rel_ro_section_index.?];
- try fillSection(self, shdr, 1024, self.phdr_zig_load_ro_index);
- if (self.base.isRelocatable()) {
- const rela_shndx = try self.addRelaShdr(
- try self.insertShString(".rela.data.rel.ro.zig"),
- self.zig_data_rel_ro_section_index.?,
- );
- try self.output_rela_sections.putNoClobber(gpa, self.zig_data_rel_ro_section_index.?, .{
- .shndx = rela_shndx,
- });
- } else {
- try self.phdr_to_shdr_table.putNoClobber(
- gpa,
- self.zig_data_rel_ro_section_index.?,
- self.phdr_zig_load_ro_index.?,
- );
- }
- try self.output_sections.putNoClobber(gpa, self.zig_data_rel_ro_section_index.?, .{});
- try self.last_atom_and_free_list_table.putNoClobber(gpa, self.zig_data_rel_ro_section_index.?, .{});
- }
-
- if (self.zig_data_section_index == null) {
- self.zig_data_section_index = try self.addSection(.{
- .name = try self.insertShString(".data.zig"),
- .type = elf.SHT_PROGBITS,
- .addralign = ptr_size,
- .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
- .offset = std.math.maxInt(u64),
- });
- const shdr = &self.shdrs.items[self.zig_data_section_index.?];
- try fillSection(self, shdr, 1024, self.phdr_zig_load_rw_index);
- if (self.base.isRelocatable()) {
- const rela_shndx = try self.addRelaShdr(
- try self.insertShString(".rela.data.zig"),
- self.zig_data_section_index.?,
- );
- try self.output_rela_sections.putNoClobber(gpa, self.zig_data_section_index.?, .{
- .shndx = rela_shndx,
- });
- } else {
- try self.phdr_to_shdr_table.putNoClobber(
- gpa,
- self.zig_data_section_index.?,
- self.phdr_zig_load_rw_index.?,
- );
- }
- try self.output_sections.putNoClobber(gpa, self.zig_data_section_index.?, .{});
- try self.last_atom_and_free_list_table.putNoClobber(gpa, self.zig_data_section_index.?, .{});
- }
-
- if (self.zig_bss_section_index == null) {
- self.zig_bss_section_index = try self.addSection(.{
- .name = try self.insertShString(".bss.zig"),
- .type = elf.SHT_NOBITS,
- .addralign = ptr_size,
- .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
- .offset = 0,
- });
- const shdr = &self.shdrs.items[self.zig_bss_section_index.?];
- if (self.phdr_zig_load_zerofill_index) |phndx| {
- const phdr = self.phdrs.items[phndx];
- shdr.sh_addr = phdr.p_vaddr;
- shdr.sh_size = phdr.p_memsz;
- try self.phdr_to_shdr_table.putNoClobber(gpa, self.zig_bss_section_index.?, phndx);
- } else {
- shdr.sh_size = 1024;
- }
- try self.output_sections.putNoClobber(gpa, self.zig_bss_section_index.?, .{});
- try self.last_atom_and_free_list_table.putNoClobber(gpa, self.zig_bss_section_index.?, .{});
- }
-
- if (zo.dwarf) |*dwarf| {
- const addSectionSymbol = struct {
- fn addSectionSymbol(
- zig_object: *ZigObject,
- alloc: Allocator,
- name: [:0]const u8,
- alignment: Atom.Alignment,
- shndx: u32,
- ) !Symbol.Index {
- const name_off = try zig_object.addString(alloc, name);
- const index = try zig_object.newSymbolWithAtom(alloc, name_off);
- const sym = zig_object.symbol(index);
- const esym = &zig_object.symtab.items(.elf_sym)[sym.esym_index];
- esym.st_info |= elf.STT_SECTION;
- const atom_ptr = zig_object.atom(sym.ref.index).?;
- atom_ptr.alignment = alignment;
- atom_ptr.output_section_index = shndx;
- return index;
- }
- }.addSectionSymbol;
-
- if (self.debug_str_section_index == null) {
- self.debug_str_section_index = try self.addSection(.{
- .name = try self.insertShString(".debug_str"),
- .flags = elf.SHF_MERGE | elf.SHF_STRINGS,
- .entsize = 1,
- .type = elf.SHT_PROGBITS,
- .addralign = 1,
- });
- zo.debug_str_section_dirty = true;
- zo.debug_str_index = try addSectionSymbol(zo, gpa, ".debug_str", .@"1", self.debug_str_section_index.?);
- try self.output_sections.putNoClobber(gpa, self.debug_str_section_index.?, .{});
- }
-
- if (self.debug_info_section_index == null) {
- self.debug_info_section_index = try self.addSection(.{
- .name = try self.insertShString(".debug_info"),
- .type = elf.SHT_PROGBITS,
- .addralign = 1,
- });
- zo.debug_info_section_dirty = true;
- zo.debug_info_index = try addSectionSymbol(zo, gpa, ".debug_info", .@"1", self.debug_info_section_index.?);
- try self.output_sections.putNoClobber(gpa, self.debug_info_section_index.?, .{});
- }
-
- if (self.debug_abbrev_section_index == null) {
- self.debug_abbrev_section_index = try self.addSection(.{
- .name = try self.insertShString(".debug_abbrev"),
- .type = elf.SHT_PROGBITS,
- .addralign = 1,
- });
- zo.debug_abbrev_section_dirty = true;
- zo.debug_abbrev_index = try addSectionSymbol(zo, gpa, ".debug_abbrev", .@"1", self.debug_abbrev_section_index.?);
- try self.output_sections.putNoClobber(gpa, self.debug_abbrev_section_index.?, .{});
- }
-
- if (self.debug_aranges_section_index == null) {
- self.debug_aranges_section_index = try self.addSection(.{
- .name = try self.insertShString(".debug_aranges"),
- .type = elf.SHT_PROGBITS,
- .addralign = 16,
- });
- zo.debug_aranges_section_dirty = true;
- zo.debug_aranges_index = try addSectionSymbol(zo, gpa, ".debug_aranges", .@"16", self.debug_aranges_section_index.?);
- try self.output_sections.putNoClobber(gpa, self.debug_aranges_section_index.?, .{});
- }
-
- if (self.debug_line_section_index == null) {
- self.debug_line_section_index = try self.addSection(.{
- .name = try self.insertShString(".debug_line"),
- .type = elf.SHT_PROGBITS,
- .addralign = 1,
- });
- zo.debug_line_section_dirty = true;
- zo.debug_line_index = try addSectionSymbol(zo, gpa, ".debug_line", .@"1", self.debug_line_section_index.?);
- try self.output_sections.putNoClobber(gpa, self.debug_line_section_index.?, .{});
- }
-
- if (self.debug_line_str_section_index == null) {
- self.debug_line_str_section_index = try self.addSection(.{
- .name = try self.insertShString(".debug_line_str"),
- .flags = elf.SHF_MERGE | elf.SHF_STRINGS,
- .entsize = 1,
- .type = elf.SHT_PROGBITS,
- .addralign = 1,
- });
- zo.debug_line_str_section_dirty = true;
- zo.debug_line_str_index = try addSectionSymbol(zo, gpa, ".debug_line_str", .@"1", self.debug_line_str_section_index.?);
- try self.output_sections.putNoClobber(gpa, self.debug_line_str_section_index.?, .{});
- }
-
- if (self.debug_loclists_section_index == null) {
- self.debug_loclists_section_index = try self.addSection(.{
- .name = try self.insertShString(".debug_loclists"),
- .type = elf.SHT_PROGBITS,
- .addralign = 1,
- });
- zo.debug_loclists_section_dirty = true;
- zo.debug_loclists_index = try addSectionSymbol(zo, gpa, ".debug_loclists", .@"1", self.debug_loclists_section_index.?);
- try self.output_sections.putNoClobber(gpa, self.debug_loclists_section_index.?, .{});
- }
-
- if (self.debug_rnglists_section_index == null) {
- self.debug_rnglists_section_index = try self.addSection(.{
- .name = try self.insertShString(".debug_rnglists"),
- .type = elf.SHT_PROGBITS,
- .addralign = 1,
- });
- zo.debug_rnglists_section_dirty = true;
- zo.debug_rnglists_index = try addSectionSymbol(zo, gpa, ".debug_rnglists", .@"1", self.debug_rnglists_section_index.?);
- try self.output_sections.putNoClobber(gpa, self.debug_rnglists_section_index.?, .{});
- }
-
- try dwarf.initMetadata();
- }
-}
-
pub fn growAllocSection(self: *Elf, shdr_index: u32, needed_size: u64) !void {
- const shdr = &self.shdrs.items[shdr_index];
- const maybe_phdr = if (self.phdr_to_shdr_table.get(shdr_index)) |phndx| &self.phdrs.items[phndx] else null;
+ const slice = self.sections.slice();
+ const shdr = &slice.items(.shdr)[shdr_index];
+ assert(shdr.sh_flags & elf.SHF_ALLOC != 0);
+ const phndx = slice.items(.phndx)[shdr_index];
+ const maybe_phdr = if (phndx) |ndx| &self.phdrs.items[ndx] else null;
+
log.debug("allocated size {x} of {s}, needed size {x}", .{
self.allocatedSize(shdr.sh_offset),
self.getShString(shdr.sh_name),
@@ -924,9 +599,7 @@ pub fn growAllocSection(self: *Elf, shdr_index: u32, needed_size: u64) !void {
const mem_capacity = self.allocatedVirtualSize(phdr.p_vaddr);
if (needed_size > mem_capacity) {
var err = try self.base.addErrorWithNotes(2);
- try err.addMsg("fatal linker error: cannot expand load segment phdr({d}) in virtual memory", .{
- self.phdr_to_shdr_table.get(shdr_index).?,
- });
+ try err.addMsg("fatal linker error: cannot expand load segment phdr({d}) in virtual memory", .{phndx.?});
try err.addNote("TODO: emit relocations to memory locations in self-hosted backends", .{});
try err.addNote("as a workaround, try increasing pre-allocated virtual memory of each segment", .{});
}
@@ -944,7 +617,8 @@ pub fn growNonAllocSection(
min_alignment: u32,
requires_file_copy: bool,
) !void {
- const shdr = &self.shdrs.items[shdr_index];
+ const shdr = &self.sections.items(.shdr)[shdr_index];
+ assert(shdr.sh_flags & elf.SHF_ALLOC == 0);
const allocated_size = self.allocatedSize(shdr.sh_offset);
if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) {
@@ -1328,7 +1002,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
const atom_ptr = zo.atom(atom_index) orelse continue;
if (!atom_ptr.alive) continue;
const out_shndx = atom_ptr.output_section_index;
- const shdr = &self.shdrs.items[out_shndx];
+ const shdr = &self.sections.items(.shdr)[out_shndx];
if (shdr.sh_type == elf.SHT_NOBITS) continue;
const code = try zo.codeAlloc(self, atom_index);
defer gpa.free(code);
@@ -2692,7 +2366,7 @@ pub fn writeShdrTable(self: *Elf) !void {
};
const shoff = self.shdr_table_offset orelse 0;
- const needed_size = self.shdrs.items.len * shsize;
+ const needed_size = self.sections.items(.shdr).len * shsize;
if (needed_size > self.allocatedSize(shoff)) {
self.shdr_table_offset = null;
@@ -2706,12 +2380,12 @@ pub fn writeShdrTable(self: *Elf) !void {
switch (self.ptr_width) {
.p32 => {
- const buf = try gpa.alloc(elf.Elf32_Shdr, self.shdrs.items.len);
+ const buf = try gpa.alloc(elf.Elf32_Shdr, self.sections.items(.shdr).len);
defer gpa.free(buf);
for (buf, 0..) |*shdr, i| {
- assert(self.shdrs.items[i].sh_offset != math.maxInt(u64));
- shdr.* = shdrTo32(self.shdrs.items[i]);
+ assert(self.sections.items(.shdr)[i].sh_offset != math.maxInt(u64));
+ shdr.* = shdrTo32(self.sections.items(.shdr)[i]);
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf32_Shdr, shdr);
}
@@ -2719,12 +2393,12 @@ pub fn writeShdrTable(self: *Elf) !void {
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
},
.p64 => {
- const buf = try gpa.alloc(elf.Elf64_Shdr, self.shdrs.items.len);
+ const buf = try gpa.alloc(elf.Elf64_Shdr, self.sections.items(.shdr).len);
defer gpa.free(buf);
for (buf, 0..) |*shdr, i| {
- assert(self.shdrs.items[i].sh_offset != math.maxInt(u64));
- shdr.* = self.shdrs.items[i];
+ assert(self.sections.items(.shdr)[i].sh_offset != math.maxInt(u64));
+ shdr.* = self.sections.items(.shdr)[i];
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf64_Shdr, shdr);
}
@@ -2892,7 +2566,7 @@ pub fn writeElfHeader(self: *Elf) !void {
mem.writeInt(u16, hdr_buf[index..][0..2], e_shentsize, endian);
index += 2;
- const e_shnum = @as(u16, @intCast(self.shdrs.items.len));
+ const e_shnum = @as(u16, @intCast(self.sections.items(.shdr).len));
mem.writeInt(u16, hdr_buf[index..][0..2], e_shnum, endian);
index += 2;
@@ -3051,7 +2725,7 @@ pub fn updateMergeSectionSizes(self: *Elf) !void {
msec.updateSize();
}
for (self.merge_sections.items) |*msec| {
- const shdr = &self.shdrs.items[msec.output_section_index];
+ const shdr = &self.sections.items(.shdr)[msec.output_section_index];
const offset = msec.alignment.forward(shdr.sh_size);
const padding = offset - shdr.sh_size;
msec.value = @intCast(offset);
@@ -3067,7 +2741,7 @@ pub fn writeMergeSections(self: *Elf) !void {
defer buffer.deinit();
for (self.merge_sections.items) |*msec| {
- const shdr = self.shdrs.items[msec.output_section_index];
+ const shdr = self.sections.items(.shdr)[msec.output_section_index];
const fileoff = math.cast(usize, msec.value + shdr.sh_offset) orelse return error.Overflow;
const size = math.cast(usize, msec.size) orelse return error.Overflow;
try buffer.ensureTotalCapacity(size);
@@ -3355,7 +3029,7 @@ fn initSpecialPhdrs(self: *Elf) !void {
.@"align" = 1,
});
- const has_tls = for (self.shdrs.items) |shdr| {
+ const has_tls = for (self.sections.items(.shdr)) |shdr| {
if (shdr.sh_flags & elf.SHF_TLS != 0) break true;
} else false;
if (has_tls) {
@@ -3384,6 +3058,7 @@ fn initSpecialPhdrs(self: *Elf) !void {
/// we are about to sort.
fn sortInitFini(self: *Elf) !void {
const gpa = self.base.comp.gpa;
+ const slice = self.sections.slice();
const Entry = struct {
priority: i32,
@@ -3397,7 +3072,7 @@ fn sortInitFini(self: *Elf) !void {
}
};
- for (self.shdrs.items, 0..) |shdr, shndx| {
+ for (slice.items(.shdr), slice.items(.atom_list)) |shdr, *atom_list| {
if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
var is_init_fini = false;
@@ -3414,8 +3089,6 @@ fn sortInitFini(self: *Elf) !void {
}
if (!is_init_fini and !is_ctor_dtor) continue;
-
- const atom_list = self.output_sections.getPtr(@intCast(shndx)).?;
if (atom_list.items.len == 0) continue;
var entries = std.ArrayList(Entry).init(gpa);
@@ -3483,7 +3156,7 @@ fn setVersionSymtab(self: *Elf) !void {
if (self.verneed_section_index) |shndx| {
try self.verneed.generate(self);
- const shdr = &self.shdrs.items[shndx];
+ const shdr = &self.sections.items(.shdr)[shndx];
shdr.sh_info = @as(u32, @intCast(self.verneed.verneed.items.len));
}
}
@@ -3563,16 +3236,15 @@ fn sortPhdrs(self: *Elf) error{OutOfMemory}!void {
}
}
- {
- var it = self.phdr_to_shdr_table.iterator();
- while (it.next()) |entry| {
- entry.value_ptr.* = backlinks[entry.value_ptr.*];
+ for (self.sections.items(.phndx)) |*maybe_phndx| {
+ if (maybe_phndx.*) |*index| {
+ index.* = backlinks[index.*];
}
}
}
fn shdrRank(self: *Elf, shndx: u32) u8 {
- const shdr = self.shdrs.items[shndx];
+ const shdr = self.sections.items(.shdr)[shndx];
const name = self.getShString(shdr.sh_name);
const flags = shdr.sh_flags;
@@ -3629,9 +3301,9 @@ pub fn sortShdrs(self: *Elf) !void {
};
const gpa = self.base.comp.gpa;
- var entries = try std.ArrayList(Entry).initCapacity(gpa, self.shdrs.items.len);
+ var entries = try std.ArrayList(Entry).initCapacity(gpa, self.sections.items(.shdr).len);
defer entries.deinit();
- for (0..self.shdrs.items.len) |shndx| {
+ for (0..self.sections.items(.shdr).len) |shndx| {
entries.appendAssumeCapacity(.{ .shndx = @intCast(shndx) });
}
@@ -3643,20 +3315,18 @@ pub fn sortShdrs(self: *Elf) !void {
backlinks[entry.shndx] = @intCast(i);
}
- const slice = try self.shdrs.toOwnedSlice(gpa);
- defer gpa.free(slice);
+ var slice = self.sections.toOwnedSlice();
+ defer slice.deinit(gpa);
- try self.shdrs.ensureTotalCapacityPrecise(gpa, slice.len);
+ try self.sections.ensureTotalCapacity(gpa, slice.len);
for (entries.items) |sorted| {
- self.shdrs.appendAssumeCapacity(slice[sorted.shndx]);
+ self.sections.appendAssumeCapacity(slice.get(sorted.shndx));
}
- try self.resetShdrIndexes(backlinks);
+ self.resetShdrIndexes(backlinks);
}
-fn resetShdrIndexes(self: *Elf, backlinks: []const u32) !void {
- const gpa = self.base.comp.gpa;
-
+fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void {
for (&[_]*?u32{
&self.eh_frame_section_index,
&self.eh_frame_rela_section_index,
@@ -3697,142 +3367,88 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) !void {
}
}
+ for (self.merge_sections.items) |*msec| {
+ msec.output_section_index = backlinks[msec.output_section_index];
+ }
+
+ for (self.sections.items(.shdr)) |*shdr| {
+ if (shdr.sh_type != elf.SHT_RELA) continue;
+ // FIXME:JK we should spin up .symtab potentially earlier, or set all non-dynamic RELA sections
+ // to point at symtab
+ // shdr.sh_link = backlinks[shdr.sh_link];
+ shdr.sh_link = self.symtab_section_index.?;
+ shdr.sh_info = backlinks[shdr.sh_info];
+ }
+
+ if (self.zigObjectPtr()) |zo| {
+ for (zo.atoms_indexes.items) |atom_index| {
+ const atom_ptr = zo.atom(atom_index) orelse continue;
+ atom_ptr.output_section_index = backlinks[atom_ptr.output_section_index];
+ }
+ if (zo.dwarf) |*dwarf| dwarf.reloadSectionMetadata();
+ }
+
+ for (self.comdat_group_sections.items) |*cg| {
+ cg.shndx = backlinks[cg.shndx];
+ }
+
if (self.symtab_section_index) |index| {
- const shdr = &self.shdrs.items[index];
+ const shdr = &self.sections.items(.shdr)[index];
shdr.sh_link = self.strtab_section_index.?;
}
if (self.dynamic_section_index) |index| {
- const shdr = &self.shdrs.items[index];
+ const shdr = &self.sections.items(.shdr)[index];
shdr.sh_link = self.dynstrtab_section_index.?;
}
if (self.dynsymtab_section_index) |index| {
- const shdr = &self.shdrs.items[index];
+ const shdr = &self.sections.items(.shdr)[index];
shdr.sh_link = self.dynstrtab_section_index.?;
}
if (self.hash_section_index) |index| {
- const shdr = &self.shdrs.items[index];
+ const shdr = &self.sections.items(.shdr)[index];
shdr.sh_link = self.dynsymtab_section_index.?;
}
if (self.gnu_hash_section_index) |index| {
- const shdr = &self.shdrs.items[index];
+ const shdr = &self.sections.items(.shdr)[index];
shdr.sh_link = self.dynsymtab_section_index.?;
}
if (self.versym_section_index) |index| {
- const shdr = &self.shdrs.items[index];
+ const shdr = &self.sections.items(.shdr)[index];
shdr.sh_link = self.dynsymtab_section_index.?;
}
if (self.verneed_section_index) |index| {
- const shdr = &self.shdrs.items[index];
+ const shdr = &self.sections.items(.shdr)[index];
shdr.sh_link = self.dynstrtab_section_index.?;
}
if (self.rela_dyn_section_index) |index| {
- const shdr = &self.shdrs.items[index];
+ const shdr = &self.sections.items(.shdr)[index];
shdr.sh_link = self.dynsymtab_section_index orelse 0;
}
if (self.rela_plt_section_index) |index| {
- const shdr = &self.shdrs.items[index];
+ const shdr = &self.sections.items(.shdr)[index];
shdr.sh_link = self.dynsymtab_section_index.?;
shdr.sh_info = self.plt_section_index.?;
}
if (self.eh_frame_rela_section_index) |index| {
- const shdr = &self.shdrs.items[index];
+ const shdr = &self.sections.items(.shdr)[index];
shdr.sh_link = self.symtab_section_index.?;
shdr.sh_info = self.eh_frame_section_index.?;
}
-
- {
- var output_sections = try self.output_sections.clone(gpa);
- defer output_sections.deinit(gpa);
-
- self.output_sections.clearRetainingCapacity();
-
- var it = output_sections.iterator();
- while (it.next()) |entry| {
- const shndx = entry.key_ptr.*;
- const meta = entry.value_ptr.*;
- self.output_sections.putAssumeCapacityNoClobber(backlinks[shndx], meta);
- }
- }
-
- for (self.merge_sections.items) |*msec| {
- msec.output_section_index = backlinks[msec.output_section_index];
- }
-
- {
- var output_rela_sections = try self.output_rela_sections.clone(gpa);
- defer output_rela_sections.deinit(gpa);
-
- self.output_rela_sections.clearRetainingCapacity();
-
- var it = output_rela_sections.iterator();
- while (it.next()) |entry| {
- const shndx = entry.key_ptr.*;
- var meta = entry.value_ptr.*;
- meta.shndx = backlinks[meta.shndx];
- self.output_rela_sections.putAssumeCapacityNoClobber(backlinks[shndx], meta);
- }
- }
-
- {
- var last_atom_and_free_list_table = try self.last_atom_and_free_list_table.clone(gpa);
- defer last_atom_and_free_list_table.deinit(gpa);
-
- self.last_atom_and_free_list_table.clearRetainingCapacity();
-
- var it = last_atom_and_free_list_table.iterator();
- while (it.next()) |entry| {
- const shndx = entry.key_ptr.*;
- const meta = entry.value_ptr.*;
- self.last_atom_and_free_list_table.putAssumeCapacityNoClobber(backlinks[shndx], meta);
- }
- }
-
- {
- var phdr_to_shdr_table = try self.phdr_to_shdr_table.clone(gpa);
- defer phdr_to_shdr_table.deinit(gpa);
-
- self.phdr_to_shdr_table.clearRetainingCapacity();
-
- var it = phdr_to_shdr_table.iterator();
- while (it.next()) |entry| {
- const shndx = entry.key_ptr.*;
- const phndx = entry.value_ptr.*;
- self.phdr_to_shdr_table.putAssumeCapacityNoClobber(backlinks[shndx], phndx);
- }
- }
-
- if (self.zigObjectPtr()) |zo| {
- for (zo.atoms_indexes.items) |atom_index| {
- const atom_ptr = zo.atom(atom_index) orelse continue;
- atom_ptr.output_section_index = backlinks[atom_ptr.output_section_index];
- }
- if (zo.dwarf) |*dwarf| dwarf.reloadSectionMetadata();
- }
-
- for (self.output_rela_sections.keys(), self.output_rela_sections.values()) |shndx, sec| {
- const shdr = &self.shdrs.items[sec.shndx];
- shdr.sh_link = self.symtab_section_index.?;
- shdr.sh_info = shndx;
- }
-
- for (self.comdat_group_sections.items) |*cg| {
- cg.shndx = backlinks[cg.shndx];
- }
}
fn updateSectionSizes(self: *Elf) !void {
const target = self.base.comp.root_mod.resolved_target.result;
- for (self.output_sections.keys(), self.output_sections.values()) |shndx, atom_list| {
- const shdr = &self.shdrs.items[shndx];
+ const slice = self.sections.slice();
+ for (slice.items(.shdr), slice.items(.atom_list)) |*shdr, atom_list| {
if (atom_list.items.len == 0) continue;
if (self.requiresThunks() and shdr.sh_flags & elf.SHF_EXECINSTR != 0) continue;
for (atom_list.items) |ref| {
@@ -3847,38 +3463,38 @@ fn updateSectionSizes(self: *Elf) !void {
}
if (self.requiresThunks()) {
- for (self.output_sections.keys(), self.output_sections.values()) |shndx, atom_list| {
- const shdr = self.shdrs.items[shndx];
+ for (slice.items(.shdr), slice.items(.atom_list), 0..) |*shdr, atom_list, shndx| {
if (shdr.sh_flags & elf.SHF_EXECINSTR == 0) continue;
if (atom_list.items.len == 0) continue;
// Create jump/branch range extenders if needed.
- try thunks.createThunks(shndx, self);
+ try thunks.createThunks(shdr, @intCast(shndx), self);
}
}
+ const shdrs = slice.items(.shdr);
if (self.eh_frame_section_index) |index| {
- self.shdrs.items[index].sh_size = try eh_frame.calcEhFrameSize(self);
+ shdrs[index].sh_size = try eh_frame.calcEhFrameSize(self);
}
if (self.eh_frame_hdr_section_index) |index| {
- self.shdrs.items[index].sh_size = eh_frame.calcEhFrameHdrSize(self);
+ shdrs[index].sh_size = eh_frame.calcEhFrameHdrSize(self);
}
if (self.got_section_index) |index| {
- self.shdrs.items[index].sh_size = self.got.size(self);
+ shdrs[index].sh_size = self.got.size(self);
}
if (self.plt_section_index) |index| {
- self.shdrs.items[index].sh_size = self.plt.size(self);
+ shdrs[index].sh_size = self.plt.size(self);
}
if (self.got_plt_section_index) |index| {
- self.shdrs.items[index].sh_size = self.got_plt.size(self);
+ shdrs[index].sh_size = self.got_plt.size(self);
}
if (self.plt_got_section_index) |index| {
- self.shdrs.items[index].sh_size = self.plt_got.size(self);
+ shdrs[index].sh_size = self.plt_got.size(self);
}
if (self.rela_dyn_section_index) |shndx| {
@@ -3889,11 +3505,11 @@ fn updateSectionSizes(self: *Elf) !void {
for (self.objects.items) |index| {
num += self.file(index).?.object.num_dynrelocs;
}
- self.shdrs.items[shndx].sh_size = num * @sizeOf(elf.Elf64_Rela);
+ shdrs[shndx].sh_size = num * @sizeOf(elf.Elf64_Rela);
}
if (self.rela_plt_section_index) |index| {
- self.shdrs.items[index].sh_size = self.plt.numRela() * @sizeOf(elf.Elf64_Rela);
+ shdrs[index].sh_size = self.plt.numRela() * @sizeOf(elf.Elf64_Rela);
}
if (self.copy_rel_section_index) |index| {
@@ -3901,44 +3517,45 @@ fn updateSectionSizes(self: *Elf) !void {
}
if (self.interp_section_index) |index| {
- self.shdrs.items[index].sh_size = target.dynamic_linker.get().?.len + 1;
+ shdrs[index].sh_size = target.dynamic_linker.get().?.len + 1;
}
if (self.hash_section_index) |index| {
- self.shdrs.items[index].sh_size = self.hash.size();
+ shdrs[index].sh_size = self.hash.size();
}
if (self.gnu_hash_section_index) |index| {
- self.shdrs.items[index].sh_size = self.gnu_hash.size();
+ shdrs[index].sh_size = self.gnu_hash.size();
}
if (self.dynamic_section_index) |index| {
- self.shdrs.items[index].sh_size = self.dynamic.size(self);
+ shdrs[index].sh_size = self.dynamic.size(self);
}
if (self.dynsymtab_section_index) |index| {
- self.shdrs.items[index].sh_size = self.dynsym.size();
+ shdrs[index].sh_size = self.dynsym.size();
}
if (self.dynstrtab_section_index) |index| {
- self.shdrs.items[index].sh_size = self.dynstrtab.items.len;
+ shdrs[index].sh_size = self.dynstrtab.items.len;
}
if (self.versym_section_index) |index| {
- self.shdrs.items[index].sh_size = self.versym.items.len * @sizeOf(elf.Elf64_Versym);
+ shdrs[index].sh_size = self.versym.items.len * @sizeOf(elf.Elf64_Versym);
}
if (self.verneed_section_index) |index| {
- self.shdrs.items[index].sh_size = self.verneed.size();
+ shdrs[index].sh_size = self.verneed.size();
}
try self.updateSymtabSize();
self.updateShStrtabSize();
}
+// FIXME:JK this is very much obsolete, remove!
pub fn updateShStrtabSize(self: *Elf) void {
if (self.shstrtab_section_index) |index| {
- self.shdrs.items[index].sh_size = self.shstrtab.items.len;
+ self.sections.items(.shdr)[index].sh_size = self.shstrtab.items.len;
}
}
@@ -3971,7 +3588,7 @@ fn getMaxNumberOfPhdrs() u64 {
/// We permit a maximum of 3**2 number of segments.
fn calcNumberOfSegments(self: *Elf) usize {
var covers: [9]bool = [_]bool{false} ** 9;
- for (self.shdrs.items, 0..) |shdr, shndx| {
+ for (self.sections.items(.shdr), 0..) |shdr, shndx| {
if (shdr.sh_type == elf.SHT_NULL) continue;
if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
if (self.isZigSection(@intCast(shndx))) continue;
@@ -4042,8 +3659,9 @@ pub fn allocateAllocSections(self: *Elf) !void {
}
};
+ const slice = self.sections.slice();
var alignment = Align{};
- for (self.shdrs.items, 0..) |shdr, i| {
+ for (slice.items(.shdr), 0..) |shdr, i| {
if (shdr.sh_type == elf.SHT_NULL) continue;
if (shdr.sh_flags & elf.SHF_TLS == 0) continue;
if (alignment.first_tls_index == null) alignment.first_tls_index = i;
@@ -4068,7 +3686,7 @@ pub fn allocateAllocSections(self: *Elf) !void {
cover.deinit();
};
- for (self.shdrs.items, 0..) |shdr, shndx| {
+ for (slice.items(.shdr), 0..) |shdr, shndx| {
if (shdr.sh_type == elf.SHT_NULL) continue;
if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
if (self.isZigSection(@intCast(shndx))) continue;
@@ -4089,7 +3707,7 @@ pub fn allocateAllocSections(self: *Elf) !void {
var @"align": u64 = self.page_size;
for (cover.items) |shndx| {
- const shdr = self.shdrs.items[shndx];
+ const shdr = slice.items(.shdr)[shndx];
if (shdr.sh_type == elf.SHT_NOBITS and shdr.sh_flags & elf.SHF_TLS != 0) continue;
@"align" = @max(@"align", shdr.sh_addralign);
}
@@ -4101,7 +3719,7 @@ pub fn allocateAllocSections(self: *Elf) !void {
var i: usize = 0;
while (i < cover.items.len) : (i += 1) {
const shndx = cover.items[i];
- const shdr = &self.shdrs.items[shndx];
+ const shdr = &slice.items(.shdr)[shndx];
if (shdr.sh_type == elf.SHT_NOBITS and shdr.sh_flags & elf.SHF_TLS != 0) {
// .tbss is a little special as it's used only by the loader meaning it doesn't
// need to be actually mmap'ed at runtime. We still need to correctly increment
@@ -4117,11 +3735,11 @@ pub fn allocateAllocSections(self: *Elf) !void {
// ...
var tbss_addr = addr;
while (i < cover.items.len and
- self.shdrs.items[cover.items[i]].sh_type == elf.SHT_NOBITS and
- self.shdrs.items[cover.items[i]].sh_flags & elf.SHF_TLS != 0) : (i += 1)
+ slice.items(.shdr)[cover.items[i]].sh_type == elf.SHT_NOBITS and
+ slice.items(.shdr)[cover.items[i]].sh_flags & elf.SHF_TLS != 0) : (i += 1)
{
const tbss_shndx = cover.items[i];
- const tbss_shdr = &self.shdrs.items[tbss_shndx];
+ const tbss_shdr = &slice.items(.shdr)[tbss_shndx];
tbss_addr = alignment.@"align"(tbss_shndx, tbss_shdr.sh_addralign, tbss_addr);
tbss_shdr.sh_addr = tbss_addr;
tbss_addr += tbss_shdr.sh_size;
@@ -4140,7 +3758,7 @@ pub fn allocateAllocSections(self: *Elf) !void {
addr += shdr.sh_size;
}
- const first = self.shdrs.items[cover.items[0]];
+ const first = slice.items(.shdr)[cover.items[0]];
var off = try self.findFreeSpace(filesz, @"align");
const phndx = try self.addPhdr(.{
.type = elf.PT_LOAD,
@@ -4153,7 +3771,8 @@ pub fn allocateAllocSections(self: *Elf) !void {
});
for (cover.items) |shndx| {
- const shdr = &self.shdrs.items[shndx];
+ const shdr = &slice.items(.shdr)[shndx];
+ slice.items(.phndx)[shndx] = phndx;
if (shdr.sh_type == elf.SHT_NOBITS) {
shdr.sh_offset = 0;
continue;
@@ -4161,7 +3780,6 @@ pub fn allocateAllocSections(self: *Elf) !void {
off = alignment.@"align"(shndx, shdr.sh_addralign, off);
shdr.sh_offset = off;
off += shdr.sh_size;
- try self.phdr_to_shdr_table.putNoClobber(gpa, shndx, phndx);
}
addr = mem.alignForward(u64, addr, self.page_size);
@@ -4170,7 +3788,7 @@ pub fn allocateAllocSections(self: *Elf) !void {
/// Allocates non-alloc sections (debug info, symtabs, etc.).
pub fn allocateNonAllocSections(self: *Elf) !void {
- for (self.shdrs.items, 0..) |*shdr, shndx| {
+ for (self.sections.items(.shdr), 0..) |*shdr, shndx| {
if (shdr.sh_type == elf.SHT_NULL) continue;
if (shdr.sh_flags & elf.SHF_ALLOC != 0) continue;
const needed_size = shdr.sh_size;
@@ -4215,13 +3833,15 @@ pub fn allocateNonAllocSections(self: *Elf) !void {
}
fn allocateSpecialPhdrs(self: *Elf) void {
+ const slice = self.sections.slice();
+
for (&[_]struct { ?u16, ?u32 }{
.{ self.phdr_interp_index, self.interp_section_index },
.{ self.phdr_dynamic_index, self.dynamic_section_index },
.{ self.phdr_gnu_eh_frame_index, self.eh_frame_hdr_section_index },
}) |pair| {
if (pair[0]) |index| {
- const shdr = self.shdrs.items[pair[1].?];
+ const shdr = slice.items(.shdr)[pair[1].?];
const phdr = &self.phdrs.items[index];
phdr.p_align = shdr.sh_addralign;
phdr.p_offset = shdr.sh_offset;
@@ -4236,11 +3856,11 @@ fn allocateSpecialPhdrs(self: *Elf) void {
// We assume TLS sections are laid out contiguously and that there is
// a single TLS segment.
if (self.phdr_tls_index) |index| {
- const slice = self.shdrs.items;
+ const shdrs = slice.items(.shdr);
const phdr = &self.phdrs.items[index];
var shndx: u32 = 0;
- while (shndx < slice.len) {
- const shdr = slice[shndx];
+ while (shndx < shdrs.len) {
+ const shdr = shdrs[shndx];
if (shdr.sh_flags & elf.SHF_TLS == 0) {
shndx += 1;
continue;
@@ -4256,8 +3876,8 @@ fn allocateSpecialPhdrs(self: *Elf) void {
}
phdr.p_memsz = shdr.sh_addr + shdr.sh_size - phdr.p_vaddr;
- while (shndx < slice.len) : (shndx += 1) {
- const next = slice[shndx];
+ while (shndx < shdrs.len) : (shndx += 1) {
+ const next = shdrs[shndx];
if (next.sh_flags & elf.SHF_TLS == 0) break;
phdr.p_align = @max(phdr.p_align, next.sh_addralign);
if (next.sh_type != elf.SHT_NOBITS) {
@@ -4281,13 +3901,10 @@ fn writeAtoms(self: *Elf) !void {
}
var has_reloc_errors = false;
-
- // TODO iterate over `output_sections` directly
- for (self.shdrs.items, 0..) |shdr, shndx| {
+ const slice = self.sections.slice();
+ for (slice.items(.shdr), slice.items(.atom_list), 0..) |shdr, atom_list, shndx| {
if (shdr.sh_type == elf.SHT_NULL) continue;
if (shdr.sh_type == elf.SHT_NOBITS) continue;
-
- const atom_list = self.output_sections.get(@intCast(shndx)) orelse continue;
if (atom_list.items.len == 0) continue;
log.debug("writing atoms in '{s}' section", .{self.getShString(shdr.sh_name)});
@@ -4366,7 +3983,7 @@ fn writeAtoms(self: *Elf) !void {
for (self.thunks.items) |th| {
const thunk_size = th.size(self);
try buffer.ensureUnusedCapacity(thunk_size);
- const shdr = self.shdrs.items[th.output_section_index];
+ const shdr = slice.items(.shdr)[th.output_section_index];
const offset = @as(u64, @intCast(th.value)) + shdr.sh_offset;
try th.write(self, buffer.writer());
assert(buffer.items.len == thunk_size);
@@ -4396,15 +4013,10 @@ pub fn updateSymtabSize(self: *Elf) !void {
if (self.linker_defined_index) |index| files.appendAssumeCapacity(index);
// Section symbols
- for (self.output_sections.keys()) |_| {
- nlocals += 1;
- }
- if (self.eh_frame_section_index) |_| {
- nlocals += 1;
- }
+ nlocals += @intCast(self.sections.slice().len);
if (self.requiresThunks()) for (self.thunks.items) |*th| {
- th.output_symtab_ctx.ilocal = nlocals + 1;
+ th.output_symtab_ctx.ilocal = nlocals;
th.calcSymtabSize(self);
nlocals += th.output_symtab_ctx.nlocals;
strsize += th.output_symtab_ctx.strsize;
@@ -4415,8 +4027,8 @@ pub fn updateSymtabSize(self: *Elf) !void {
const ctx = switch (file_ptr) {
inline else => |x| &x.output_symtab_ctx,
};
- ctx.ilocal = nlocals + 1;
- ctx.iglobal = nglobals + 1;
+ ctx.ilocal = nlocals;
+ ctx.iglobal = nglobals;
try file_ptr.updateSymtabSize(self);
nlocals += ctx.nlocals;
nglobals += ctx.nglobals;
@@ -4424,21 +4036,21 @@ pub fn updateSymtabSize(self: *Elf) !void {
}
if (self.got_section_index) |_| {
- self.got.output_symtab_ctx.ilocal = nlocals + 1;
+ self.got.output_symtab_ctx.ilocal = nlocals;
self.got.updateSymtabSize(self);
nlocals += self.got.output_symtab_ctx.nlocals;
strsize += self.got.output_symtab_ctx.strsize;
}
if (self.plt_section_index) |_| {
- self.plt.output_symtab_ctx.ilocal = nlocals + 1;
+ self.plt.output_symtab_ctx.ilocal = nlocals;
self.plt.updateSymtabSize(self);
nlocals += self.plt.output_symtab_ctx.nlocals;
strsize += self.plt.output_symtab_ctx.strsize;
}
if (self.plt_got_section_index) |_| {
- self.plt_got.output_symtab_ctx.ilocal = nlocals + 1;
+ self.plt_got.output_symtab_ctx.ilocal = nlocals;
self.plt_got.updateSymtabSize(self);
nlocals += self.plt_got.output_symtab_ctx.nlocals;
strsize += self.plt_got.output_symtab_ctx.strsize;
@@ -4452,24 +4064,26 @@ pub fn updateSymtabSize(self: *Elf) !void {
ctx.iglobal += nlocals;
}
- const symtab_shdr = &self.shdrs.items[self.symtab_section_index.?];
- symtab_shdr.sh_info = nlocals + 1;
+ const slice = self.sections.slice();
+ const symtab_shdr = &slice.items(.shdr)[self.symtab_section_index.?];
+ symtab_shdr.sh_info = nlocals;
symtab_shdr.sh_link = self.strtab_section_index.?;
const sym_size: u64 = switch (self.ptr_width) {
.p32 => @sizeOf(elf.Elf32_Sym),
.p64 => @sizeOf(elf.Elf64_Sym),
};
- const needed_size = (nlocals + nglobals + 1) * sym_size;
+ const needed_size = (nlocals + nglobals) * sym_size;
symtab_shdr.sh_size = needed_size;
- const strtab = &self.shdrs.items[self.strtab_section_index.?];
+ const strtab = &slice.items(.shdr)[self.strtab_section_index.?];
strtab.sh_size = strsize + 1;
}
fn writeSyntheticSections(self: *Elf) !void {
- const target = self.base.comp.root_mod.resolved_target.result;
const gpa = self.base.comp.gpa;
+ const target = self.getTarget();
+ const slice = self.sections.slice();
if (self.interp_section_index) |shndx| {
var buffer: [256]u8 = undefined;
@@ -4477,18 +4091,18 @@ fn writeSyntheticSections(self: *Elf) !void {
@memcpy(buffer[0..interp.len], interp);
buffer[interp.len] = 0;
const contents = buffer[0 .. interp.len + 1];
- const shdr = self.shdrs.items[shndx];
+ const shdr = slice.items(.shdr)[shndx];
assert(shdr.sh_size == contents.len);
try self.base.file.?.pwriteAll(contents, shdr.sh_offset);
}
if (self.hash_section_index) |shndx| {
- const shdr = self.shdrs.items[shndx];
+ const shdr = slice.items(.shdr)[shndx];
try self.base.file.?.pwriteAll(self.hash.buffer.items, shdr.sh_offset);
}
if (self.gnu_hash_section_index) |shndx| {
- const shdr = self.shdrs.items[shndx];
+ const shdr = slice.items(.shdr)[shndx];
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.gnu_hash.size());
defer buffer.deinit();
try self.gnu_hash.write(self, buffer.writer());
@@ -4496,12 +4110,12 @@ fn writeSyntheticSections(self: *Elf) !void {
}
if (self.versym_section_index) |shndx| {
- const shdr = self.shdrs.items[shndx];
+ const shdr = slice.items(.shdr)[shndx];
try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.versym.items), shdr.sh_offset);
}
if (self.verneed_section_index) |shndx| {
- const shdr = self.shdrs.items[shndx];
+ const shdr = slice.items(.shdr)[shndx];
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.verneed.size());
defer buffer.deinit();
try self.verneed.write(buffer.writer());
@@ -4509,7 +4123,7 @@ fn writeSyntheticSections(self: *Elf) !void {
}
if (self.dynamic_section_index) |shndx| {
- const shdr = self.shdrs.items[shndx];
+ const shdr = slice.items(.shdr)[shndx];
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.dynamic.size(self));
defer buffer.deinit();
try self.dynamic.write(self, buffer.writer());
@@ -4517,7 +4131,7 @@ fn writeSyntheticSections(self: *Elf) !void {
}
if (self.dynsymtab_section_index) |shndx| {
- const shdr = self.shdrs.items[shndx];
+ const shdr = slice.items(.shdr)[shndx];
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.dynsym.size());
defer buffer.deinit();
try self.dynsym.write(self, buffer.writer());
@@ -4525,12 +4139,12 @@ fn writeSyntheticSections(self: *Elf) !void {
}
if (self.dynstrtab_section_index) |shndx| {
- const shdr = self.shdrs.items[shndx];
+ const shdr = slice.items(.shdr)[shndx];
try self.base.file.?.pwriteAll(self.dynstrtab.items, shdr.sh_offset);
}
if (self.eh_frame_section_index) |shndx| {
- const shdr = self.shdrs.items[shndx];
+ const shdr = slice.items(.shdr)[shndx];
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
var buffer = try std.ArrayList(u8).initCapacity(gpa, sh_size);
defer buffer.deinit();
@@ -4539,7 +4153,7 @@ fn writeSyntheticSections(self: *Elf) !void {
}
if (self.eh_frame_hdr_section_index) |shndx| {
- const shdr = self.shdrs.items[shndx];
+ const shdr = slice.items(.shdr)[shndx];
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
var buffer = try std.ArrayList(u8).initCapacity(gpa, sh_size);
defer buffer.deinit();
@@ -4548,7 +4162,7 @@ fn writeSyntheticSections(self: *Elf) !void {
}
if (self.got_section_index) |index| {
- const shdr = self.shdrs.items[index];
+ const shdr = slice.items(.shdr)[index];
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.got.size(self));
defer buffer.deinit();
try self.got.write(self, buffer.writer());
@@ -4556,7 +4170,7 @@ fn writeSyntheticSections(self: *Elf) !void {
}
if (self.rela_dyn_section_index) |shndx| {
- const shdr = self.shdrs.items[shndx];
+ const shdr = slice.items(.shdr)[shndx];
try self.got.addRela(self);
try self.copy_rel.addRela(self);
self.sortRelaDyn();
@@ -4564,7 +4178,7 @@ fn writeSyntheticSections(self: *Elf) !void {
}
if (self.plt_section_index) |shndx| {
- const shdr = self.shdrs.items[shndx];
+ const shdr = slice.items(.shdr)[shndx];
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.plt.size(self));
defer buffer.deinit();
try self.plt.write(self, buffer.writer());
@@ -4572,7 +4186,7 @@ fn writeSyntheticSections(self: *Elf) !void {
}
if (self.got_plt_section_index) |shndx| {
- const shdr = self.shdrs.items[shndx];
+ const shdr = slice.items(.shdr)[shndx];
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.got_plt.size(self));
defer buffer.deinit();
try self.got_plt.write(self, buffer.writer());
@@ -4580,7 +4194,7 @@ fn writeSyntheticSections(self: *Elf) !void {
}
if (self.plt_got_section_index) |shndx| {
- const shdr = self.shdrs.items[shndx];
+ const shdr = slice.items(.shdr)[shndx];
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.plt_got.size(self));
defer buffer.deinit();
try self.plt_got.write(self, buffer.writer());
@@ -4588,7 +4202,7 @@ fn writeSyntheticSections(self: *Elf) !void {
}
if (self.rela_plt_section_index) |shndx| {
- const shdr = self.shdrs.items[shndx];
+ const shdr = slice.items(.shdr)[shndx];
try self.plt.addRela(self);
try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.rela_plt.items), shdr.sh_offset);
}
@@ -4597,19 +4211,21 @@ fn writeSyntheticSections(self: *Elf) !void {
try self.writeShStrtab();
}
+// FIXME:JK again, why is this needed?
pub fn writeShStrtab(self: *Elf) !void {
if (self.shstrtab_section_index) |index| {
- const shdr = self.shdrs.items[index];
+ const shdr = self.sections.items(.shdr)[index];
log.debug("writing .shstrtab from 0x{x} to 0x{x}", .{ shdr.sh_offset, shdr.sh_offset + shdr.sh_size });
try self.base.file.?.pwriteAll(self.shstrtab.items, shdr.sh_offset);
}
}
pub fn writeSymtab(self: *Elf) !void {
- const target = self.base.comp.root_mod.resolved_target.result;
const gpa = self.base.comp.gpa;
- const symtab_shdr = self.shdrs.items[self.symtab_section_index.?];
- const strtab_shdr = self.shdrs.items[self.strtab_section_index.?];
+ const target = self.getTarget();
+ const slice = self.sections.slice();
+ const symtab_shdr = slice.items(.shdr)[self.symtab_section_index.?];
+ const strtab_shdr = slice.items(.shdr)[self.strtab_section_index.?];
const sym_size: u64 = switch (self.ptr_width) {
.p32 => @sizeOf(elf.Elf32_Sym),
.p64 => @sizeOf(elf.Elf64_Sym),
@@ -4630,7 +4246,17 @@ pub fn writeSymtab(self: *Elf) !void {
const needed_strtab_size = math.cast(usize, strtab_shdr.sh_size - 1) orelse return error.Overflow;
try self.strtab.ensureUnusedCapacity(gpa, needed_strtab_size);
- self.writeSectionSymbols();
+ for (slice.items(.shdr), 0..) |shdr, shndx| {
+ const out_sym = &self.symtab.items[shndx];
+ out_sym.* = .{
+ .st_name = 0,
+ .st_value = shdr.sh_addr,
+ .st_info = if (shdr.sh_type == elf.SHT_NULL) elf.STT_NOTYPE else elf.STT_SECTION,
+ .st_shndx = @intCast(shndx),
+ .st_size = 0,
+ .st_other = 0,
+ };
+ }
if (self.requiresThunks()) for (self.thunks.items) |th| {
th.writeSymtab(self);
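
The symtab hunks above shift the index bookkeeping: each output section now owns the symtab slot matching its section index, and slot 0 (the SHT_NULL section, emitted as STT_NOTYPE) doubles as the mandatory null symbol, so the per-file `ilocal`/`iglobal` counters start at the running totals instead of `total + 1`. A tiny sketch of that layout with made-up counts (the numbers here are purely illustrative):

const std = @import("std");

test "symtab index layout with per-section symbols" {
    // Hypothetical counts, only to make the arithmetic concrete.
    const num_sections: u32 = 20; // includes the SHT_NULL section at index 0
    const first_file_nlocals: u32 = 7;

    // Section symbols occupy indices 0..num_sections-1; index 0 is the
    // SHT_NULL section and serves as the ELF null symbol.
    var nlocals: u32 = num_sections;

    // Per-file local symbols follow immediately, so a file's starting
    // index is simply the running count (previously `nlocals + 1`,
    // because only the null symbol was reserved up front).
    const ilocal = nlocals;
    nlocals += first_file_nlocals;

    try std.testing.expectEqual(@as(u32, 20), ilocal);
    try std.testing.expectEqual(@as(u32, 27), nlocals);
}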
@@ -4696,44 +4322,6 @@ pub fn writeSymtab(self: *Elf) !void {
try self.base.file.?.pwriteAll(self.strtab.items, strtab_shdr.sh_offset);
}
-fn writeSectionSymbols(self: *Elf) void {
- var ilocal: u32 = 1;
- for (self.output_sections.keys()) |shndx| {
- const shdr = self.shdrs.items[shndx];
- const out_sym = &self.symtab.items[ilocal];
- out_sym.* = .{
- .st_name = 0,
- .st_value = shdr.sh_addr,
- .st_info = elf.STT_SECTION,
- .st_shndx = @intCast(shndx),
- .st_size = 0,
- .st_other = 0,
- };
- ilocal += 1;
- }
-
- if (self.eh_frame_section_index) |shndx| {
- const shdr = self.shdrs.items[shndx];
- const out_sym = &self.symtab.items[ilocal];
- out_sym.* = .{
- .st_name = 0,
- .st_value = shdr.sh_addr,
- .st_info = elf.STT_SECTION,
- .st_shndx = @intCast(shndx),
- .st_size = 0,
- .st_other = 0,
- };
- ilocal += 1;
- }
-}
-
-pub fn sectionSymbolOutputSymtabIndex(self: Elf, shndx: u32) u32 {
- if (self.eh_frame_section_index) |index| {
- if (index == shndx) return @intCast(self.output_sections.keys().len + 1);
- }
- return @intCast(self.output_sections.getIndex(shndx).? + 1);
-}
-
/// Always 4 or 8 depending on whether this is 32-bit ELF or 64-bit ELF.
pub fn ptrWidthBytes(self: Elf) u8 {
return switch (self.ptr_width) {
@@ -4745,8 +4333,7 @@ pub fn ptrWidthBytes(self: Elf) u8 {
/// Does not necessarily match `ptrWidthBytes` for example can be 2 bytes
/// in a 32-bit ELF file.
pub fn archPtrWidthBytes(self: Elf) u8 {
- const target = self.base.comp.root_mod.resolved_target.result;
- return @intCast(@divExact(target.ptrBitWidth(), 8));
+ return @intCast(@divExact(self.getTarget().ptrBitWidth(), 8));
}
fn phdrTo32(phdr: elf.Elf64_Phdr) elf.Elf32_Phdr {
@@ -5069,7 +4656,7 @@ pub fn isDebugSection(self: Elf, shndx: u32) bool {
return false;
}
-fn addPhdr(self: *Elf, opts: struct {
+pub fn addPhdr(self: *Elf, opts: struct {
type: u32 = 0,
flags: u32 = 0,
@"align": u64 = 0,
@@ -5126,25 +4713,26 @@ pub const AddSectionOpts = struct {
pub fn addSection(self: *Elf, opts: AddSectionOpts) !u32 {
const gpa = self.base.comp.gpa;
- const index: u32 = @intCast(self.shdrs.items.len);
- const shdr = try self.shdrs.addOne(gpa);
- shdr.* = .{
- .sh_name = opts.name,
- .sh_type = opts.type,
- .sh_flags = opts.flags,
- .sh_addr = 0,
- .sh_offset = opts.offset,
- .sh_size = 0,
- .sh_link = opts.link,
- .sh_info = opts.info,
- .sh_addralign = opts.addralign,
- .sh_entsize = opts.entsize,
- };
+ const index: u32 = @intCast(try self.sections.addOne(gpa));
+ self.sections.set(index, .{
+ .shdr = .{
+ .sh_name = opts.name,
+ .sh_type = opts.type,
+ .sh_flags = opts.flags,
+ .sh_addr = 0,
+ .sh_offset = opts.offset,
+ .sh_size = 0,
+ .sh_link = opts.link,
+ .sh_info = opts.info,
+ .sh_addralign = opts.addralign,
+ .sh_entsize = opts.entsize,
+ },
+ });
return index;
}
pub fn sectionByName(self: *Elf, name: [:0]const u8) ?u32 {
- for (self.shdrs.items, 0..) |*shdr, i| {
+ for (self.sections.items(.shdr), 0..) |*shdr, i| {
const this_name = self.getShString(shdr.sh_name);
if (mem.eql(u8, this_name, name)) return @intCast(i);
} else return null;
@@ -5322,7 +4910,7 @@ pub fn gotAddress(self: *Elf) i64 {
break :blk self.got_plt_section_index.?;
break :blk if (self.got_section_index) |shndx| shndx else null;
};
- return if (shndx) |index| @intCast(self.shdrs.items[index].sh_addr) else 0;
+ return if (shndx) |index| @intCast(self.sections.items(.shdr)[index].sh_addr) else 0;
}
pub fn tpAddress(self: *Elf) i64 {
@@ -5676,16 +5264,16 @@ fn fmtDumpState(
}
try writer.writeAll("\nOutput shdrs\n");
- for (self.shdrs.items, 0..) |shdr, shndx| {
+ for (self.sections.items(.shdr), self.sections.items(.phndx), 0..) |shdr, phndx, shndx| {
try writer.print(" shdr({d}) : phdr({?d}) : {}\n", .{
shndx,
- self.phdr_to_shdr_table.get(@intCast(shndx)),
+ phndx,
self.fmtShdr(shdr),
});
}
try writer.writeAll("\nOutput phdrs\n");
for (self.phdrs.items, 0..) |phdr, phndx| {
- try writer.print(" phdr{d} : {}\n", .{ phndx, self.fmtPhdr(phdr) });
+ try writer.print(" phdr({d}) : {}\n", .{ phndx, self.fmtPhdr(phdr) });
}
}
@@ -5746,7 +5334,7 @@ fn requiresThunks(self: Elf) bool {
/// so that we reserve enough space for the program header table up-front.
/// Bump these numbers when adding or deleting a Zig specific pre-allocated segment, or adding
/// more special-purpose program headers.
-const number_of_zig_segments = 5;
+pub const number_of_zig_segments = 4;
const max_number_of_object_segments = 9;
const max_number_of_special_phdrs = 5;
@@ -5814,8 +5402,8 @@ pub const SystemLib = struct {
};
pub const Ref = struct {
- index: u32,
- file: u32,
+ index: u32 = 0,
+ file: u32 = 0,
pub fn eql(ref: Ref, other: Ref) bool {
return ref.index == other.index and ref.file == other.file;
@@ -5923,9 +5511,18 @@ pub const SymbolResolver = struct {
pub const Index = u32;
};
-const LastAtomAndFreeList = struct {
+const Section = struct {
+ /// Section header.
+ shdr: elf.Elf64_Shdr,
+
+ /// Assigned program header index if any.
+ phndx: ?u32 = null,
+
+ /// List of atoms contributing to this section.
+ atom_list: std.ArrayListUnmanaged(Ref) = .{},
+
/// Index of the last allocated atom in this section.
- last_atom_index: Atom.Index = 0,
+ last_atom: Ref = .{ .index = 0, .file = 0 },
/// A list of atoms that have surplus capacity. This list can have false
/// positives, as functions grow and shrink over time, only sometimes being added
@@ -5942,15 +5539,8 @@ const LastAtomAndFreeList = struct {
/// overcapacity can be negative. A simple way to have negative overcapacity is to
/// allocate a fresh text block, which will have ideal capacity, and then grow it
/// by 1 byte. It will then have -1 overcapacity.
- free_list: std.ArrayListUnmanaged(Atom.Index) = .{},
-};
-const LastAtomAndFreeListTable = std.AutoArrayHashMapUnmanaged(u32, LastAtomAndFreeList);
-
-const RelaSection = struct {
- shndx: u32,
- atom_list: std.ArrayListUnmanaged(Ref) = .{},
+ free_list: std.ArrayListUnmanaged(Ref) = .{},
};
-const RelaSectionTable = std.AutoArrayHashMapUnmanaged(u32, RelaSection);
fn defaultEntrySymbolName(cpu_arch: std.Target.Cpu.Arch) []const u8 {
return switch (cpu_arch) {
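Editorial note on the Elf.zig changes above: the parallel lookup tables (shdrs, phdr_to_shdr_table, output_sections, output_rela_sections, last_atom_and_free_list_table) collapse into the single `sections: std.MultiArrayList(Section)`, so every per-section fact is reached by indexing one column with the output section index. A minimal sketch of the resulting access patterns, assuming Elf.zig's existing imports (`std`, `elf`, `Elf`); the helper names are illustrative, not part of the patch:

    // Hypothetical helpers showing column access into the unified `sections` table.
    fn sectionVaddr(elf_file: *Elf, shndx: u32) u64 {
        // items(.shdr) yields a slice of only the section-header column.
        return elf_file.sections.items(.shdr)[shndx].sh_addr;
    }

    fn dumpSections(elf_file: *Elf) void {
        // slice() pins all columns at once so they stay mutually consistent.
        const slice = elf_file.sections.slice();
        for (slice.items(.shdr), slice.items(.phndx), 0..) |shdr, phndx, shndx| {
            std.log.debug("shdr({d}) : phdr({?d}) : size=0x{x}", .{ shndx, phndx, shdr.sh_size });
        }
    }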
diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig
index 3eb447ab75..5981b6ef2c 100644
--- a/src/link/Elf/Atom.zig
+++ b/src/link/Elf/Atom.zig
@@ -25,10 +25,9 @@ relocs_section_index: u32 = 0,
/// Index of this atom in the linker's atoms table.
atom_index: Index = 0,
-/// Points to the previous and next neighbors, based on the `text_offset`.
-/// This can be used to find, for example, the capacity of this `TextBlock`.
-prev_index: Index = 0,
-next_index: Index = 0,
+/// Points to the previous and next neighbors.
+prev_atom_ref: Elf.Ref = .{},
+next_atom_ref: Elf.Ref = .{},
/// Specifies whether this atom is alive or has been garbage collected.
alive: bool = true,
@@ -48,10 +47,22 @@ pub fn name(self: Atom, elf_file: *Elf) [:0]const u8 {
}
pub fn address(self: Atom, elf_file: *Elf) i64 {
- const shdr = elf_file.shdrs.items[self.output_section_index];
+ const shdr = elf_file.sections.items(.shdr)[self.output_section_index];
return @as(i64, @intCast(shdr.sh_addr)) + self.value;
}
+pub fn ref(self: Atom) Elf.Ref {
+ return .{ .index = self.atom_index, .file = self.file_index };
+}
+
+pub fn prevAtom(self: Atom, elf_file: *Elf) ?*Atom {
+ return elf_file.atom(self.prev_atom_ref);
+}
+
+pub fn nextAtom(self: Atom, elf_file: *Elf) ?*Atom {
+ return elf_file.atom(self.next_atom_ref);
+}
+
pub fn debugTombstoneValue(self: Atom, target: Symbol, elf_file: *Elf) ?u64 {
if (target.mergeSubsection(elf_file)) |msub| {
if (msub.alive) return null;
@@ -95,18 +106,16 @@ pub fn priority(self: Atom, elf_file: *Elf) u64 {
/// File offset relocation happens transparently, so it is not included in
/// this calculation.
pub fn capacity(self: Atom, elf_file: *Elf) u64 {
- const zo = elf_file.zigObjectPtr().?;
- const next_addr = if (zo.atom(self.next_index)) |next|
- next.address(elf_file)
+ const next_addr = if (self.nextAtom(elf_file)) |next_atom|
+ next_atom.address(elf_file)
else
std.math.maxInt(u32);
return @intCast(next_addr - self.address(elf_file));
}
pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
- const zo = elf_file.zigObjectPtr().?;
// No need to keep a free list node for the last block.
- const next = zo.atom(self.next_index) orelse return false;
+ const next = self.nextAtom(elf_file) orelse return false;
const cap: u64 = @intCast(next.address(elf_file) - self.address(elf_file));
const ideal_cap = Elf.padToIdeal(self.size);
if (cap <= ideal_cap) return false;
@@ -115,11 +124,10 @@ pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
}
pub fn allocate(self: *Atom, elf_file: *Elf) !void {
- const zo = elf_file.zigObjectPtr().?;
- const shdr = &elf_file.shdrs.items[self.output_section_index];
- const meta = elf_file.last_atom_and_free_list_table.getPtr(self.output_section_index).?;
- const free_list = &meta.free_list;
- const last_atom_index = &meta.last_atom_index;
+ const slice = elf_file.sections.slice();
+ const shdr = &slice.items(.shdr)[self.output_section_index];
+ const free_list = &slice.items(.free_list)[self.output_section_index];
+ const last_atom_ref = &slice.items(.last_atom)[self.output_section_index];
const new_atom_ideal_capacity = Elf.padToIdeal(self.size);
// We use these to indicate our intention to update metadata, placing the new atom,
@@ -127,7 +135,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
- var atom_placement: ?Atom.Index = null;
+ var atom_placement: ?Elf.Ref = null;
var free_list_removal: ?usize = null;
// First we look for an appropriately sized free list node.
@@ -135,8 +143,8 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
self.value = blk: {
var i: usize = if (elf_file.base.child_pid == null) 0 else free_list.items.len;
while (i < free_list.items.len) {
- const big_atom_index = free_list.items[i];
- const big_atom = zo.atom(big_atom_index).?;
+ const big_atom_ref = free_list.items[i];
+ const big_atom = elf_file.atom(big_atom_ref).?;
// We now have a pointer to a live atom that has too much capacity.
// Is it enough that we could fit this new atom?
const cap = big_atom.capacity(elf_file);
@@ -163,50 +171,52 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
const keep_free_list_node = remaining_capacity >= Elf.min_text_capacity;
// Set up the metadata to be updated, after errors are no longer possible.
- atom_placement = big_atom_index;
+ atom_placement = big_atom_ref;
if (!keep_free_list_node) {
free_list_removal = i;
}
break :blk @intCast(new_start_vaddr);
- } else if (zo.atom(last_atom_index.*)) |last| {
- const ideal_capacity = Elf.padToIdeal(last.size);
- const ideal_capacity_end_vaddr = @as(u64, @intCast(last.value)) + ideal_capacity;
+ } else if (elf_file.atom(last_atom_ref.*)) |last_atom| {
+ const ideal_capacity = Elf.padToIdeal(last_atom.size);
+ const ideal_capacity_end_vaddr = @as(u64, @intCast(last_atom.value)) + ideal_capacity;
const new_start_vaddr = self.alignment.forward(ideal_capacity_end_vaddr);
// Set up the metadata to be updated, after errors are no longer possible.
- atom_placement = last.atom_index;
+ atom_placement = last_atom.ref();
break :blk @intCast(new_start_vaddr);
} else {
break :blk 0;
}
};
- log.debug("allocated atom({d}) : '{s}' at 0x{x} to 0x{x}", .{
- self.atom_index,
+ log.debug("allocated atom({}) : '{s}' at 0x{x} to 0x{x}", .{
+ self.ref(),
self.name(elf_file),
self.address(elf_file),
self.address(elf_file) + @as(i64, @intCast(self.size)),
});
- const expand_section = if (atom_placement) |placement_index|
- zo.atom(placement_index).?.next_index == 0
+ const expand_section = if (atom_placement) |placement_ref|
+ elf_file.atom(placement_ref).?.nextAtom(elf_file) == null
else
true;
if (expand_section) {
const needed_size: u64 = @intCast(self.value + @as(i64, @intCast(self.size)));
try elf_file.growAllocSection(self.output_section_index, needed_size);
- last_atom_index.* = self.atom_index;
-
- const zig_object = elf_file.zigObjectPtr().?;
- if (zig_object.dwarf) |_| {
- // The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
- // range of the compilation unit. When we expand the text section, this range changes,
- // so the DW_TAG.compile_unit tag of the .debug_info section becomes dirty.
- zig_object.debug_info_section_dirty = true;
- // This becomes dirty for the same reason. We could potentially make this more
- // fine-grained with the addition of support for more compilation units. It is planned to
- // model each package as a different compilation unit.
- zig_object.debug_aranges_section_dirty = true;
- zig_object.debug_rnglists_section_dirty = true;
+ last_atom_ref.* = self.ref();
+
+ switch (self.file(elf_file).?) {
+ .zig_object => |zo| if (zo.dwarf) |_| {
+ // The .debug_info section has `low_pc` and `high_pc` values, which together give the virtual address
+ // range of the compilation unit. When we expand the text section, this range changes,
+ // so the DW_TAG.compile_unit tag of the .debug_info section becomes dirty.
+ zo.debug_info_section_dirty = true;
+ // This becomes dirty for the same reason. We could potentially make this more
+ // fine-grained with the addition of support for more compilation units. It is planned to
+ // model each package as a different compilation unit.
+ zo.debug_aranges_section_dirty = true;
+ zo.debug_rnglists_section_dirty = true;
+ },
+ else => {},
}
}
shdr.sh_addralign = @max(shdr.sh_addralign, self.alignment.toByteUnits().?);
@@ -214,21 +224,21 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
// This function can also reallocate an atom.
// In this case we need to "unplug" it from its previous location before
// plugging it in to its new location.
- if (zo.atom(self.prev_index)) |prev| {
- prev.next_index = self.next_index;
+ if (self.prevAtom(elf_file)) |prev| {
+ prev.next_atom_ref = self.next_atom_ref;
}
- if (zo.atom(self.next_index)) |next| {
- next.prev_index = self.prev_index;
+ if (self.nextAtom(elf_file)) |next| {
+ next.prev_atom_ref = self.prev_atom_ref;
}
- if (atom_placement) |big_atom_index| {
- const big_atom = zo.atom(big_atom_index).?;
- self.prev_index = big_atom_index;
- self.next_index = big_atom.next_index;
- big_atom.next_index = self.atom_index;
+ if (atom_placement) |big_atom_ref| {
+ const big_atom = elf_file.atom(big_atom_ref).?;
+ self.prev_atom_ref = big_atom_ref;
+ self.next_atom_ref = big_atom.next_atom_ref;
+ big_atom.next_atom_ref = self.ref();
} else {
- self.prev_index = 0;
- self.next_index = 0;
+ self.prev_atom_ref = .{ .index = 0, .file = 0 };
+ self.next_atom_ref = .{ .index = 0, .file = 0 };
}
if (free_list_removal) |i| {
_ = free_list.swapRemove(i);
@@ -248,64 +258,70 @@ pub fn grow(self: *Atom, elf_file: *Elf) !void {
}
pub fn free(self: *Atom, elf_file: *Elf) void {
- log.debug("freeAtom {d} ({s})", .{ self.atom_index, self.name(elf_file) });
+ log.debug("freeAtom atom({}) ({s})", .{ self.ref(), self.name(elf_file) });
- const zo = elf_file.zigObjectPtr().?;
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const shndx = self.output_section_index;
- const meta = elf_file.last_atom_and_free_list_table.getPtr(shndx).?;
- const free_list = &meta.free_list;
- const last_atom_index = &meta.last_atom_index;
+ const slice = elf_file.sections.slice();
+ const free_list = &slice.items(.free_list)[shndx];
+ const last_atom_ref = &slice.items(.last_atom)[shndx];
var already_have_free_list_node = false;
{
var i: usize = 0;
// TODO turn free_list into a hash map
while (i < free_list.items.len) {
- if (free_list.items[i] == self.atom_index) {
+ if (free_list.items[i].eql(self.ref())) {
_ = free_list.swapRemove(i);
continue;
}
- if (free_list.items[i] == self.prev_index) {
- already_have_free_list_node = true;
+ if (self.prevAtom(elf_file)) |prev_atom| {
+ if (free_list.items[i].eql(prev_atom.ref())) {
+ already_have_free_list_node = true;
+ }
}
i += 1;
}
}
- if (zo.atom(last_atom_index.*)) |last_atom| {
- if (last_atom.atom_index == self.atom_index) {
- if (zo.atom(self.prev_index)) |_| {
+ if (elf_file.atom(last_atom_ref.*)) |last_atom| {
+ if (last_atom.ref().eql(self.ref())) {
+ if (self.prevAtom(elf_file)) |prev_atom| {
// TODO shrink the section size here
- last_atom_index.* = self.prev_index;
+ last_atom_ref.* = prev_atom.ref();
} else {
- last_atom_index.* = 0;
+ last_atom_ref.* = .{};
}
}
}
- if (zo.atom(self.prev_index)) |prev| {
- prev.next_index = self.next_index;
- if (!already_have_free_list_node and prev.*.freeListEligible(elf_file)) {
+ if (self.prevAtom(elf_file)) |prev_atom| {
+ prev_atom.next_atom_ref = self.next_atom_ref;
+ if (!already_have_free_list_node and prev_atom.*.freeListEligible(elf_file)) {
// The free list is a heuristic; it doesn't have to be perfect, so we can
// ignore the OOM here.
- free_list.append(gpa, prev.atom_index) catch {};
+ free_list.append(gpa, prev_atom.ref()) catch {};
}
} else {
- self.prev_index = 0;
+ self.prev_atom_ref = .{};
}
- if (zo.atom(self.next_index)) |next| {
- next.prev_index = self.prev_index;
+ if (self.nextAtom(elf_file)) |next_atom| {
+ next_atom.prev_atom_ref = self.prev_atom_ref;
} else {
- self.next_index = 0;
+ self.next_atom_ref = .{};
}
- // TODO create relocs free list
- self.freeRelocs(zo);
- // TODO figure out how to free input section mappind in ZigModule
- // const zig_object = elf_file.zigObjectPtr().?
- // assert(zig_object.atoms.swapRemove(self.atom_index));
+ switch (self.file(elf_file).?) {
+ .zig_object => |zo| {
+ // TODO create relocs free list
+ self.freeRelocs(zo);
+ // TODO figure out how to free input section mapping in ZigModule
+ // const zig_object = elf_file.zigObjectPtr().?
+ // assert(zig_object.atoms.swapRemove(self.atom_index));
+ },
+ else => {},
+ }
self.* = .{};
}
@@ -336,10 +352,7 @@ pub fn writeRelocs(self: Atom, elf_file: *Elf, out_relocs: *std.ArrayList(elf.El
switch (target.type(elf_file)) {
elf.STT_SECTION => {
r_addend += @intCast(target.address(.{}, elf_file));
- r_sym = if (target.outputShndx(elf_file)) |osec|
- elf_file.sectionSymbolOutputSymtabIndex(osec)
- else
- 0;
+ r_sym = target.outputShndx(elf_file) orelse 0;
},
else => {
r_sym = target.outputSymtabIndex(elf_file) orelse 0;
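A note on the Atom.zig changes above: the neighbor links are now whole-file `Elf.Ref`s, and a zeroed ref (`.{}`) acts as the null sentinel, which is why `free` resets the links to `.{}` and the code checks `elf_file.atom(...)` for null. A hedged sketch of walking one output section's atoms from `last_atom` backwards; the walker itself is illustrative and assumes the file's existing imports:

    // Hypothetical traversal: visit the atoms of one output section, newest first.
    fn walkAtomsBackwards(elf_file: *Elf, shndx: u32) void {
        var it = elf_file.atom(elf_file.sections.items(.last_atom)[shndx]);
        while (it) |atom_ptr| {
            std.log.debug("atom({}) '{s}' at 0x{x}", .{
                atom_ptr.ref(), atom_ptr.name(elf_file), atom_ptr.address(elf_file),
            });
            it = atom_ptr.prevAtom(elf_file);
        }
    }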
diff --git a/src/link/Elf/LinkerDefined.zig b/src/link/Elf/LinkerDefined.zig
index 0a3a96d95e..a754539882 100644
--- a/src/link/Elf/LinkerDefined.zig
+++ b/src/link/Elf/LinkerDefined.zig
@@ -129,9 +129,10 @@ pub fn initSymbols(self: *LinkerDefined, elf_file: *Elf) !void {
pub fn initStartStopSymbols(self: *LinkerDefined, elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
+ const slice = elf_file.sections.slice();
var nsyms: usize = 0;
- for (elf_file.shdrs.items) |shdr| {
+ for (slice.items(.shdr)) |shdr| {
if (elf_file.getStartStopBasename(shdr)) |_| {
nsyms += 2; // __start_, __stop_
}
@@ -143,7 +144,7 @@ pub fn initStartStopSymbols(self: *LinkerDefined, elf_file: *Elf) !void {
try self.symbols_extra.ensureUnusedCapacity(gpa, nsyms * @sizeOf(Symbol.Extra));
try self.symbols_resolver.ensureUnusedCapacity(gpa, nsyms);
- for (elf_file.shdrs.items) |shdr| {
+ for (slice.items(.shdr)) |shdr| {
if (elf_file.getStartStopBasename(shdr)) |name| {
const start_name = try std.fmt.allocPrintZ(gpa, "__start_{s}", .{name});
defer gpa.free(start_name);
@@ -193,6 +194,7 @@ pub fn resolveSymbols(self: *LinkerDefined, elf_file: *Elf) !void {
pub fn allocateSymbols(self: *LinkerDefined, elf_file: *Elf) void {
const comp = elf_file.base.comp;
const link_mode = comp.config.link_mode;
+ const shdrs = elf_file.sections.items(.shdr);
const allocSymbol = struct {
fn allocSymbol(ld: *LinkerDefined, index: Symbol.Index, value: u64, osec: u32, ef: *Elf) void {
@@ -204,7 +206,7 @@ pub fn allocateSymbols(self: *LinkerDefined, elf_file: *Elf) void {
// _DYNAMIC
if (elf_file.dynamic_section_index) |shndx| {
- const shdr = &elf_file.shdrs.items[shndx];
+ const shdr = shdrs[shndx];
allocSymbol(self, self.dynamic_index.?, shdr.sh_addr, shndx, elf_file);
}
@@ -213,21 +215,21 @@ pub fn allocateSymbols(self: *LinkerDefined, elf_file: *Elf) void {
// __init_array_start, __init_array_end
if (elf_file.sectionByName(".init_array")) |shndx| {
- const shdr = &elf_file.shdrs.items[shndx];
+ const shdr = shdrs[shndx];
allocSymbol(self, self.init_array_start_index.?, shdr.sh_addr, shndx, elf_file);
allocSymbol(self, self.init_array_end_index.?, shdr.sh_addr + shdr.sh_size, shndx, elf_file);
}
// __fini_array_start, __fini_array_end
if (elf_file.sectionByName(".fini_array")) |shndx| {
- const shdr = &elf_file.shdrs.items[shndx];
+ const shdr = shdrs[shndx];
allocSymbol(self, self.fini_array_start_index.?, shdr.sh_addr, shndx, elf_file);
allocSymbol(self, self.fini_array_end_index.?, shdr.sh_addr + shdr.sh_size, shndx, elf_file);
}
// __preinit_array_start, __preinit_array_end
if (elf_file.sectionByName(".preinit_array")) |shndx| {
- const shdr = &elf_file.shdrs.items[shndx];
+ const shdr = shdrs[shndx];
allocSymbol(self, self.preinit_array_start_index.?, shdr.sh_addr, shndx, elf_file);
allocSymbol(self, self.preinit_array_end_index.?, shdr.sh_addr + shdr.sh_size, shndx, elf_file);
}
@@ -235,38 +237,38 @@ pub fn allocateSymbols(self: *LinkerDefined, elf_file: *Elf) void {
// _GLOBAL_OFFSET_TABLE_
if (elf_file.getTarget().cpu.arch == .x86_64) {
if (elf_file.got_plt_section_index) |shndx| {
- const shdr = elf_file.shdrs.items[shndx];
+ const shdr = shdrs[shndx];
allocSymbol(self, self.got_index.?, shdr.sh_addr, shndx, elf_file);
}
} else {
if (elf_file.got_section_index) |shndx| {
- const shdr = elf_file.shdrs.items[shndx];
+ const shdr = shdrs[shndx];
allocSymbol(self, self.got_index.?, shdr.sh_addr, shndx, elf_file);
}
}
// _PROCEDURE_LINKAGE_TABLE_
if (elf_file.plt_section_index) |shndx| {
- const shdr = &elf_file.shdrs.items[shndx];
+ const shdr = shdrs[shndx];
allocSymbol(self, self.plt_index.?, shdr.sh_addr, shndx, elf_file);
}
// __dso_handle
if (self.dso_handle_index) |index| {
- const shdr = &elf_file.shdrs.items[1];
+ const shdr = shdrs[1];
allocSymbol(self, index, shdr.sh_addr, 0, elf_file);
}
// __GNU_EH_FRAME_HDR
if (elf_file.eh_frame_hdr_section_index) |shndx| {
- const shdr = &elf_file.shdrs.items[shndx];
+ const shdr = shdrs[shndx];
allocSymbol(self, self.gnu_eh_frame_hdr_index.?, shdr.sh_addr, shndx, elf_file);
}
// __rela_iplt_start, __rela_iplt_end
if (elf_file.rela_dyn_section_index) |shndx| blk: {
if (link_mode != .static or comp.config.pie) break :blk;
- const shdr = &elf_file.shdrs.items[shndx];
+ const shdr = shdrs[shndx];
const end_addr = shdr.sh_addr + shdr.sh_size;
const start_addr = end_addr - elf_file.calcNumIRelativeRelocs() * @sizeOf(elf.Elf64_Rela);
allocSymbol(self, self.rela_iplt_start_index.?, start_addr, shndx, elf_file);
@@ -277,7 +279,7 @@ pub fn allocateSymbols(self: *LinkerDefined, elf_file: *Elf) void {
{
var value: u64 = 0;
var osec: u32 = 0;
- for (elf_file.shdrs.items, 0..) |shdr, shndx| {
+ for (shdrs, 0..) |shdr, shndx| {
if (shdr.sh_flags & elf.SHF_ALLOC != 0) {
value = shdr.sh_addr + shdr.sh_size;
osec = @intCast(shndx);
@@ -289,7 +291,7 @@ pub fn allocateSymbols(self: *LinkerDefined, elf_file: *Elf) void {
// __global_pointer$
if (self.global_pointer_index) |index| {
const value, const osec = if (elf_file.sectionByName(".sdata")) |shndx| .{
- elf_file.shdrs.items[shndx].sh_addr + 0x800,
+ shdrs[shndx].sh_addr + 0x800,
shndx,
} else .{ 0, 0 };
allocSymbol(self, index, value, osec, elf_file);
@@ -305,7 +307,7 @@ pub fn allocateSymbols(self: *LinkerDefined, elf_file: *Elf) void {
const stop_ref = self.resolveSymbol(self.start_stop_indexes.items[index + 1], elf_file);
const stop = elf_file.symbol(stop_ref).?;
const shndx = elf_file.sectionByName(name["__start_".len..]).?;
- const shdr = &elf_file.shdrs.items[shndx];
+ const shdr = shdrs[shndx];
start.value = @intCast(shdr.sh_addr);
start.output_section_index = shndx;
stop.value = @intCast(shdr.sh_addr + shdr.sh_size);
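One caveat on the `shdrs` slice cached at the top of `allocateSymbols`: slices returned by `sections.items(.shdr)` point into the MultiArrayList's current backing storage, so they stay valid only while no new sections are added. A hypothetical routine that mixes reads with `addSection` should fetch the column after the addition, e.g. (this helper and the ".example.zig" name are not in the patch):

    fn addAndMeasure(elf_file: *Elf) !u64 {
        const shndx = try elf_file.addSection(.{
            .name = try elf_file.insertShString(".example.zig"),
            .type = elf.SHT_PROGBITS,
        });
        // Read the column only after the addition: addSection may reallocate
        // the MultiArrayList and leave earlier slices dangling.
        return elf_file.sections.items(.shdr)[shndx].sh_size;
    }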
diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig
index 584e5ad6d3..a19d327fcc 100644
--- a/src/link/Elf/Object.zig
+++ b/src/link/Elf/Object.zig
@@ -998,9 +998,8 @@ pub fn addAtomsToOutputSections(self: *Object, elf_file: *Elf) !void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
- const gop = try elf_file.output_sections.getOrPut(gpa, atom_ptr.output_section_index);
- if (!gop.found_existing) gop.value_ptr.* = .{};
- try gop.value_ptr.append(gpa, .{ .index = atom_index, .file = self.index });
+ const atom_list = &elf_file.sections.items(.atom_list)[atom_ptr.output_section_index];
+ try atom_list.append(gpa, .{ .index = atom_index, .file = self.index });
}
}
@@ -1011,7 +1010,8 @@ pub fn initRelaSections(self: *Object, elf_file: *Elf) !void {
const shndx = atom_ptr.relocsShndx() orelse continue;
const shdr = self.shdrs.items[shndx];
const out_shndx = try self.initOutputSection(elf_file, shdr);
- const out_shdr = &elf_file.shdrs.items[out_shndx];
+ const out_shdr = &elf_file.sections.items(.shdr)[out_shndx];
+ out_shdr.sh_type = elf.SHT_RELA;
out_shdr.sh_addralign = @alignOf(elf.Elf64_Rela);
out_shdr.sh_entsize = @sizeOf(elf.Elf64_Rela);
out_shdr.sh_flags |= elf.SHF_INFO_LINK;
@@ -1027,15 +1027,13 @@ pub fn addAtomsToRelaSections(self: *Object, elf_file: *Elf) !void {
const shdr = self.shdrs.items[shndx];
break :blk self.initOutputSection(elf_file, shdr) catch unreachable;
};
- const shdr = &elf_file.shdrs.items[shndx];
+ const slice = elf_file.sections.slice();
+ const shdr = &slice.items(.shdr)[shndx];
shdr.sh_info = atom_ptr.output_section_index;
shdr.sh_link = elf_file.symtab_section_index.?;
-
- const comp = elf_file.base.comp;
- const gpa = comp.gpa;
- const gop = try elf_file.output_rela_sections.getOrPut(gpa, atom_ptr.output_section_index);
- if (!gop.found_existing) gop.value_ptr.* = .{ .shndx = shndx };
- try gop.value_ptr.atom_list.append(gpa, .{ .index = atom_index, .file = self.index });
+ const gpa = elf_file.base.comp.gpa;
+ const atom_list = &elf_file.sections.items(.atom_list)[shndx];
+ try atom_list.append(gpa, .{ .index = atom_index, .file = self.index });
}
}
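With `output_rela_sections` gone, an output relocation section is recognized purely by its header: `initRelaSections` above sets `SHT_RELA` plus `SHF_INFO_LINK`, and `addAtomsToRelaSections` points `sh_info` at the target output section and `sh_link` at the symbol table. Later hunks (ZigObject and ComdatGroupSection) find the companion `.rela` section again by scanning for exactly that shape; a small hypothetical predicate capturing the convention:

    /// True if `shdr` is the output relocation section for output section `target_shndx`.
    fn isRelaFor(shdr: elf.Elf64_Shdr, target_shndx: u32) bool {
        return shdr.sh_type == elf.SHT_RELA and shdr.sh_info == target_shndx;
    }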
diff --git a/src/link/Elf/Symbol.zig b/src/link/Elf/Symbol.zig
index 4f6c2b8c7e..1b1c35b645 100644
--- a/src/link/Elf/Symbol.zig
+++ b/src/link/Elf/Symbol.zig
@@ -11,7 +11,7 @@ file_index: File.Index = 0,
/// Reference to Atom or merge subsection containing this symbol if any.
/// Use `atom` or `mergeSubsection` to get the pointer to the atom.
-ref: Elf.Ref = .{ .index = 0, .file = 0 },
+ref: Elf.Ref = .{},
/// Assigned output section index for this symbol.
output_section_index: u32 = 0,
@@ -126,7 +126,7 @@ pub fn address(symbol: Symbol, opts: struct { plt: bool = true, trampoline: bool
const sym_name = symbol.name(elf_file);
const sh_addr, const sh_size = blk: {
const shndx = elf_file.eh_frame_section_index orelse break :blk .{ 0, 0 };
- const shdr = elf_file.shdrs.items[shndx];
+ const shdr = elf_file.sections.items(.shdr)[shndx];
break :blk .{ shdr.sh_addr, shdr.sh_size };
};
if (mem.startsWith(u8, sym_name, "__EH_FRAME_BEGIN__") or
@@ -173,7 +173,7 @@ pub fn gotAddress(symbol: Symbol, elf_file: *Elf) i64 {
pub fn pltGotAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!(symbol.flags.has_plt and symbol.flags.has_got)) return 0;
const extras = symbol.extra(elf_file);
- const shdr = elf_file.shdrs.items[elf_file.plt_got_section_index.?];
+ const shdr = elf_file.sections.items(.shdr)[elf_file.plt_got_section_index.?];
const cpu_arch = elf_file.getTarget().cpu.arch;
return @intCast(shdr.sh_addr + extras.plt_got * PltGotSection.entrySize(cpu_arch));
}
@@ -181,7 +181,7 @@ pub fn pltGotAddress(symbol: Symbol, elf_file: *Elf) i64 {
pub fn pltAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!symbol.flags.has_plt) return 0;
const extras = symbol.extra(elf_file);
- const shdr = elf_file.shdrs.items[elf_file.plt_section_index.?];
+ const shdr = elf_file.sections.items(.shdr)[elf_file.plt_section_index.?];
const cpu_arch = elf_file.getTarget().cpu.arch;
return @intCast(shdr.sh_addr + extras.plt * PltSection.entrySize(cpu_arch) + PltSection.preambleSize(cpu_arch));
}
@@ -189,13 +189,13 @@ pub fn pltAddress(symbol: Symbol, elf_file: *Elf) i64 {
pub fn gotPltAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!symbol.flags.has_plt) return 0;
const extras = symbol.extra(elf_file);
- const shdr = elf_file.shdrs.items[elf_file.got_plt_section_index.?];
+ const shdr = elf_file.sections.items(.shdr)[elf_file.got_plt_section_index.?];
return @intCast(shdr.sh_addr + extras.plt * 8 + GotPltSection.preamble_size);
}
pub fn copyRelAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!symbol.flags.has_copy_rel) return 0;
- const shdr = elf_file.shdrs.items[elf_file.copy_rel_section_index.?];
+ const shdr = elf_file.sections.items(.shdr)[elf_file.copy_rel_section_index.?];
return @as(i64, @intCast(shdr.sh_addr)) + symbol.value;
}
@@ -300,7 +300,7 @@ pub fn setOutputSym(symbol: Symbol, elf_file: *Elf, out: *elf.Elf64_Sym) void {
break :blk 0;
}
if (st_shndx == elf.SHN_ABS or st_shndx == elf.SHN_COMMON) break :blk symbol.address(.{ .plt = false }, elf_file);
- const shdr = elf_file.shdrs.items[st_shndx];
+ const shdr = elf_file.sections.items(.shdr)[st_shndx];
if (shdr.sh_flags & elf.SHF_TLS != 0 and file_ptr != .linker_defined)
break :blk symbol.address(.{ .plt = false }, elf_file) - elf_file.tlsAddress();
break :blk symbol.address(.{ .plt = false, .trampoline = false }, elf_file);
diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig
index 06cde2bc96..acee16673e 100644
--- a/src/link/Elf/ZigObject.zig
+++ b/src/link/Elf/ZigObject.zig
@@ -63,24 +63,300 @@ pub const global_symbol_bit: u32 = 0x80000000;
pub const symbol_mask: u32 = 0x7fffffff;
pub const SHN_ATOM: u16 = 0x100;
-pub fn init(self: *ZigObject, elf_file: *Elf) !void {
+const InitOptions = struct {
+ symbol_count_hint: u64,
+ program_code_size_hint: u64,
+};
+
+pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
+ const ptr_size = elf_file.ptrWidthBytes();
+ const target = elf_file.getTarget();
+ const ptr_bit_width = target.ptrBitWidth();
try self.atoms.append(gpa, .{ .extra_index = try self.addAtomExtra(gpa, .{}) }); // null input section
try self.relocs.append(gpa, .{}); // null relocs section
try self.strtab.buffer.append(gpa, 0);
- const name_off = try self.strtab.insert(gpa, self.path);
- const symbol_index = try self.newLocalSymbol(gpa, name_off);
- const sym = self.symbol(symbol_index);
- const esym = &self.symtab.items(.elf_sym)[sym.esym_index];
- esym.st_info = elf.STT_FILE;
- esym.st_shndx = elf.SHN_ABS;
+ {
+ const name_off = try self.strtab.insert(gpa, self.path);
+ const symbol_index = try self.newLocalSymbol(gpa, name_off);
+ const sym = self.symbol(symbol_index);
+ const esym = &self.symtab.items(.elf_sym)[sym.esym_index];
+ esym.st_info = elf.STT_FILE;
+ esym.st_shndx = elf.SHN_ABS;
+ }
+
+ const fillSection = struct {
+ fn fillSection(ef: *Elf, shdr: *elf.Elf64_Shdr, size: u64, phndx: ?u16) !void {
+ if (ef.base.isRelocatable()) {
+ const off = try ef.findFreeSpace(size, shdr.sh_addralign);
+ shdr.sh_offset = off;
+ shdr.sh_size = size;
+ } else {
+ const phdr = ef.phdrs.items[phndx.?];
+ shdr.sh_addr = phdr.p_vaddr;
+ shdr.sh_offset = phdr.p_offset;
+ shdr.sh_size = phdr.p_memsz;
+ }
+ }
+ }.fillSection;
+
+ comptime assert(Elf.number_of_zig_segments == 4);
+
+ if (!elf_file.base.isRelocatable()) {
+ if (elf_file.phdr_zig_load_re_index == null) {
+ const filesz = options.program_code_size_hint;
+ const off = try elf_file.findFreeSpace(filesz, elf_file.page_size);
+ elf_file.phdr_zig_load_re_index = try elf_file.addPhdr(.{
+ .type = elf.PT_LOAD,
+ .offset = off,
+ .filesz = filesz,
+ .addr = if (ptr_bit_width >= 32) 0x4000000 else 0x4000,
+ .memsz = filesz,
+ .@"align" = elf_file.page_size,
+ .flags = elf.PF_X | elf.PF_R | elf.PF_W,
+ });
+ }
+
+ if (elf_file.phdr_zig_load_ro_index == null) {
+ const alignment = elf_file.page_size;
+ const filesz: u64 = 1024;
+ const off = try elf_file.findFreeSpace(filesz, alignment);
+ elf_file.phdr_zig_load_ro_index = try elf_file.addPhdr(.{
+ .type = elf.PT_LOAD,
+ .offset = off,
+ .filesz = filesz,
+ .addr = if (ptr_bit_width >= 32) 0xc000000 else 0xa000,
+ .memsz = filesz,
+ .@"align" = alignment,
+ .flags = elf.PF_R | elf.PF_W,
+ });
+ }
+
+ if (elf_file.phdr_zig_load_rw_index == null) {
+ const alignment = elf_file.page_size;
+ const filesz: u64 = 1024;
+ const off = try elf_file.findFreeSpace(filesz, alignment);
+ elf_file.phdr_zig_load_rw_index = try elf_file.addPhdr(.{
+ .type = elf.PT_LOAD,
+ .offset = off,
+ .filesz = filesz,
+ .addr = if (ptr_bit_width >= 32) 0x10000000 else 0xc000,
+ .memsz = filesz,
+ .@"align" = alignment,
+ .flags = elf.PF_R | elf.PF_W,
+ });
+ }
+
+ if (elf_file.phdr_zig_load_zerofill_index == null) {
+ const alignment = elf_file.page_size;
+ elf_file.phdr_zig_load_zerofill_index = try elf_file.addPhdr(.{
+ .type = elf.PT_LOAD,
+ .addr = if (ptr_bit_width >= 32) 0x14000000 else 0xf000,
+ .memsz = 1024,
+ .@"align" = alignment,
+ .flags = elf.PF_R | elf.PF_W,
+ });
+ }
+ }
+
+ if (elf_file.zig_text_section_index == null) {
+ elf_file.zig_text_section_index = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".text.zig"),
+ .type = elf.SHT_PROGBITS,
+ .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
+ .addralign = 1,
+ .offset = std.math.maxInt(u64),
+ });
+ const shdr = &elf_file.sections.items(.shdr)[elf_file.zig_text_section_index.?];
+ const phndx = &elf_file.sections.items(.phndx)[elf_file.zig_text_section_index.?];
+ try fillSection(elf_file, shdr, options.program_code_size_hint, elf_file.phdr_zig_load_re_index);
+ if (elf_file.base.isRelocatable()) {
+ _ = try elf_file.addRelaShdr(
+ try elf_file.insertShString(".rela.text.zig"),
+ elf_file.zig_text_section_index.?,
+ );
+ } else {
+ phndx.* = elf_file.phdr_zig_load_re_index.?;
+ }
+ }
+
+ if (elf_file.zig_data_rel_ro_section_index == null) {
+ elf_file.zig_data_rel_ro_section_index = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".data.rel.ro.zig"),
+ .type = elf.SHT_PROGBITS,
+ .addralign = 1,
+ .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
+ .offset = std.math.maxInt(u64),
+ });
+ const shdr = &elf_file.sections.items(.shdr)[elf_file.zig_data_rel_ro_section_index.?];
+ const phndx = &elf_file.sections.items(.phndx)[elf_file.zig_data_rel_ro_section_index.?];
+ try fillSection(elf_file, shdr, 1024, elf_file.phdr_zig_load_ro_index);
+ if (elf_file.base.isRelocatable()) {
+ _ = try elf_file.addRelaShdr(
+ try elf_file.insertShString(".rela.data.rel.ro.zig"),
+ elf_file.zig_data_rel_ro_section_index.?,
+ );
+ } else {
+ phndx.* = elf_file.phdr_zig_load_ro_index.?;
+ }
+ }
+
+ if (elf_file.zig_data_section_index == null) {
+ elf_file.zig_data_section_index = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".data.zig"),
+ .type = elf.SHT_PROGBITS,
+ .addralign = ptr_size,
+ .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
+ .offset = std.math.maxInt(u64),
+ });
+ const shdr = &elf_file.sections.items(.shdr)[elf_file.zig_data_section_index.?];
+ const phndx = &elf_file.sections.items(.phndx)[elf_file.zig_data_section_index.?];
+ try fillSection(elf_file, shdr, 1024, elf_file.phdr_zig_load_rw_index);
+ if (elf_file.base.isRelocatable()) {
+ _ = try elf_file.addRelaShdr(
+ try elf_file.insertShString(".rela.data.zig"),
+ elf_file.zig_data_section_index.?,
+ );
+ } else {
+ phndx.* = elf_file.phdr_zig_load_rw_index.?;
+ }
+ }
+
+ if (elf_file.zig_bss_section_index == null) {
+ elf_file.zig_bss_section_index = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".bss.zig"),
+ .type = elf.SHT_NOBITS,
+ .addralign = ptr_size,
+ .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
+ .offset = 0,
+ });
+ const shdr = &elf_file.sections.items(.shdr)[elf_file.zig_bss_section_index.?];
+ const phndx = &elf_file.sections.items(.phndx)[elf_file.zig_bss_section_index.?];
+ if (elf_file.base.isRelocatable()) {
+ shdr.sh_size = 1024;
+ } else {
+ phndx.* = elf_file.phdr_zig_load_zerofill_index.?;
+ const phdr = elf_file.phdrs.items[phndx.*.?];
+ shdr.sh_addr = phdr.p_vaddr;
+ shdr.sh_size = phdr.p_memsz;
+ }
+ }
switch (comp.config.debug_format) {
.strip => {},
- .dwarf => |v| self.dwarf = Dwarf.init(&elf_file.base, v),
+ .dwarf => |v| {
+ var dwarf = Dwarf.init(&elf_file.base, v);
+
+ const addSectionSymbol = struct {
+ fn addSectionSymbol(
+ zig_object: *ZigObject,
+ alloc: Allocator,
+ name: [:0]const u8,
+ alignment: Atom.Alignment,
+ shndx: u32,
+ ) !Symbol.Index {
+ const name_off = try zig_object.addString(alloc, name);
+ const index = try zig_object.newSymbolWithAtom(alloc, name_off);
+ const sym = zig_object.symbol(index);
+ const esym = &zig_object.symtab.items(.elf_sym)[sym.esym_index];
+ esym.st_info |= elf.STT_SECTION;
+ const atom_ptr = zig_object.atom(sym.ref.index).?;
+ atom_ptr.alignment = alignment;
+ atom_ptr.output_section_index = shndx;
+ return index;
+ }
+ }.addSectionSymbol;
+
+ if (elf_file.debug_str_section_index == null) {
+ elf_file.debug_str_section_index = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".debug_str"),
+ .flags = elf.SHF_MERGE | elf.SHF_STRINGS,
+ .entsize = 1,
+ .type = elf.SHT_PROGBITS,
+ .addralign = 1,
+ });
+ self.debug_str_section_dirty = true;
+ self.debug_str_index = try addSectionSymbol(self, gpa, ".debug_str", .@"1", elf_file.debug_str_section_index.?);
+ }
+
+ if (elf_file.debug_info_section_index == null) {
+ elf_file.debug_info_section_index = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".debug_info"),
+ .type = elf.SHT_PROGBITS,
+ .addralign = 1,
+ });
+ self.debug_info_section_dirty = true;
+ self.debug_info_index = try addSectionSymbol(self, gpa, ".debug_info", .@"1", elf_file.debug_info_section_index.?);
+ }
+
+ if (elf_file.debug_abbrev_section_index == null) {
+ elf_file.debug_abbrev_section_index = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".debug_abbrev"),
+ .type = elf.SHT_PROGBITS,
+ .addralign = 1,
+ });
+ self.debug_abbrev_section_dirty = true;
+ self.debug_abbrev_index = try addSectionSymbol(self, gpa, ".debug_abbrev", .@"1", elf_file.debug_abbrev_section_index.?);
+ }
+
+ if (elf_file.debug_aranges_section_index == null) {
+ elf_file.debug_aranges_section_index = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".debug_aranges"),
+ .type = elf.SHT_PROGBITS,
+ .addralign = 16,
+ });
+ self.debug_aranges_section_dirty = true;
+ self.debug_aranges_index = try addSectionSymbol(self, gpa, ".debug_aranges", .@"16", elf_file.debug_aranges_section_index.?);
+ }
+
+ if (elf_file.debug_line_section_index == null) {
+ elf_file.debug_line_section_index = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".debug_line"),
+ .type = elf.SHT_PROGBITS,
+ .addralign = 1,
+ });
+ self.debug_line_section_dirty = true;
+ self.debug_line_index = try addSectionSymbol(self, gpa, ".debug_line", .@"1", elf_file.debug_line_section_index.?);
+ }
+
+ if (elf_file.debug_line_str_section_index == null) {
+ elf_file.debug_line_str_section_index = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".debug_line_str"),
+ .flags = elf.SHF_MERGE | elf.SHF_STRINGS,
+ .entsize = 1,
+ .type = elf.SHT_PROGBITS,
+ .addralign = 1,
+ });
+ self.debug_line_str_section_dirty = true;
+ self.debug_line_str_index = try addSectionSymbol(self, gpa, ".debug_line_str", .@"1", elf_file.debug_line_str_section_index.?);
+ }
+
+ if (elf_file.debug_loclists_section_index == null) {
+ elf_file.debug_loclists_section_index = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".debug_loclists"),
+ .type = elf.SHT_PROGBITS,
+ .addralign = 1,
+ });
+ self.debug_loclists_section_dirty = true;
+ self.debug_loclists_index = try addSectionSymbol(self, gpa, ".debug_loclists", .@"1", elf_file.debug_loclists_section_index.?);
+ }
+
+ if (elf_file.debug_rnglists_section_index == null) {
+ elf_file.debug_rnglists_section_index = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".debug_rnglists"),
+ .type = elf.SHT_PROGBITS,
+ .addralign = 1,
+ });
+ self.debug_rnglists_section_dirty = true;
+ self.debug_rnglists_index = try addSectionSymbol(self, gpa, ".debug_rnglists", .@"1", elf_file.debug_rnglists_section_index.?);
+ }
+
+ try dwarf.initMetadata();
+ self.dwarf = dwarf;
+ },
.code_view => unreachable,
}
}
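For readers of the reworked `init` above: in an executable, each Zig-owned section is backed by one of the four pre-allocated PT_LOAD segments (presumably why `number_of_zig_segments` is now pub and asserted to be 4), and `fillSection` mirrors the segment's address, offset and size into the header, while `-r` output only reserves file space. A tiny illustrative check of that pairing right after init (hypothetical helper, assumes the file's imports):

    // Hypothetical sanity check: a phdr-backed Zig section starts at its
    // segment's virtual address immediately after init.
    fn zigSectionMatchesPhdr(elf_file: *Elf, shndx: u32) bool {
        const shdr = elf_file.sections.items(.shdr)[shndx];
        const phndx = elf_file.sections.items(.phndx)[shndx] orelse return false;
        return shdr.sh_addr == elf_file.phdrs.items[phndx].p_vaddr;
    }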
@@ -198,7 +474,7 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
const atom_ptr = self.atom(sym.ref.index).?;
if (!atom_ptr.alive) continue;
const shndx = sym.outputShndx(elf_file).?;
- const shdr = elf_file.shdrs.items[shndx];
+ const shdr = elf_file.sections.items(.shdr)[shndx];
const esym = &self.symtab.items(.elf_sym)[sym.esym_index];
esym.st_size = shdr.sh_size;
atom_ptr.size = shdr.sh_size;
@@ -358,13 +634,10 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
}
if (elf_file.base.isRelocatable() and relocs.items.len > 0) {
- const gop = try elf_file.output_rela_sections.getOrPut(gpa, shndx);
- if (!gop.found_existing) {
- const rela_sect_name = try std.fmt.allocPrintZ(gpa, ".rela{s}", .{elf_file.getShString(shdr.sh_name)});
- defer gpa.free(rela_sect_name);
- const rela_sh_name = try elf_file.insertShString(rela_sect_name);
- const rela_shndx = try elf_file.addRelaShdr(rela_sh_name, shndx);
- gop.value_ptr.* = .{ .shndx = rela_shndx };
+ const rela_sect_name = try std.fmt.allocPrintZ(gpa, ".rela{s}", .{elf_file.getShString(shdr.sh_name)});
+ defer gpa.free(rela_sect_name);
+ if (elf_file.sectionByName(rela_sect_name) == null) {
+ _ = try elf_file.addRelaShdr(try elf_file.insertShString(rela_sect_name), shndx);
}
}
}
@@ -446,7 +719,7 @@ fn newAtom(self: *ZigObject, allocator: Allocator, name_off: u32) !Atom.Index {
return index;
}
-pub fn newSymbolWithAtom(self: *ZigObject, allocator: Allocator, name_off: u32) !Symbol.Index {
+fn newSymbolWithAtom(self: *ZigObject, allocator: Allocator, name_off: u32) !Symbol.Index {
const atom_index = try self.newAtom(allocator, name_off);
const sym_index = try self.newLocalSymbol(allocator, name_off);
const sym = self.symbol(sym_index);
@@ -460,7 +733,7 @@ pub fn newSymbolWithAtom(self: *ZigObject, allocator: Allocator, name_off: u32)
pub fn inputShdr(self: *ZigObject, atom_index: Atom.Index, elf_file: *Elf) elf.Elf64_Shdr {
const atom_ptr = self.atom(atom_index) orelse return Elf.null_shdr;
const shndx = atom_ptr.output_section_index;
- var shdr = elf_file.shdrs.items[shndx];
+ var shdr = elf_file.sections.items(.shdr)[shndx];
shdr.sh_addr = 0;
shdr.sh_offset = 0;
shdr.sh_size = atom_ptr.size;
@@ -638,8 +911,8 @@ pub fn readFileContents(self: *ZigObject, elf_file: *Elf) !void {
.p32 => @sizeOf(elf.Elf32_Shdr),
.p64 => @sizeOf(elf.Elf64_Shdr),
};
- var end_pos: u64 = elf_file.shdr_table_offset.? + elf_file.shdrs.items.len * shsize;
- for (elf_file.shdrs.items) |shdr| {
+ var end_pos: u64 = elf_file.shdr_table_offset.? + elf_file.sections.items(.shdr).len * shsize;
+ for (elf_file.sections.items(.shdr)) |shdr| {
if (shdr.sh_type == elf.SHT_NOBITS) continue;
end_pos = @max(end_pos, shdr.sh_offset + shdr.sh_size);
}
@@ -692,12 +965,14 @@ pub fn addAtomsToRelaSections(self: *ZigObject, elf_file: *Elf) !void {
// TODO this check will become obsolete when we rework our relocs mechanism at the ZigObject level
if (self.relocs.items[rela_shndx].items.len == 0) continue;
const out_shndx = atom_ptr.output_section_index;
- const out_shdr = elf_file.shdrs.items[out_shndx];
+ const out_shdr = elf_file.sections.items(.shdr)[out_shndx];
if (out_shdr.sh_type == elf.SHT_NOBITS) continue;
-
+ const out_rela_shndx = for (elf_file.sections.items(.shdr), 0..) |out_rela_shdr, out_rela_shndx| {
+ if (out_rela_shdr.sh_type == elf.SHT_RELA and out_rela_shdr.sh_info == out_shndx) break out_rela_shndx;
+ } else unreachable;
+ const atom_list = &elf_file.sections.items(.atom_list)[out_rela_shndx];
const gpa = elf_file.base.comp.gpa;
- const sec = elf_file.output_rela_sections.getPtr(out_shndx).?;
- try sec.atom_list.append(gpa, .{ .index = atom_index, .file = self.index });
+ try atom_list.append(gpa, .{ .index = atom_index, .file = self.index });
}
}
@@ -767,7 +1042,7 @@ pub fn writeSymtab(self: ZigObject, elf_file: *Elf) void {
pub fn codeAlloc(self: *ZigObject, elf_file: *Elf, atom_index: Atom.Index) ![]u8 {
const gpa = elf_file.base.comp.gpa;
const atom_ptr = self.atom(atom_index).?;
- const shdr = &elf_file.shdrs.items[atom_ptr.output_section_index];
+ const shdr = &elf_file.sections.items(.shdr)[atom_ptr.output_section_index];
if (shdr.sh_flags & elf.SHF_TLS != 0) {
const tlv = self.tls_variables.get(atom_index).?;
@@ -1094,7 +1369,7 @@ fn updateNavCode(
}
}
- const shdr = elf_file.shdrs.items[shdr_index];
+ const shdr = elf_file.sections.items(.shdr)[shdr_index];
if (shdr.sh_type != elf.SHT_NOBITS) {
const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
try elf_file.base.file.?.pwriteAll(code, file_offset);
@@ -1149,16 +1424,13 @@ fn updateTlv(
gop.value_ptr.* = .{ .symbol_index = sym_index };
// We only store the data for the TLV if it's non-zerofill.
- if (elf_file.shdrs.items[shndx].sh_type != elf.SHT_NOBITS) {
+ if (elf_file.sections.items(.shdr)[shndx].sh_type != elf.SHT_NOBITS) {
gop.value_ptr.code = try gpa.dupe(u8, code);
}
}
- {
- const gop = try elf_file.output_sections.getOrPut(gpa, atom_ptr.output_section_index);
- if (!gop.found_existing) gop.value_ptr.* = .{};
- try gop.value_ptr.append(gpa, .{ .index = atom_ptr.atom_index, .file = self.index });
- }
+ const atom_list = &elf_file.sections.items(.atom_list)[atom_ptr.output_section_index];
+ try atom_list.append(gpa, .{ .index = atom_ptr.atom_index, .file = self.index });
}
pub fn updateFunc(
@@ -1210,7 +1482,7 @@ pub fn updateFunc(
const shndx = try self.getNavShdrIndex(elf_file, zcu, func.owner_nav, sym_index, code);
log.debug("setting shdr({x},{s}) for {}", .{
shndx,
- elf_file.getShString(elf_file.shdrs.items[shndx].sh_name),
+ elf_file.getShString(elf_file.sections.items(.shdr)[shndx].sh_name),
ip.getNav(func.owner_nav).fqn.fmt(ip),
});
const old_rva, const old_alignment = blk: {
@@ -1338,10 +1610,10 @@ pub fn updateNav(
const shndx = try self.getNavShdrIndex(elf_file, zcu, nav_index, sym_index, code);
log.debug("setting shdr({x},{s}) for {}", .{
shndx,
- elf_file.getShString(elf_file.shdrs.items[shndx].sh_name),
+ elf_file.getShString(elf_file.sections.items(.shdr)[shndx].sh_name),
nav.fqn.fmt(ip),
});
- if (elf_file.shdrs.items[shndx].sh_flags & elf.SHF_TLS != 0)
+ if (elf_file.sections.items(.shdr)[shndx].sh_flags & elf.SHF_TLS != 0)
try self.updateTlv(elf_file, pt, nav_index, sym_index, shndx, code)
else
try self.updateNavCode(elf_file, pt, nav_index, sym_index, shndx, code, elf.STT_OBJECT);
@@ -1440,7 +1712,7 @@ fn updateLazySymbol(
local_sym.value = 0;
local_esym.st_value = 0;
- const shdr = elf_file.shdrs.items[output_section_index];
+ const shdr = elf_file.sections.items(.shdr)[output_section_index];
const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
try elf_file.base.file.?.pwriteAll(code, file_offset);
}
@@ -1496,7 +1768,7 @@ fn lowerConst(
// TODO rename and re-audit this method
errdefer self.freeNavMetadata(elf_file, sym_index);
- const shdr = elf_file.shdrs.items[output_section_index];
+ const shdr = elf_file.sections.items(.shdr)[output_section_index];
const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
try elf_file.base.file.?.pwriteAll(code, file_offset);
@@ -1660,7 +1932,7 @@ fn trampolineSize(cpu_arch: std.Target.Cpu.Arch) u64 {
fn writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) !void {
const atom_ptr = tr_sym.atom(elf_file).?;
- const shdr = elf_file.shdrs.items[atom_ptr.output_section_index];
+ const shdr = elf_file.sections.items(.shdr)[atom_ptr.output_section_index];
const fileoff = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
const source_addr = tr_sym.address(.{}, elf_file);
const target_addr = target.address(.{ .trampoline = false }, elf_file);
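A recurring pattern in the hunks above (`updateNavCode`, `updateLazySymbol`, `lowerConst`, `writeTrampoline`) is locating an atom in the output file: its section's `sh_offset` plus the atom's section-relative `value`. A hypothetical helper making that explicit (not part of the patch):

    fn atomFileOffset(elf_file: *Elf, atom_ptr: *const Atom) u64 {
        // `value` is the atom's offset within its output section.
        const shdr = elf_file.sections.items(.shdr)[atom_ptr.output_section_index];
        return shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
    }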
diff --git a/src/link/Elf/eh_frame.zig b/src/link/Elf/eh_frame.zig
index f090cf0820..fa555e9d2d 100644
--- a/src/link/Elf/eh_frame.zig
+++ b/src/link/Elf/eh_frame.zig
@@ -13,7 +13,7 @@ pub const Fde = struct {
pub fn address(fde: Fde, elf_file: *Elf) u64 {
const base: u64 = if (elf_file.eh_frame_section_index) |shndx|
- elf_file.shdrs.items[shndx].sh_addr
+ elf_file.sections.items(.shdr)[shndx].sh_addr
else
0;
return base + fde.out_offset;
@@ -112,7 +112,7 @@ pub const Cie = struct {
pub fn address(cie: Cie, elf_file: *Elf) u64 {
const base: u64 = if (elf_file.eh_frame_section_index) |shndx|
- elf_file.shdrs.items[shndx].sh_addr
+ elf_file.sections.items(.shdr)[shndx].sh_addr
else
0;
return base + cie.out_offset;
@@ -326,7 +326,9 @@ fn resolveReloc(rec: anytype, sym: *const Symbol, rel: elf.Elf64_Rela, elf_file:
}
pub fn writeEhFrame(elf_file: *Elf, writer: anytype) !void {
- relocs_log.debug("{x}: .eh_frame", .{elf_file.shdrs.items[elf_file.eh_frame_section_index.?].sh_addr});
+ relocs_log.debug("{x}: .eh_frame", .{
+ elf_file.sections.items(.shdr)[elf_file.eh_frame_section_index.?].sh_addr,
+ });
var has_reloc_errors = false;
@@ -423,7 +425,7 @@ fn emitReloc(elf_file: *Elf, rec: anytype, sym: *const Symbol, rel: elf.Elf64_Re
switch (sym.type(elf_file)) {
elf.STT_SECTION => {
r_addend += @intCast(sym.address(.{}, elf_file));
- r_sym = elf_file.sectionSymbolOutputSymtabIndex(sym.outputShndx(elf_file).?);
+ r_sym = sym.outputShndx(elf_file).?;
},
else => {
r_sym = sym.outputSymtabIndex(elf_file) orelse 0;
@@ -446,7 +448,9 @@ fn emitReloc(elf_file: *Elf, rec: anytype, sym: *const Symbol, rel: elf.Elf64_Re
}
pub fn writeEhFrameRelocs(elf_file: *Elf, writer: anytype) !void {
- relocs_log.debug("{x}: .eh_frame", .{elf_file.shdrs.items[elf_file.eh_frame_section_index.?].sh_addr});
+ relocs_log.debug("{x}: .eh_frame", .{
+ elf_file.sections.items(.shdr)[elf_file.eh_frame_section_index.?].sh_addr,
+ });
for (elf_file.objects.items) |index| {
const object = elf_file.file(index).?.object;
@@ -482,8 +486,9 @@ pub fn writeEhFrameHdr(elf_file: *Elf, writer: anytype) !void {
try writer.writeByte(EH_PE.udata4);
try writer.writeByte(EH_PE.datarel | EH_PE.sdata4);
- const eh_frame_shdr = elf_file.shdrs.items[elf_file.eh_frame_section_index.?];
- const eh_frame_hdr_shdr = elf_file.shdrs.items[elf_file.eh_frame_hdr_section_index.?];
+ const shdrs = elf_file.sections.items(.shdr);
+ const eh_frame_shdr = shdrs[elf_file.eh_frame_section_index.?];
+ const eh_frame_hdr_shdr = shdrs[elf_file.eh_frame_hdr_section_index.?];
const num_fdes = @as(u32, @intCast(@divExact(eh_frame_hdr_shdr.sh_size - eh_frame_hdr_header_size, 8)));
try writer.writeInt(
u32,
diff --git a/src/link/Elf/merge_section.zig b/src/link/Elf/merge_section.zig
index 7ffb17e963..6241e1aec9 100644
--- a/src/link/Elf/merge_section.zig
+++ b/src/link/Elf/merge_section.zig
@@ -29,7 +29,7 @@ pub const MergeSection = struct {
}
pub fn address(msec: MergeSection, elf_file: *Elf) i64 {
- const shdr = elf_file.shdrs.items[msec.output_section_index];
+ const shdr = elf_file.sections.items(.shdr)[msec.output_section_index];
return @intCast(shdr.sh_addr + msec.value);
}
@@ -108,13 +108,11 @@ pub const MergeSection = struct {
}
pub fn initOutputSection(msec: *MergeSection, elf_file: *Elf) !void {
- const shndx = elf_file.sectionByName(msec.name(elf_file)) orelse try elf_file.addSection(.{
+ msec.output_section_index = elf_file.sectionByName(msec.name(elf_file)) orelse try elf_file.addSection(.{
.name = msec.name_offset,
.type = msec.type,
.flags = msec.flags,
});
- try elf_file.output_sections.put(elf_file.base.comp.gpa, shndx, .{});
- msec.output_section_index = shndx;
}
pub fn addMergeSubsection(msec: *MergeSection, allocator: Allocator) !MergeSubsection.Index {
diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig
index 06850f87ac..58610fd3c3 100644
--- a/src/link/Elf/relocatable.zig
+++ b/src/link/Elf/relocatable.zig
@@ -347,36 +347,36 @@ fn initComdatGroups(elf_file: *Elf) !void {
}
fn updateSectionSizes(elf_file: *Elf) !void {
- for (elf_file.output_sections.keys(), elf_file.output_sections.values()) |shndx, atom_list| {
- const shdr = &elf_file.shdrs.items[shndx];
- for (atom_list.items) |ref| {
- const atom_ptr = elf_file.atom(ref) orelse continue;
- if (!atom_ptr.alive) continue;
- const offset = atom_ptr.alignment.forward(shdr.sh_size);
- const padding = offset - shdr.sh_size;
- atom_ptr.value = @intCast(offset);
- shdr.sh_size += padding + atom_ptr.size;
- shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1);
- }
- }
+ const slice = elf_file.sections.slice();
+ for (slice.items(.shdr), 0..) |*shdr, shndx| {
+ const atom_list = slice.items(.atom_list)[shndx];
+ if (shdr.sh_type != elf.SHT_RELA) {
+ for (atom_list.items) |ref| {
+ const atom_ptr = elf_file.atom(ref) orelse continue;
+ if (!atom_ptr.alive) continue;
+ const offset = atom_ptr.alignment.forward(shdr.sh_size);
+ const padding = offset - shdr.sh_size;
+ atom_ptr.value = @intCast(offset);
+ shdr.sh_size += padding + atom_ptr.size;
+ shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1);
+ }
+ } else {
+ for (atom_list.items) |ref| {
+ const atom_ptr = elf_file.atom(ref) orelse continue;
+ if (!atom_ptr.alive) continue;
+ const relocs = atom_ptr.relocs(elf_file);
+ shdr.sh_size += shdr.sh_entsize * relocs.len;
+ }
- for (elf_file.output_rela_sections.values()) |sec| {
- const shdr = &elf_file.shdrs.items[sec.shndx];
- for (sec.atom_list.items) |ref| {
- const atom_ptr = elf_file.atom(ref) orelse continue;
- if (!atom_ptr.alive) continue;
- const relocs = atom_ptr.relocs(elf_file);
- shdr.sh_size += shdr.sh_entsize * relocs.len;
+ if (shdr.sh_size == 0) shdr.sh_offset = 0;
}
-
- if (shdr.sh_size == 0) shdr.sh_offset = 0;
}
if (elf_file.eh_frame_section_index) |index| {
- elf_file.shdrs.items[index].sh_size = try eh_frame.calcEhFrameSize(elf_file);
+ slice.items(.shdr)[index].sh_size = try eh_frame.calcEhFrameSize(elf_file);
}
if (elf_file.eh_frame_rela_section_index) |index| {
- const shdr = &elf_file.shdrs.items[index];
+ const shdr = &slice.items(.shdr)[index];
shdr.sh_size = eh_frame.calcEhFrameRelocs(elf_file) * shdr.sh_entsize;
}
@@ -387,19 +387,18 @@ fn updateSectionSizes(elf_file: *Elf) !void {
fn updateComdatGroupsSizes(elf_file: *Elf) void {
for (elf_file.comdat_group_sections.items) |cg| {
- const shdr = &elf_file.shdrs.items[cg.shndx];
+ const shdr = &elf_file.sections.items(.shdr)[cg.shndx];
shdr.sh_size = cg.size(elf_file);
shdr.sh_link = elf_file.symtab_section_index.?;
const sym = cg.symbol(elf_file);
- shdr.sh_info = sym.outputSymtabIndex(elf_file) orelse
- elf_file.sectionSymbolOutputSymtabIndex(sym.outputShndx(elf_file).?);
+ shdr.sh_info = sym.outputSymtabIndex(elf_file) orelse sym.outputShndx(elf_file).?;
}
}
/// Allocates alloc sections when merging relocatable objects files together.
fn allocateAllocSections(elf_file: *Elf) !void {
- for (elf_file.shdrs.items) |*shdr| {
+ for (elf_file.sections.items(.shdr)) |*shdr| {
if (shdr.sh_type == elf.SHT_NULL) continue;
if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
if (shdr.sh_type == elf.SHT_NOBITS) {
@@ -418,13 +417,13 @@ fn allocateAllocSections(elf_file: *Elf) !void {
fn writeAtoms(elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
+ const slice = elf_file.sections.slice();
// TODO iterate over `output_sections` directly
- for (elf_file.shdrs.items, 0..) |shdr, shndx| {
+ for (slice.items(.shdr), slice.items(.atom_list), 0..) |shdr, atom_list, shndx| {
if (shdr.sh_type == elf.SHT_NULL) continue;
if (shdr.sh_type == elf.SHT_NOBITS) continue;
-
- const atom_list = elf_file.output_sections.get(@intCast(shndx)) orelse continue;
+ if (shdr.sh_type == elf.SHT_RELA) continue;
if (atom_list.items.len == 0) continue;
log.debug("writing atoms in '{s}' section", .{elf_file.getShString(shdr.sh_name)});
@@ -490,18 +489,18 @@ fn writeAtoms(elf_file: *Elf) !void {
fn writeSyntheticSections(elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
+ const slice = elf_file.sections.slice();
- for (elf_file.output_rela_sections.values()) |sec| {
- if (sec.atom_list.items.len == 0) continue;
-
- const shdr = elf_file.shdrs.items[sec.shndx];
+ for (slice.items(.shdr), slice.items(.atom_list)) |shdr, atom_list| {
+ if (shdr.sh_type != elf.SHT_RELA) continue;
+ if (atom_list.items.len == 0) continue;
const num_relocs = math.cast(usize, @divExact(shdr.sh_size, shdr.sh_entsize)) orelse
return error.Overflow;
var relocs = try std.ArrayList(elf.Elf64_Rela).initCapacity(gpa, num_relocs);
defer relocs.deinit();
- for (sec.atom_list.items) |ref| {
+ for (atom_list.items) |ref| {
const atom_ptr = elf_file.atom(ref) orelse continue;
if (!atom_ptr.alive) continue;
try atom_ptr.writeRelocs(elf_file, &relocs);
@@ -527,7 +526,7 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
}
if (elf_file.eh_frame_section_index) |shndx| {
- const shdr = elf_file.shdrs.items[shndx];
+ const shdr = slice.items(.shdr)[shndx];
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
var buffer = try std.ArrayList(u8).initCapacity(gpa, sh_size);
defer buffer.deinit();
@@ -540,7 +539,7 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
try elf_file.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
}
if (elf_file.eh_frame_rela_section_index) |shndx| {
- const shdr = elf_file.shdrs.items[shndx];
+ const shdr = slice.items(.shdr)[shndx];
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
var buffer = try std.ArrayList(u8).initCapacity(gpa, sh_size);
defer buffer.deinit();
@@ -561,7 +560,7 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
fn writeComdatGroups(elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
for (elf_file.comdat_group_sections.items) |cgs| {
- const shdr = elf_file.shdrs.items[cgs.shndx];
+ const shdr = elf_file.sections.items(.shdr)[cgs.shndx];
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
var buffer = try std.ArrayList(u8).initCapacity(gpa, sh_size);
defer buffer.deinit();
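A worked example of the size/padding arithmetic in `updateSectionSizes` above, using `std.mem.alignForward` in place of the patch's `alignment.forward` (same computation, illustrative values): if `sh_size` is currently 0x13 and the next atom needs 8-byte alignment, the atom lands at 0x18 and 5 bytes of padding are accounted for before its size is added.

    const std = @import("std");

    test "updateSectionSizes padding math (illustrative)" {
        const sh_size: u64 = 0x13;
        const offset = std.mem.alignForward(u64, sh_size, 8);
        const padding = offset - sh_size;
        try std.testing.expectEqual(@as(u64, 0x18), offset);
        try std.testing.expectEqual(@as(u64, 0x5), padding);
    }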
diff --git a/src/link/Elf/synthetic_sections.zig b/src/link/Elf/synthetic_sections.zig
index 40cb37b967..a159ba23c1 100644
--- a/src/link/Elf/synthetic_sections.zig
+++ b/src/link/Elf/synthetic_sections.zig
@@ -95,6 +95,8 @@ pub const DynamicSection = struct {
}
pub fn write(dt: DynamicSection, elf_file: *Elf, writer: anytype) !void {
+ const shdrs = elf_file.sections.items(.shdr);
+
// NEEDED
for (dt.needed.items) |off| {
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_NEEDED, .d_val = off });
@@ -112,33 +114,33 @@ pub const DynamicSection = struct {
// INIT
if (elf_file.sectionByName(".init")) |shndx| {
- const addr = elf_file.shdrs.items[shndx].sh_addr;
+ const addr = shdrs[shndx].sh_addr;
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_INIT, .d_val = addr });
}
// FINI
if (elf_file.sectionByName(".fini")) |shndx| {
- const addr = elf_file.shdrs.items[shndx].sh_addr;
+ const addr = shdrs[shndx].sh_addr;
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FINI, .d_val = addr });
}
// INIT_ARRAY
if (elf_file.sectionByName(".init_array")) |shndx| {
- const shdr = elf_file.shdrs.items[shndx];
+ const shdr = shdrs[shndx];
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_INIT_ARRAY, .d_val = shdr.sh_addr });
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_INIT_ARRAYSZ, .d_val = shdr.sh_size });
}
// FINI_ARRAY
if (elf_file.sectionByName(".fini_array")) |shndx| {
- const shdr = elf_file.shdrs.items[shndx];
+ const shdr = shdrs[shndx];
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FINI_ARRAY, .d_val = shdr.sh_addr });
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_FINI_ARRAYSZ, .d_val = shdr.sh_size });
}
// RELA
if (elf_file.rela_dyn_section_index) |shndx| {
- const shdr = elf_file.shdrs.items[shndx];
+ const shdr = shdrs[shndx];
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_RELA, .d_val = shdr.sh_addr });
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_RELASZ, .d_val = shdr.sh_size });
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_RELAENT, .d_val = shdr.sh_entsize });
@@ -146,7 +148,7 @@ pub const DynamicSection = struct {
// JMPREL
if (elf_file.rela_plt_section_index) |shndx| {
- const shdr = elf_file.shdrs.items[shndx];
+ const shdr = shdrs[shndx];
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_JMPREL, .d_val = shdr.sh_addr });
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_PLTRELSZ, .d_val = shdr.sh_size });
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_PLTREL, .d_val = elf.DT_RELA });
@@ -154,18 +156,18 @@ pub const DynamicSection = struct {
// PLTGOT
if (elf_file.got_plt_section_index) |shndx| {
- const addr = elf_file.shdrs.items[shndx].sh_addr;
+ const addr = shdrs[shndx].sh_addr;
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_PLTGOT, .d_val = addr });
}
{
assert(elf_file.hash_section_index != null);
- const addr = elf_file.shdrs.items[elf_file.hash_section_index.?].sh_addr;
+ const addr = shdrs[elf_file.hash_section_index.?].sh_addr;
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_HASH, .d_val = addr });
}
if (elf_file.gnu_hash_section_index) |shndx| {
- const addr = elf_file.shdrs.items[shndx].sh_addr;
+ const addr = shdrs[shndx].sh_addr;
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_GNU_HASH, .d_val = addr });
}
@@ -177,7 +179,7 @@ pub const DynamicSection = struct {
// SYMTAB + SYMENT
{
assert(elf_file.dynsymtab_section_index != null);
- const shdr = elf_file.shdrs.items[elf_file.dynsymtab_section_index.?];
+ const shdr = shdrs[elf_file.dynsymtab_section_index.?];
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_SYMTAB, .d_val = shdr.sh_addr });
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_SYMENT, .d_val = shdr.sh_entsize });
}
@@ -185,20 +187,20 @@ pub const DynamicSection = struct {
// STRTAB + STRSZ
{
assert(elf_file.dynstrtab_section_index != null);
- const shdr = elf_file.shdrs.items[elf_file.dynstrtab_section_index.?];
+ const shdr = shdrs[elf_file.dynstrtab_section_index.?];
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_STRTAB, .d_val = shdr.sh_addr });
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_STRSZ, .d_val = shdr.sh_size });
}
// VERSYM
if (elf_file.versym_section_index) |shndx| {
- const addr = elf_file.shdrs.items[shndx].sh_addr;
+ const addr = shdrs[shndx].sh_addr;
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_VERSYM, .d_val = addr });
}
// VERNEED + VERNEEDNUM
if (elf_file.verneed_section_index) |shndx| {
- const addr = elf_file.shdrs.items[shndx].sh_addr;
+ const addr = shdrs[shndx].sh_addr;
try writer.writeStruct(elf.Elf64_Dyn{ .d_tag = elf.DT_VERNEED, .d_val = addr });
try writer.writeStruct(elf.Elf64_Dyn{
.d_tag = elf.DT_VERNEEDNUM,
@@ -259,7 +261,7 @@ pub const GotSection = struct {
pub fn address(entry: Entry, elf_file: *Elf) i64 {
const ptr_bytes = elf_file.archPtrWidthBytes();
- const shdr = &elf_file.shdrs.items[elf_file.got_section_index.?];
+ const shdr = &elf_file.sections.items(.shdr)[elf_file.got_section_index.?];
return @as(i64, @intCast(shdr.sh_addr)) + entry.cell_index * ptr_bytes;
}
};
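The address helper in the hunk above computes a GOT cell's runtime address as the .got output section's sh_addr plus cell_index times the pointer width. A minimal, self-contained sketch of that arithmetic, assuming nothing beyond what the hunk shows (gotEntryAddress and the test values are illustrative, not part of the patch):

const std = @import("std");

// Mirrors GotSection.Entry.address above: got_addr is the sh_addr of the
// output .got section, ptr_bytes is 8 on 64-bit and 4 on 32-bit targets.
fn gotEntryAddress(got_addr: u64, cell_index: i64, ptr_bytes: u8) i64 {
    return @as(i64, @intCast(got_addr)) + cell_index * ptr_bytes;
}

test "third GOT cell on a 64-bit target" {
    // .got at 0x4000, cell index 2 -> 0x4000 + 2 * 8 = 0x4010
    try std.testing.expectEqual(@as(i64, 0x4010), gotEntryAddress(0x4000, 2, 8));
}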
@@ -759,8 +761,9 @@ pub const PltSection = struct {
const x86_64 = struct {
fn write(plt: PltSection, elf_file: *Elf, writer: anytype) !void {
- const plt_addr = elf_file.shdrs.items[elf_file.plt_section_index.?].sh_addr;
- const got_plt_addr = elf_file.shdrs.items[elf_file.got_plt_section_index.?].sh_addr;
+ const shdrs = elf_file.sections.items(.shdr);
+ const plt_addr = shdrs[elf_file.plt_section_index.?].sh_addr;
+ const got_plt_addr = shdrs[elf_file.got_plt_section_index.?].sh_addr;
var preamble = [_]u8{
0xf3, 0x0f, 0x1e, 0xfa, // endbr64
0x41, 0x53, // push r11
@@ -794,8 +797,9 @@ pub const PltSection = struct {
const aarch64 = struct {
fn write(plt: PltSection, elf_file: *Elf, writer: anytype) !void {
{
- const plt_addr: i64 = @intCast(elf_file.shdrs.items[elf_file.plt_section_index.?].sh_addr);
- const got_plt_addr: i64 = @intCast(elf_file.shdrs.items[elf_file.got_plt_section_index.?].sh_addr);
+ const shdrs = elf_file.sections.items(.shdr);
+ const plt_addr: i64 = @intCast(shdrs[elf_file.plt_section_index.?].sh_addr);
+ const got_plt_addr: i64 = @intCast(shdrs[elf_file.got_plt_section_index.?].sh_addr);
// TODO: relax if possible
// .got.plt[2]
const pages = try aarch64_util.calcNumberOfPages(plt_addr + 4, got_plt_addr + 16);
@@ -869,7 +873,7 @@ pub const GotPltSection = struct {
try writer.writeInt(u64, 0x0, .little);
try writer.writeInt(u64, 0x0, .little);
if (elf_file.plt_section_index) |shndx| {
- const plt_addr = elf_file.shdrs.items[shndx].sh_addr;
+ const plt_addr = elf_file.sections.items(.shdr)[shndx].sh_addr;
for (0..elf_file.plt.symbols.items.len) |_| {
// [N]: .plt
try writer.writeInt(u64, plt_addr, .little);
@@ -1027,7 +1031,7 @@ pub const CopyRelSection = struct {
}
pub fn updateSectionSize(copy_rel: CopyRelSection, shndx: u32, elf_file: *Elf) !void {
- const shdr = &elf_file.shdrs.items[shndx];
+ const shdr = &elf_file.sections.items(.shdr)[shndx];
for (copy_rel.symbols.items) |ref| {
const symbol = elf_file.symbol(ref).?;
const shared_object = symbol.file(elf_file).?.shared_object;
@@ -1487,8 +1491,12 @@ pub const ComdatGroupSection = struct {
elf.SHT_RELA => {
const atom_index = object.atoms_indexes.items[shdr.sh_info];
const atom = object.atom(atom_index).?;
- const rela = elf_file.output_rela_sections.get(atom.output_section_index).?;
- try writer.writeInt(u32, rela.shndx, .little);
+ const rela_shndx = for (elf_file.sections.items(.shdr), 0..) |rela_shdr, rela_shndx| {
+ if (rela_shdr.sh_type == elf.SHT_RELA and
+ atom.output_section_index == rela_shdr.sh_info)
+ break rela_shndx;
+ } else unreachable;
+ try writer.writeInt(u32, @intCast(rela_shndx), .little);
},
else => {
const atom_index = object.atoms_indexes.items[shndx];
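The SHT_RELA branch added above drops the old output_rela_sections lookup in favour of a linear scan: an ELF relocation section records the index of the section it applies to in sh_info, so scanning the output section headers for an SHT_RELA entry whose sh_info matches the atom's output section index recovers the same association. A hedged, free-standing version of that scan (findRelaSection is hypothetical and not part of the patch; a caller would pass elf_file.sections.items(.shdr)). Unlike the loop in the patch, which hits unreachable if no match exists, this version returns null when the target has no relocation section:

const std = @import("std");
const elf = std.elf;

// Returns the index of the SHT_RELA section whose sh_info links back to
// target_shndx, or null when the target section has no relocation section.
fn findRelaSection(shdrs: []const elf.Elf64_Shdr, target_shndx: u32) ?u32 {
    for (shdrs, 0..) |shdr, shndx| {
        if (shdr.sh_type == elf.SHT_RELA and shdr.sh_info == target_shndx)
            return @as(u32, @intCast(shndx));
    }
    return null;
}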
diff --git a/src/link/Elf/thunks.zig b/src/link/Elf/thunks.zig
index c62c81bd38..bc534639da 100644
--- a/src/link/Elf/thunks.zig
+++ b/src/link/Elf/thunks.zig
@@ -1,9 +1,8 @@
-pub fn createThunks(shndx: u32, elf_file: *Elf) !void {
+pub fn createThunks(shdr: *elf.Elf64_Shdr, shndx: u32, elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
const cpu_arch = elf_file.getTarget().cpu.arch;
const max_distance = maxAllowedDistance(cpu_arch);
- const shdr = &elf_file.shdrs.items[shndx];
- const atoms = elf_file.output_sections.get(shndx).?.items;
+ const atoms = elf_file.sections.items(.atom_list)[shndx].items;
assert(atoms.len > 0);
for (atoms) |ref| {
@@ -89,7 +88,7 @@ pub const Thunk = struct {
}
pub fn address(thunk: Thunk, elf_file: *Elf) i64 {
- const shdr = elf_file.shdrs.items[thunk.output_section_index];
+ const shdr = elf_file.sections.items(.shdr)[thunk.output_section_index];
return @as(i64, @intCast(shdr.sh_addr)) + thunk.value;
}
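The common thread through these hunks is the move from a plain array list of section headers to a std.MultiArrayList of per-section records, where sections.items(.shdr) (and, for example, sections.items(.atom_list) in createThunks above) hands back a contiguous slice of a single field. A minimal, self-contained sketch of that access pattern, assuming a simplified Section layout that only stands in for the real per-section record:

const std = @import("std");
const elf = std.elf;

const Section = struct {
    shdr: elf.Elf64_Shdr,
    // The real record carries more per-section state (atom lists, etc.);
    // this extra field is only a placeholder for it.
    extra: u32 = 0,
};

test "field-wise access through std.MultiArrayList" {
    const gpa = std.testing.allocator;
    var sections: std.MultiArrayList(Section) = .{};
    defer sections.deinit(gpa);

    try sections.append(gpa, .{ .shdr = std.mem.zeroes(elf.Elf64_Shdr) });

    // items(.shdr) is a slice over the list's own storage, so writes through
    // it are visible to later lookups; this is the same pattern the patch
    // relies on when it hoists `const shdrs = elf_file.sections.items(.shdr);`.
    sections.items(.shdr)[0].sh_addr = 0x1000;
    try std.testing.expectEqual(@as(u64, 0x1000), sections.items(.shdr)[0].sh_addr);
}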