| author | Andrew Kelley <andrew@ziglang.org> | 2024-10-09 17:43:17 -0700 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2024-10-09 17:43:17 -0700 |
| commit | 2e2927735d26fc6047343f0c620f20e9048ebaa5 (patch) | |
| tree | b405962660ed41a6c2682e912cb9ccbcfe3748c9 /src | |
| parent | 5d7ed6110391bc8f6ff7fb9fa225bfa03fd19191 (diff) | |
| parent | 73c3b9b8ab056c3bcbde3a7a9b893b8814553c45 (diff) | |
Merge pull request #21629 from ziglang/elf-incr
elf: more incremental progress
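
A recurring theme in the changes below is a `dirty` flag threaded through `Object` and `AtomList`, so inputs and atom lists that have not changed since the last flush are skipped (`updateSectionSizes` now checks `atom_list.dirty` and clears it after allocation). The following is a minimal, standalone sketch of that pattern; `Section` and `relayout` are illustrative stand-ins, not the linker's actual types.

```zig
const std = @import("std");

// Illustrative stand-in for an incrementally linked section/atom list.
const Section = struct {
    name: []const u8,
    size: u64 = 0,
    /// Starts dirty so the first flush lays everything out; later flushes
    /// only touch sections that an update marked dirty again.
    dirty: bool = true,

    fn relayout(self: *Section) void {
        std.debug.print("re-laying out {s} ({d} bytes)\n", .{ self.name, self.size });
        self.dirty = false;
    }
};

pub fn main() void {
    var sections = [_]Section{
        .{ .name = ".text", .size = 0x400 },
        .{ .name = ".data", .size = 0x80 },
    };

    // First flush: everything is dirty, so everything gets laid out.
    for (&sections) |*sec| {
        if (!sec.dirty) continue;
        sec.relayout();
    }

    // An incremental update touches only .text ...
    sections[0].size += 0x20;
    sections[0].dirty = true;

    // ... so the second flush skips .data entirely.
    for (&sections) |*sec| {
        if (!sec.dirty) continue;
        sec.relayout();
    }
}
```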
Diffstat (limited to 'src')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | src/link/Dwarf.zig | 35 |
| -rw-r--r-- | src/link/Elf.zig | 326 |
| -rw-r--r-- | src/link/Elf/Atom.zig | 19 |
| -rw-r--r-- | src/link/Elf/AtomList.zig | 46 |
| -rw-r--r-- | src/link/Elf/Object.zig | 3 |
| -rw-r--r-- | src/link/Elf/Symbol.zig | 15 |
| -rw-r--r-- | src/link/Elf/ZigObject.zig | 277 |
| -rw-r--r-- | src/link/Elf/file.zig | 14 |
| -rw-r--r-- | src/link/Elf/relocatable.zig | 34 |
| -rw-r--r-- | src/link/Elf/synthetic_sections.zig | 29 |
10 files changed, 384 insertions, 414 deletions
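
Another pattern visible throughout the diff is the switch of `AtomList.atoms` from `std.ArrayListUnmanaged(Elf.Ref)` to `std.AutoArrayHashMapUnmanaged(Elf.Ref, void)`: an insertion-ordered set, so `getOrPut` makes re-adding the same atom during an incremental update a no-op while `keys()` still iterates in insertion order. A small sketch of that container usage follows; `Ref` here is a hypothetical key type standing in for `Elf.Ref`.

```zig
const std = @import("std");

// Hypothetical stand-in for Elf.Ref (a plain struct key works with the auto context).
const Ref = struct { index: u32, file: u32 };

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    // Insertion-ordered set: the value type is void, only the keys matter.
    var atoms: std.AutoArrayHashMapUnmanaged(Ref, void) = .empty;
    defer atoms.deinit(gpa);

    const ref: Ref = .{ .index = 1, .file = 2 };

    // The second getOrPut finds the existing entry instead of appending a duplicate.
    _ = try atoms.getOrPut(gpa, ref);
    _ = try atoms.getOrPut(gpa, ref);

    // keys() iterates in insertion order, like the old ArrayList did.
    std.debug.print("{d} atom(s)\n", .{atoms.keys().len});
}
```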
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 148961147b..3b5e4fddeb 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -389,15 +389,10 @@ pub const Section = struct { if (dwarf.bin_file.cast(.elf)) |elf_file| { const zo = elf_file.zigObjectPtr().?; const atom = zo.symbol(sec.index).atom(elf_file).?; - const shndx = atom.output_section_index; - if (sec == &dwarf.debug_frame.section) - try elf_file.growAllocSection(shndx, len, sec.alignment.toByteUnits().?) - else - try elf_file.growNonAllocSection(shndx, len, sec.alignment.toByteUnits().?, true); - const shdr = elf_file.sections.items(.shdr)[shndx]; - atom.size = shdr.sh_size; - atom.alignment = InternPool.Alignment.fromNonzeroByteUnits(shdr.sh_addralign); - sec.len = shdr.sh_size; + atom.size = len; + atom.alignment = sec.alignment; + sec.len = len; + try zo.allocateAtom(atom, false, elf_file); } else if (dwarf.bin_file.cast(.macho)) |macho_file| { const header = if (macho_file.d_sym) |*d_sym| header: { try d_sym.growSection(@intCast(sec.index), len, true, macho_file); @@ -418,11 +413,15 @@ pub const Section = struct { if (dwarf.bin_file.cast(.elf)) |elf_file| { const zo = elf_file.zigObjectPtr().?; const atom = zo.symbol(sec.index).atom(elf_file).?; - const shndx = atom.output_section_index; - const shdr = &elf_file.sections.items(.shdr)[shndx]; - atom.size = sec.len; - shdr.sh_offset += len; - shdr.sh_size = sec.len; + if (atom.prevAtom(elf_file)) |_| { + // FIXME:JK trimming/shrinking has to be reworked on ZigObject/Elf level + atom.value += len; + } else { + const shdr = &elf_file.sections.items(.shdr)[atom.output_section_index]; + shdr.sh_offset += len; + atom.value = 0; + } + atom.size -= len; } else if (dwarf.bin_file.cast(.macho)) |macho_file| { const header = if (macho_file.d_sym) |*d_sym| &d_sym.sections.items[sec.index] @@ -911,11 +910,9 @@ const Entry = struct { if (std.debug.runtime_safety) { log.err("missing {} from {s}", .{ @as(Entry.Index, @enumFromInt(entry - unit.entries.items.ptr)), - std.mem.sliceTo(if (dwarf.bin_file.cast(.elf)) |elf_file| sh_name: { - const zo = elf_file.zigObjectPtr().?; - const shndx = zo.symbol(sec.index).atom(elf_file).?.output_section_index; - break :sh_name elf_file.shstrtab.items[elf_file.sections.items(.shdr)[shndx].sh_name..]; - } else if (dwarf.bin_file.cast(.macho)) |macho_file| + std.mem.sliceTo(if (dwarf.bin_file.cast(.elf)) |elf_file| + elf_file.zigObjectPtr().?.symbol(sec.index).name(elf_file) + else if (dwarf.bin_file.cast(.macho)) |macho_file| if (macho_file.d_sym) |*d_sym| &d_sym.sections.items[sec.index].segname else diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 0ccb920a3e..62bf6be63a 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -548,16 +548,6 @@ pub fn allocatedSize(self: *Elf, start: u64) u64 { return min_pos - start; } -fn allocatedVirtualSize(self: *Elf, start: u64) u64 { - if (start == 0) return 0; - var min_pos: u64 = std.math.maxInt(u64); - for (self.phdrs.items) |phdr| { - if (phdr.p_vaddr <= start) continue; - if (phdr.p_vaddr < min_pos) min_pos = phdr.p_vaddr; - } - return min_pos - start; -} - pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u64) !u64 { var start: u64 = 0; while (try self.detectAllocCollision(start, object_size)) |item_end| { @@ -566,90 +556,49 @@ pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u64) !u64 { return start; } -pub fn growAllocSection(self: *Elf, shdr_index: u32, needed_size: u64, min_alignment: u64) !void { - const slice = self.sections.slice(); - const shdr = 
&slice.items(.shdr)[shdr_index]; - assert(shdr.sh_flags & elf.SHF_ALLOC != 0); - const phndx = slice.items(.phndx)[shdr_index]; - const maybe_phdr = if (phndx) |ndx| &self.phdrs.items[ndx] else null; - - log.debug("allocated size {x} of {s}, needed size {x}", .{ - self.allocatedSize(shdr.sh_offset), - self.getShString(shdr.sh_name), - needed_size, - }); +pub fn growSection(self: *Elf, shdr_index: u32, needed_size: u64, min_alignment: u64) !void { + const shdr = &self.sections.items(.shdr)[shdr_index]; if (shdr.sh_type != elf.SHT_NOBITS) { const allocated_size = self.allocatedSize(shdr.sh_offset); + log.debug("allocated size {x} of '{s}', needed size {x}", .{ + allocated_size, + self.getShString(shdr.sh_name), + needed_size, + }); + if (needed_size > allocated_size) { const existing_size = shdr.sh_size; shdr.sh_size = 0; // Must move the entire section. const new_offset = try self.findFreeSpace(needed_size, min_alignment); - log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{ + log.debug("moving '{s}' from 0x{x} to 0x{x}", .{ self.getShString(shdr.sh_name), + shdr.sh_offset, new_offset, - new_offset + existing_size, }); - const amt = try self.base.file.?.copyRangeAll(shdr.sh_offset, self.base.file.?, new_offset, existing_size); - // TODO figure out what to about this error condition - how to communicate it up. - if (amt != existing_size) return error.InputOutput; - - shdr.sh_offset = new_offset; - if (maybe_phdr) |phdr| phdr.p_offset = new_offset; - } else if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) { - try self.base.file.?.setEndPos(shdr.sh_offset + needed_size); - } - if (maybe_phdr) |phdr| phdr.p_filesz = needed_size; - } - shdr.sh_size = needed_size; - self.markDirty(shdr_index); -} - -pub fn growNonAllocSection( - self: *Elf, - shdr_index: u32, - needed_size: u64, - min_alignment: u64, - requires_file_copy: bool, -) !void { - const shdr = &self.sections.items(.shdr)[shdr_index]; - assert(shdr.sh_flags & elf.SHF_ALLOC == 0); - - const allocated_size = self.allocatedSize(shdr.sh_offset); - if (needed_size > allocated_size) { - const existing_size = shdr.sh_size; - shdr.sh_size = 0; - // Move all the symbols to a new file location. - const new_offset = try self.findFreeSpace(needed_size, min_alignment); - - log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{ - self.getShString(shdr.sh_name), - new_offset, - new_offset + existing_size, - }); - - if (requires_file_copy) { const amt = try self.base.file.?.copyRangeAll( shdr.sh_offset, self.base.file.?, new_offset, existing_size, ); + // TODO figure out what to about this error condition - how to communicate it up. 
if (amt != existing_size) return error.InputOutput; - } - shdr.sh_offset = new_offset; - } else if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) { - try self.base.file.?.setEndPos(shdr.sh_offset + needed_size); + shdr.sh_offset = new_offset; + } else if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) { + try self.base.file.?.setEndPos(shdr.sh_offset + needed_size); + } } + shdr.sh_size = needed_size; self.markDirty(shdr_index); } -pub fn markDirty(self: *Elf, shdr_index: u32) void { +fn markDirty(self: *Elf, shdr_index: u32) void { if (self.zigObjectPtr()) |zo| { for ([_]?Symbol.Index{ zo.debug_info_index, @@ -742,25 +691,27 @@ pub fn allocateChunk(self: *Elf, args: struct { } }; - log.debug("allocated chunk (size({x}),align({x})) at 0x{x} (file(0x{x}))", .{ - args.size, - args.alignment.toByteUnits().?, - shdr.sh_addr + res.value, - shdr.sh_offset + res.value, - }); - const expand_section = if (self.atom(res.placement)) |placement_atom| placement_atom.nextAtom(self) == null else true; if (expand_section) { const needed_size = res.value + args.size; - if (shdr.sh_flags & elf.SHF_ALLOC != 0) - try self.growAllocSection(args.shndx, needed_size, args.alignment.toByteUnits().?) - else - try self.growNonAllocSection(args.shndx, needed_size, args.alignment.toByteUnits().?, true); + try self.growSection(args.shndx, needed_size, args.alignment.toByteUnits().?); } + log.debug("allocated chunk (size({x}),align({x})) in {s} at 0x{x} (file(0x{x}))", .{ + args.size, + args.alignment.toByteUnits().?, + self.getShString(shdr.sh_name), + shdr.sh_addr + res.value, + shdr.sh_offset + res.value, + }); + log.debug(" placement {}, {s}", .{ + res.placement, + if (self.atom(res.placement)) |atom_ptr| atom_ptr.name(self) else "", + }); + return res; } @@ -809,8 +760,6 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod const csu = try CsuObjects.init(arena, comp); - // Here we will parse object and library files (if referenced). - // csu prelude if (csu.crt0) |path| try parseObjectReportingFailure(self, path); if (csu.crti) |path| try parseObjectReportingFailure(self, path); @@ -1040,6 +989,13 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod // Beyond this point, everything has been allocated a virtual address and we can resolve // the relocations, and commit objects to file. + for (self.objects.items) |index| { + self.file(index).?.object.dirty = false; + } + // TODO: would state tracking be more appropriate here? perhaps even custom relocation type? 
+ self.rela_dyn.clearRetainingCapacity(); + self.rela_plt.clearRetainingCapacity(); + if (self.zigObjectPtr()) |zo| { var has_reloc_errors = false; for (zo.atoms_indexes.items) |atom_index| { @@ -1069,6 +1025,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod try self.writeShdrTable(); try self.writeAtoms(); try self.writeMergeSections(); + self.writeSyntheticSections() catch |err| switch (err) { error.RelocFailure => return error.FlushFailure, error.UnsupportedCpuArch => { @@ -1401,7 +1358,6 @@ pub fn parseLibraryReportingFailure(self: *Elf, lib: SystemLib, must_link: bool) fn parseLibrary(self: *Elf, lib: SystemLib, must_link: bool) ParseError!void { const tracy = trace(@src()); defer tracy.end(); - if (try Archive.isArchive(lib.path)) { try self.parseArchive(lib.path, must_link); } else if (try SharedObject.isSharedObject(lib.path)) { @@ -2801,9 +2757,10 @@ pub fn resolveMergeSections(self: *Elf) !void { var has_errors = false; for (self.objects.items) |index| { - const file_ptr = self.file(index).?; - if (!file_ptr.isAlive()) continue; - file_ptr.object.initInputMergeSections(self) catch |err| switch (err) { + const object = self.file(index).?.object; + if (!object.alive) continue; + if (!object.dirty) continue; + object.initInputMergeSections(self) catch |err| switch (err) { error.LinkFailure => has_errors = true, else => |e| return e, }; @@ -2812,15 +2769,17 @@ pub fn resolveMergeSections(self: *Elf) !void { if (has_errors) return error.FlushFailure; for (self.objects.items) |index| { - const file_ptr = self.file(index).?; - if (!file_ptr.isAlive()) continue; - try file_ptr.object.initOutputMergeSections(self); + const object = self.file(index).?.object; + if (!object.alive) continue; + if (!object.dirty) continue; + try object.initOutputMergeSections(self); } for (self.objects.items) |index| { - const file_ptr = self.file(index).?; - if (!file_ptr.isAlive()) continue; - file_ptr.object.resolveMergeSubsections(self) catch |err| switch (err) { + const object = self.file(index).?.object; + if (!object.alive) continue; + if (!object.dirty) continue; + object.resolveMergeSubsections(self) catch |err| switch (err) { error.LinkFailure => has_errors = true, else => |e| return e, }; @@ -2907,7 +2866,6 @@ fn initSyntheticSections(self: *Elf) !void { elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC, .addralign = ptr_size, - .offset = std.math.maxInt(u64), }); } if (comp.link_eh_frame_hdr and self.eh_frame_hdr_section_index == null) { @@ -2916,7 +2874,6 @@ fn initSyntheticSections(self: *Elf) !void { .type = elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC, .addralign = 4, - .offset = std.math.maxInt(u64), }); } } @@ -2927,7 +2884,6 @@ fn initSyntheticSections(self: *Elf) !void { .type = elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC | elf.SHF_WRITE, .addralign = ptr_size, - .offset = std.math.maxInt(u64), }); } @@ -2937,7 +2893,6 @@ fn initSyntheticSections(self: *Elf) !void { .type = elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC | elf.SHF_WRITE, .addralign = @alignOf(u64), - .offset = std.math.maxInt(u64), }); } @@ -2959,7 +2914,6 @@ fn initSyntheticSections(self: *Elf) !void { .flags = elf.SHF_ALLOC, .addralign = @alignOf(elf.Elf64_Rela), .entsize = @sizeOf(elf.Elf64_Rela), - .offset = std.math.maxInt(u64), }); } @@ -2970,7 +2924,6 @@ fn initSyntheticSections(self: *Elf) !void { .type = elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR, .addralign = 16, - .offset = std.math.maxInt(u64), }); } if (self.rela_plt_section_index == null) { @@ -2980,7 +2933,6 @@ fn 
initSyntheticSections(self: *Elf) !void { .flags = elf.SHF_ALLOC, .addralign = @alignOf(elf.Elf64_Rela), .entsize = @sizeOf(elf.Elf64_Rela), - .offset = std.math.maxInt(u64), }); } } @@ -2991,7 +2943,6 @@ fn initSyntheticSections(self: *Elf) !void { .type = elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR, .addralign = 16, - .offset = std.math.maxInt(u64), }); } @@ -3000,7 +2951,6 @@ fn initSyntheticSections(self: *Elf) !void { .name = try self.insertShString(".copyrel"), .type = elf.SHT_NOBITS, .flags = elf.SHF_ALLOC | elf.SHF_WRITE, - .offset = std.math.maxInt(u64), }); } @@ -3019,7 +2969,6 @@ fn initSyntheticSections(self: *Elf) !void { .type = elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC, .addralign = 1, - .offset = std.math.maxInt(u64), }); } @@ -3031,7 +2980,6 @@ fn initSyntheticSections(self: *Elf) !void { .type = elf.SHT_STRTAB, .entsize = 1, .addralign = 1, - .offset = std.math.maxInt(u64), }); } if (self.dynamic_section_index == null) { @@ -3041,7 +2989,6 @@ fn initSyntheticSections(self: *Elf) !void { .type = elf.SHT_DYNAMIC, .entsize = @sizeOf(elf.Elf64_Dyn), .addralign = @alignOf(elf.Elf64_Dyn), - .offset = std.math.maxInt(u64), }); } if (self.dynsymtab_section_index == null) { @@ -3052,7 +2999,6 @@ fn initSyntheticSections(self: *Elf) !void { .addralign = @alignOf(elf.Elf64_Sym), .entsize = @sizeOf(elf.Elf64_Sym), .info = 1, - .offset = std.math.maxInt(u64), }); } if (self.hash_section_index == null) { @@ -3062,7 +3008,6 @@ fn initSyntheticSections(self: *Elf) !void { .type = elf.SHT_HASH, .addralign = 4, .entsize = 4, - .offset = std.math.maxInt(u64), }); } if (self.gnu_hash_section_index == null) { @@ -3071,7 +3016,6 @@ fn initSyntheticSections(self: *Elf) !void { .flags = elf.SHF_ALLOC, .type = elf.SHT_GNU_HASH, .addralign = 8, - .offset = std.math.maxInt(u64), }); } @@ -3087,7 +3031,6 @@ fn initSyntheticSections(self: *Elf) !void { .type = elf.SHT_GNU_VERSYM, .addralign = @alignOf(elf.Elf64_Versym), .entsize = @sizeOf(elf.Elf64_Versym), - .offset = std.math.maxInt(u64), }); } if (self.verneed_section_index == null) { @@ -3096,7 +3039,6 @@ fn initSyntheticSections(self: *Elf) !void { .flags = elf.SHF_ALLOC, .type = elf.SHT_GNU_VERNEED, .addralign = @alignOf(elf.Elf64_Verneed), - .offset = std.math.maxInt(u64), }); } } @@ -3117,7 +3059,6 @@ pub fn initSymtab(self: *Elf) !void { .type = elf.SHT_SYMTAB, .addralign = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym), .entsize = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym), - .offset = std.math.maxInt(u64), }); } if (self.strtab_section_index == null) { @@ -3126,7 +3067,6 @@ pub fn initSymtab(self: *Elf) !void { .type = elf.SHT_STRTAB, .entsize = 1, .addralign = 1, - .offset = std.math.maxInt(u64), }); } } @@ -3138,7 +3078,6 @@ pub fn initShStrtab(self: *Elf) !void { .type = elf.SHT_STRTAB, .entsize = 1, .addralign = 1, - .offset = std.math.maxInt(u64), }); } } @@ -3219,7 +3158,7 @@ fn sortInitFini(self: *Elf) !void { for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| { if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue; - if (atom_list.atoms.items.len == 0) continue; + if (atom_list.atoms.keys().len == 0) continue; var is_init_fini = false; var is_ctor_dtor = false; @@ -3236,10 +3175,10 @@ fn sortInitFini(self: *Elf) !void { if (!is_init_fini and !is_ctor_dtor) continue; var entries = std.ArrayList(Entry).init(gpa); - try entries.ensureTotalCapacityPrecise(atom_list.atoms.items.len); + try entries.ensureTotalCapacityPrecise(atom_list.atoms.keys().len); defer 
entries.deinit(); - for (atom_list.atoms.items) |ref| { + for (atom_list.atoms.keys()) |ref| { const atom_ptr = self.atom(ref).?; const object = atom_ptr.file(self).?.object; const priority = blk: { @@ -3260,7 +3199,7 @@ fn sortInitFini(self: *Elf) !void { atom_list.atoms.clearRetainingCapacity(); for (entries.items) |entry| { - atom_list.atoms.appendAssumeCapacity(entry.atom_ref); + _ = atom_list.atoms.getOrPutAssumeCapacity(entry.atom_ref); } } } @@ -3506,7 +3445,7 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void { const slice = self.sections.slice(); for (slice.items(.shdr), slice.items(.atom_list_2)) |*shdr, *atom_list| { atom_list.output_section_index = backlinks[atom_list.output_section_index]; - for (atom_list.atoms.items) |ref| { + for (atom_list.atoms.keys()) |ref| { self.atom(ref).?.output_section_index = atom_list.output_section_index; } if (shdr.sh_type == elf.SHT_RELA) { @@ -3518,12 +3457,7 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void { } } - if (self.zigObjectPtr()) |zo| { - for (zo.atoms_indexes.items) |atom_index| { - const atom_ptr = zo.atom(atom_index) orelse continue; - atom_ptr.output_section_index = backlinks[atom_ptr.output_section_index]; - } - } + if (self.zigObjectPtr()) |zo| zo.resetShdrIndexes(backlinks); for (self.comdat_group_sections.items) |*cg| { cg.shndx = backlinks[cg.shndx]; @@ -3585,20 +3519,24 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void { fn updateSectionSizes(self: *Elf) !void { const slice = self.sections.slice(); for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| { - if (atom_list.atoms.items.len == 0) continue; + if (atom_list.atoms.keys().len == 0) continue; + if (!atom_list.dirty) continue; if (self.requiresThunks() and shdr.sh_flags & elf.SHF_EXECINSTR != 0) continue; atom_list.updateSize(self); try atom_list.allocate(self); + atom_list.dirty = false; } if (self.requiresThunks()) { for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| { if (shdr.sh_flags & elf.SHF_EXECINSTR == 0) continue; - if (atom_list.atoms.items.len == 0) continue; + if (atom_list.atoms.keys().len == 0) continue; + if (!atom_list.dirty) continue; // Create jump/branch range extenders if needed. try self.createThunks(atom_list); try atom_list.allocate(self); + atom_list.dirty = false; } // FIXME:JK this will hopefully not be needed once we create a link from Atom/Thunk to AtomList. 
@@ -3882,57 +3820,55 @@ pub fn allocateAllocSections(self: *Elf) !void { } const first = slice.items(.shdr)[cover.items[0]]; - var new_offset = try self.findFreeSpace(filesz, @"align"); const phndx = self.getPhdr(.{ .type = elf.PT_LOAD, .flags = shdrToPhdrFlags(first.sh_flags) }).?; const phdr = &self.phdrs.items[phndx]; - phdr.p_offset = new_offset; - phdr.p_vaddr = first.sh_addr; - phdr.p_paddr = first.sh_addr; - phdr.p_memsz = memsz; - phdr.p_filesz = filesz; - phdr.p_align = @"align"; + const allocated_size = self.allocatedSize(phdr.p_offset); + if (filesz > allocated_size) { + const old_offset = phdr.p_offset; + phdr.p_offset = 0; + var new_offset = try self.findFreeSpace(filesz, @"align"); + phdr.p_offset = new_offset; + + log.debug("moving phdr({d}) from 0x{x} to 0x{x}", .{ phndx, old_offset, new_offset }); + + for (cover.items) |shndx| { + const shdr = &slice.items(.shdr)[shndx]; + slice.items(.phndx)[shndx] = phndx; + if (shdr.sh_type == elf.SHT_NOBITS) { + shdr.sh_offset = 0; + continue; + } + new_offset = alignment.@"align"(shndx, shdr.sh_addralign, new_offset); - for (cover.items) |shndx| { - const shdr = &slice.items(.shdr)[shndx]; - slice.items(.phndx)[shndx] = phndx; - if (shdr.sh_type == elf.SHT_NOBITS) { - shdr.sh_offset = 0; - continue; - } - new_offset = alignment.@"align"(shndx, shdr.sh_addralign, new_offset); - - if (self.zigObjectPtr()) |zo| blk: { - const existing_size = for ([_]?Symbol.Index{ - zo.text_index, - zo.rodata_index, - zo.data_relro_index, - zo.data_index, - zo.tdata_index, - zo.eh_frame_index, - }) |maybe_sym_index| { - const sect_sym_index = maybe_sym_index orelse continue; - const sect_atom_ptr = zo.symbol(sect_sym_index).atom(self).?; - if (sect_atom_ptr.output_section_index != shndx) continue; - break sect_atom_ptr.size; - } else break :blk; log.debug("moving {s} from 0x{x} to 0x{x}", .{ self.getShString(shdr.sh_name), shdr.sh_offset, new_offset, }); - const amt = try self.base.file.?.copyRangeAll( - shdr.sh_offset, - self.base.file.?, - new_offset, - existing_size, - ); - if (amt != existing_size) return error.InputOutput; - } - shdr.sh_offset = new_offset; - new_offset += shdr.sh_size; + if (shdr.sh_offset > 0) { + // Get size actually commited to the output file. 
+ const existing_size = self.sectionSize(shndx); + const amt = try self.base.file.?.copyRangeAll( + shdr.sh_offset, + self.base.file.?, + new_offset, + existing_size, + ); + if (amt != existing_size) return error.InputOutput; + } + + shdr.sh_offset = new_offset; + new_offset += shdr.sh_size; + } } + phdr.p_vaddr = first.sh_addr; + phdr.p_paddr = first.sh_addr; + phdr.p_memsz = memsz; + phdr.p_filesz = filesz; + phdr.p_align = @"align"; + addr = mem.alignForward(u64, addr, self.page_size); } } @@ -3947,27 +3883,14 @@ pub fn allocateNonAllocSections(self: *Elf) !void { shdr.sh_size = 0; const new_offset = try self.findFreeSpace(needed_size, shdr.sh_addralign); - if (self.zigObjectPtr()) |zo| blk: { - const existing_size = for ([_]?Symbol.Index{ - zo.debug_info_index, - zo.debug_abbrev_index, - zo.debug_aranges_index, - zo.debug_str_index, - zo.debug_line_index, - zo.debug_line_str_index, - zo.debug_loclists_index, - zo.debug_rnglists_index, - }) |maybe_sym_index| { - const sym_index = maybe_sym_index orelse continue; - const sym = zo.symbol(sym_index); - const atom_ptr = sym.atom(self).?; - if (atom_ptr.output_section_index == shndx) break atom_ptr.size; - } else break :blk; - log.debug("moving {s} from 0x{x} to 0x{x}", .{ - self.getShString(shdr.sh_name), - shdr.sh_offset, - new_offset, - }); + log.debug("moving {s} from 0x{x} to 0x{x}", .{ + self.getShString(shdr.sh_name), + shdr.sh_offset, + new_offset, + }); + + if (shdr.sh_offset > 0) { + const existing_size = self.sectionSize(@intCast(shndx)); const amt = try self.base.file.?.copyRangeAll( shdr.sh_offset, self.base.file.?, @@ -4058,7 +3981,7 @@ fn writeAtoms(self: *Elf) !void { var has_reloc_errors = false; for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, atom_list| { if (shdr.sh_type == elf.SHT_NOBITS) continue; - if (atom_list.atoms.items.len == 0) continue; + if (atom_list.atoms.keys().len == 0) continue; atom_list.write(&buffer, &undefs, self) catch |err| switch (err) { error.UnsupportedCpuArch => { try self.reportUnsupportedCpuArch(); @@ -4816,7 +4739,6 @@ pub fn addRelaShdr(self: *Elf, name: u32, shndx: u32) !u32 { .entsize = entsize, .info = shndx, .addralign = addralign, - .offset = std.math.maxInt(u64), }); } @@ -4828,7 +4750,6 @@ pub const AddSectionOpts = struct { info: u32 = 0, addralign: u64 = 0, entsize: u64 = 0, - offset: u64 = 0, }; pub fn addSection(self: *Elf, opts: AddSectionOpts) !u32 { @@ -4840,7 +4761,7 @@ pub fn addSection(self: *Elf, opts: AddSectionOpts) !u32 { .sh_type = opts.type, .sh_flags = opts.flags, .sh_addr = 0, - .sh_offset = opts.offset, + .sh_offset = 0, .sh_size = 0, .sh_link = opts.link, .sh_info = opts.info, @@ -4863,6 +4784,7 @@ const RelaDyn = struct { sym: u64 = 0, type: u32, addend: i64 = 0, + target: ?*const Symbol = null, }; pub fn addRelaDyn(self: *Elf, opts: RelaDyn) !void { @@ -4871,6 +4793,13 @@ pub fn addRelaDyn(self: *Elf, opts: RelaDyn) !void { } pub fn addRelaDynAssumeCapacity(self: *Elf, opts: RelaDyn) void { + relocs_log.debug(" {s}: [{x} => {d}({s})] + {x}", .{ + relocation.fmtRelocType(opts.type, self.getTarget().cpu.arch), + opts.offset, + opts.sym, + if (opts.target) |sym| sym.name(self) else "", + opts.addend, + }); self.rela_dyn.appendAssumeCapacity(.{ .r_offset = opts.offset, .r_info = (opts.sym << 32) | opts.type, @@ -5703,6 +5632,12 @@ const Section = struct { free_list: std.ArrayListUnmanaged(Ref) = .empty, }; +pub fn sectionSize(self: *Elf, shndx: u32) u64 { + const last_atom_ref = self.sections.items(.last_atom)[shndx]; + const atom_ptr = 
self.atom(last_atom_ref) orelse return 0; + return @as(u64, @intCast(atom_ptr.value)) + atom_ptr.size; +} + fn defaultEntrySymbolName(cpu_arch: std.Target.Cpu.Arch) []const u8 { return switch (cpu_arch) { .mips, .mipsel, .mips64, .mips64el => "__start", @@ -5732,20 +5667,20 @@ fn createThunks(elf_file: *Elf, atom_list: *AtomList) !void { } }.advance; - for (atom_list.atoms.items) |ref| { + for (atom_list.atoms.keys()) |ref| { elf_file.atom(ref).?.value = -1; } var i: usize = 0; - while (i < atom_list.atoms.items.len) { + while (i < atom_list.atoms.keys().len) { const start = i; - const start_atom = elf_file.atom(atom_list.atoms.items[start]).?; + const start_atom = elf_file.atom(atom_list.atoms.keys()[start]).?; assert(start_atom.alive); start_atom.value = try advance(atom_list, start_atom.size, start_atom.alignment); i += 1; - while (i < atom_list.atoms.items.len) : (i += 1) { - const atom_ptr = elf_file.atom(atom_list.atoms.items[i]).?; + while (i < atom_list.atoms.keys().len) : (i += 1) { + const atom_ptr = elf_file.atom(atom_list.atoms.keys()[i]).?; assert(atom_ptr.alive); if (@as(i64, @intCast(atom_ptr.alignment.forward(atom_list.size))) - start_atom.value >= max_distance) break; @@ -5758,7 +5693,7 @@ fn createThunks(elf_file: *Elf, atom_list: *AtomList) !void { thunk_ptr.output_section_index = atom_list.output_section_index; // Scan relocs in the group and create trampolines for any unreachable callsite - for (atom_list.atoms.items[start..i]) |ref| { + for (atom_list.atoms.keys()[start..i]) |ref| { const atom_ptr = elf_file.atom(ref).?; const file_ptr = atom_ptr.file(elf_file).?; log.debug("atom({}) {s}", .{ ref, atom_ptr.name(elf_file) }); @@ -5801,6 +5736,7 @@ const assert = std.debug.assert; const elf = std.elf; const fs = std.fs; const log = std.log.scoped(.link); +const relocs_log = std.log.scoped(.link_relocs); const state_log = std.log.scoped(.link_state); const math = std.math; const mem = std.mem; diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig index 70236ff519..20aa249ea5 100644 --- a/src/link/Elf/Atom.zig +++ b/src/link/Elf/Atom.zig @@ -118,10 +118,19 @@ pub fn capacity(self: Atom, elf_file: *Elf) u64 { return @intCast(next_addr - self.address(elf_file)); } +pub fn fileCapacity(self: Atom, elf_file: *Elf) u64 { + const self_off = self.offset(elf_file); + const next_off = if (self.nextAtom(elf_file)) |next_atom| + next_atom.offset(elf_file) + else + self_off + elf_file.allocatedSize(self_off); + return @intCast(next_off - self_off); +} + pub fn freeListEligible(self: Atom, elf_file: *Elf) bool { // No need to keep a free list node for the last block. 
const next = self.nextAtom(elf_file) orelse return false; - const cap: u64 = @intCast(next.address(elf_file) - self.address(elf_file)); + const cap: u64 = @intCast(next.value - self.value); const ideal_cap = Elf.padToIdeal(self.size); if (cap <= ideal_cap) return false; const surplus = cap - ideal_cap; @@ -723,6 +732,7 @@ fn resolveDynAbsReloc( .sym = target.extra(elf_file).dynamic, .type = relocation.encode(.abs, cpu_arch), .addend = A, + .target = target, }); try applyDynamicReloc(A, elf_file, writer); } else { @@ -737,6 +747,7 @@ fn resolveDynAbsReloc( .sym = target.extra(elf_file).dynamic, .type = relocation.encode(.abs, cpu_arch), .addend = A, + .target = target, }); try applyDynamicReloc(A, elf_file, writer); } else { @@ -750,6 +761,7 @@ fn resolveDynAbsReloc( .sym = target.extra(elf_file).dynamic, .type = relocation.encode(.abs, cpu_arch), .addend = A, + .target = target, }); try applyDynamicReloc(A, elf_file, writer); }, @@ -759,6 +771,7 @@ fn resolveDynAbsReloc( .offset = P, .type = relocation.encode(.rel, cpu_arch), .addend = S + A, + .target = target, }); try applyDynamicReloc(S + A, elf_file, writer); }, @@ -769,6 +782,7 @@ fn resolveDynAbsReloc( .offset = P, .type = relocation.encode(.irel, cpu_arch), .addend = S_ + A, + .target = target, }); try applyDynamicReloc(S_ + A, elf_file, writer); }, @@ -922,9 +936,10 @@ fn format2( _ = unused_fmt_string; const atom = ctx.atom; const elf_file = ctx.elf_file; - try writer.print("atom({d}) : {s} : @{x} : shdr({d}) : align({x}) : size({x})", .{ + try writer.print("atom({d}) : {s} : @{x} : shdr({d}) : align({x}) : size({x}) : prev({}) : next({})", .{ atom.atom_index, atom.name(elf_file), atom.address(elf_file), atom.output_section_index, atom.alignment.toByteUnits() orelse 0, atom.size, + atom.prev_atom_ref, atom.next_atom_ref, }); if (atom.fdes(elf_file).len > 0) { try writer.writeAll(" : fdes{ "); diff --git a/src/link/Elf/AtomList.zig b/src/link/Elf/AtomList.zig index dfabbe0ff7..bab4726f24 100644 --- a/src/link/Elf/AtomList.zig +++ b/src/link/Elf/AtomList.zig @@ -2,7 +2,10 @@ value: i64 = 0, size: u64 = 0, alignment: Atom.Alignment = .@"1", output_section_index: u32 = 0, -atoms: std.ArrayListUnmanaged(Elf.Ref) = .empty, +// atoms: std.ArrayListUnmanaged(Elf.Ref) = .empty, +atoms: std.AutoArrayHashMapUnmanaged(Elf.Ref, void) = .empty, + +dirty: bool = true, pub fn deinit(list: *AtomList, allocator: Allocator) void { list.atoms.deinit(allocator); @@ -19,10 +22,8 @@ pub fn offset(list: AtomList, elf_file: *Elf) u64 { } pub fn updateSize(list: *AtomList, elf_file: *Elf) void { - // TODO perhaps a 'stale' flag would be better here? 
- list.size = 0; - list.alignment = .@"1"; - for (list.atoms.items) |ref| { + assert(list.dirty); + for (list.atoms.keys()) |ref| { const atom_ptr = elf_file.atom(ref).?; assert(atom_ptr.alive); const off = atom_ptr.alignment.forward(list.size); @@ -34,6 +35,8 @@ pub fn updateSize(list: *AtomList, elf_file: *Elf) void { } pub fn allocate(list: *AtomList, elf_file: *Elf) !void { + assert(list.dirty); + const alloc_res = try elf_file.allocateChunk(.{ .shndx = list.output_section_index, .size = list.size, @@ -42,6 +45,8 @@ pub fn allocate(list: *AtomList, elf_file: *Elf) !void { }); list.value = @intCast(alloc_res.value); + log.debug("allocated atom_list({d}) at 0x{x}", .{ list.output_section_index, list.address(elf_file) }); + const slice = elf_file.sections.slice(); const shdr = &slice.items(.shdr)[list.output_section_index]; const last_atom_ref = &slice.items(.last_atom)[list.output_section_index]; @@ -56,13 +61,13 @@ pub fn allocate(list: *AtomList, elf_file: *Elf) !void { // FIXME:JK this currently ignores Thunks as valid chunks. { var idx: usize = 0; - while (idx < list.atoms.items.len) : (idx += 1) { - const curr_atom_ptr = elf_file.atom(list.atoms.items[idx]).?; + while (idx < list.atoms.keys().len) : (idx += 1) { + const curr_atom_ptr = elf_file.atom(list.atoms.keys()[idx]).?; if (idx > 0) { - curr_atom_ptr.prev_atom_ref = list.atoms.items[idx - 1]; + curr_atom_ptr.prev_atom_ref = list.atoms.keys()[idx - 1]; } - if (idx + 1 < list.atoms.items.len) { - curr_atom_ptr.next_atom_ref = list.atoms.items[idx + 1]; + if (idx + 1 < list.atoms.keys().len) { + curr_atom_ptr.next_atom_ref = list.atoms.keys()[idx + 1]; } } } @@ -74,17 +79,20 @@ pub fn allocate(list: *AtomList, elf_file: *Elf) !void { } // FIXME:JK if we had a link from Atom to parent AtomList we would not need to update Atom's value or osec index - for (list.atoms.items) |ref| { + for (list.atoms.keys()) |ref| { const atom_ptr = elf_file.atom(ref).?; atom_ptr.output_section_index = list.output_section_index; atom_ptr.value += list.value; } + + list.dirty = false; } pub fn write(list: AtomList, buffer: *std.ArrayList(u8), undefs: anytype, elf_file: *Elf) !void { const gpa = elf_file.base.comp.gpa; const osec = elf_file.sections.items(.shdr)[list.output_section_index]; assert(osec.sh_type != elf.SHT_NOBITS); + assert(!list.dirty); log.debug("writing atoms in section '{s}'", .{elf_file.getShString(osec.sh_name)}); @@ -92,7 +100,7 @@ pub fn write(list: AtomList, buffer: *std.ArrayList(u8), undefs: anytype, elf_fi try buffer.ensureUnusedCapacity(list_size); buffer.appendNTimesAssumeCapacity(0, list_size); - for (list.atoms.items) |ref| { + for (list.atoms.keys()) |ref| { const atom_ptr = elf_file.atom(ref).?; assert(atom_ptr.alive); @@ -128,7 +136,7 @@ pub fn writeRelocatable(list: AtomList, buffer: *std.ArrayList(u8), elf_file: *E try buffer.ensureUnusedCapacity(list_size); buffer.appendNTimesAssumeCapacity(0, list_size); - for (list.atoms.items) |ref| { + for (list.atoms.keys()) |ref| { const atom_ptr = elf_file.atom(ref).?; assert(atom_ptr.alive); @@ -149,13 +157,13 @@ pub fn writeRelocatable(list: AtomList, buffer: *std.ArrayList(u8), elf_file: *E } pub fn firstAtom(list: AtomList, elf_file: *Elf) *Atom { - assert(list.atoms.items.len > 0); - return elf_file.atom(list.atoms.items[0]).?; + assert(list.atoms.keys().len > 0); + return elf_file.atom(list.atoms.keys()[0]).?; } pub fn lastAtom(list: AtomList, elf_file: *Elf) *Atom { - assert(list.atoms.items.len > 0); - return elf_file.atom(list.atoms.items[list.atoms.items.len - 1]).?; 
+ assert(list.atoms.keys().len > 0); + return elf_file.atom(list.atoms.keys()[list.atoms.keys().len - 1]).?; } pub fn format( @@ -191,9 +199,9 @@ fn format2( list.alignment.toByteUnits() orelse 0, list.size, }); try writer.writeAll(" : atoms{ "); - for (list.atoms.items, 0..) |ref, i| { + for (list.atoms.keys(), 0..) |ref, i| { try writer.print("{}", .{ref}); - if (i < list.atoms.items.len - 1) try writer.writeAll(", "); + if (i < list.atoms.keys().len - 1) try writer.writeAll(", "); } try writer.writeAll(" }"); } diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index 2367b81797..96178ec6c5 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -29,6 +29,7 @@ cies: std.ArrayListUnmanaged(Cie) = .empty, eh_frame_data: std.ArrayListUnmanaged(u8) = .empty, alive: bool = true, +dirty: bool = true, num_dynrelocs: u32 = 0, output_symtab_ctx: Elf.SymtabCtx = .{}, @@ -917,7 +918,7 @@ pub fn initOutputSections(self: *Object, elf_file: *Elf) !void { }); const atom_list = &elf_file.sections.items(.atom_list_2)[osec]; atom_list.output_section_index = osec; - try atom_list.atoms.append(elf_file.base.comp.gpa, atom_ptr.ref()); + _ = try atom_list.atoms.getOrPut(elf_file.base.comp.gpa, atom_ptr.ref()); } } diff --git a/src/link/Elf/Symbol.zig b/src/link/Elf/Symbol.zig index cdf1b6f40a..6eaaedf28c 100644 --- a/src/link/Elf/Symbol.zig +++ b/src/link/Elf/Symbol.zig @@ -112,13 +112,16 @@ pub fn address(symbol: Symbol, opts: struct { plt: bool = true, trampoline: bool if (symbol.flags.has_trampoline and opts.trampoline) { return symbol.trampolineAddress(elf_file); } - if (symbol.flags.has_plt and opts.plt) { - if (!symbol.flags.is_canonical and symbol.flags.has_got) { + if (opts.plt) { + if (symbol.flags.has_pltgot) { + assert(!symbol.flags.is_canonical); // We have a non-lazy bound function pointer, use that! return symbol.pltGotAddress(elf_file); } - // Lazy-bound function it is! - return symbol.pltAddress(elf_file); + if (symbol.flags.has_plt) { + // Lazy-bound function it is! + return symbol.pltAddress(elf_file); + } } if (symbol.atom(elf_file)) |atom_ptr| { if (!atom_ptr.alive) { @@ -171,7 +174,7 @@ pub fn gotAddress(symbol: Symbol, elf_file: *Elf) i64 { } pub fn pltGotAddress(symbol: Symbol, elf_file: *Elf) i64 { - if (!(symbol.flags.has_plt and symbol.flags.has_got)) return 0; + if (!symbol.flags.has_pltgot) return 0; const extras = symbol.extra(elf_file); const shdr = elf_file.sections.items(.shdr)[elf_file.plt_got_section_index.?]; const cpu_arch = elf_file.getTarget().cpu.arch; @@ -430,6 +433,8 @@ pub const Flags = packed struct { has_plt: bool = false, /// Whether the PLT entry is canonical. is_canonical: bool = false, + /// Whether the PLT entry is indirected via GOT. + has_pltgot: bool = false, /// Whether the symbol contains COPYREL directive. 
needs_copy_rel: bool = false, diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index de70462b07..846bcac15c 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -101,6 +101,28 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .dwarf => |v| { var dwarf = Dwarf.init(&elf_file.base, v); + const addSectionSymbolWithAtom = struct { + fn addSectionSymbolWithAtom( + zo: *ZigObject, + allocator: Allocator, + name: [:0]const u8, + alignment: Atom.Alignment, + shndx: u32, + ) !Symbol.Index { + const name_off = try zo.addString(allocator, name); + const sym_index = try zo.addSectionSymbol(allocator, name_off, shndx); + const sym = zo.symbol(sym_index); + const atom_index = try zo.newAtom(allocator, name_off); + const atom_ptr = zo.atom(atom_index).?; + atom_ptr.alignment = alignment; + atom_ptr.output_section_index = shndx; + sym.ref = .{ .index = atom_index, .file = zo.index }; + zo.symtab.items(.shndx)[sym.esym_index] = atom_index; + zo.symtab.items(.elf_sym)[sym.esym_index].st_shndx = SHN_ATOM; + return sym_index; + } + }.addSectionSymbolWithAtom; + if (self.debug_str_index == null) { const osec = try elf_file.addSection(.{ .name = try elf_file.insertShString(".debug_str"), @@ -110,8 +132,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_str_section_dirty = true; - self.debug_str_index = try self.addSectionSymbol(gpa, ".debug_str", .@"1", osec); - elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_str_index.?).ref; + self.debug_str_index = try addSectionSymbolWithAtom(self, gpa, ".debug_str", .@"1", osec); } if (self.debug_info_index == null) { @@ -121,8 +142,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_info_section_dirty = true; - self.debug_info_index = try self.addSectionSymbol(gpa, ".debug_info", .@"1", osec); - elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_info_index.?).ref; + self.debug_info_index = try addSectionSymbolWithAtom(self, gpa, ".debug_info", .@"1", osec); } if (self.debug_abbrev_index == null) { @@ -132,8 +152,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_abbrev_section_dirty = true; - self.debug_abbrev_index = try self.addSectionSymbol(gpa, ".debug_abbrev", .@"1", osec); - elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_abbrev_index.?).ref; + self.debug_abbrev_index = try addSectionSymbolWithAtom(self, gpa, ".debug_abbrev", .@"1", osec); } if (self.debug_aranges_index == null) { @@ -143,8 +162,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 16, }); self.debug_aranges_section_dirty = true; - self.debug_aranges_index = try self.addSectionSymbol(gpa, ".debug_aranges", .@"16", osec); - elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_aranges_index.?).ref; + self.debug_aranges_index = try addSectionSymbolWithAtom(self, gpa, ".debug_aranges", .@"16", osec); } if (self.debug_line_index == null) { @@ -154,8 +172,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_line_section_dirty = true; - self.debug_line_index = try self.addSectionSymbol(gpa, ".debug_line", .@"1", osec); - elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_line_index.?).ref; + self.debug_line_index = try addSectionSymbolWithAtom(self, gpa, ".debug_line", .@"1", 
osec); } if (self.debug_line_str_index == null) { @@ -167,8 +184,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_line_str_section_dirty = true; - self.debug_line_str_index = try self.addSectionSymbol(gpa, ".debug_line_str", .@"1", osec); - elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_line_str_index.?).ref; + self.debug_line_str_index = try addSectionSymbolWithAtom(self, gpa, ".debug_line_str", .@"1", osec); } if (self.debug_loclists_index == null) { @@ -178,8 +194,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_loclists_section_dirty = true; - self.debug_loclists_index = try self.addSectionSymbol(gpa, ".debug_loclists", .@"1", osec); - elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_loclists_index.?).ref; + self.debug_loclists_index = try addSectionSymbolWithAtom(self, gpa, ".debug_loclists", .@"1", osec); } if (self.debug_rnglists_index == null) { @@ -189,8 +204,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_rnglists_section_dirty = true; - self.debug_rnglists_index = try self.addSectionSymbol(gpa, ".debug_rnglists", .@"1", osec); - elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_rnglists_index.?).ref; + self.debug_rnglists_index = try addSectionSymbolWithAtom(self, gpa, ".debug_rnglists", .@"1", osec); } if (self.eh_frame_index == null) { @@ -204,8 +218,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = ptr_size, }); self.eh_frame_section_dirty = true; - self.eh_frame_index = try self.addSectionSymbol(gpa, ".eh_frame", Atom.Alignment.fromNonzeroByteUnits(ptr_size), osec); - elf_file.sections.items(.last_atom)[osec] = self.symbol(self.eh_frame_index.?).ref; + self.eh_frame_index = try addSectionSymbolWithAtom(self, gpa, ".eh_frame", Atom.Alignment.fromNonzeroByteUnits(ptr_size), osec); } try dwarf.initMetadata(); @@ -336,8 +349,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi const atom_ptr = self.atom(sym.ref.index).?; if (!atom_ptr.alive) continue; - log.debug("parsing relocs in {s}", .{sym.name(elf_file)}); - const relocs = &self.relocs.items[atom_ptr.relocsShndx().?]; for (sect.units.items) |*unit| { try relocs.ensureUnusedCapacity(gpa, unit.cross_unit_relocs.items.len + @@ -350,12 +361,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi else 0)); const r_type = relocation.dwarf.crossSectionRelocType(dwarf.format, cpu_arch); - log.debug(" {s} <- r_off={x}, r_add={x}, r_type={}", .{ - self.symbol(sym_index).name(elf_file), - r_offset, - r_addend, - relocation.fmtRelocType(r_type, cpu_arch), - }); atom_ptr.addRelocAssumeCapacity(.{ .r_offset = r_offset, .r_addend = r_addend, @@ -384,12 +389,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi else 0)); const r_type = relocation.dwarf.crossSectionRelocType(dwarf.format, cpu_arch); - log.debug(" {s} <- r_off={x}, r_add={x}, r_type={}", .{ - self.symbol(target_sym_index).name(elf_file), - r_offset, - r_addend, - relocation.fmtRelocType(r_type, cpu_arch), - }); atom_ptr.addRelocAssumeCapacity(.{ .r_offset = r_offset, .r_addend = r_addend, @@ -410,12 +409,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi else 0)); const r_type = relocation.dwarf.crossSectionRelocType(dwarf.format, cpu_arch); - log.debug(" {s} <- 
r_off={x}, r_add={x}, r_type={}", .{ - self.symbol(sym_index).name(elf_file), - r_offset, - r_addend, - relocation.fmtRelocType(r_type, cpu_arch), - }); atom_ptr.addRelocAssumeCapacity(.{ .r_offset = r_offset, .r_addend = r_addend, @@ -430,12 +423,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi else 0)); const r_type = relocation.dwarf.crossSectionRelocType(dwarf.format, cpu_arch); - log.debug(" {s} <- r_off={x}, r_add={x}, r_type={}", .{ - self.symbol(sym_index).name(elf_file), - r_offset, - r_addend, - relocation.fmtRelocType(r_type, cpu_arch), - }); atom_ptr.addRelocAssumeCapacity(.{ .r_offset = r_offset, .r_addend = r_addend, @@ -464,12 +451,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi else 0)); const r_type = relocation.dwarf.crossSectionRelocType(dwarf.format, cpu_arch); - log.debug(" {s} <- r_off={x}, r_add={x}, r_type={}", .{ - self.symbol(target_sym_index).name(elf_file), - r_offset, - r_addend, - relocation.fmtRelocType(r_type, cpu_arch), - }); atom_ptr.addRelocAssumeCapacity(.{ .r_offset = r_offset, .r_addend = r_addend, @@ -481,12 +462,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi const r_offset = entry_off + reloc.source_off; const r_addend: i64 = @intCast(reloc.target_off); const r_type = relocation.dwarf.externalRelocType(target_sym.*, sect_index, dwarf.address_size, cpu_arch); - log.debug(" {s} <- r_off={x}, r_add={x}, r_type={}", .{ - target_sym.name(elf_file), - r_offset, - r_addend, - relocation.fmtRelocType(r_type, cpu_arch), - }); atom_ptr.addRelocAssumeCapacity(.{ .r_offset = r_offset, .r_addend = r_addend, @@ -1035,16 +1010,15 @@ pub fn lowerUav( } const osec = if (self.data_relro_index) |sym_index| - self.symbol(sym_index).atom(elf_file).?.output_section_index + self.symbol(sym_index).outputShndx(elf_file).? else osec: { const osec = try elf_file.addSection(.{ .name = try elf_file.insertShString(".data.rel.ro"), .type = elf.SHT_PROGBITS, .addralign = 1, .flags = elf.SHF_ALLOC | elf.SHF_WRITE, - .offset = std.math.maxInt(u64), }); - self.data_relro_index = try self.addSectionSymbol(gpa, ".data.rel.ro", .@"1", osec); + self.data_relro_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".data.rel.ro"), osec); break :osec osec; }; @@ -1150,24 +1124,14 @@ pub fn getOrCreateMetadataForNav( return gop.value_ptr.symbol_index; } -// FIXME: we always create an atom to basically store size and alignment, however, this is only true for -// sections that have a single atom like the debug sections. It would be a better solution to decouple this -// concept from the atom, maybe. -fn addSectionSymbol( - self: *ZigObject, - allocator: Allocator, - name: [:0]const u8, - alignment: Atom.Alignment, - shndx: u32, -) !Symbol.Index { - const name_off = try self.addString(allocator, name); - const index = try self.newSymbolWithAtom(allocator, name_off); +fn addSectionSymbol(self: *ZigObject, allocator: Allocator, name_off: u32, shndx: u32) !Symbol.Index { + const index = try self.newLocalSymbol(allocator, name_off); const sym = self.symbol(index); const esym = &self.symtab.items(.elf_sym)[sym.esym_index]; esym.st_info |= elf.STT_SECTION; - const atom_ptr = self.atom(sym.ref.index).?; - atom_ptr.alignment = alignment; - atom_ptr.output_section_index = shndx; + // TODO create fake shdrs? 
+ // esym.st_shndx = shndx; + sym.output_section_index = shndx; return index; } @@ -1186,15 +1150,14 @@ fn getNavShdrIndex( const nav_val = zcu.navValue(nav_index); if (ip.isFunctionType(nav_val.typeOf(zcu).toIntern())) { if (self.text_index) |symbol_index| - return self.symbol(symbol_index).atom(elf_file).?.output_section_index; + return self.symbol(symbol_index).outputShndx(elf_file).?; const osec = try elf_file.addSection(.{ .type = elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR, .name = try elf_file.insertShString(".text"), .addralign = 1, - .offset = std.math.maxInt(u64), }); - self.text_index = try self.addSectionSymbol(gpa, ".text", .@"1", osec); + self.text_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".text"), osec); return osec; } const is_const, const is_threadlocal, const nav_init = switch (ip.indexToKey(nav_val.toIntern())) { @@ -1209,71 +1172,63 @@ fn getNavShdrIndex( } else true; if (is_bss) { if (self.tbss_index) |symbol_index| - return self.symbol(symbol_index).atom(elf_file).?.output_section_index; + return self.symbol(symbol_index).outputShndx(elf_file).?; const osec = try elf_file.addSection(.{ .name = try elf_file.insertShString(".tbss"), .flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS, .type = elf.SHT_NOBITS, .addralign = 1, }); - self.tbss_index = try self.addSectionSymbol(gpa, ".tbss", .@"1", osec); + self.tbss_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".tbss"), osec); return osec; } if (self.tdata_index) |symbol_index| - return self.symbol(symbol_index).atom(elf_file).?.output_section_index; + return self.symbol(symbol_index).outputShndx(elf_file).?; const osec = try elf_file.addSection(.{ .type = elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS, .name = try elf_file.insertShString(".tdata"), .addralign = 1, - .offset = std.math.maxInt(u64), }); - self.tdata_index = try self.addSectionSymbol(gpa, ".tdata", .@"1", osec); + self.tdata_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".tdata"), osec); return osec; } if (is_const) { if (self.data_relro_index) |symbol_index| - return self.symbol(symbol_index).atom(elf_file).?.output_section_index; + return self.symbol(symbol_index).outputShndx(elf_file).?; const osec = try elf_file.addSection(.{ .name = try elf_file.insertShString(".data.rel.ro"), .type = elf.SHT_PROGBITS, .addralign = 1, .flags = elf.SHF_ALLOC | elf.SHF_WRITE, - .offset = std.math.maxInt(u64), }); - self.data_relro_index = try self.addSectionSymbol(gpa, ".data.rel.ro", .@"1", osec); + self.data_relro_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".data.rel.ro"), osec); return osec; } if (nav_init != .none and Value.fromInterned(nav_init).isUndefDeep(zcu)) return switch (zcu.navFileScope(nav_index).mod.optimize_mode) { .Debug, .ReleaseSafe => { if (self.data_index) |symbol_index| - return self.symbol(symbol_index).atom(elf_file).?.output_section_index; + return self.symbol(symbol_index).outputShndx(elf_file).?; const osec = try elf_file.addSection(.{ .name = try elf_file.insertShString(".data"), .type = elf.SHT_PROGBITS, .addralign = ptr_size, .flags = elf.SHF_ALLOC | elf.SHF_WRITE, - .offset = std.math.maxInt(u64), }); - self.data_index = try self.addSectionSymbol( - gpa, - ".data", - Atom.Alignment.fromNonzeroByteUnits(ptr_size), - osec, - ); + self.data_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".data"), osec); return osec; }, .ReleaseFast, .ReleaseSmall => { if (self.bss_index) |symbol_index| - return 
self.symbol(symbol_index).atom(elf_file).?.output_section_index;
+            return self.symbol(symbol_index).outputShndx(elf_file).?;
        const osec = try elf_file.addSection(.{
            .type = elf.SHT_NOBITS,
            .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
            .name = try elf_file.insertShString(".bss"),
            .addralign = 1,
        });
-        self.bss_index = try self.addSectionSymbol(gpa, ".bss", .@"1", osec);
+        self.bss_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".bss"), osec);
        return osec;
    },
};
@@ -1282,31 +1237,25 @@ fn getNavShdrIndex(
    } else true;
    if (is_bss) {
        if (self.bss_index) |symbol_index|
-            return self.symbol(symbol_index).atom(elf_file).?.output_section_index;
+            return self.symbol(symbol_index).outputShndx(elf_file).?;
        const osec = try elf_file.addSection(.{
            .type = elf.SHT_NOBITS,
            .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
            .name = try elf_file.insertShString(".bss"),
            .addralign = 1,
        });
-        self.bss_index = try self.addSectionSymbol(gpa, ".bss", .@"1", osec);
+        self.bss_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".bss"), osec);
        return osec;
    }
    if (self.data_index) |symbol_index|
-        return self.symbol(symbol_index).atom(elf_file).?.output_section_index;
+        return self.symbol(symbol_index).outputShndx(elf_file).?;
    const osec = try elf_file.addSection(.{
        .name = try elf_file.insertShString(".data"),
        .type = elf.SHT_PROGBITS,
        .addralign = ptr_size,
        .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
-        .offset = std.math.maxInt(u64),
    });
-    self.data_index = try self.addSectionSymbol(
-        gpa,
-        ".data",
-        Atom.Alignment.fromNonzeroByteUnits(ptr_size),
-        osec,
-    );
+    self.data_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".data"), osec);
    return osec;
}
@@ -1354,7 +1303,7 @@ fn updateNavCode(
    const capacity = atom_ptr.capacity(elf_file);
    const need_realloc = code.len > capacity or !required_alignment.check(@intCast(atom_ptr.value));
    if (need_realloc) {
-        try self.growAtom(atom_ptr, elf_file);
+        try self.allocateAtom(atom_ptr, true, elf_file);
        log.debug("growing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), old_vaddr, atom_ptr.value });
        if (old_vaddr != atom_ptr.value) {
            sym.value = 0;
@@ -1364,7 +1313,7 @@ fn updateNavCode(
            // TODO shrink section size
        }
    } else {
-        try self.allocateAtom(atom_ptr, elf_file);
+        try self.allocateAtom(atom_ptr, true, elf_file);
        errdefer self.freeNavMetadata(elf_file, sym_index);
        sym.value = 0;
        esym.st_value = 0;
@@ -1439,7 +1388,7 @@ fn updateTlv(
    const gop = try self.tls_variables.getOrPut(gpa, atom_ptr.atom_index);
    assert(!gop.found_existing); // TODO incremental updates
-    try self.allocateAtom(atom_ptr, elf_file);
+    try self.allocateAtom(atom_ptr, true, elf_file);
    sym.value = 0;
    esym.st_value = 0;
@@ -1557,9 +1506,8 @@ pub fn updateFunc(
        .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
        .type = elf.SHT_PROGBITS,
        .addralign = 1,
-        .offset = std.math.maxInt(u64),
    });
-    self.text_index = try self.addSectionSymbol(gpa, ".text", .@"1", osec);
+    self.text_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".text"), osec);
    break :osec osec;
};
const name_off = try self.addString(gpa, name);
@@ -1726,29 +1674,27 @@ fn updateLazySymbol(
    const output_section_index = switch (sym.kind) {
        .code => if (self.text_index) |sym_index|
-            self.symbol(sym_index).atom(elf_file).?.output_section_index
+            self.symbol(sym_index).outputShndx(elf_file).?
        else osec: {
            const osec = try elf_file.addSection(.{
                .name = try elf_file.insertShString(".text"),
                .type = elf.SHT_PROGBITS,
                .addralign = 1,
                .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
-                .offset = std.math.maxInt(u64),
            });
-            self.text_index = try self.addSectionSymbol(gpa, ".text", .@"1", osec);
+            self.text_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".text"), osec);
            break :osec osec;
        },
        .const_data => if (self.rodata_index) |sym_index|
-            self.symbol(sym_index).atom(elf_file).?.output_section_index
+            self.symbol(sym_index).outputShndx(elf_file).?
        else osec: {
            const osec = try elf_file.addSection(.{
                .name = try elf_file.insertShString(".rodata"),
                .type = elf.SHT_PROGBITS,
                .addralign = 1,
                .flags = elf.SHF_ALLOC,
-                .offset = std.math.maxInt(u64),
            });
-            self.rodata_index = try self.addSectionSymbol(gpa, ".rodata", .@"1", osec);
+            self.rodata_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".rodata"), osec);
            break :osec osec;
        },
    };
@@ -1765,7 +1711,7 @@ fn updateLazySymbol(
    atom_ptr.size = code.len;
    atom_ptr.output_section_index = output_section_index;
-    try self.allocateAtom(atom_ptr, elf_file);
+    try self.allocateAtom(atom_ptr, true, elf_file);
    errdefer self.freeNavMetadata(elf_file, symbol_index);
    local_sym.value = 0;
@@ -1820,7 +1766,7 @@ fn lowerConst(
    atom_ptr.size = code.len;
    atom_ptr.output_section_index = output_section_index;
-    try self.allocateAtom(atom_ptr, elf_file);
+    try self.allocateAtom(atom_ptr, true, elf_file);
    errdefer self.freeNavMetadata(elf_file, sym_index);
    try elf_file.base.file.?.pwriteAll(code, atom_ptr.offset(elf_file));
@@ -2017,17 +1963,27 @@ fn writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) !void {
    }
}

-fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void {
+pub fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, requires_padding: bool, elf_file: *Elf) !void {
+    const slice = elf_file.sections.slice();
+    const shdr = &slice.items(.shdr)[atom_ptr.output_section_index];
+    const last_atom_ref = &slice.items(.last_atom)[atom_ptr.output_section_index];
+
+    // FIXME:JK this only works if this atom is the only atom in the output section
+    // In every other case, we need to redo the prev/next links
+    if (last_atom_ref.eql(atom_ptr.ref())) last_atom_ref.* = .{};
+
    const alloc_res = try elf_file.allocateChunk(.{
        .shndx = atom_ptr.output_section_index,
        .size = atom_ptr.size,
        .alignment = atom_ptr.alignment,
+        .requires_padding = requires_padding,
    });
    atom_ptr.value = @intCast(alloc_res.value);
-
-    const slice = elf_file.sections.slice();
-    const shdr = &slice.items(.shdr)[atom_ptr.output_section_index];
-    const last_atom_ref = &slice.items(.last_atom)[atom_ptr.output_section_index];
+    log.debug("allocated {s} at {x}\n placement {?}", .{
+        atom_ptr.name(elf_file),
+        atom_ptr.offset(elf_file),
+        alloc_res.placement,
+    });

    const expand_section = if (elf_file.atom(alloc_res.placement)) |placement_atom|
        placement_atom.nextAtom(elf_file) == null
@@ -2049,22 +2005,6 @@ fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void {
    }
    shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits().?);
-    const sect_atom_ptr = for ([_]?Symbol.Index{
-        self.text_index,
-        self.rodata_index,
-        self.data_relro_index,
-        self.data_index,
-        self.tdata_index,
-    }) |maybe_sym_index| {
-        const sect_sym_index = maybe_sym_index orelse continue;
-        const sect_atom_ptr = self.symbol(sect_sym_index).atom(elf_file).?;
-        if (sect_atom_ptr.output_section_index == atom_ptr.output_section_index) break sect_atom_ptr;
-    } else null;
-    if (sect_atom_ptr) |sap| {
-        sap.size = shdr.sh_size;
-        sap.alignment = Atom.Alignment.fromNonzeroByteUnits(shdr.sh_addralign);
-    }
-
    // This function can also reallocate an atom.
    // In this case we need to "unplug" it from its previous location before
    // plugging it in to its new location.
@@ -2083,11 +2023,37 @@ fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void {
        atom_ptr.prev_atom_ref = .{ .index = 0, .file = 0 };
        atom_ptr.next_atom_ref = .{ .index = 0, .file = 0 };
    }
+
+    log.debug(" prev {?}, next {?}", .{ atom_ptr.prev_atom_ref, atom_ptr.next_atom_ref });
}

-fn growAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void {
-    if (!atom_ptr.alignment.check(@intCast(atom_ptr.value)) or atom_ptr.size > atom_ptr.capacity(elf_file)) {
-        try self.allocateAtom(atom_ptr, elf_file);
+pub fn resetShdrIndexes(self: *ZigObject, backlinks: anytype) void {
+    for (self.atoms_indexes.items) |atom_index| {
+        const atom_ptr = self.atom(atom_index) orelse continue;
+        atom_ptr.output_section_index = backlinks[atom_ptr.output_section_index];
+    }
+    inline for ([_]?Symbol.Index{
+        self.text_index,
+        self.rodata_index,
+        self.data_relro_index,
+        self.data_index,
+        self.bss_index,
+        self.tdata_index,
+        self.tbss_index,
+        self.eh_frame_index,
+        self.debug_info_index,
+        self.debug_abbrev_index,
+        self.debug_aranges_index,
+        self.debug_str_index,
+        self.debug_line_index,
+        self.debug_line_str_index,
+        self.debug_loclists_index,
+        self.debug_rnglists_index,
+    }) |maybe_sym_index| {
+        if (maybe_sym_index) |sym_index| {
+            const sym = self.symbol(sym_index);
+            sym.output_section_index = backlinks[sym.output_section_index];
+        }
    }
}
@@ -2095,6 +2061,33 @@ pub fn asFile(self: *ZigObject) File {
    return .{ .zig_object = self };
}

+pub fn sectionSymbol(self: *ZigObject, shndx: u32, elf_file: *Elf) ?*Symbol {
+    inline for ([_]?Symbol.Index{
+        self.text_index,
+        self.rodata_index,
+        self.data_relro_index,
+        self.data_index,
+        self.bss_index,
+        self.tdata_index,
+        self.tbss_index,
+        self.eh_frame_index,
+        self.debug_info_index,
+        self.debug_abbrev_index,
+        self.debug_aranges_index,
+        self.debug_str_index,
+        self.debug_line_index,
+        self.debug_line_str_index,
+        self.debug_loclists_index,
+        self.debug_rnglists_index,
+    }) |maybe_sym_index| {
+        if (maybe_sym_index) |sym_index| {
+            const sym = self.symbol(sym_index);
+            if (sym.outputShndx(elf_file) == shndx) return sym;
+        }
+    }
+    return null;
+}
+
pub fn addString(self: *ZigObject, allocator: Allocator, string: []const u8) !u32 {
    return self.strtab.insert(allocator, string);
}
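The ZigObject changes above fold the old growAtom path into a single allocateAtom entry point (now taking a requires_padding flag) and add two helpers, resetShdrIndexes and sectionSymbol, that walk the same fixed list of per-section symbol indexes. resetShdrIndexes remaps every stored output_section_index through a backlinks table after the output sections have been renumbered. A minimal standalone sketch of that remapping pattern; remapIndexes and the test data below are illustrative, not the linker's API:

const std = @import("std");

/// Remap stored section indexes through a backlinks table, mirroring the
/// pattern ZigObject.resetShdrIndexes applies to atoms and section symbols.
fn remapIndexes(indexes: []u32, backlinks: []const u32) void {
    for (indexes) |*shndx| shndx.* = backlinks[shndx.*];
}

test "indexes follow the backlinks table" {
    var indexes = [_]u32{ 2, 0, 1 };
    const backlinks = [_]u32{ 5, 6, 7 };
    remapIndexes(&indexes, &backlinks);
    try std.testing.expectEqualSlices(u32, &.{ 7, 5, 6 }, &indexes);
}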
diff --git a/src/link/Elf/file.zig b/src/link/Elf/file.zig
index 88dc807274..740987feb2 100644
--- a/src/link/Elf/file.zig
+++ b/src/link/Elf/file.zig
@@ -95,19 +95,19 @@ pub const File = union(enum) {
            log.debug("'{s}' is non-local", .{sym.name(ef)});
            try ef.dynsym.addSymbol(ref, ef);
        }
-        if (sym.flags.needs_got) {
+        if (sym.flags.needs_got and !sym.flags.has_got) {
            log.debug("'{s}' needs GOT", .{sym.name(ef)});
            _ = try ef.got.addGotSymbol(ref, ef);
        }
        if (sym.flags.needs_plt) {
-            if (sym.flags.is_canonical) {
+            if (sym.flags.is_canonical and !sym.flags.has_plt) {
                log.debug("'{s}' needs CPLT", .{sym.name(ef)});
                sym.flags.@"export" = true;
                try ef.plt.addSymbol(ref, ef);
-            } else if (sym.flags.needs_got) {
+            } else if (sym.flags.needs_got and !sym.flags.has_pltgot) {
                log.debug("'{s}' needs PLTGOT", .{sym.name(ef)});
                try ef.plt_got.addSymbol(ref, ef);
-            } else {
+            } else if (!sym.flags.has_plt) {
                log.debug("'{s}' needs PLT", .{sym.name(ef)});
                try ef.plt.addSymbol(ref, ef);
            }
@@ -116,15 +116,15 @@ pub const File = union(enum) {
            log.debug("'{s}' needs COPYREL", .{sym.name(ef)});
            try ef.copy_rel.addSymbol(ref, ef);
        }
-        if (sym.flags.needs_tlsgd) {
+        if (sym.flags.needs_tlsgd and !sym.flags.has_tlsgd) {
            log.debug("'{s}' needs TLSGD", .{sym.name(ef)});
            try ef.got.addTlsGdSymbol(ref, ef);
        }
-        if (sym.flags.needs_gottp) {
+        if (sym.flags.needs_gottp and !sym.flags.has_gottp) {
            log.debug("'{s}' needs GOTTP", .{sym.name(ef)});
            try ef.got.addGotTpSymbol(ref, ef);
        }
-        if (sym.flags.needs_tlsdesc) {
+        if (sym.flags.needs_tlsdesc and !sym.flags.has_tlsdesc) {
            log.debug("'{s}' needs TLSDESC", .{sym.name(ef)});
            try ef.got.addTlsDescSymbol(ref, ef);
        }
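The file.zig hunks all apply one pattern: each synthetic entry (GOT, CPLT, PLTGOT, PLT, TLSGD, GOTTP, TLSDESC) is created only while the matching has_* flag is still clear, so re-running symbol resolution on a later incremental flush cannot append duplicate entries. A small self-contained sketch of that idempotency guard, with a hypothetical Flags struct and a counter standing in for the real symbol flags and GOT writer:

const std = @import("std");

/// Illustrative needs_*/has_* pair: an entry is produced at most once even
/// if the resolver runs again on every incremental flush.
const Flags = struct {
    needs_got: bool = false,
    has_got: bool = false,
};

fn addGotEntryOnce(flags: *Flags, got_entries: *u32) void {
    if (flags.needs_got and !flags.has_got) {
        got_entries.* += 1; // stand-in for got.addGotSymbol(...)
        flags.has_got = true;
    }
}

test "a symbol gets at most one GOT entry across repeated flushes" {
    var flags: Flags = .{ .needs_got = true };
    var got_entries: u32 = 0;
    addGotEntryOnce(&flags, &got_entries);
    addGotEntryOnce(&flags, &got_entries);
    try std.testing.expectEqual(@as(u32, 1), got_entries);
}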
diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig
index 4bd42ffcd6..8768c1d754 100644
--- a/src/link/Elf/relocatable.zig
+++ b/src/link/Elf/relocatable.zig
@@ -295,7 +295,6 @@ fn initSections(elf_file: *Elf) !void {
                elf.SHT_PROGBITS,
            .flags = elf.SHF_ALLOC,
            .addralign = elf_file.ptrWidthBytes(),
-            .offset = std.math.maxInt(u64),
        });
    }
    elf_file.eh_frame_rela_section_index = elf_file.sectionByName(".rela.eh_frame") orelse
@@ -324,7 +323,6 @@ fn initComdatGroups(elf_file: *Elf) !void {
            .type = elf.SHT_GROUP,
            .entsize = @sizeOf(u32),
            .addralign = @alignOf(u32),
-            .offset = std.math.maxInt(u64),
        }),
        .cg_ref = .{ .index = @intCast(cg_index), .file = index },
    };
@@ -335,9 +333,11 @@ fn updateSectionSizes(elf_file: *Elf) !void {
    const slice = elf_file.sections.slice();
    for (slice.items(.atom_list_2)) |*atom_list| {
-        if (atom_list.atoms.items.len == 0) continue;
+        if (atom_list.atoms.keys().len == 0) continue;
+        if (!atom_list.dirty) continue;
        atom_list.updateSize(elf_file);
        try atom_list.allocate(elf_file);
+        atom_list.dirty = false;
    }
    for (slice.items(.shdr), 0..) |*shdr, shndx| {
@@ -392,24 +392,14 @@ fn allocateAllocSections(elf_file: *Elf) !void {
        shdr.sh_size = 0;
        const new_offset = try elf_file.findFreeSpace(needed_size, shdr.sh_addralign);
-        if (elf_file.zigObjectPtr()) |zo| blk: {
-            const existing_size = for ([_]?Symbol.Index{
-                zo.text_index,
-                zo.rodata_index,
-                zo.data_relro_index,
-                zo.data_index,
-                zo.tdata_index,
-                zo.eh_frame_index,
-            }) |maybe_sym_index| {
-                const sect_sym_index = maybe_sym_index orelse continue;
-                const sect_atom_ptr = zo.symbol(sect_sym_index).atom(elf_file).?;
-                if (sect_atom_ptr.output_section_index == shndx) break sect_atom_ptr.size;
-            } else break :blk;
-            log.debug("moving {s} from 0x{x} to 0x{x}", .{
-                elf_file.getShString(shdr.sh_name),
-                shdr.sh_offset,
-                new_offset,
-            });
+        log.debug("moving {s} from 0x{x} to 0x{x}", .{
+            elf_file.getShString(shdr.sh_name),
+            shdr.sh_offset,
+            new_offset,
+        });
+
+        if (shdr.sh_offset > 0) {
+            const existing_size = elf_file.sectionSize(@intCast(shndx));
            const amt = try elf_file.base.file.?.copyRangeAll(
                shdr.sh_offset,
                elf_file.base.file.?,
@@ -434,7 +424,7 @@ fn writeAtoms(elf_file: *Elf) !void {
    const slice = elf_file.sections.slice();
    for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, atom_list| {
        if (shdr.sh_type == elf.SHT_NOBITS) continue;
-        if (atom_list.atoms.items.len == 0) continue;
+        if (atom_list.atoms.keys().len == 0) continue;
        try atom_list.writeRelocatable(&buffer, elf_file);
    }
}
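In relocatable.zig, atom lists gain a dirty bit: updateSectionSizes now skips lists that have not changed since the previous flush and clears the bit once a list has been re-laid-out (the .items.len to .keys().len switch tracks the atoms container's new map-like API). A rough sketch of the skip-if-clean pattern, using a stand-in AtomList type rather than the linker's:

const std = @import("std");

/// Stand-in for the linker's AtomList; only the dirty tracking is shown.
const AtomList = struct {
    size: u64 = 0,
    dirty: bool = true,

    fn updateSize(list: *AtomList) void {
        // Placeholder for summing the member atoms' sizes.
        list.size = 64;
    }
};

fn layOut(lists: []AtomList) void {
    for (lists) |*list| {
        if (!list.dirty) continue; // unchanged since the previous flush
        list.updateSize();
        list.dirty = false;
    }
}

test "clean lists are skipped on the next flush" {
    var lists = [_]AtomList{.{}};
    layOut(&lists); // first flush does the work and clears dirty
    lists[0].size = 0; // would only be recomputed if dirty were set again
    layOut(&lists); // second flush skips the clean list
    try std.testing.expectEqual(@as(u64, 0), lists[0].size);
}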
diff --git a/src/link/Elf/synthetic_sections.zig b/src/link/Elf/synthetic_sections.zig
index f914bb8d84..987ee4bf9a 100644
--- a/src/link/Elf/synthetic_sections.zig
+++ b/src/link/Elf/synthetic_sections.zig
@@ -435,6 +435,8 @@ pub const GotSection = struct {
        const cpu_arch = elf_file.getTarget().cpu.arch;
        try elf_file.rela_dyn.ensureUnusedCapacity(gpa, got.numRela(elf_file));
+        relocs_log.debug(".got", .{});
+
        for (got.entries.items) |entry| {
            const symbol = elf_file.symbol(entry.ref);
            const extra = if (symbol) |s| s.extra(elf_file) else null;
@@ -447,6 +449,7 @@
                .offset = offset,
                .sym = extra.?.dynamic,
                .type = relocation.encode(.glob_dat, cpu_arch),
+                .target = symbol,
            });
            continue;
        }
@@ -455,6 +458,7 @@
                .offset = offset,
                .type = relocation.encode(.irel, cpu_arch),
                .addend = symbol.?.address(.{ .plt = false }, elf_file),
+                .target = symbol,
            });
            continue;
        }
@@ -465,6 +469,7 @@
                .offset = offset,
                .type = relocation.encode(.rel, cpu_arch),
                .addend = symbol.?.address(.{ .plt = false }, elf_file),
+                .target = symbol,
            });
        }
    },
@@ -486,17 +491,20 @@
                .offset = offset,
                .sym = extra.?.dynamic,
                .type = relocation.encode(.dtpmod, cpu_arch),
+                .target = symbol,
            });
            elf_file.addRelaDynAssumeCapacity(.{
                .offset = offset + 8,
                .sym = extra.?.dynamic,
                .type = relocation.encode(.dtpoff, cpu_arch),
+                .target = symbol,
            });
        } else if (is_dyn_lib) {
            elf_file.addRelaDynAssumeCapacity(.{
                .offset = offset,
                .sym = extra.?.dynamic,
                .type = relocation.encode(.dtpmod, cpu_arch),
+                .target = symbol,
            });
        }
    },
@@ -508,12 +516,14 @@
                .offset = offset,
                .sym = extra.?.dynamic,
                .type = relocation.encode(.tpoff, cpu_arch),
+                .target = symbol,
            });
        } else if (is_dyn_lib) {
            elf_file.addRelaDynAssumeCapacity(.{
                .offset = offset,
                .type = relocation.encode(.tpoff, cpu_arch),
                .addend = symbol.?.address(.{}, elf_file) - elf_file.tlsAddress(),
+                .target = symbol,
            });
        }
    },
@@ -525,6 +535,7 @@
            .sym = if (symbol.?.flags.import) extra.?.dynamic else 0,
            .type = relocation.encode(.tlsdesc, cpu_arch),
            .addend = if (symbol.?.flags.import) 0 else symbol.?.address(.{}, elf_file) - elf_file.tlsAddress(),
+            .target = symbol,
        });
    },
}
@@ -681,6 +692,9 @@ pub const PltSection = struct {
        const gpa = comp.gpa;
        const cpu_arch = elf_file.getTarget().cpu.arch;
        try elf_file.rela_plt.ensureUnusedCapacity(gpa, plt.numRela());
+
+        relocs_log.debug(".plt", .{});
+
        for (plt.symbols.items) |ref| {
            const sym = elf_file.symbol(ref).?;
            assert(sym.flags.import);
@@ -688,6 +702,14 @@
            const extra = sym.extra(elf_file);
            const r_offset: u64 = @intCast(sym.gotPltAddress(elf_file));
            const r_sym: u64 = extra.dynamic;
            const r_type = relocation.encode(.jump_slot, cpu_arch);
+
+            relocs_log.debug(" {s}: [{x} => {d}({s})] + 0", .{
+                relocation.fmtRelocType(r_type, cpu_arch),
+                r_offset,
+                r_sym,
+                sym.name(elf_file),
+            });
+
            elf_file.rela_plt.appendAssumeCapacity(.{
                .r_offset = r_offset,
                .r_info = (r_sym << 32) | r_type,
            });
@@ -895,8 +917,7 @@ pub const PltGotSection = struct {
        const gpa = comp.gpa;
        const index = @as(u32, @intCast(plt_got.symbols.items.len));
        const symbol = elf_file.symbol(ref).?;
-        symbol.flags.has_plt = true;
-        symbol.flags.has_got = true;
+        symbol.flags.has_pltgot = true;
        symbol.addExtra(.{ .plt_got = index }, elf_file);
        try plt_got.symbols.append(gpa, ref);
    }
@@ -1054,6 +1075,9 @@ pub const CopyRelSection = struct {
        const gpa = comp.gpa;
        const cpu_arch = elf_file.getTarget().cpu.arch;
        try elf_file.rela_dyn.ensureUnusedCapacity(gpa, copy_rel.numRela());
+
+        relocs_log.debug(".copy.rel", .{});
+
        for (copy_rel.symbols.items) |ref| {
            const sym = elf_file.symbol(ref).?;
            assert(sym.flags.import and sym.flags.has_copy_rel);
@@ -1526,6 +1550,7 @@
const elf = std.elf;
const math = std.math;
const mem = std.mem;
const log = std.log.scoped(.link);
+const relocs_log = std.log.scoped(.link_relocs);
const relocation = @import("relocation.zig");
const std = @import("std");
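The synthetic_sections.zig hunks route the new relocation dump through a dedicated log scope, .link_relocs, so it can be enabled independently of the general .link scope; the .target field added to the rela entries presumably lets those log lines name the symbol being relocated. A minimal example of declaring and using such a scoped logger outside the compiler; the offsets and symbol name below are made up:

const std = @import("std");

// A scoped logger in the style of the new relocs_log above.
const relocs_log = std.log.scoped(.link_relocs);

pub fn main() void {
    const r_offset: u64 = 0x20a8;
    const r_sym: u64 = 3;
    // In a Debug build, std.log emits debug-level messages by default and the
    // scope name appears in the prefix, e.g. "debug(link_relocs): ...".
    relocs_log.debug("JUMP_SLOT: [{x} => {d}(example_symbol)] + 0", .{ r_offset, r_sym });
}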
