From 4d804c1b239e45a0a28f4caf1f9748dac44ddce2 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Tue, 24 Jan 2023 17:55:10 +0100 Subject: macho: completely remove allocateDeclIndexes in favor of linker tracking --- src/Module.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/Module.zig') diff --git a/src/Module.zig b/src/Module.zig index b17c140231..2d85399baf 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5324,7 +5324,7 @@ pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void { // Until then, we did call `allocateDeclIndexes` on this anonymous Decl and so we // must call `freeDecl` in the linker backend now. switch (mod.comp.bin_file.tag) { - .c => {}, // this linker backend has already migrated to the new API + .macho, .c => {}, // this linker backend has already migrated to the new API else => if (decl.has_tv) { if (decl.ty.isFnOrHasRuntimeBits()) { mod.comp.bin_file.freeDecl(decl_index); -- cgit v1.2.3 From e1b9800ffa74a637e2b0a6356249c2c37228ec01 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Thu, 26 Jan 2023 13:17:38 +0100 Subject: elf: migrate to new non-allocateDeclIndexes API --- src/Module.zig | 2 +- src/arch/aarch64/CodeGen.zig | 17 ++----- src/arch/arm/CodeGen.zig | 103 ++++++++++++++++++++-------------------- src/arch/riscv64/CodeGen.zig | 13 ++--- src/arch/sparc64/CodeGen.zig | 17 +++---- src/arch/x86_64/CodeGen.zig | 15 ++---- src/link.zig | 2 +- src/link/Elf.zig | 110 +++++++++++++++++++++---------------------- src/link/Elf/Atom.zig | 52 +++++++++++++++++--- src/link/MachO.zig | 7 +-- 10 files changed, 175 insertions(+), 163 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Module.zig b/src/Module.zig index 2d85399baf..4e1f65aff4 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5324,7 +5324,7 @@ pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void { // Until then, we did call `allocateDeclIndexes` on this anonymous Decl and so we // must call `freeDecl` in the linker backend now. 
switch (mod.comp.bin_file.tag) { - .macho, .c => {}, // this linker backend has already migrated to the new API + .elf, .macho, .c => {}, // this linker backend has already migrated to the new API else => if (decl.has_tv) { if (decl.ty.isFnOrHasRuntimeBits()) { mod.comp.bin_file.freeDecl(decl_index); diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 93ac450bae..8c3f3e8168 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4307,12 +4307,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const fn_owner_decl = mod.declPtr(func.owner_decl); if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const ptr_bits = self.target.cpu.arch.ptrBitWidth(); - const ptr_bytes: u64 = @divExact(ptr_bits, 8); - const got_addr = blk: { - const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; - break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes); - }; + try fn_owner_decl.link.elf.ensureInitialized(elf_file); + const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file)); try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr }); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { try fn_owner_decl.link.macho.ensureInitialized(macho_file); @@ -6125,20 +6121,15 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne mod.markDeclAlive(decl); if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; - const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; - return MCValue{ .memory = got_addr }; + try decl.link.elf.ensureInitialized(elf_file); + return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) }; } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { - // Because MachO is PIE-always-on, we defer memory address resolution until - // the linker has enough info to perform relocations. try decl.link.macho.ensureInitialized(macho_file); return MCValue{ .linker_load = .{ .type = .got, .sym_index = decl.link.macho.getSymbolIndex().?, } }; } else if (self.bin_file.cast(link.File.Coff)) |_| { - // Because COFF is PIE-always-on, we defer memory address resolution until - // the linker has enough info to perform relocations. assert(decl.link.coff.sym_index != 0); return MCValue{ .linker_load = .{ .type = .got, diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 6a7986a7a8..49f979624d 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -4253,59 +4253,57 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. 
- switch (self.bin_file.tag) { - .elf => { - if (self.air.value(callee)) |func_value| { - if (func_value.castTag(.function)) |func_payload| { - const func = func_payload.data; - const ptr_bits = self.target.cpu.arch.ptrBitWidth(); - const ptr_bytes: u64 = @divExact(ptr_bits, 8); - const mod = self.bin_file.options.module.?; - const fn_owner_decl = mod.declPtr(func.owner_decl); - const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { - const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; - break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes); - } else unreachable; - try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr }); - } else if (func_value.castTag(.extern_fn)) |_| { - return self.fail("TODO implement calling extern functions", .{}); - } else { - return self.fail("TODO implement calling bitcasted functions", .{}); - } + if (self.air.value(callee)) |func_value| { + if (func_value.castTag(.function)) |func_payload| { + const func = func_payload.data; + const mod = self.bin_file.options.module.?; + const fn_owner_decl = mod.declPtr(func.owner_decl); + + if (self.bin_file.cast(link.File.Elf)) |elf_file| { + try fn_owner_decl.link.elf.ensureInitialized(elf_file); + const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file)); + try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr }); + } else if (self.bin_file.cast(link.File.MachO)) |_| { + unreachable; // unsupported architecture for MachO } else { - assert(ty.zigTypeTag() == .Pointer); - const mcv = try self.resolveInst(callee); - - try self.genSetReg(Type.initTag(.usize), .lr, mcv); - } - - // TODO: add Instruction.supportedOn - // function for ARM - if (Target.arm.featureSetHas(self.target.cpu.features, .has_v5t)) { - _ = try self.addInst(.{ - .tag = .blx, - .data = .{ .reg = .lr }, + return self.fail("TODO implement call on {s} for {s}", .{ + @tagName(self.bin_file.tag), + @tagName(self.target.cpu.arch), }); - } else { - return self.fail("TODO fix blx emulation for ARM <v5", .{}); - } - }, - .macho => unreachable, // unsupported architecture for MachO - .coff => return self.fail("TODO implement call in COFF for {}", .{self.target.cpu.arch}), - .plan9 => return self.fail("TODO implement call on plan9 for {}", .{self.target.cpu.arch}), - else => unreachable, + } else if (func_value.castTag(.extern_fn)) |_| { + return self.fail("TODO implement calling extern functions", .{}); + } else { + return self.fail("TODO implement calling bitcasted functions", .{}); + } + } else { + assert(ty.zigTypeTag() == .Pointer); + const mcv = try self.resolveInst(callee); + + try self.genSetReg(Type.initTag(.usize), .lr, mcv); + } + + // TODO: add Instruction.supportedOn + // function for ARM + if (Target.arm.featureSetHas(self.target.cpu.features, .has_v5t)) { + _ = try self.addInst(.{ + .tag = .blx, + .data = .{ .reg = .lr }, + }); + } else { + return self.fail("TODO fix blx emulation for ARM <v5", .{}); + } diff --git a/src/link.zig b/src/link.zig --- a/src/link.zig +++ b/src/link.zig .coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl_index), - .elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl_index), + .elf => {}, // no-op .macho => {}, // no-op .wasm => return @fieldParentPtr(Wasm, "base", base).allocateDeclIndexes(decl_index), .plan9 => return @fieldParentPtr(Plan9, "base", base).allocateDeclIndexes(decl_index), diff --git a/src/link/Elf.zig b/src/link/Elf.zig index e915fff423..9dbc5e846f 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -344,9 +344,10 @@ pub fn getDeclVAddr(self: *Elf, decl_index:
Module.Decl.Index, reloc_info: File. const decl = mod.declPtr(decl_index); assert(self.llvm_object == null); - assert(decl.link.elf.local_sym_index != 0); - const target = decl.link.elf.local_sym_index; + try decl.link.elf.ensureInitialized(self); + const target = decl.link.elf.getSymbolIndex().?; + const vaddr = self.local_symbols.items[target].st_value; const atom = self.atom_by_index_table.get(reloc_info.parent_atom_index).?; const gop = try self.relocs.getOrPut(self.base.allocator, atom); @@ -447,7 +448,7 @@ fn makeString(self: *Elf, bytes: []const u8) !u32 { return @intCast(u32, result); } -fn getString(self: Elf, str_off: u32) []const u8 { +pub fn getString(self: Elf, str_off: u32) []const u8 { assert(str_off < self.shstrtab.items.len); return mem.sliceTo(@ptrCast([*:0]const u8, self.shstrtab.items.ptr + str_off), 0); } @@ -2069,7 +2070,7 @@ fn freeTextBlock(self: *Elf, text_block: *TextBlock, phdr_index: u16) void { if (text_block.prev) |prev| { prev.next = text_block.next; - if (!already_have_free_list_node and prev.freeListEligible(self.*)) { + if (!already_have_free_list_node and prev.freeListEligible(self)) { // The free list is heuristics, it doesn't have to be perfect, so we can // ignore the OOM here. free_list.append(self.base.allocator, prev) catch {}; @@ -2084,6 +2085,15 @@ fn freeTextBlock(self: *Elf, text_block: *TextBlock, phdr_index: u16) void { text_block.next = null; } + // Appending to free lists is allowed to fail because the free lists are heuristics based anyway. + const local_sym_index = text_block.getSymbolIndex().?; + self.local_symbol_free_list.append(self.base.allocator, local_sym_index) catch {}; + self.local_symbols.items[local_sym_index].st_info = 0; + _ = self.atom_by_index_table.remove(local_sym_index); + text_block.local_sym_index = 0; + + self.offset_table_free_list.append(self.base.allocator, text_block.offset_table_index) catch {}; + if (self.dwarf) |*dw| { dw.freeAtom(&text_block.dbg_info_atom); } @@ -2099,7 +2109,7 @@ fn shrinkTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, phdr fn growTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, alignment: u64, phdr_index: u16) !u64 { const sym = self.local_symbols.items[text_block.local_sym_index]; const align_ok = mem.alignBackwardGeneric(u64, sym.st_value, alignment) == sym.st_value; - const need_realloc = !align_ok or new_block_size > text_block.capacity(self.*); + const need_realloc = !align_ok or new_block_size > text_block.capacity(self); if (!need_realloc) return sym.st_value; return self.allocateTextBlock(text_block, new_block_size, alignment, phdr_index); } @@ -2128,7 +2138,7 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al // We now have a pointer to a live text block that has too much capacity. // Is it enough that we could fit this new text block? const sym = self.local_symbols.items[big_block.local_sym_index]; - const capacity = big_block.capacity(self.*); + const capacity = big_block.capacity(self); const ideal_capacity = padToIdeal(capacity); const ideal_capacity_end_vaddr = std.math.add(u64, sym.st_value, ideal_capacity) catch ideal_capacity; const capacity_end_vaddr = sym.st_value + capacity; @@ -2138,7 +2148,7 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al // Additional bookkeeping here to notice if this free list node // should be deleted because the block that it points to has grown to take up // more of the extra capacity. 
- if (!big_block.freeListEligible(self.*)) { + if (!big_block.freeListEligible(self)) { _ = free_list.swapRemove(i); } else { i += 1; @@ -2213,7 +2223,7 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al return vaddr; } -fn allocateLocalSymbol(self: *Elf) !u32 { +pub fn allocateLocalSymbol(self: *Elf) !u32 { try self.local_symbols.ensureUnusedCapacity(self.base.allocator, 1); const index = blk: { @@ -2240,7 +2250,7 @@ fn allocateLocalSymbol(self: *Elf) !u32 { return index; } -fn allocateGotOffset(self: *Elf) !u32 { +pub fn allocateGotOffset(self: *Elf) !u32 { try self.offset_table.ensureUnusedCapacity(self.base.allocator, 1); const index = blk: { @@ -2260,32 +2270,10 @@ fn allocateGotOffset(self: *Elf) !u32 { return index; } -pub fn allocateDeclIndexes(self: *Elf, decl_index: Module.Decl.Index) !void { - if (self.llvm_object) |_| return; - - const mod = self.base.options.module.?; - const decl = mod.declPtr(decl_index); - const block = &decl.link.elf; - if (block.local_sym_index != 0) return; - - const decl_name = try decl.getFullyQualifiedName(mod); - defer self.base.allocator.free(decl_name); - - log.debug("allocating symbol indexes for {s}", .{decl_name}); - - block.local_sym_index = try self.allocateLocalSymbol(); - block.offset_table_index = try self.allocateGotOffset(); - try self.atom_by_index_table.putNoClobber(self.base.allocator, block.local_sym_index, block); - try self.decls.putNoClobber(self.base.allocator, decl_index, null); -} - fn freeUnnamedConsts(self: *Elf, decl_index: Module.Decl.Index) void { const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return; for (unnamed_consts.items) |atom| { self.freeTextBlock(atom, self.phdr_load_ro_index.?); - self.local_symbol_free_list.append(self.base.allocator, atom.local_sym_index) catch {}; - self.local_symbols.items[atom.local_sym_index].st_info = 0; - _ = self.atom_by_index_table.remove(atom.local_sym_index); } unnamed_consts.clearAndFree(self.base.allocator); } @@ -2298,20 +2286,13 @@ pub fn freeDecl(self: *Elf, decl_index: Module.Decl.Index) void { const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - const kv = self.decls.fetchRemove(decl_index); - if (kv.?.value) |index| { - self.freeTextBlock(&decl.link.elf, index); - self.freeUnnamedConsts(decl_index); - } - - // Appending to free lists is allowed to fail because the free lists are heuristics based anyway. 
- if (decl.link.elf.local_sym_index != 0) { - self.local_symbol_free_list.append(self.base.allocator, decl.link.elf.local_sym_index) catch {}; - self.local_symbols.items[decl.link.elf.local_sym_index].st_info = 0; - _ = self.atom_by_index_table.remove(decl.link.elf.local_sym_index); - decl.link.elf.local_sym_index = 0; + log.debug("freeDecl {*}", .{decl}); - self.offset_table_free_list.append(self.base.allocator, decl.link.elf.offset_table_index) catch {}; + if (self.decls.fetchRemove(decl_index)) |kv| { + if (kv.value) |index| { + self.freeTextBlock(&decl.link.elf, index); + self.freeUnnamedConsts(decl_index); + } } if (self.dwarf) |*dw| { @@ -2363,7 +2344,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s assert(decl.link.elf.local_sym_index != 0); // Caller forgot to allocateDeclIndexes() const local_sym = &self.local_symbols.items[decl.link.elf.local_sym_index]; if (local_sym.st_size != 0) { - const capacity = decl.link.elf.capacity(self.*); + const capacity = decl.link.elf.capacity(self); const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, local_sym.st_value, required_alignment); if (need_realloc) { @@ -2424,12 +2405,19 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven const tracy = trace(@src()); defer tracy.end(); - var code_buffer = std.ArrayList(u8).init(self.base.allocator); - defer code_buffer.deinit(); - const decl_index = func.owner_decl; const decl = module.declPtr(decl_index); - self.freeUnnamedConsts(decl_index); + const atom = &decl.link.elf; + try atom.ensureInitialized(self); + const gop = try self.decls.getOrPut(self.base.allocator, decl_index); + if (gop.found_existing) { + self.freeUnnamedConsts(decl_index); + } else { + gop.value_ptr.* = null; + } + + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl_index) else null; defer if (decl_state) |*ds| ds.deinit(); @@ -2490,6 +2478,13 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v assert(!self.unnamed_const_atoms.contains(decl_index)); + const atom = &decl.link.elf; + try atom.ensureInitialized(self); + const gop = try self.decls.getOrPut(self.base.allocator, decl_index); + if (!gop.found_existing) { + gop.value_ptr.* = null; + } + var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); @@ -2633,16 +2628,19 @@ pub fn updateDeclExports( const tracy = trace(@src()); defer tracy.end(); - try self.global_symbols.ensureUnusedCapacity(self.base.allocator, exports.len); const decl = module.declPtr(decl_index); - if (decl.link.elf.local_sym_index == 0) return; - const decl_sym = self.local_symbols.items[decl.link.elf.local_sym_index]; + const atom = &decl.link.elf; - const decl_ptr = self.decls.getPtr(decl_index).?; - if (decl_ptr.* == null) { - decl_ptr.* = try self.getDeclPhdrIndex(decl); + if (atom.getSymbolIndex() == null) return; + + const decl_sym = atom.getSymbol(self); + try self.global_symbols.ensureUnusedCapacity(self.base.allocator, exports.len); + + const gop = try self.decls.getOrPut(self.base.allocator, decl_index); + if (!gop.found_existing) { + gop.value_ptr.* = try self.getDeclPhdrIndex(decl); } - const phdr_index = decl_ptr.*.?; + const phdr_index = gop.value_ptr.*.?; const shdr_index = self.phdr_shdr_table.get(phdr_index).?; for (exports) |exp| { diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig index 
e4061f5bbc..caeb3bfbc5 100644 --- a/src/link/Elf/Atom.zig +++ b/src/link/Elf/Atom.zig @@ -1,6 +1,8 @@ const Atom = @This(); const std = @import("std"); +const assert = std.debug.assert; +const elf = std.elf; const Dwarf = @import("../Dwarf.zig"); const Elf = @import("../Elf.zig"); @@ -12,8 +14,10 @@ const Elf = @import("../Elf.zig"); /// If this field is 0, it means the codegen size = 0 and there is no symbol or /// offset table entry. local_sym_index: u32, + /// This field is undefined for symbols with size = 0. offset_table_index: u32, + /// Points to the previous and next neighbors, based on the `text_offset`. /// This can be used to find, for example, the capacity of this `TextBlock`. prev: ?*Atom, @@ -29,13 +33,49 @@ pub const empty = Atom{ .dbg_info_atom = undefined, }; +pub fn ensureInitialized(self: *Atom, elf_file: *Elf) !void { + if (self.getSymbolIndex() != null) return; // Already initialized + self.local_sym_index = try elf_file.allocateLocalSymbol(); + self.offset_table_index = try elf_file.allocateGotOffset(); + try elf_file.atom_by_index_table.putNoClobber(elf_file.base.allocator, self.local_sym_index, self); +} + +pub fn getSymbolIndex(self: Atom) ?u32 { + if (self.local_sym_index == 0) return null; + return self.local_sym_index; +} + +pub fn getSymbol(self: Atom, elf_file: *Elf) elf.Elf64_Sym { + const sym_index = self.getSymbolIndex().?; + return elf_file.local_symbols.items[sym_index]; +} + +pub fn getSymbolPtr(self: Atom, elf_file: *Elf) *elf.Elf64_Sym { + const sym_index = self.getSymbolIndex().?; + return &elf_file.local_symbols.items[sym_index]; +} + +pub fn getName(self: Atom, elf_file: *Elf) []const u8 { + const sym = self.getSymbol(elf_file); + return elf_file.getString(sym.st_name); +} + +pub fn getOffsetTableAddress(self: Atom, elf_file: *Elf) u64 { + assert(self.getSymbolIndex() != null); + const target = elf_file.base.options.target; + const ptr_bits = target.cpu.arch.ptrBitWidth(); + const ptr_bytes: u64 = @divExact(ptr_bits, 8); + const got = elf_file.program_headers.items[elf_file.phdr_got_index.?]; + return got.p_vaddr + self.offset_table_index * ptr_bytes; +} + /// Returns how much room there is to grow in virtual address space. /// File offset relocation happens transparently, so it is not included in /// this calculation. -pub fn capacity(self: Atom, elf_file: Elf) u64 { - const self_sym = elf_file.local_symbols.items[self.local_sym_index]; +pub fn capacity(self: Atom, elf_file: *Elf) u64 { + const self_sym = self.getSymbol(elf_file); if (self.next) |next| { - const next_sym = elf_file.local_symbols.items[next.local_sym_index]; + const next_sym = next.getSymbol(elf_file); return next_sym.st_value - self_sym.st_value; } else { // We are the last block. The capacity is limited only by virtual address space. @@ -43,11 +83,11 @@ } } -pub fn freeListEligible(self: Atom, elf_file: Elf) bool { +pub fn freeListEligible(self: Atom, elf_file: *Elf) bool { // No need to keep a free list node for the last block.
const next = self.next orelse return false; - const self_sym = elf_file.local_symbols.items[self.local_sym_index]; - const next_sym = elf_file.local_symbols.items[next.local_sym_index]; + const self_sym = self.getSymbol(elf_file); + const next_sym = next.getSymbol(elf_file); const cap = next_sym.st_value - self_sym.st_value; const ideal_cap = Elf.padToIdeal(self_sym.st_size); if (cap <= ideal_cap) return false; diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 1ec8ecaaf8..8fe5a1c712 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -2472,14 +2472,15 @@ pub fn updateDeclExports( const decl = module.declPtr(decl_index); const atom = &decl.link.macho; - try atom.ensureInitialized(self); + + if (atom.getSymbolIndex() == null) return; const gop = try self.decls.getOrPut(gpa, decl_index); if (!gop.found_existing) { - gop.value_ptr.* = null; + gop.value_ptr.* = self.getDeclOutputSection(decl); } - const decl_sym = decl.link.macho.getSymbol(self); + const decl_sym = atom.getSymbol(self); for (exports) |exp| { const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{exp.options.name}); -- cgit v1.2.3 From cc1d7a0e315ba63b0d8c0cd647b4c7e92a571bf2 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Thu, 26 Jan 2023 14:28:44 +0100 Subject: coff: migrate to new non-allocateDeclIndexes API --- src/Module.zig | 7 +- src/arch/aarch64/CodeGen.zig | 19 +++--- src/arch/x86_64/CodeGen.zig | 14 ++-- src/link.zig | 12 ++-- src/link/Coff.zig | 152 ++++++++++++++++++++++--------------------- src/link/Coff/Atom.zig | 23 +++++-- src/link/Elf.zig | 49 ++++++++------ src/link/MachO.zig | 135 +++++++++++++++----------------------- 8 files changed, 209 insertions(+), 202 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Module.zig b/src/Module.zig index 4e1f65aff4..360dd4d1ec 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5324,7 +5324,12 @@ pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void { // Until then, we did call `allocateDeclIndexes` on this anonymous Decl and so we // must call `freeDecl` in the linker backend now. 
switch (mod.comp.bin_file.tag) { - .elf, .macho, .c => {}, // this linker backend has already migrated to the new API + .coff, + .elf, + .macho, + .c, + => {}, // this linker backend has already migrated to the new API + else => if (decl.has_tv) { if (decl.ty.isFnOrHasRuntimeBits()) { mod.comp.bin_file.freeDecl(decl_index); diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 8c3f3e8168..200bf3b8e0 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4000,7 +4000,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type const owner_decl = mod.declPtr(self.mod_fn.owner_decl); const atom_index = switch (self.bin_file.tag) { .macho => owner_decl.link.macho.getSymbolIndex().?, - .coff => owner_decl.link.coff.sym_index, + .coff => owner_decl.link.coff.getSymbolIndex().?, else => unreachable, // unsupported target format }; _ = try self.addInst(.{ @@ -4318,11 +4318,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .sym_index = fn_owner_decl.link.macho.getSymbolIndex().?, }, }); - } else if (self.bin_file.cast(link.File.Coff)) |_| { + } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { + try fn_owner_decl.link.coff.ensureInitialized(coff_file); try self.genSetReg(Type.initTag(.u64), .x30, .{ .linker_load = .{ .type = .got, - .sym_index = fn_owner_decl.link.coff.sym_index, + .sym_index = fn_owner_decl.link.coff.getSymbolIndex().?, }, }); } else if (self.bin_file.cast(link.File.Plan9)) |p9| { @@ -5494,7 +5495,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const owner_decl = mod.declPtr(self.mod_fn.owner_decl); const atom_index = switch (self.bin_file.tag) { .macho => owner_decl.link.macho.getSymbolIndex().?, - .coff => owner_decl.link.coff.sym_index, + .coff => owner_decl.link.coff.getSymbolIndex().?, else => unreachable, // unsupported target format }; _ = try self.addInst(.{ @@ -5608,7 +5609,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void const owner_decl = mod.declPtr(self.mod_fn.owner_decl); const atom_index = switch (self.bin_file.tag) { .macho => owner_decl.link.macho.getSymbolIndex().?, - .coff => owner_decl.link.coff.sym_index, + .coff => owner_decl.link.coff.getSymbolIndex().?, else => unreachable, // unsupported target format }; _ = try self.addInst(.{ @@ -5802,7 +5803,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I const owner_decl = mod.declPtr(self.mod_fn.owner_decl); const atom_index = switch (self.bin_file.tag) { .macho => owner_decl.link.macho.getSymbolIndex().?, - .coff => owner_decl.link.coff.sym_index, + .coff => owner_decl.link.coff.getSymbolIndex().?, else => unreachable, // unsupported target format }; _ = try self.addInst(.{ @@ -6129,11 +6130,11 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne .type = .got, .sym_index = decl.link.macho.getSymbolIndex().?, } }; - } else if (self.bin_file.cast(link.File.Coff)) |_| { - assert(decl.link.coff.sym_index != 0); + } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { + try decl.link.coff.ensureInitialized(coff_file); return MCValue{ .linker_load = .{ .type = .got, - .sym_index = decl.link.coff.sym_index, + .sym_index = decl.link.coff.getSymbolIndex().?, } }; } else if (self.bin_file.cast(link.File.Plan9)) |p9| { try p9.seeDecl(decl_index); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 93f1269147..df24fe5e7d 100644 --- 
a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2673,7 +2673,7 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue const atom_index = if (self.bin_file.tag == link.File.MachO.base_tag) fn_owner_decl.link.macho.getSymbolIndex().? else - fn_owner_decl.link.coff.sym_index; + fn_owner_decl.link.coff.getSymbolIndex().?; const flags: u2 = switch (load_struct.type) { .got => 0b00, .direct => 0b01, @@ -4005,11 +4005,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }), .data = .{ .imm = got_addr }, }); - } else if (self.bin_file.cast(link.File.Coff)) |_| { + } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { + try fn_owner_decl.link.coff.ensureInitialized(coff_file); + const sym_index = fn_owner_decl.link.coff.getSymbolIndex().?; try self.genSetReg(Type.initTag(.usize), .rax, .{ .linker_load = .{ .type = .got, - .sym_index = fn_owner_decl.link.coff.sym_index, + .sym_index = sym_index, }, }); _ = try self.addInst(.{ @@ -6725,11 +6727,11 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne .type = .got, .sym_index = decl.link.macho.getSymbolIndex().?, } }; - } else if (self.bin_file.cast(link.File.Coff)) |_| { - assert(decl.link.coff.sym_index != 0); + } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { + try decl.link.coff.ensureInitialized(coff_file); return MCValue{ .linker_load = .{ .type = .got, - .sym_index = decl.link.coff.sym_index, + .sym_index = decl.link.coff.getSymbolIndex().?, } }; } else if (self.bin_file.cast(link.File.Plan9)) |p9| { try p9.seeDecl(decl_index); diff --git a/src/link.zig b/src/link.zig index d64c09d199..9be5b9ca3a 100644 --- a/src/link.zig +++ b/src/link.zig @@ -615,12 +615,16 @@ pub const File = struct { return; } switch (base.tag) { - .coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl_index), - .elf => {}, // no-op - .macho => {}, // no-op .wasm => return @fieldParentPtr(Wasm, "base", base).allocateDeclIndexes(decl_index), .plan9 => return @fieldParentPtr(Plan9, "base", base).allocateDeclIndexes(decl_index), - .c, .spirv, .nvptx => {}, + + .coff, + .elf, + .macho, + .c, + .spirv, + .nvptx, + => {}, } } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 65aee010fd..dee3c7c381 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -480,16 +480,6 @@ fn growSectionVM(self: *Coff, sect_id: u32, needed_size: u32) !void { header.virtual_size = increased_size; } -pub fn allocateDeclIndexes(self: *Coff, decl_index: Module.Decl.Index) !void { - if (self.llvm_object) |_| return; - const decl = self.base.options.module.?.declPtr(decl_index); - if (decl.link.coff.sym_index != 0) return; - decl.link.coff.sym_index = try self.allocateSymbol(); - const gpa = self.base.allocator; - try self.atom_by_index_table.putNoClobber(gpa, decl.link.coff.sym_index, &decl.link.coff); - try self.decls.putNoClobber(gpa, decl_index, null); -} - fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u32 { const tracy = trace(@src()); defer tracy.end(); @@ -615,7 +605,7 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u return vaddr; } -fn allocateSymbol(self: *Coff) !u32 { +pub fn allocateSymbol(self: *Coff) !u32 { const gpa = self.base.allocator; try self.locals.ensureUnusedCapacity(gpa, 1); @@ -716,12 +706,11 @@ fn createGotAtom(self: *Coff, target: SymbolWithLoc) !*Atom { const atom = try gpa.create(Atom); errdefer 
gpa.destroy(atom); atom.* = Atom.empty; - atom.sym_index = try self.allocateSymbol(); + try atom.ensureInitialized(self); atom.size = @sizeOf(u64); atom.alignment = @alignOf(u64); try self.managed_atoms.append(gpa, atom); - try self.atom_by_index_table.putNoClobber(gpa, atom.sym_index, atom); const sym = atom.getSymbolPtr(self); sym.section_number = @intToEnum(coff.SectionNumber, self.got_section_index.? + 1); @@ -754,12 +743,11 @@ fn createImportAtom(self: *Coff) !*Atom { const atom = try gpa.create(Atom); errdefer gpa.destroy(atom); atom.* = Atom.empty; - atom.sym_index = try self.allocateSymbol(); + try atom.ensureInitialized(self); atom.size = @sizeOf(u64); atom.alignment = @alignOf(u64); try self.managed_atoms.append(gpa, atom); - try self.atom_by_index_table.putNoClobber(gpa, atom.sym_index, atom); const sym = atom.getSymbolPtr(self); sym.section_number = @intToEnum(coff.SectionNumber, self.idata_section_index.? + 1); @@ -790,7 +778,11 @@ fn writeAtom(self: *Coff, atom: *Atom, code: []const u8) !void { const sym = atom.getSymbol(self); const section = self.sections.get(@enumToInt(sym.section_number) - 1); const file_offset = section.header.pointer_to_raw_data + sym.value - section.header.virtual_address; - log.debug("writing atom for symbol {s} at file offset 0x{x} to 0x{x}", .{ atom.getName(self), file_offset, file_offset + code.len }); + log.debug("writing atom for symbol {s} at file offset 0x{x} to 0x{x}", .{ + atom.getName(self), + file_offset, + file_offset + code.len, + }); try self.base.file.?.pwriteAll(code, file_offset); try self.resolveRelocs(atom); } @@ -848,6 +840,7 @@ fn freeAtom(self: *Coff, atom: *Atom) void { // Remove any relocs and base relocs associated with this Atom self.freeRelocationsForAtom(atom); + const gpa = self.base.allocator; const sym = atom.getSymbol(self); const sect_id = @enumToInt(sym.section_number) - 1; const free_list = &self.sections.items(.free_list)[sect_id]; @@ -885,7 +878,7 @@ fn freeAtom(self: *Coff, atom: *Atom) void { if (!already_have_free_list_node and prev.freeListEligible(self)) { // The free list is heuristics, it doesn't have to be perfect, so we can // ignore the OOM here. - free_list.append(self.base.allocator, prev) catch {}; + free_list.append(gpa, prev) catch {}; } } else { atom.prev = null; @@ -896,6 +889,28 @@ fn freeAtom(self: *Coff, atom: *Atom) void { } else { atom.next = null; } + + // Appending to free lists is allowed to fail because the free lists are heuristics based anyway. 
+ const sym_index = atom.getSymbolIndex().?; + self.locals_free_list.append(gpa, sym_index) catch {}; + + // Try freeing GOT atom if this decl had one + const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null }; + if (self.got_entries_table.get(got_target)) |got_index| { + self.got_entries_free_list.append(gpa, @intCast(u32, got_index)) catch {}; + self.got_entries.items[got_index] = .{ + .target = .{ .sym_index = 0, .file = null }, + .sym_index = 0, + }; + _ = self.got_entries_table.remove(got_target); + + log.debug(" adding GOT index {d} to free list (target local@{d})", .{ got_index, sym_index }); + } + + self.locals.items[sym_index].section_number = .UNDEFINED; + _ = self.atom_by_index_table.remove(sym_index); + log.debug(" adding local symbol index {d} to free list", .{sym_index}); + atom.sym_index = 0; } pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { @@ -912,8 +927,15 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live const decl_index = func.owner_decl; const decl = module.declPtr(decl_index); - self.freeUnnamedConsts(decl_index); - self.freeRelocationsForAtom(&decl.link.coff); + const atom = &decl.link.coff; + try atom.ensureInitialized(self); + const gop = try self.decls.getOrPut(self.base.allocator, decl_index); + if (gop.found_existing) { + self.freeUnnamedConsts(decl_index); + self.freeRelocationsForAtom(&decl.link.coff); + } else { + gop.value_ptr.* = null; + } var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); @@ -960,9 +982,9 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In const atom = try gpa.create(Atom); errdefer gpa.destroy(atom); atom.* = Atom.empty; + try atom.ensureInitialized(self); + try self.managed_atoms.append(gpa, atom); - atom.sym_index = try self.allocateSymbol(); - const sym = atom.getSymbolPtr(self); const sym_name = blk: { const decl_name = try decl.getFullyQualifiedName(mod); defer gpa.free(decl_name); @@ -971,14 +993,11 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In break :blk try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index }); }; defer gpa.free(sym_name); - try self.setSymbolName(sym, sym_name); - sym.section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? + 1); - - try self.managed_atoms.append(gpa, atom); - try self.atom_by_index_table.putNoClobber(gpa, atom.sym_index, atom); + try self.setSymbolName(atom.getSymbolPtr(self), sym_name); + atom.getSymbolPtr(self).section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? 
+ 1); const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), tv, &code_buffer, .none, .{ - .parent_atom_index = atom.sym_index, + .parent_atom_index = atom.getSymbolIndex().?, }); const code = switch (res) { .ok => code_buffer.items, @@ -993,17 +1012,17 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In const required_alignment = tv.ty.abiAlignment(self.base.options.target); atom.alignment = required_alignment; atom.size = @intCast(u32, code.len); - sym.value = try self.allocateAtom(atom, atom.size, atom.alignment); + atom.getSymbolPtr(self).value = try self.allocateAtom(atom, atom.size, atom.alignment); errdefer self.freeAtom(atom); try unnamed_consts.append(gpa, atom); - log.debug("allocated atom for {s} at 0x{x}", .{ sym_name, sym.value }); + log.debug("allocated atom for {s} at 0x{x}", .{ sym_name, atom.getSymbol(self).value }); log.debug(" (required alignment 0x{x})", .{required_alignment}); try self.writeAtom(atom, code); - return atom.sym_index; + return atom.getSymbolIndex().?; } pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !void { @@ -1028,7 +1047,14 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) ! } } - self.freeRelocationsForAtom(&decl.link.coff); + const atom = &decl.link.coff; + try atom.ensureInitialized(self); + const gop = try self.decls.getOrPut(self.base.allocator, decl_index); + if (gop.found_existing) { + self.freeRelocationsForAtom(atom); + } else { + gop.value_ptr.* = null; + } var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); @@ -1038,7 +1064,7 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) ! .ty = decl.ty, .val = decl_val, }, &code_buffer, .none, .{ - .parent_atom_index = decl.link.coff.sym_index, + .parent_atom_index = decl.link.coff.getSymbolIndex().?, }); const code = switch (res) { .ok => code_buffer.items, @@ -1099,7 +1125,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8, const code_len = @intCast(u32, code.len); const atom = &decl.link.coff; - assert(atom.sym_index != 0); // Caller forgot to allocateDeclIndexes() + if (atom.size != 0) { const sym = atom.getSymbolPtr(self); try self.setSymbolName(sym, decl_name); @@ -1116,7 +1142,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8, if (vaddr != sym.value) { sym.value = vaddr; log.debug(" (updating GOT entry)", .{}); - const got_target = SymbolWithLoc{ .sym_index = atom.sym_index, .file = null }; + const got_target = SymbolWithLoc{ .sym_index = atom.getSymbolIndex().?, .file = null }; const got_atom = self.getGotAtomForSymbol(got_target).?; self.markRelocsDirtyByTarget(got_target); try self.writePtrWidthAtom(got_atom); @@ -1137,10 +1163,10 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8, atom.size = code_len; sym.value = vaddr; - const got_target = SymbolWithLoc{ .sym_index = atom.sym_index, .file = null }; + const got_target = SymbolWithLoc{ .sym_index = atom.getSymbolIndex().?, .file = null }; const got_index = try self.allocateGotEntry(got_target); const got_atom = try self.createGotAtom(got_target); - self.got_entries.items[got_index].sym_index = got_atom.sym_index; + self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?; try self.writePtrWidthAtom(got_atom); } @@ -1160,11 +1186,6 @@ fn freeUnnamedConsts(self: *Coff, decl_index: Module.Decl.Index) void { const unnamed_consts = 
self.unnamed_const_atoms.getPtr(decl_index) orelse return; for (unnamed_consts.items) |atom| { self.freeAtom(atom); - self.locals_free_list.append(gpa, atom.sym_index) catch {}; - self.locals.items[atom.sym_index].section_number = .UNDEFINED; - _ = self.atom_by_index_table.remove(atom.sym_index); - log.debug(" adding local symbol index {d} to free list", .{atom.sym_index}); - atom.sym_index = 0; } unnamed_consts.clearAndFree(gpa); } @@ -1179,35 +1200,11 @@ pub fn freeDecl(self: *Coff, decl_index: Module.Decl.Index) void { log.debug("freeDecl {*}", .{decl}); - const kv = self.decls.fetchRemove(decl_index); - if (kv.?.value) |_| { - self.freeAtom(&decl.link.coff); - self.freeUnnamedConsts(decl_index); - } - - // Appending to free lists is allowed to fail because the free lists are heuristics based anyway. - const gpa = self.base.allocator; - const sym_index = decl.link.coff.sym_index; - if (sym_index != 0) { - self.locals_free_list.append(gpa, sym_index) catch {}; - - // Try freeing GOT atom if this decl had one - const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null }; - if (self.got_entries_table.get(got_target)) |got_index| { - self.got_entries_free_list.append(gpa, @intCast(u32, got_index)) catch {}; - self.got_entries.items[got_index] = .{ - .target = .{ .sym_index = 0, .file = null }, - .sym_index = 0, - }; - _ = self.got_entries_table.remove(got_target); - - log.debug(" adding GOT index {d} to free list (target local@{d})", .{ got_index, sym_index }); + if (self.decls.fetchRemove(decl_index)) |kv| { + if (kv.value) |_| { + self.freeAtom(&decl.link.coff); + self.freeUnnamedConsts(decl_index); } - - self.locals.items[sym_index].section_number = .UNDEFINED; - _ = self.atom_by_index_table.remove(sym_index); - log.debug(" adding local symbol index {d} to free list", .{sym_index}); - decl.link.coff.sym_index = 0; } } @@ -1261,7 +1258,14 @@ pub fn updateDeclExports( const decl = module.declPtr(decl_index); const atom = &decl.link.coff; - if (atom.sym_index == 0) return; + + if (atom.getSymbolIndex() == null) return; + + const gop = try self.decls.getOrPut(gpa, decl_index); + if (!gop.found_existing) { + gop.value_ptr.* = self.getDeclOutputSection(decl); + } + const decl_sym = atom.getSymbol(self); for (exports) |exp| { @@ -1416,7 +1420,7 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod const import_index = try self.allocateImportEntry(global); const import_atom = try self.createImportAtom(); - self.imports.items[import_index].sym_index = import_atom.sym_index; + self.imports.items[import_index].sym_index = import_atom.getSymbolIndex().?; try self.writePtrWidthAtom(import_atom); } @@ -1460,10 +1464,12 @@ pub fn getDeclVAddr( const decl = mod.declPtr(decl_index); assert(self.llvm_object == null); - assert(decl.link.coff.sym_index != 0); + + try decl.link.coff.ensureInitialized(self); + const sym_index = decl.link.coff.getSymbolIndex().?; const atom = self.getAtomForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?; - const target = SymbolWithLoc{ .sym_index = decl.link.coff.sym_index, .file = null }; + const target = SymbolWithLoc{ .sym_index = sym_index, .file = null }; try atom.addRelocation(self, .{ .type = .direct, .target = target, diff --git a/src/link/Coff/Atom.zig b/src/link/Coff/Atom.zig index b1bb292c62..78824eac1d 100644 --- a/src/link/Coff/Atom.zig +++ b/src/link/Coff/Atom.zig @@ -39,30 +39,45 @@ pub const empty = Atom{ .next = null, }; +pub fn ensureInitialized(self: *Atom, coff_file: *Coff) !void { + if 
(self.getSymbolIndex() != null) return; // Already initialized + self.sym_index = try coff_file.allocateSymbol(); + try coff_file.atom_by_index_table.putNoClobber(coff_file.base.allocator, self.sym_index, self); +} + +pub fn getSymbolIndex(self: Atom) ?u32 { + if (self.sym_index == 0) return null; + return self.sym_index; +} + /// Returns symbol referencing this atom. pub fn getSymbol(self: Atom, coff_file: *const Coff) *const coff.Symbol { + const sym_index = self.getSymbolIndex().?; return coff_file.getSymbol(.{ - .sym_index = self.sym_index, + .sym_index = sym_index, .file = self.file, }); } /// Returns pointer-to-symbol referencing this atom. pub fn getSymbolPtr(self: Atom, coff_file: *Coff) *coff.Symbol { + const sym_index = self.getSymbolIndex().?; return coff_file.getSymbolPtr(.{ - .sym_index = self.sym_index, + .sym_index = sym_index, .file = self.file, }); } pub fn getSymbolWithLoc(self: Atom) SymbolWithLoc { - return .{ .sym_index = self.sym_index, .file = self.file }; + const sym_index = self.getSymbolIndex().?; + return .{ .sym_index = sym_index, .file = self.file }; } /// Returns the name of this atom. pub fn getName(self: Atom, coff_file: *const Coff) []const u8 { + const sym_index = self.getSymbolIndex().?; return coff_file.getSymbolName(.{ - .sym_index = self.sym_index, + .sym_index = sym_index, .file = self.file, }); } diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 9dbc5e846f..2c55e55f83 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -893,7 +893,7 @@ fn growAllocSection(self: *Elf, shdr_index: u16, phdr_index: u16, needed_size: u // Must move the entire section. const new_offset = self.findFreeSpace(needed_size, self.page_size); const existing_size = if (self.atoms.get(phdr_index)) |last| blk: { - const sym = self.local_symbols.items[last.local_sym_index]; + const sym = last.getSymbol(self); break :blk (sym.st_value + sym.st_size) - phdr.p_vaddr; } else if (shdr_index == self.got_section_index.?) 
blk: { break :blk shdr.sh_size; @@ -1031,7 +1031,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node while (it.next()) |entry| { const atom = entry.key_ptr.*; const relocs = entry.value_ptr.*; - const source_sym = self.local_symbols.items[atom.local_sym_index]; + const source_sym = atom.getSymbol(self); const source_shdr = self.sections.items[source_sym.st_shndx]; log.debug("relocating '{s}'", .{self.getString(source_sym.st_name)}); @@ -2034,11 +2034,13 @@ fn writeElfHeader(self: *Elf) !void { } fn freeTextBlock(self: *Elf, text_block: *TextBlock, phdr_index: u16) void { - const local_sym = self.local_symbols.items[text_block.local_sym_index]; + const local_sym = text_block.getSymbol(self); const name_str_index = local_sym.st_name; const name = self.getString(name_str_index); log.debug("freeTextBlock {*} ({s})", .{ text_block, name }); + self.freeRelocationsForTextBlock(text_block); + const free_list = self.atom_free_lists.getPtr(phdr_index).?; var already_have_free_list_node = false; { @@ -2107,7 +2109,7 @@ fn shrinkTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, phdr } fn growTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, alignment: u64, phdr_index: u16) !u64 { - const sym = self.local_symbols.items[text_block.local_sym_index]; + const sym = text_block.getSymbol(self); const align_ok = mem.alignBackwardGeneric(u64, sym.st_value, alignment) == sym.st_value; const need_realloc = !align_ok or new_block_size > text_block.capacity(self); if (!need_realloc) return sym.st_value; @@ -2137,7 +2139,7 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al const big_block = free_list.items[i]; // We now have a pointer to a live text block that has too much capacity. // Is it enough that we could fit this new text block? 
- const sym = self.local_symbols.items[big_block.local_sym_index]; + const sym = big_block.getSymbol(self); const capacity = big_block.capacity(self); const ideal_capacity = padToIdeal(capacity); const ideal_capacity_end_vaddr = std.math.add(u64, sym.st_value, ideal_capacity) catch ideal_capacity; @@ -2168,7 +2170,7 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al } break :blk new_start_vaddr; } else if (self.atoms.get(phdr_index)) |last| { - const sym = self.local_symbols.items[last.local_sym_index]; + const sym = last.getSymbol(self); const ideal_capacity = padToIdeal(sym.st_size); const ideal_capacity_end_vaddr = sym.st_value + ideal_capacity; const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment); @@ -2270,6 +2272,11 @@ pub fn allocateGotOffset(self: *Elf) !u32 { return index; } +fn freeRelocationsForTextBlock(self: *Elf, text_block: *TextBlock) void { + var removed_relocs = self.relocs.fetchRemove(text_block); + if (removed_relocs) |*relocs| relocs.value.deinit(self.base.allocator); +} + fn freeUnnamedConsts(self: *Elf, decl_index: Module.Decl.Index) void { const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return; for (unnamed_consts.items) |atom| { @@ -2341,8 +2348,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s const phdr_index = decl_ptr.*.?; const shdr_index = self.phdr_shdr_table.get(phdr_index).?; - assert(decl.link.elf.local_sym_index != 0); // Caller forgot to allocateDeclIndexes() - const local_sym = &self.local_symbols.items[decl.link.elf.local_sym_index]; + const local_sym = decl.link.elf.getSymbolPtr(self); if (local_sym.st_size != 0) { const capacity = decl.link.elf.capacity(self); const need_realloc = code.len > capacity or @@ -2366,7 +2372,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s local_sym.st_other = 0; local_sym.st_shndx = shdr_index; // TODO this write could be avoided if no fields of the symbol were changed. 
- try self.writeSymbol(decl.link.elf.local_sym_index); + try self.writeSymbol(decl.link.elf.getSymbolIndex().?); } else { const name_str_index = try self.makeString(decl_name); const vaddr = try self.allocateTextBlock(&decl.link.elf, code.len, required_alignment, phdr_index); @@ -2383,7 +2389,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s }; self.offset_table.items[decl.link.elf.offset_table_index] = vaddr; - try self.writeSymbol(decl.link.elf.local_sym_index); + try self.writeSymbol(decl.link.elf.getSymbolIndex().?); try self.writeOffsetTableEntry(decl.link.elf.offset_table_index); } @@ -2412,6 +2418,7 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven const gop = try self.decls.getOrPut(self.base.allocator, decl_index); if (gop.found_existing) { self.freeUnnamedConsts(decl_index); + self.freeRelocationsForTextBlock(atom); } else { gop.value_ptr.* = null; } @@ -2481,7 +2488,9 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v const atom = &decl.link.elf; try atom.ensureInitialized(self); const gop = try self.decls.getOrPut(self.base.allocator, decl_index); - if (!gop.found_existing) { + if (gop.found_existing) { + self.freeRelocationsForTextBlock(atom); + } else { gop.value_ptr.* = null; } @@ -2500,14 +2509,14 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v }, &code_buffer, .{ .dwarf = ds, }, .{ - .parent_atom_index = decl.link.elf.local_sym_index, + .parent_atom_index = decl.link.elf.getSymbolIndex().?, }) else try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .none, .{ - .parent_atom_index = decl.link.elf.local_sym_index, + .parent_atom_index = decl.link.elf.getSymbolIndex().?, }); const code = switch (res) { @@ -2551,6 +2560,8 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module const atom = try self.base.allocator.create(TextBlock); errdefer self.base.allocator.destroy(atom); atom.* = TextBlock.empty; + // TODO for unnamed consts we don't need GOT offset/entry allocated + try atom.ensureInitialized(self); try self.managed_atoms.append(self.base.allocator, atom); const name_str_index = blk: { @@ -2565,14 +2576,10 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module }; const name = self.getString(name_str_index); - log.debug("allocating symbol indexes for {s}", .{name}); - atom.local_sym_index = try self.allocateLocalSymbol(); - try self.atom_by_index_table.putNoClobber(self.base.allocator, atom.local_sym_index, atom); - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .{ .none = {}, }, .{ - .parent_atom_index = atom.local_sym_index, + .parent_atom_index = atom.getSymbolIndex().?, }); const code = switch (res) { .ok => code_buffer.items, @@ -2592,7 +2599,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module log.debug("allocated text block for {s} at 0x{x}", .{ name, vaddr }); - const local_sym = &self.local_symbols.items[atom.local_sym_index]; + const local_sym = atom.getSymbolPtr(self); local_sym.* = .{ .st_name = name_str_index, .st_info = (elf.STB_LOCAL << 4) | elf.STT_OBJECT, @@ -2602,14 +2609,14 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module .st_size = code.len, }; - try self.writeSymbol(atom.local_sym_index); + try self.writeSymbol(atom.getSymbolIndex().?); try unnamed_consts.append(self.base.allocator, atom); const 
section_offset = local_sym.st_value - self.program_headers.items[phdr_index].p_vaddr; const file_offset = self.sections.items[shdr_index].sh_offset + section_offset; try self.base.file.?.pwriteAll(code, file_offset); - return atom.local_sym_index; + return atom.getSymbolIndex().?; } pub fn updateDeclExports( diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 8fe5a1c712..543cb473d7 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1056,19 +1056,14 @@ pub fn allocateSpecialSymbols(self: *MachO) !void { pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !*Atom { const gpa = self.base.allocator; - const sym_index = try self.allocateSymbol(); - const atom = blk: { - const atom = try gpa.create(Atom); - atom.* = Atom.empty; - atom.sym_index = sym_index; - atom.size = @sizeOf(u64); - atom.alignment = @alignOf(u64); - break :blk atom; - }; + const atom = try gpa.create(Atom); + atom.* = Atom.empty; + try atom.ensureInitialized(self); + atom.size = @sizeOf(u64); + atom.alignment = @alignOf(u64); errdefer gpa.destroy(atom); try self.managed_atoms.append(gpa, atom); - try self.atom_by_index_table.putNoClobber(gpa, atom.sym_index, atom); const sym = atom.getSymbolPtr(self); sym.n_type = macho.N_SECT; @@ -1109,15 +1104,11 @@ pub fn createDyldPrivateAtom(self: *MachO) !void { const gpa = self.base.allocator; - const sym_index = try self.allocateSymbol(); - const atom = blk: { - const atom = try gpa.create(Atom); - atom.* = Atom.empty; - atom.sym_index = sym_index; - atom.size = @sizeOf(u64); - atom.alignment = @alignOf(u64); - break :blk atom; - }; + const atom = try gpa.create(Atom); + atom.* = Atom.empty; + try atom.ensureInitialized(self); + atom.size = @sizeOf(u64); + atom.alignment = @alignOf(u64); errdefer gpa.destroy(atom); const sym = atom.getSymbolPtr(self); @@ -1126,7 +1117,6 @@ pub fn createDyldPrivateAtom(self: *MachO) !void { self.dyld_private_atom = atom; try self.managed_atoms.append(gpa, atom); - try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom); sym.n_value = try self.allocateAtom(atom, atom.size, @alignOf(u64)); log.debug("allocated dyld_private atom at 0x{x}", .{sym.n_value}); @@ -1144,18 +1134,14 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void { .aarch64 => 6 * @sizeOf(u32), else => unreachable, }; - const sym_index = try self.allocateSymbol(); - const atom = blk: { - const atom = try gpa.create(Atom); - atom.* = Atom.empty; - atom.sym_index = sym_index; - atom.size = size; - atom.alignment = switch (arch) { - .x86_64 => 1, - .aarch64 => @alignOf(u32), - else => unreachable, - }; - break :blk atom; + const atom = try gpa.create(Atom); + atom.* = Atom.empty; + try atom.ensureInitialized(self); + atom.size = size; + atom.alignment = switch (arch) { + .x86_64 => 1, + .aarch64 => @alignOf(u32), + else => unreachable, }; errdefer gpa.destroy(atom); @@ -1163,7 +1149,7 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void { sym.n_type = macho.N_SECT; sym.n_sect = self.stub_helper_section_index.? 
+ 1; - const dyld_private_sym_index = self.dyld_private_atom.?.sym_index; + const dyld_private_sym_index = self.dyld_private_atom.?.getSymbolIndex().?; const code = try gpa.alloc(u8, size); defer gpa.free(code); @@ -1258,7 +1244,6 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void { self.stub_helper_preamble_atom = atom; try self.managed_atoms.append(gpa, atom); - try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom); sym.n_value = try self.allocateAtom(atom, size, atom.alignment); log.debug("allocated stub preamble atom at 0x{x}", .{sym.n_value}); @@ -1273,18 +1258,14 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom { .aarch64 => 3 * @sizeOf(u32), else => unreachable, }; - const sym_index = try self.allocateSymbol(); - const atom = blk: { - const atom = try gpa.create(Atom); - atom.* = Atom.empty; - atom.sym_index = sym_index; - atom.size = size; - atom.alignment = switch (arch) { - .x86_64 => 1, - .aarch64 => @alignOf(u32), - else => unreachable, - }; - break :blk atom; + const atom = try gpa.create(Atom); + atom.* = Atom.empty; + try atom.ensureInitialized(self); + atom.size = size; + atom.alignment = switch (arch) { + .x86_64 => 1, + .aarch64 => @alignOf(u32), + else => unreachable, }; errdefer gpa.destroy(atom); @@ -1306,7 +1287,7 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom { try atom.addRelocation(self, .{ .type = @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_BRANCH), - .target = .{ .sym_index = self.stub_helper_preamble_atom.?.sym_index, .file = null }, + .target = .{ .sym_index = self.stub_helper_preamble_atom.?.getSymbolIndex().?, .file = null }, .offset = 6, .addend = 0, .pcrel = true, @@ -1329,7 +1310,7 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom { try atom.addRelocation(self, .{ .type = @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_BRANCH26), - .target = .{ .sym_index = self.stub_helper_preamble_atom.?.sym_index, .file = null }, + .target = .{ .sym_index = self.stub_helper_preamble_atom.?.getSymbolIndex().?, .file = null }, .offset = 4, .addend = 0, .pcrel = true, @@ -1340,7 +1321,6 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom { } try self.managed_atoms.append(gpa, atom); - try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom); sym.n_value = try self.allocateAtom(atom, size, atom.alignment); log.debug("allocated stub helper atom at 0x{x}", .{sym.n_value}); @@ -1351,15 +1331,11 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom { pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWithLoc) !*Atom { const gpa = self.base.allocator; - const sym_index = try self.allocateSymbol(); - const atom = blk: { - const atom = try gpa.create(Atom); - atom.* = Atom.empty; - atom.sym_index = sym_index; - atom.size = @sizeOf(u64); - atom.alignment = @alignOf(u64); - break :blk atom; - }; + const atom = try gpa.create(Atom); + atom.* = Atom.empty; + try atom.ensureInitialized(self); + atom.size = @sizeOf(u64); + atom.alignment = @alignOf(u64); errdefer gpa.destroy(atom); const sym = atom.getSymbolPtr(self); @@ -1385,7 +1361,6 @@ pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWi }); try self.managed_atoms.append(gpa, atom); - try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom); sym.n_value = try self.allocateAtom(atom, atom.size, @alignOf(u64)); log.debug("allocated lazy pointer atom at 0x{x} ({s})", .{ sym.n_value, self.getSymbolName(target) }); @@ -1402,19 +1377,15 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom { .aarch64 => 3 * @sizeOf(u32), else => 
unreachable, // unhandled architecture type }; - const sym_index = try self.allocateSymbol(); - const atom = blk: { - const atom = try gpa.create(Atom); - atom.* = Atom.empty; - atom.sym_index = sym_index; - atom.size = size; - atom.alignment = switch (arch) { - .x86_64 => 1, - .aarch64 => @alignOf(u32), - else => unreachable, // unhandled architecture type + const atom = try gpa.create(Atom); + atom.* = Atom.empty; + try atom.ensureInitialized(self); + atom.size = size; + atom.alignment = switch (arch) { + .x86_64 => 1, + .aarch64 => @alignOf(u32), + else => unreachable, // unhandled architecture type - }; - break :blk atom; }; errdefer gpa.destroy(atom); @@ -1476,7 +1447,6 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom { } try self.managed_atoms.append(gpa, atom); - try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom); sym.n_value = try self.allocateAtom(atom, size, atom.alignment); log.debug("allocated stub atom at 0x{x}", .{sym.n_value}); @@ -1617,9 +1587,9 @@ pub fn resolveSymbolsInDylibs(self: *MachO) !void { const stub_index = try self.allocateStubEntry(global); const stub_helper_atom = try self.createStubHelperAtom(); - const laptr_atom = try self.createLazyPointerAtom(stub_helper_atom.sym_index, global); - const stub_atom = try self.createStubAtom(laptr_atom.sym_index); - self.stubs.items[stub_index].sym_index = stub_atom.sym_index; + const laptr_atom = try self.createLazyPointerAtom(stub_helper_atom.getSymbolIndex().?, global); + const stub_atom = try self.createStubAtom(laptr_atom.getSymbolIndex().?); + self.stubs.items[stub_index].sym_index = stub_atom.getSymbolIndex().?; self.markRelocsDirtyByTarget(global); } @@ -1717,7 +1687,7 @@ pub fn resolveDyldStubBinder(self: *MachO) !void { // Add dyld_stub_binder as the final GOT entry. 
const got_index = try self.allocateGotEntry(global); const got_atom = try self.createGotAtom(global); - self.got_entries.items[got_index].sym_index = got_atom.sym_index; + self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?; try self.writePtrWidthAtom(got_atom); } @@ -2098,14 +2068,11 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu const atom = try gpa.create(Atom); errdefer gpa.destroy(atom); atom.* = Atom.empty; - - atom.sym_index = try self.allocateSymbol(); - + try atom.ensureInitialized(self); try self.managed_atoms.append(gpa, atom); - try self.atom_by_index_table.putNoClobber(gpa, atom.sym_index, atom); const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .none, .{ - .parent_atom_index = atom.sym_index, + .parent_atom_index = atom.getSymbolIndex().?, }); const code = switch (res) { .ok => code_buffer.items, @@ -2137,7 +2104,7 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu try self.writeAtom(atom, code); - return atom.sym_index; + return atom.getSymbolIndex().?; } pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) !void { @@ -2188,14 +2155,14 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) }, &code_buffer, .{ .dwarf = ds, }, .{ - .parent_atom_index = decl.link.macho.sym_index, + .parent_atom_index = decl.link.macho.getSymbolIndex().?, }) else try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .none, .{ - .parent_atom_index = decl.link.macho.sym_index, + .parent_atom_index = decl.link.macho.getSymbolIndex().?, }); const code = switch (res) { -- cgit v1.2.3 From b25efb86e1b1b2a9e8aa269bf83b717d54f7e276 Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Fri, 27 Jan 2023 19:24:15 +0100 Subject: wasm: migrate to new non-allocateDeclIndexes API --- src/Module.zig | 1 + src/arch/wasm/CodeGen.zig | 13 +++++++-- src/link.zig | 2 +- src/link/Wasm.zig | 70 ++++++++++++++++++++--------------------------- src/link/Wasm/Atom.zig | 11 ++++++++ 5 files changed, 53 insertions(+), 44 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Module.zig b/src/Module.zig index 360dd4d1ec..713680c5fa 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5328,6 +5328,7 @@ pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void { .elf, .macho, .c, + .wasm, => {}, // this linker backend has already migrated to the new API else => if (decl.has_tv) { diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index a7d90a8bf9..342d6b70cc 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2120,22 +2120,28 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const module = func.bin_file.base.options.module.?; if (func_val.castTag(.function)) |function| { - break :blk module.declPtr(function.data.owner_decl); + const decl = module.declPtr(function.data.owner_decl); + try decl.link.wasm.ensureInitialized(func.bin_file); + break :blk decl; } else if (func_val.castTag(.extern_fn)) |extern_fn| { const ext_decl = module.declPtr(extern_fn.data.owner_decl); const ext_info = ext_decl.ty.fnInfo(); var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, func.target); defer func_type.deinit(func.gpa); + const atom = &ext_decl.link.wasm; + try atom.ensureInitialized(func.bin_file); ext_decl.fn_link.wasm.type_index = try func.bin_file.putOrGetFuncType(func_type); 
try func.bin_file.addOrUpdateImport( mem.sliceTo(ext_decl.name, 0), - ext_decl.link.wasm.sym_index, + atom.getSymbolIndex().?, ext_decl.getExternFn().?.lib_name, ext_decl.fn_link.wasm.type_index, ); break :blk ext_decl; } else if (func_val.castTag(.decl_ref)) |decl_ref| { - break :blk module.declPtr(decl_ref.data); + const decl = module.declPtr(decl_ref.data); + try decl.link.wasm.ensureInitialized(func.bin_file); + break :blk decl; } return func.fail("Expected a function, but instead found type '{}'", .{func_val.tag()}); }; @@ -2752,6 +2758,7 @@ fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Ind } module.markDeclAlive(decl); + try decl.link.wasm.ensureInitialized(func.bin_file); const target_sym_index = decl.link.wasm.sym_index; if (decl.ty.zigTypeTag() == .Fn) { diff --git a/src/link.zig b/src/link.zig index 9be5b9ca3a..f9081499a8 100644 --- a/src/link.zig +++ b/src/link.zig @@ -615,7 +615,6 @@ pub const File = struct { return; } switch (base.tag) { - .wasm => return @fieldParentPtr(Wasm, "base", base).allocateDeclIndexes(decl_index), .plan9 => return @fieldParentPtr(Plan9, "base", base).allocateDeclIndexes(decl_index), .coff, @@ -624,6 +623,7 @@ pub const File = struct { .c, .spirv, .nvptx, + .wasm, => {}, } } diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 7129722d16..31dfb87659 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -986,31 +986,23 @@ pub fn deinit(wasm: *Wasm) void { } } -pub fn allocateDeclIndexes(wasm: *Wasm, decl_index: Module.Decl.Index) !void { - if (wasm.llvm_object) |_| return; - const decl = wasm.base.options.module.?.declPtr(decl_index); - if (decl.link.wasm.sym_index != 0) return; - +/// Allocates a new symbol and returns its index. +/// Will re-use slots when a symbol was freed at an earlier stage. 
+pub fn allocateSymbol(wasm: *Wasm) !u32 { try wasm.symbols.ensureUnusedCapacity(wasm.base.allocator, 1); - try wasm.decls.putNoClobber(wasm.base.allocator, decl_index, {}); - - const atom = &decl.link.wasm; - var symbol: Symbol = .{ .name = undefined, // will be set after updateDecl .flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL), .tag = undefined, // will be set after updateDecl .index = undefined, // will be set after updateDecl }; - if (wasm.symbols_free_list.popOrNull()) |index| { - atom.sym_index = index; wasm.symbols.items[index] = symbol; - } else { - atom.sym_index = @intCast(u32, wasm.symbols.items.len); - wasm.symbols.appendAssumeCapacity(symbol); + return index; } - try wasm.symbol_atom.putNoClobber(wasm.base.allocator, atom.symbolLoc(), atom); + const index = @intCast(u32, wasm.symbols.items.len); + wasm.symbols.appendAssumeCapacity(symbol); + return index; } pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { @@ -1026,9 +1018,12 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); - assert(decl.link.wasm.sym_index != 0); // Must call allocateDeclIndexes() - - decl.link.wasm.clear(); + const atom = &decl.link.wasm; + try atom.ensureInitialized(wasm); + const gop = try wasm.decls.getOrPut(wasm.base.allocator, decl_index); + if (gop.found_existing) { + atom.clear(); + } else gop.value_ptr.* = {}; var decl_state: ?Dwarf.DeclState = if (wasm.dwarf) |*dwarf| try dwarf.initDeclState(mod, decl_index) else null; defer if (decl_state) |*ds| ds.deinit(); @@ -1083,16 +1078,19 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi defer tracy.end(); const decl = mod.declPtr(decl_index); - assert(decl.link.wasm.sym_index != 0); // Must call allocateDeclIndexes() - - decl.link.wasm.clear(); - if (decl.val.castTag(.function)) |_| { return; } else if (decl.val.castTag(.extern_fn)) |_| { return; } + const atom = &decl.link.wasm; + try atom.ensureInitialized(wasm); + const gop = try wasm.decls.getOrPut(wasm.base.allocator, decl_index); + if (gop.found_existing) { + atom.clear(); + } else gop.value_ptr.* = {}; + if (decl.isExtern()) { const variable = decl.getVariable().?; const name = mem.sliceTo(decl.name, 0); @@ -1148,8 +1146,8 @@ fn finishUpdateDecl(wasm: *Wasm, decl: *Module.Decl, code: []const u8) !void { try atom.code.appendSlice(wasm.base.allocator, code); try wasm.resolved_symbols.put(wasm.base.allocator, atom.symbolLoc(), {}); - if (code.len == 0) return; atom.size = @intCast(u32, code.len); + if (code.len == 0) return; atom.alignment = decl.ty.abiAlignment(wasm.base.options.target); } @@ -1211,28 +1209,19 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In defer wasm.base.allocator.free(fqdn); const name = try std.fmt.allocPrintZ(wasm.base.allocator, "__unnamed_{s}_{d}", .{ fqdn, local_index }); defer wasm.base.allocator.free(name); - var symbol: Symbol = .{ - .name = try wasm.string_table.put(wasm.base.allocator, name), - .flags = 0, - .tag = .data, - .index = undefined, - }; - symbol.setFlag(.WASM_SYM_BINDING_LOCAL); const atom = try decl.link.wasm.locals.addOne(wasm.base.allocator); atom.* = Atom.empty; + try atom.ensureInitialized(wasm); atom.alignment = tv.ty.abiAlignment(wasm.base.options.target); - try wasm.symbols.ensureUnusedCapacity(wasm.base.allocator, 1); + wasm.symbols.items[atom.sym_index] = .{ + .name = try wasm.string_table.put(wasm.base.allocator, 
name), + .flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL), + .tag = .data, + .index = undefined, + }; - if (wasm.symbols_free_list.popOrNull()) |index| { - atom.sym_index = index; - wasm.symbols.items[index] = symbol; - } else { - atom.sym_index = @intCast(u32, wasm.symbols.items.len); - wasm.symbols.appendAssumeCapacity(symbol); - } try wasm.resolved_symbols.putNoClobber(wasm.base.allocator, atom.symbolLoc(), {}); - try wasm.symbol_atom.putNoClobber(wasm.base.allocator, atom.symbolLoc(), atom); var value_bytes = std.ArrayList(u8).init(wasm.base.allocator); defer value_bytes.deinit(); @@ -1304,8 +1293,8 @@ pub fn getDeclVAddr( ) !u64 { const mod = wasm.base.options.module.?; const decl = mod.declPtr(decl_index); + try decl.link.wasm.ensureInitialized(wasm); const target_symbol_index = decl.link.wasm.sym_index; - assert(target_symbol_index != 0); assert(reloc_info.parent_atom_index != 0); const atom = wasm.symbol_atom.get(.{ .file = null, .index = reloc_info.parent_atom_index }).?; const is_wasm32 = wasm.base.options.target.cpu.arch == .wasm32; @@ -1363,6 +1352,7 @@ pub fn updateDeclExports( } const decl = mod.declPtr(decl_index); + if (decl.link.wasm.getSymbolIndex() == null) return; // uninitialized for (exports) |exp| { if (exp.options.section) |section| { diff --git a/src/link/Wasm/Atom.zig b/src/link/Wasm/Atom.zig index de9cefebdc..20f847e475 100644 --- a/src/link/Wasm/Atom.zig +++ b/src/link/Wasm/Atom.zig @@ -95,6 +95,17 @@ pub fn symbolLoc(atom: Atom) Wasm.SymbolLoc { return .{ .file = atom.file, .index = atom.sym_index }; } +pub fn ensureInitialized(atom: *Atom, wasm_bin: *Wasm) !void { + if (atom.getSymbolIndex() != null) return; // already initialized + atom.sym_index = try wasm_bin.allocateSymbol(); + try wasm_bin.symbol_atom.putNoClobber(wasm_bin.base.allocator, atom.symbolLoc(), atom); +} + +pub fn getSymbolIndex(atom: Atom) ?u32 { + if (atom.sym_index == 0) return null; + return atom.sym_index; +} + /// Returns the virtual address of the `Atom`. This is the address starting /// from the first entry within a section. pub fn getVA(atom: Atom, wasm: *const Wasm, symbol: *const Symbol) u32 { -- cgit v1.2.3 From a8987291390d80ad9e2bb45ba225313a108eed0b Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Fri, 27 Jan 2023 19:51:06 +0100 Subject: self-hosted: remove allocateDeclIndexes from the public link.File API --- src/Module.zig | 26 -------------------------- src/Sema.zig | 1 - src/link.zig | 35 +++-------------------------------- src/link/Plan9.zig | 9 ++------- 4 files changed, 5 insertions(+), 66 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Module.zig b/src/Module.zig index 713680c5fa..dcdbeec322 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -4585,7 +4585,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { // We don't fully codegen the decl until later, but we do need to reserve a global // offset table index for it. This allows us to codegen decls out of dependency // order, increasing how many computations can be done in parallel. - try mod.comp.bin_file.allocateDeclIndexes(decl_index); try mod.comp.work_queue.writeItem(.{ .codegen_func = func }); if (type_changed and mod.emit_h != null) { try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index }); @@ -4697,7 +4696,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { // codegen backend wants full access to the Decl Type.
try sema.resolveTypeFully(decl.ty); - try mod.comp.bin_file.allocateDeclIndexes(decl_index); try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl_index }); if (type_changed and mod.emit_h != null) { @@ -5315,29 +5313,6 @@ pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void { const decl = mod.declPtr(decl_index); log.debug("deleteUnusedDecl {d} ({s})", .{ decl_index, decl.name }); - // TODO: remove `allocateDeclIndexes` and make the API that the linker backends - // are required to notice the first time `updateDecl` happens and keep track - // of it themselves. However they can rely on getting a `freeDecl` call if any - // `updateDecl` or `updateFunc` calls happen. This will allow us to avoid any call - // into the linker backend here, since the linker backend will never have been told - // about the Decl in the first place. - // Until then, we did call `allocateDeclIndexes` on this anonymous Decl and so we - // must call `freeDecl` in the linker backend now. - switch (mod.comp.bin_file.tag) { - .coff, - .elf, - .macho, - .c, - .wasm, - => {}, // this linker backend has already migrated to the new API - - else => if (decl.has_tv) { - if (decl.ty.isFnOrHasRuntimeBits()) { - mod.comp.bin_file.freeDecl(decl_index); - } - }, - } - assert(!mod.declIsRoot(decl_index)); assert(decl.src_namespace.anon_decls.swapRemove(decl_index)); @@ -5822,7 +5797,6 @@ pub fn initNewAnonDecl( // the Decl will be garbage collected by the `codegen_decl` task instead of sent // to the linker. if (typed_value.ty.isFnOrHasRuntimeBits()) { - try mod.comp.bin_file.allocateDeclIndexes(new_decl_index); try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl_index }); } } diff --git a/src/Sema.zig b/src/Sema.zig index 2e57de2406..9c553a0092 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -7510,7 +7510,6 @@ fn resolveGenericInstantiationType( // Queue up a `codegen_func` work item for the new Fn. The `comptime_args` field // will be populated, ensuring it will have `analyzeBody` called with the ZIR // parameters mapped appropriately. - try mod.comp.bin_file.allocateDeclIndexes(new_decl_index); try mod.comp.work_queue.writeItem(.{ .codegen_func = new_func }); return new_func; } diff --git a/src/link.zig b/src/link.zig index f9081499a8..668c5b72e3 100644 --- a/src/link.zig +++ b/src/link.zig @@ -533,8 +533,7 @@ pub const File = struct { } } - /// May be called before or after updateDeclExports but must be called - /// after allocateDeclIndexes for any given Decl. + /// May be called before or after updateDeclExports for any given Decl. pub fn updateDecl(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void { const decl = module.declPtr(decl_index); log.debug("updateDecl {*} ({s}), type={}", .{ decl, decl.name, decl.ty.fmtDebug() }); @@ -557,8 +556,7 @@ pub const File = struct { } } - /// May be called before or after updateDeclExports but must be called - /// after allocateDeclIndexes for any given Decl. + /// May be called before or after updateDeclExports for any given Decl. pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) UpdateDeclError!void { const owner_decl = module.declPtr(func.owner_decl); log.debug("updateFunc {*} ({s}), type={}", .{ @@ -602,32 +600,6 @@ pub const File = struct { } } - /// Must be called before any call to updateDecl or updateDeclExports for - /// any given Decl. 
- /// TODO we're transitioning to deleting this function and instead having - /// each linker backend notice the first time updateDecl or updateFunc is called, or - /// a callee referenced from AIR. - pub fn allocateDeclIndexes(base: *File, decl_index: Module.Decl.Index) error{OutOfMemory}!void { - const decl = base.options.module.?.declPtr(decl_index); - log.debug("allocateDeclIndexes {*} ({s})", .{ decl, decl.name }); - if (build_options.only_c) { - assert(base.tag == .c); - return; - } - switch (base.tag) { - .plan9 => return @fieldParentPtr(Plan9, "base", base).allocateDeclIndexes(decl_index), - - .coff, - .elf, - .macho, - .c, - .spirv, - .nvptx, - .wasm, - => {}, - } - } - pub fn releaseLock(self: *File) void { if (self.lock) |*lock| { lock.release(); @@ -878,8 +850,7 @@ pub const File = struct { AnalysisFail, }; - /// May be called before or after updateDecl, but must be called after - /// allocateDeclIndexes for any given Decl. + /// May be called before or after updateDecl for any given Decl. pub fn updateDeclExports( base: *File, module: *Module, diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index e412c78f7f..a8b8caafab 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -424,7 +424,7 @@ fn updateFinish(self: *Plan9, decl: *Module.Decl) !void { // write the internal linker metadata decl.link.plan9.type = sym_t; // write the symbol - // we already have the got index because that got allocated in allocateDeclIndexes + // we already have the got index const sym: aout.Sym = .{ .value = undefined, // the value of stuff gets filled in in flushModule .type = decl.link.plan9.type, @@ -737,7 +737,7 @@ fn addDeclExports( pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void { // TODO audit the lifetimes of decls table entries. It's possible to get - // allocateDeclIndexes and then freeDecl without any updateDecl in between. + // freeDecl without any updateDecl in between. // However that is planned to change, see the TODO comment in Module.zig // in the deleteUnusedDecl function. const mod = self.base.options.module.?; @@ -959,11 +959,6 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { } } -/// this will be removed, moved to updateFinish -pub fn allocateDeclIndexes(self: *Plan9, decl_index: Module.Decl.Index) !void { - _ = self; - _ = decl_index; -} /// Must be called only after a successful call to `updateDecl`. pub fn updateDeclLineNumber(self: *Plan9, mod: *Module, decl: *const Module.Decl) !void { _ = self; -- cgit v1.2.3
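Taken together, this patch series converges on a single idiom across the migrated backends: an Atom whose sym_index is 0 has no symbol yet, ensureInitialized lazily allocates a symbol slot on first use, and getSymbolIndex exposes the index as an optional. Below is a minimal, self-contained Zig sketch of that idiom. The Linker, Symbol, and Atom types are simplified stand-ins invented for illustration (the real backends hang this state off link.File.Wasm / link.File.MachO and track far more); only allocateSymbol, ensureInitialized, and getSymbolIndex mirror the functions shown in the diffs above.

const std = @import("std");

const Symbol = struct {
    name: []const u8 = "",
};

const Linker = struct {
    gpa: std.mem.Allocator,
    symbols: std.ArrayListUnmanaged(Symbol) = .{},
    symbols_free_list: std.ArrayListUnmanaged(u32) = .{},

    fn init(gpa: std.mem.Allocator) !Linker {
        var self = Linker{ .gpa = gpa };
        // Reserve slot 0 as the null symbol so that sym_index == 0 can
        // mean "uninitialized" (an assumption this sketch makes explicit).
        try self.symbols.append(gpa, .{ .name = "<null>" });
        return self;
    }

    fn deinit(self: *Linker) void {
        self.symbols.deinit(self.gpa);
        self.symbols_free_list.deinit(self.gpa);
    }

    /// Mirrors Wasm.allocateSymbol from the series: re-use a freed slot
    /// when one is available, otherwise grow the symbol table.
    fn allocateSymbol(self: *Linker) !u32 {
        try self.symbols.ensureUnusedCapacity(self.gpa, 1);
        const symbol: Symbol = .{}; // name/tag/index get filled in by a later update
        if (self.symbols_free_list.popOrNull()) |index| {
            self.symbols.items[index] = symbol;
            return index;
        }
        const index = @intCast(u32, self.symbols.items.len);
        self.symbols.appendAssumeCapacity(symbol);
        return index;
    }
};

const Atom = struct {
    /// 0 doubles as "no symbol allocated yet", matching the convention
    /// the backends rely on in getSymbolIndex.
    sym_index: u32 = 0,

    fn getSymbolIndex(atom: Atom) ?u32 {
        if (atom.sym_index == 0) return null;
        return atom.sym_index;
    }

    /// The first caller to touch the atom allocates its symbol; every
    /// later call is a no-op. This is what replaces the old up-front
    /// allocateDeclIndexes call.
    fn ensureInitialized(atom: *Atom, linker: *Linker) !void {
        if (atom.getSymbolIndex() != null) return; // already initialized
        atom.sym_index = try linker.allocateSymbol();
    }
};

test "atoms allocate their symbol lazily" {
    var linker = try Linker.init(std.testing.allocator);
    defer linker.deinit();

    var atom = Atom{};
    try std.testing.expect(atom.getSymbolIndex() == null);

    try atom.ensureInitialized(&linker);
    const index = atom.getSymbolIndex().?;
    try atom.ensureInitialized(&linker); // second call is a no-op
    try std.testing.expectEqual(index, atom.getSymbolIndex().?);
}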
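The other half of the new contract is cleanup: as the removed Module.zig comment puts it, a backend that saw an updateDecl/updateFunc can rely on a matching freeDecl, at which point the symbol slot goes back on symbols_free_list for allocateSymbol to recycle. What follows is a hedged sketch of that counterpart, continuing the hypothetical types above; the real freeDecl implementations also drop relocations, atom tables, and debug-info state.

/// Hypothetical free path: return the slot to the free list and put the
/// atom back in its "uninitialized" state, so a later ensureInitialized
/// would allocate a slot again.
fn freeAtom(linker: *Linker, atom: *Atom) void {
    // Never initialized: the linker was never told about this atom,
    // so there is nothing to release.
    const sym_index = atom.getSymbolIndex() orelse return;
    // In this sketch an OOM while growing the free list simply leaks the
    // slot until the linker is torn down.
    linker.symbols_free_list.append(linker.gpa, sym_index) catch {};
    atom.sym_index = 0;
}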