author    Jacob Young <15544577+jacobly0@users.noreply.github.com>  2025-10-03 05:03:44 -0400
committer GitHub <noreply@github.com>  2025-10-03 05:03:44 -0400
commit    12ed0ff1efa71d11f6220d9cf94202b888e177fc (patch)
tree      58679862045c06d8ebe2f1c67bedb5f6f91eb3fa /src/link
parent    1f083e9ed78f5c3c2d848d0abc58612c4ce88804 (diff)
parent    759e038a44eda0c950f0a5baac37b3a1d7f786b3 (diff)
Merge pull request #25430 from jacobly0/x86_64-win
Coff2: create a new linker from scratch
Diffstat (limited to 'src/link')
-rw-r--r--  src/link/Coff.zig        3169
-rw-r--r--  src/link/Coff2.zig       2193
-rw-r--r--  src/link/Elf2.zig         628
-rw-r--r--  src/link/MappedFile.zig    70
4 files changed, 2568 insertions, 3492 deletions
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
deleted file mode 100644
index f3e3b3d0b5..0000000000
--- a/src/link/Coff.zig
+++ /dev/null
@@ -1,3169 +0,0 @@
-//! The main driver of the self-hosted COFF linker.
-const Coff = @This();
-
-const std = @import("std");
-const build_options = @import("build_options");
-const builtin = @import("builtin");
-const assert = std.debug.assert;
-const coff_util = std.coff;
-const fmt = std.fmt;
-const fs = std.fs;
-const log = std.log.scoped(.link);
-const math = std.math;
-const mem = std.mem;
-
-const Allocator = std.mem.Allocator;
-const Path = std.Build.Cache.Path;
-const Directory = std.Build.Cache.Directory;
-const Cache = std.Build.Cache;
-
-const aarch64_util = link.aarch64;
-const allocPrint = std.fmt.allocPrint;
-const codegen = @import("../codegen.zig");
-const link = @import("../link.zig");
-const target_util = @import("../target.zig");
-const trace = @import("../tracy.zig").trace;
-
-const Compilation = @import("../Compilation.zig");
-const Zcu = @import("../Zcu.zig");
-const InternPool = @import("../InternPool.zig");
-const TableSection = @import("table_section.zig").TableSection;
-const StringTable = @import("StringTable.zig");
-const Type = @import("../Type.zig");
-const Value = @import("../Value.zig");
-const AnalUnit = InternPool.AnalUnit;
-const dev = @import("../dev.zig");
-
-base: link.File,
-image_base: u64,
-/// TODO this and minor_subsystem_version should be combined into one property and left as
-/// default or populated together. They should not be separate fields.
-major_subsystem_version: u16,
-minor_subsystem_version: u16,
-entry: link.File.OpenOptions.Entry,
-entry_addr: ?u32,
-module_definition_file: ?[]const u8,
-repro: bool,
-
-ptr_width: PtrWidth,
-page_size: u32,
-
-sections: std.MultiArrayList(Section) = .{},
-data_directories: [coff_util.IMAGE_NUMBEROF_DIRECTORY_ENTRIES]coff_util.ImageDataDirectory,
-
-text_section_index: ?u16 = null,
-got_section_index: ?u16 = null,
-rdata_section_index: ?u16 = null,
-data_section_index: ?u16 = null,
-reloc_section_index: ?u16 = null,
-idata_section_index: ?u16 = null,
-
-locals: std.ArrayListUnmanaged(coff_util.Symbol) = .empty,
-globals: std.ArrayListUnmanaged(SymbolWithLoc) = .empty,
-resolver: std.StringHashMapUnmanaged(u32) = .empty,
-unresolved: std.AutoArrayHashMapUnmanaged(u32, bool) = .empty,
-need_got_table: std.AutoHashMapUnmanaged(u32, void) = .empty,
-
-locals_free_list: std.ArrayListUnmanaged(u32) = .empty,
-globals_free_list: std.ArrayListUnmanaged(u32) = .empty,
-
-strtab: StringTable = .{},
-strtab_offset: ?u32 = null,
-
-temp_strtab: StringTable = .{},
-
-got_table: TableSection(SymbolWithLoc) = .{},
-
-/// A table of ImportTables partitioned by the library name.
-/// Key is an offset into the interning string table `temp_strtab`.
-import_tables: std.AutoArrayHashMapUnmanaged(u32, ImportTable) = .empty,
-
-got_table_count_dirty: bool = true,
-got_table_contents_dirty: bool = true,
-imports_count_dirty: bool = true,
-
-/// Table of tracked LazySymbols.
-lazy_syms: LazySymbolTable = .{},
-
-/// Table of tracked `Nav`s.
-navs: NavTable = .{},
-
-/// List of atoms that are either synthetic or map directly to the Zig source program.
-atoms: std.ArrayListUnmanaged(Atom) = .empty,
-
-/// Table of atoms indexed by the symbol index.
-atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .empty,
-
-uavs: UavTable = .{},
-
-/// A table of relocations indexed by the `Atom` that owns them.
-/// Note that once we refactor `Atom`'s lifetime and ownership rules,
-/// this will be a table indexed by index into the list of Atoms.
-relocs: RelocTable = .{},
-
-/// A table of base relocations indexed by the `Atom` that owns them.
-/// Note that once we refactor `Atom`'s lifetime and ownership rules,
-/// this will be a table indexed by index into the list of Atoms.
-base_relocs: BaseRelocationTable = .{},
-
-/// Hot-code swapping state.
-hot_state: if (is_hot_update_compatible) HotUpdateState else struct {} = .{},
-
-const is_hot_update_compatible = switch (builtin.target.os.tag) {
- .windows => true,
- else => false,
-};
-
-const HotUpdateState = struct {
- /// Base address at which the process (image) got loaded.
- /// We need this info to correctly slide pointers when relocating.
- loaded_base_address: ?std.os.windows.HMODULE = null,
-};
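-
-// Note: symbol values in this file are RVAs, so when hot-swapping, the loaded
-// base address itself serves as the slide that is added to each value (see
-// writeAtom and writeOffsetTableEntry).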
-
-const NavTable = std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, AvMetadata);
-const UavTable = std.AutoHashMapUnmanaged(InternPool.Index, AvMetadata);
-const RelocTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Relocation));
-const BaseRelocationTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(u32));
-
-const default_file_alignment: u16 = 0x200;
-const default_size_of_stack_reserve: u32 = 0x1000000;
-const default_size_of_stack_commit: u32 = 0x1000;
-const default_size_of_heap_reserve: u32 = 0x100000;
-const default_size_of_heap_commit: u32 = 0x1000;
-
-const Section = struct {
- header: coff_util.SectionHeader,
-
- last_atom_index: ?Atom.Index = null,
-
- /// A list of atoms that have surplus capacity. This list can have false
- /// positives, as functions grow and shrink over time, only sometimes being added
- /// or removed from the freelist.
- ///
- /// An atom has surplus capacity when its overcapacity value is greater than
-    /// padToIdeal(minimum_atom_size). That is, when it has so much extra
-    /// capacity that we could fit a small new symbol in it, itself with
-    /// ideal_capacity or more.
- ///
- /// Ideal capacity is defined by size + (size / ideal_factor).
- ///
- /// Overcapacity is measured by actual_capacity - ideal_capacity. Note that
- /// overcapacity can be negative. A simple way to have negative overcapacity is to
- /// allocate a fresh atom, which will have ideal capacity, and then grow it
- /// by 1 byte. It will then have -1 overcapacity.
- free_list: std.ArrayListUnmanaged(Atom.Index) = .empty,
-};
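-
-// For example (illustrative numbers, using ideal_factor = 3 and integer
-// division): a fresh atom of size 120 is allocated with ideal capacity
-// 120 + 120 / 3 = 160. Growing it by one byte raises its ideal capacity to
-// 121 + 121 / 3 = 161 while its actual capacity remains 160, which is the
-// -1 overcapacity described above.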
-
-const LazySymbolTable = std.AutoArrayHashMapUnmanaged(InternPool.Index, LazySymbolMetadata);
-
-const LazySymbolMetadata = struct {
- const State = enum { unused, pending_flush, flushed };
- text_atom: Atom.Index = undefined,
- rdata_atom: Atom.Index = undefined,
- text_state: State = .unused,
- rdata_state: State = .unused,
-};
-
-const AvMetadata = struct {
- atom: Atom.Index,
- section: u16,
-    /// A list of all export aliases of this Decl.
- exports: std.ArrayListUnmanaged(u32) = .empty,
-
- fn deinit(m: *AvMetadata, allocator: Allocator) void {
- m.exports.deinit(allocator);
- }
-
- fn getExport(m: AvMetadata, coff: *const Coff, name: []const u8) ?u32 {
- for (m.exports.items) |exp| {
- if (mem.eql(u8, name, coff.getSymbolName(.{
- .sym_index = exp,
- .file = null,
- }))) return exp;
- }
- return null;
- }
-
- fn getExportPtr(m: *AvMetadata, coff: *Coff, name: []const u8) ?*u32 {
- for (m.exports.items) |*exp| {
- if (mem.eql(u8, name, coff.getSymbolName(.{
- .sym_index = exp.*,
- .file = null,
- }))) return exp;
- }
- return null;
- }
-};
-
-pub const PtrWidth = enum {
- p32,
- p64,
-
- /// Size in bytes.
- pub fn size(pw: PtrWidth) u4 {
- return switch (pw) {
- .p32 => 4,
- .p64 => 8,
- };
- }
-};
-
-pub const SymbolWithLoc = struct {
- // Index into the respective symbol table.
- sym_index: u32,
-
- // null means it's a synthetic global or Zig source.
- file: ?u32 = null,
-
- pub fn eql(this: SymbolWithLoc, other: SymbolWithLoc) bool {
- if (this.file == null and other.file == null) {
- return this.sym_index == other.sym_index;
- }
- if (this.file != null and other.file != null) {
- return this.sym_index == other.sym_index and this.file.? == other.file.?;
- }
- return false;
- }
-};
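-
-// For example: (SymbolWithLoc{ .sym_index = 5 }).eql(.{ .sym_index = 5 }) is
-// true, while (SymbolWithLoc{ .sym_index = 5 }).eql(.{ .sym_index = 5, .file = 0 })
-// is false: a synthetic/Zig symbol never equals an object file's symbol, even
-// when the indices coincide.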
-
-/// When allocating, the ideal_capacity is calculated by
-/// actual_capacity + (actual_capacity / ideal_factor)
-const ideal_factor = 3;
-
-/// In order for a slice of bytes to be considered eligible to keep metadata pointing at
-/// it as a possible place to put new symbols, it must have enough room for this many bytes
-/// (plus extra for reserved capacity).
-const minimum_text_block_size = 64;
-pub const min_text_capacity = padToIdeal(minimum_text_block_size);
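-// Per the doc comments above, padToIdeal(size) is size + size / ideal_factor
-// (padToIdeal itself is not shown in this hunk), so min_text_capacity works
-// out to padToIdeal(64) = 64 + 64 / 3 = 85 bytes.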
-
-pub fn createEmpty(
- arena: Allocator,
- comp: *Compilation,
- emit: Path,
- options: link.File.OpenOptions,
-) !*Coff {
- const target = &comp.root_mod.resolved_target.result;
- assert(target.ofmt == .coff);
- const optimize_mode = comp.root_mod.optimize_mode;
- const output_mode = comp.config.output_mode;
- const link_mode = comp.config.link_mode;
- const use_llvm = comp.config.use_llvm;
-
- const ptr_width: PtrWidth = switch (target.ptrBitWidth()) {
- 0...32 => .p32,
- 33...64 => .p64,
- else => return error.UnsupportedCOFFArchitecture,
- };
- const page_size: u32 = switch (target.cpu.arch) {
- else => 0x1000,
- };
-
- const coff = try arena.create(Coff);
- coff.* = .{
- .base = .{
- .tag = .coff,
- .comp = comp,
- .emit = emit,
- .zcu_object_basename = if (use_llvm)
- try std.fmt.allocPrint(arena, "{s}_zcu.obj", .{fs.path.stem(emit.sub_path)})
- else
- null,
- .stack_size = options.stack_size orelse 16777216,
- .gc_sections = options.gc_sections orelse (optimize_mode != .Debug),
- .print_gc_sections = options.print_gc_sections,
- .allow_shlib_undefined = options.allow_shlib_undefined orelse false,
- .file = null,
- .build_id = options.build_id,
- },
- .ptr_width = ptr_width,
- .page_size = page_size,
-
- .data_directories = [1]coff_util.ImageDataDirectory{.{
- .virtual_address = 0,
- .size = 0,
- }} ** coff_util.IMAGE_NUMBEROF_DIRECTORY_ENTRIES,
-
- .image_base = options.image_base orelse switch (output_mode) {
- .Exe => switch (target.cpu.arch) {
- .aarch64, .x86_64 => 0x140000000,
- .thumb, .x86 => 0x400000,
- else => unreachable,
- },
- .Lib => switch (target.cpu.arch) {
- .aarch64, .x86_64 => 0x180000000,
- .thumb, .x86 => 0x10000000,
- else => unreachable,
- },
- .Obj => 0,
- },
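-        // These match the conventional Windows default image bases for EXEs and DLLs.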
-
- .entry = options.entry,
-
- .major_subsystem_version = options.major_subsystem_version orelse 6,
- .minor_subsystem_version = options.minor_subsystem_version orelse 0,
- .entry_addr = math.cast(u32, options.entry_addr orelse 0) orelse
- return error.EntryAddressTooBig,
- .module_definition_file = options.module_definition_file,
- .repro = options.repro,
- };
- errdefer coff.base.destroy();
-
- coff.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{
- .truncate = true,
- .read = true,
- .mode = link.File.determineMode(output_mode, link_mode),
- });
-
- const gpa = comp.gpa;
-
- try coff.strtab.buffer.ensureUnusedCapacity(gpa, @sizeOf(u32));
- coff.strtab.buffer.appendNTimesAssumeCapacity(0, @sizeOf(u32));
-
- try coff.temp_strtab.buffer.append(gpa, 0);
-
- // Index 0 is always a null symbol.
- try coff.locals.append(gpa, .{
- .name = [_]u8{0} ** 8,
- .value = 0,
- .section_number = .UNDEFINED,
- .type = .{ .base_type = .NULL, .complex_type = .NULL },
- .storage_class = .NULL,
- .number_of_aux_symbols = 0,
- });
-
- if (coff.text_section_index == null) {
- const file_size: u32 = @intCast(options.program_code_size_hint);
- coff.text_section_index = try coff.allocateSection(".text", file_size, .{
- .CNT_CODE = 1,
- .MEM_EXECUTE = 1,
- .MEM_READ = 1,
- });
- }
-
- if (coff.got_section_index == null) {
- const file_size = @as(u32, @intCast(options.symbol_count_hint)) * coff.ptr_width.size();
- coff.got_section_index = try coff.allocateSection(".got", file_size, .{
- .CNT_INITIALIZED_DATA = 1,
- .MEM_READ = 1,
- });
- }
-
- if (coff.rdata_section_index == null) {
- const file_size: u32 = coff.page_size;
- coff.rdata_section_index = try coff.allocateSection(".rdata", file_size, .{
- .CNT_INITIALIZED_DATA = 1,
- .MEM_READ = 1,
- });
- }
-
- if (coff.data_section_index == null) {
- const file_size: u32 = coff.page_size;
- coff.data_section_index = try coff.allocateSection(".data", file_size, .{
- .CNT_INITIALIZED_DATA = 1,
- .MEM_READ = 1,
- .MEM_WRITE = 1,
- });
- }
-
- if (coff.idata_section_index == null) {
- const file_size = @as(u32, @intCast(options.symbol_count_hint)) * coff.ptr_width.size();
- coff.idata_section_index = try coff.allocateSection(".idata", file_size, .{
- .CNT_INITIALIZED_DATA = 1,
- .MEM_READ = 1,
- });
- }
-
- if (coff.reloc_section_index == null) {
- const file_size = @as(u32, @intCast(options.symbol_count_hint)) * @sizeOf(coff_util.BaseRelocation);
- coff.reloc_section_index = try coff.allocateSection(".reloc", file_size, .{
- .CNT_INITIALIZED_DATA = 1,
- .MEM_DISCARDABLE = 1,
- .MEM_READ = 1,
- });
- }
-
- if (coff.strtab_offset == null) {
- const file_size = @as(u32, @intCast(coff.strtab.buffer.items.len));
-        coff.strtab_offset = coff.findFreeSpace(file_size, @alignOf(u32)); // 4-byte alignment seems like a good idea here
- log.debug("found strtab free space 0x{x} to 0x{x}", .{ coff.strtab_offset.?, coff.strtab_offset.? + file_size });
- }
-
- {
- // We need to find out what the max file offset is according to section headers.
-        // Otherwise, we may end up with a COFF binary whose file size does not match the
-        // final section's offset + file size.
- // TODO I don't like this here one bit
- var max_file_offset: u64 = 0;
- for (coff.sections.items(.header)) |header| {
- if (header.pointer_to_raw_data + header.size_of_raw_data > max_file_offset) {
- max_file_offset = header.pointer_to_raw_data + header.size_of_raw_data;
- }
- }
- try coff.pwriteAll(&[_]u8{0}, max_file_offset);
- }
-
- return coff;
-}
-
-pub fn open(
- arena: Allocator,
- comp: *Compilation,
- emit: Path,
- options: link.File.OpenOptions,
-) !*Coff {
- // TODO: restore saved linker state, don't truncate the file, and
- // participate in incremental compilation.
- return createEmpty(arena, comp, emit, options);
-}
-
-pub fn deinit(coff: *Coff) void {
- const gpa = coff.base.comp.gpa;
-
- for (coff.sections.items(.free_list)) |*free_list| {
- free_list.deinit(gpa);
- }
- coff.sections.deinit(gpa);
-
- coff.atoms.deinit(gpa);
- coff.locals.deinit(gpa);
- coff.globals.deinit(gpa);
-
- {
- var it = coff.resolver.keyIterator();
- while (it.next()) |key_ptr| {
- gpa.free(key_ptr.*);
- }
- coff.resolver.deinit(gpa);
- }
-
- coff.unresolved.deinit(gpa);
- coff.need_got_table.deinit(gpa);
- coff.locals_free_list.deinit(gpa);
- coff.globals_free_list.deinit(gpa);
- coff.strtab.deinit(gpa);
- coff.temp_strtab.deinit(gpa);
- coff.got_table.deinit(gpa);
-
- for (coff.import_tables.values()) |*itab| {
- itab.deinit(gpa);
- }
- coff.import_tables.deinit(gpa);
-
- coff.lazy_syms.deinit(gpa);
-
- for (coff.navs.values()) |*metadata| {
- metadata.deinit(gpa);
- }
- coff.navs.deinit(gpa);
-
- coff.atom_by_index_table.deinit(gpa);
-
- {
- var it = coff.uavs.iterator();
- while (it.next()) |entry| {
- entry.value_ptr.exports.deinit(gpa);
- }
- coff.uavs.deinit(gpa);
- }
-
- for (coff.relocs.values()) |*relocs| {
- relocs.deinit(gpa);
- }
- coff.relocs.deinit(gpa);
-
- for (coff.base_relocs.values()) |*relocs| {
- relocs.deinit(gpa);
- }
- coff.base_relocs.deinit(gpa);
-}
-
-fn allocateSection(coff: *Coff, name: []const u8, size: u32, flags: coff_util.SectionHeaderFlags) !u16 {
- const index = @as(u16, @intCast(coff.sections.slice().len));
- const off = coff.findFreeSpace(size, default_file_alignment);
- // Memory is always allocated in sequence
- // TODO: investigate if we can allocate .text last; this way it would never need to grow in memory!
- const vaddr = blk: {
- if (index == 0) break :blk coff.page_size;
- const prev_header = coff.sections.items(.header)[index - 1];
- break :blk mem.alignForward(u32, prev_header.virtual_address + prev_header.virtual_size, coff.page_size);
- };
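-    // For example (illustrative numbers, with page_size = 0x1000): the first
-    // section is placed at vaddr 0x1000; if its virtual_size is 0x2500, the
-    // next section starts at alignForward(0x1000 + 0x2500, 0x1000) = 0x4000.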
- // We commit more memory than needed upfront so that we don't have to reallocate too soon.
- const memsz = mem.alignForward(u32, size, coff.page_size) * 100;
- log.debug("found {s} free space 0x{x} to 0x{x} (0x{x} - 0x{x})", .{
- name,
- off,
- off + size,
- vaddr,
- vaddr + size,
- });
- var header = coff_util.SectionHeader{
- .name = undefined,
- .virtual_size = memsz,
- .virtual_address = vaddr,
- .size_of_raw_data = size,
- .pointer_to_raw_data = off,
- .pointer_to_relocations = 0,
- .pointer_to_linenumbers = 0,
- .number_of_relocations = 0,
- .number_of_linenumbers = 0,
- .flags = flags,
- };
- const gpa = coff.base.comp.gpa;
- try coff.setSectionName(&header, name);
- try coff.sections.append(gpa, .{ .header = header });
- return index;
-}
-
-fn growSection(coff: *Coff, sect_id: u32, needed_size: u32) !void {
- const header = &coff.sections.items(.header)[sect_id];
- const maybe_last_atom_index = coff.sections.items(.last_atom_index)[sect_id];
- const sect_capacity = coff.allocatedSize(header.pointer_to_raw_data);
-
- if (needed_size > sect_capacity) {
- const new_offset = coff.findFreeSpace(needed_size, default_file_alignment);
- const current_size = if (maybe_last_atom_index) |last_atom_index| blk: {
- const last_atom = coff.getAtom(last_atom_index);
- const sym = last_atom.getSymbol(coff);
- break :blk (sym.value + last_atom.size) - header.virtual_address;
- } else 0;
- log.debug("moving {s} from 0x{x} to 0x{x}", .{
- coff.getSectionName(header),
- header.pointer_to_raw_data,
- new_offset,
- });
- const amt = try coff.base.file.?.copyRangeAll(
- header.pointer_to_raw_data,
- coff.base.file.?,
- new_offset,
- current_size,
- );
- if (amt != current_size) return error.InputOutput;
- header.pointer_to_raw_data = new_offset;
- }
-
- const sect_vm_capacity = coff.allocatedVirtualSize(header.virtual_address);
- if (needed_size > sect_vm_capacity) {
- coff.markRelocsDirtyByAddress(header.virtual_address + header.virtual_size);
- try coff.growSectionVirtualMemory(sect_id, needed_size);
- }
-
- header.virtual_size = @max(header.virtual_size, needed_size);
- header.size_of_raw_data = needed_size;
-}
-
-fn growSectionVirtualMemory(coff: *Coff, sect_id: u32, needed_size: u32) !void {
- const header = &coff.sections.items(.header)[sect_id];
- const increased_size = padToIdeal(needed_size);
- const old_aligned_end = header.virtual_address + mem.alignForward(u32, header.virtual_size, coff.page_size);
- const new_aligned_end = header.virtual_address + mem.alignForward(u32, increased_size, coff.page_size);
- const diff = new_aligned_end - old_aligned_end;
- log.debug("growing {s} in virtual memory by {x}", .{ coff.getSectionName(header), diff });
-
- // TODO: enforce order by increasing VM addresses in coff.sections container.
- // This is required by the loader anyhow as far as I can tell.
- for (coff.sections.items(.header)[sect_id + 1 ..], 0..) |*next_header, next_sect_id| {
- const maybe_last_atom_index = coff.sections.items(.last_atom_index)[sect_id + 1 + next_sect_id];
- next_header.virtual_address += diff;
-
- if (maybe_last_atom_index) |last_atom_index| {
- var atom_index = last_atom_index;
- while (true) {
- const atom = coff.getAtom(atom_index);
- const sym = atom.getSymbolPtr(coff);
- sym.value += diff;
-
- if (atom.prev_index) |prev_index| {
- atom_index = prev_index;
- } else break;
- }
- }
- }
-
- header.virtual_size = increased_size;
-}
-
-fn allocateAtom(coff: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 {
- const tracy = trace(@src());
- defer tracy.end();
-
- const atom = coff.getAtom(atom_index);
- const sect_id = @intFromEnum(atom.getSymbol(coff).section_number) - 1;
- const header = &coff.sections.items(.header)[sect_id];
- const free_list = &coff.sections.items(.free_list)[sect_id];
- const maybe_last_atom_index = &coff.sections.items(.last_atom_index)[sect_id];
- const new_atom_ideal_capacity = if (header.isCode()) padToIdeal(new_atom_size) else new_atom_size;
-
- // We use these to indicate our intention to update metadata, placing the new atom,
- // and possibly removing a free list node.
-    // It would be simpler to do it inside the loop below, but that would cause a
- // problem if an error was returned later in the function. So this action
- // is actually carried out at the end of the function, when errors are no longer possible.
- var atom_placement: ?Atom.Index = null;
- var free_list_removal: ?usize = null;
-
- // First we look for an appropriately sized free list node.
- // The list is unordered. We'll just take the first thing that works.
- const vaddr = blk: {
- var i: usize = 0;
- while (i < free_list.items.len) {
- const big_atom_index = free_list.items[i];
- const big_atom = coff.getAtom(big_atom_index);
- // We now have a pointer to a live atom that has too much capacity.
- // Is it enough that we could fit this new atom?
- const sym = big_atom.getSymbol(coff);
- const capacity = big_atom.capacity(coff);
- const ideal_capacity = if (header.isCode()) padToIdeal(capacity) else capacity;
- const ideal_capacity_end_vaddr = math.add(u32, sym.value, ideal_capacity) catch ideal_capacity;
- const capacity_end_vaddr = sym.value + capacity;
- const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
- const new_start_vaddr = mem.alignBackward(u32, new_start_vaddr_unaligned, alignment);
- if (new_start_vaddr < ideal_capacity_end_vaddr) {
- // Additional bookkeeping here to notice if this free list node
- // should be deleted because the atom that it points to has grown to take up
- // more of the extra capacity.
- if (!big_atom.freeListEligible(coff)) {
- _ = free_list.swapRemove(i);
- } else {
- i += 1;
- }
- continue;
- }
- // At this point we know that we will place the new atom here. But the
- // remaining question is whether there is still yet enough capacity left
- // over for there to still be a free list node.
- const remaining_capacity = new_start_vaddr - ideal_capacity_end_vaddr;
- const keep_free_list_node = remaining_capacity >= min_text_capacity;
-
- // Set up the metadata to be updated, after errors are no longer possible.
- atom_placement = big_atom_index;
- if (!keep_free_list_node) {
- free_list_removal = i;
- }
- break :blk new_start_vaddr;
- } else if (maybe_last_atom_index.*) |last_index| {
- const last = coff.getAtom(last_index);
- const last_symbol = last.getSymbol(coff);
- const ideal_capacity = if (header.isCode()) padToIdeal(last.size) else last.size;
- const ideal_capacity_end_vaddr = last_symbol.value + ideal_capacity;
- const new_start_vaddr = mem.alignForward(u32, ideal_capacity_end_vaddr, alignment);
- atom_placement = last_index;
- break :blk new_start_vaddr;
- } else {
- break :blk mem.alignForward(u32, header.virtual_address, alignment);
- }
- };
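-
-    // For example (illustrative numbers): a free-list atom with sym.value = 0x1000
-    // and capacity = 0x400 has capacity_end_vaddr = 0x1400; a new atom with
-    // new_atom_ideal_capacity = 0x100 and alignment 0x10 is tried at
-    // alignBackward(0x1400 - 0x100, 0x10) = 0x1300, and accepted only if that
-    // is not below the big atom's ideal capacity end.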
-
- const expand_section = if (atom_placement) |placement_index|
- coff.getAtom(placement_index).next_index == null
- else
- true;
- if (expand_section) {
- const needed_size: u32 = (vaddr + new_atom_size) - header.virtual_address;
- try coff.growSection(sect_id, needed_size);
- maybe_last_atom_index.* = atom_index;
- }
- coff.getAtomPtr(atom_index).size = new_atom_size;
-
- if (atom.prev_index) |prev_index| {
- const prev = coff.getAtomPtr(prev_index);
- prev.next_index = atom.next_index;
- }
- if (atom.next_index) |next_index| {
- const next = coff.getAtomPtr(next_index);
- next.prev_index = atom.prev_index;
- }
-
- if (atom_placement) |big_atom_index| {
- const big_atom = coff.getAtomPtr(big_atom_index);
- const atom_ptr = coff.getAtomPtr(atom_index);
- atom_ptr.prev_index = big_atom_index;
- atom_ptr.next_index = big_atom.next_index;
- big_atom.next_index = atom_index;
- } else {
- const atom_ptr = coff.getAtomPtr(atom_index);
- atom_ptr.prev_index = null;
- atom_ptr.next_index = null;
- }
- if (free_list_removal) |i| {
- _ = free_list.swapRemove(i);
- }
-
- return vaddr;
-}
-
-pub fn allocateSymbol(coff: *Coff) !u32 {
- const gpa = coff.base.comp.gpa;
- try coff.locals.ensureUnusedCapacity(gpa, 1);
-
- const index = blk: {
- if (coff.locals_free_list.pop()) |index| {
- log.debug(" (reusing symbol index {d})", .{index});
- break :blk index;
- } else {
- log.debug(" (allocating symbol index {d})", .{coff.locals.items.len});
- const index = @as(u32, @intCast(coff.locals.items.len));
- _ = coff.locals.addOneAssumeCapacity();
- break :blk index;
- }
- };
-
- coff.locals.items[index] = .{
- .name = [_]u8{0} ** 8,
- .value = 0,
- .section_number = .UNDEFINED,
- .type = .{ .base_type = .NULL, .complex_type = .NULL },
- .storage_class = .NULL,
- .number_of_aux_symbols = 0,
- };
-
- return index;
-}
-
-fn allocateGlobal(coff: *Coff) !u32 {
- const gpa = coff.base.comp.gpa;
- try coff.globals.ensureUnusedCapacity(gpa, 1);
-
- const index = blk: {
- if (coff.globals_free_list.pop()) |index| {
- log.debug(" (reusing global index {d})", .{index});
- break :blk index;
- } else {
- log.debug(" (allocating global index {d})", .{coff.globals.items.len});
- const index = @as(u32, @intCast(coff.globals.items.len));
- _ = coff.globals.addOneAssumeCapacity();
- break :blk index;
- }
- };
-
- coff.globals.items[index] = .{
- .sym_index = 0,
- .file = null,
- };
-
- return index;
-}
-
-fn addGotEntry(coff: *Coff, target: SymbolWithLoc) !void {
- const gpa = coff.base.comp.gpa;
- if (coff.got_table.lookup.contains(target)) return;
- const got_index = try coff.got_table.allocateEntry(gpa, target);
- try coff.writeOffsetTableEntry(got_index);
- coff.got_table_count_dirty = true;
- coff.markRelocsDirtyByTarget(target);
-}
-
-pub fn createAtom(coff: *Coff) !Atom.Index {
- const gpa = coff.base.comp.gpa;
- const atom_index = @as(Atom.Index, @intCast(coff.atoms.items.len));
- const atom = try coff.atoms.addOne(gpa);
- const sym_index = try coff.allocateSymbol();
- try coff.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
- atom.* = .{
- .sym_index = sym_index,
- .file = null,
- .size = 0,
- .prev_index = null,
- .next_index = null,
- };
- log.debug("creating ATOM(%{d}) at index {d}", .{ sym_index, atom_index });
- return atom_index;
-}
-
-fn growAtom(coff: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 {
- const atom = coff.getAtom(atom_index);
- const sym = atom.getSymbol(coff);
- const align_ok = mem.alignBackward(u32, sym.value, alignment) == sym.value;
- const need_realloc = !align_ok or new_atom_size > atom.capacity(coff);
- if (!need_realloc) return sym.value;
- return coff.allocateAtom(atom_index, new_atom_size, alignment);
-}
-
-fn shrinkAtom(coff: *Coff, atom_index: Atom.Index, new_block_size: u32) void {
- _ = coff;
- _ = atom_index;
- _ = new_block_size;
- // TODO check the new capacity, and if it crosses the size threshold into a big enough
- // capacity, insert a free list node for it.
-}
-
-fn writeAtom(coff: *Coff, atom_index: Atom.Index, code: []u8, resolve_relocs: bool) !void {
- const atom = coff.getAtom(atom_index);
- const sym = atom.getSymbol(coff);
- const section = coff.sections.get(@intFromEnum(sym.section_number) - 1);
- const file_offset = section.header.pointer_to_raw_data + sym.value - section.header.virtual_address;
-
- log.debug("writing atom for symbol {s} at file offset 0x{x} to 0x{x}", .{
- atom.getName(coff),
- file_offset,
- file_offset + code.len,
- });
-
- const gpa = coff.base.comp.gpa;
-
- // Gather relocs which can be resolved.
-    // We need to do this as we will be applying different slide values depending
-    // on whether we are running in hot-code swapping mode or not.
- // TODO: how crazy would it be to try and apply the actual image base of the loaded
- // process for the in-file values rather than the Windows defaults?
- var relocs = std.array_list.Managed(*Relocation).init(gpa);
- defer relocs.deinit();
-
- if (resolve_relocs) {
- if (coff.relocs.getPtr(atom_index)) |rels| {
- try relocs.ensureTotalCapacityPrecise(rels.items.len);
- for (rels.items) |*reloc| {
- if (reloc.isResolvable(coff) and reloc.dirty) {
- relocs.appendAssumeCapacity(reloc);
- }
- }
- }
- }
-
- if (is_hot_update_compatible) {
- if (coff.base.child_pid) |handle| {
- const slide = @intFromPtr(coff.hot_state.loaded_base_address.?);
-
- const mem_code = try gpa.dupe(u8, code);
- defer gpa.free(mem_code);
- coff.resolveRelocs(atom_index, relocs.items, mem_code, slide);
-
- const vaddr = sym.value + slide;
- const pvaddr = @as(*anyopaque, @ptrFromInt(vaddr));
-
- log.debug("writing to memory at address {x}", .{vaddr});
-
- if (build_options.enable_logging) {
- try debugMem(gpa, handle, pvaddr, mem_code);
- }
-
- if (section.header.flags.MEM_WRITE == 0) {
- writeMemProtected(handle, pvaddr, mem_code) catch |err| {
- log.warn("writing to protected memory failed with error: {s}", .{@errorName(err)});
- };
- } else {
- writeMem(handle, pvaddr, mem_code) catch |err| {
-                    log.warn("writing to memory failed with error: {s}", .{@errorName(err)});
- };
- }
- }
- }
-
- if (resolve_relocs) {
- coff.resolveRelocs(atom_index, relocs.items, code, coff.image_base);
- }
- try coff.pwriteAll(code, file_offset);
- if (resolve_relocs) {
- // Now we can mark the relocs as resolved.
- while (relocs.pop()) |reloc| {
- reloc.dirty = false;
- }
- }
-}
-
-fn debugMem(allocator: Allocator, handle: std.process.Child.Id, pvaddr: std.os.windows.LPVOID, code: []const u8) !void {
- const buffer = try allocator.alloc(u8, code.len);
- defer allocator.free(buffer);
- const memread = try std.os.windows.ReadProcessMemory(handle, pvaddr, buffer);
- log.debug("to write: {x}", .{code});
- log.debug("in memory: {x}", .{memread});
-}
-
-fn writeMemProtected(handle: std.process.Child.Id, pvaddr: std.os.windows.LPVOID, code: []const u8) !void {
- const old_prot = try std.os.windows.VirtualProtectEx(handle, pvaddr, code.len, std.os.windows.PAGE_EXECUTE_WRITECOPY);
- try writeMem(handle, pvaddr, code);
- // TODO: We can probably just set the pages writeable and leave it at that without having to restore the attributes.
- // For that though, we want to track which page has already been modified.
- _ = try std.os.windows.VirtualProtectEx(handle, pvaddr, code.len, old_prot);
-}
-
-fn writeMem(handle: std.process.Child.Id, pvaddr: std.os.windows.LPVOID, code: []const u8) !void {
- const amt = try std.os.windows.WriteProcessMemory(handle, pvaddr, code);
- if (amt != code.len) return error.InputOutput;
-}
-
-fn writeOffsetTableEntry(coff: *Coff, index: usize) !void {
- const sect_id = coff.got_section_index.?;
-
- if (coff.got_table_count_dirty) {
- const needed_size: u32 = @intCast(coff.got_table.entries.items.len * coff.ptr_width.size());
- try coff.growSection(sect_id, needed_size);
- coff.got_table_count_dirty = false;
- }
-
- const header = &coff.sections.items(.header)[sect_id];
- const entry = coff.got_table.entries.items[index];
- const entry_value = coff.getSymbol(entry).value;
- const entry_offset = index * coff.ptr_width.size();
- const file_offset = header.pointer_to_raw_data + entry_offset;
- const vmaddr = header.virtual_address + entry_offset;
-
- log.debug("writing GOT entry {d}: @{x} => {x}", .{ index, vmaddr, entry_value + coff.image_base });
-
- switch (coff.ptr_width) {
- .p32 => {
- var buf: [4]u8 = undefined;
- mem.writeInt(u32, &buf, @intCast(entry_value + coff.image_base), .little);
- try coff.base.file.?.pwriteAll(&buf, file_offset);
- },
- .p64 => {
- var buf: [8]u8 = undefined;
- mem.writeInt(u64, &buf, entry_value + coff.image_base, .little);
- try coff.base.file.?.pwriteAll(&buf, file_offset);
- },
- }
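-
-    // For example (illustrative values): in .p64 mode with image_base 0x140000000
-    // and an entry_value (RVA) of 0x2000, the bytes written above are the
-    // little-endian encoding of 0x140002000: 00 20 00 40 01 00 00 00.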
-
- if (is_hot_update_compatible) {
- if (coff.base.child_pid) |handle| {
- const gpa = coff.base.comp.gpa;
- const slide = @intFromPtr(coff.hot_state.loaded_base_address.?);
- const actual_vmaddr = vmaddr + slide;
- const pvaddr = @as(*anyopaque, @ptrFromInt(actual_vmaddr));
- log.debug("writing GOT entry to memory at address {x}", .{actual_vmaddr});
- if (build_options.enable_logging) {
- switch (coff.ptr_width) {
- .p32 => {
- var buf: [4]u8 = undefined;
- try debugMem(gpa, handle, pvaddr, &buf);
- },
- .p64 => {
- var buf: [8]u8 = undefined;
- try debugMem(gpa, handle, pvaddr, &buf);
- },
- }
- }
-
- switch (coff.ptr_width) {
- .p32 => {
- var buf: [4]u8 = undefined;
- mem.writeInt(u32, &buf, @as(u32, @intCast(entry_value + slide)), .little);
- writeMem(handle, pvaddr, &buf) catch |err| {
- log.warn("writing to protected memory failed with error: {s}", .{@errorName(err)});
- };
- },
- .p64 => {
- var buf: [8]u8 = undefined;
- mem.writeInt(u64, &buf, entry_value + slide, .little);
- writeMem(handle, pvaddr, &buf) catch |err| {
- log.warn("writing to protected memory failed with error: {s}", .{@errorName(err)});
- };
- },
- }
- }
- }
-}
-
-fn markRelocsDirtyByTarget(coff: *Coff, target: SymbolWithLoc) void {
- if (!coff.base.comp.config.incremental) return;
- // TODO: reverse-lookup might come in handy here
- for (coff.relocs.values()) |*relocs| {
- for (relocs.items) |*reloc| {
- if (!reloc.target.eql(target)) continue;
- reloc.dirty = true;
- }
- }
-}
-
-fn markRelocsDirtyByAddress(coff: *Coff, addr: u32) void {
- if (!coff.base.comp.config.incremental) return;
- const got_moved = blk: {
- const sect_id = coff.got_section_index orelse break :blk false;
- break :blk coff.sections.items(.header)[sect_id].virtual_address >= addr;
- };
-
- // TODO: dirty relocations targeting import table if that got moved in memory
-
- for (coff.relocs.values()) |*relocs| {
- for (relocs.items) |*reloc| {
- if (reloc.isGotIndirection()) {
- reloc.dirty = reloc.dirty or got_moved;
- } else {
- const target_vaddr = reloc.getTargetAddress(coff) orelse continue;
- if (target_vaddr >= addr) reloc.dirty = true;
- }
- }
- }
-
- // TODO: dirty only really affected GOT cells
- for (coff.got_table.entries.items) |entry| {
- const target_addr = coff.getSymbol(entry).value;
- if (target_addr >= addr) {
- coff.got_table_contents_dirty = true;
- break;
- }
- }
-}
-
-fn resolveRelocs(coff: *Coff, atom_index: Atom.Index, relocs: []const *const Relocation, code: []u8, image_base: u64) void {
- log.debug("relocating '{s}'", .{coff.getAtom(atom_index).getName(coff)});
- for (relocs) |reloc| {
- reloc.resolve(atom_index, code, image_base, coff);
- }
-}
-
-pub fn ptraceAttach(coff: *Coff, handle: std.process.Child.Id) !void {
- if (!is_hot_update_compatible) return;
-
- log.debug("attaching to process with handle {*}", .{handle});
- coff.hot_state.loaded_base_address = std.os.windows.ProcessBaseAddress(handle) catch |err| {
- log.warn("failed to get base address for the process with error: {s}", .{@errorName(err)});
- return;
- };
-}
-
-pub fn ptraceDetach(coff: *Coff, handle: std.process.Child.Id) void {
- if (!is_hot_update_compatible) return;
-
- log.debug("detaching from process with handle {*}", .{handle});
- coff.hot_state.loaded_base_address = null;
-}
-
-fn freeAtom(coff: *Coff, atom_index: Atom.Index) void {
- log.debug("freeAtom {d}", .{atom_index});
-
- const gpa = coff.base.comp.gpa;
-
- // Remove any relocs and base relocs associated with this Atom
- coff.freeRelocations(atom_index);
-
- const atom = coff.getAtom(atom_index);
- const sym = atom.getSymbol(coff);
- const sect_id = @intFromEnum(sym.section_number) - 1;
- const free_list = &coff.sections.items(.free_list)[sect_id];
- var already_have_free_list_node = false;
- {
- var i: usize = 0;
- // TODO turn free_list into a hash map
- while (i < free_list.items.len) {
- if (free_list.items[i] == atom_index) {
- _ = free_list.swapRemove(i);
- continue;
- }
- if (free_list.items[i] == atom.prev_index) {
- already_have_free_list_node = true;
- }
- i += 1;
- }
- }
-
- const maybe_last_atom_index = &coff.sections.items(.last_atom_index)[sect_id];
- if (maybe_last_atom_index.*) |last_atom_index| {
- if (last_atom_index == atom_index) {
- if (atom.prev_index) |prev_index| {
- // TODO shrink the section size here
- maybe_last_atom_index.* = prev_index;
- } else {
- maybe_last_atom_index.* = null;
- }
- }
- }
-
- if (atom.prev_index) |prev_index| {
- const prev = coff.getAtomPtr(prev_index);
- prev.next_index = atom.next_index;
-
- if (!already_have_free_list_node and prev.*.freeListEligible(coff)) {
-            // The free list is a heuristic; it doesn't have to be perfect, so we can
-            // ignore OOM here.
- free_list.append(gpa, prev_index) catch {};
- }
- } else {
- coff.getAtomPtr(atom_index).prev_index = null;
- }
-
- if (atom.next_index) |next_index| {
- coff.getAtomPtr(next_index).prev_index = atom.prev_index;
- } else {
- coff.getAtomPtr(atom_index).next_index = null;
- }
-
-    // Appending to free lists is allowed to fail because the free lists are heuristic-based anyway.
- const sym_index = atom.getSymbolIndex().?;
- coff.locals_free_list.append(gpa, sym_index) catch {};
-
- // Try freeing GOT atom if this decl had one
- coff.got_table.freeEntry(gpa, .{ .sym_index = sym_index });
-
- coff.locals.items[sym_index].section_number = .UNDEFINED;
- _ = coff.atom_by_index_table.remove(sym_index);
- log.debug(" adding local symbol index {d} to free list", .{sym_index});
- coff.getAtomPtr(atom_index).sym_index = 0;
-}
-
-pub fn updateFunc(
- coff: *Coff,
- pt: Zcu.PerThread,
- func_index: InternPool.Index,
- mir: *const codegen.AnyMir,
-) link.File.UpdateNavError!void {
- if (build_options.skip_non_native and builtin.object_format != .coff) {
- @panic("Attempted to compile for object format that was disabled by build configuration");
- }
- const tracy = trace(@src());
- defer tracy.end();
-
- const zcu = pt.zcu;
- const gpa = zcu.gpa;
- const func = zcu.funcInfo(func_index);
- const nav_index = func.owner_nav;
-
- const atom_index = try coff.getOrCreateAtomForNav(nav_index);
- coff.freeRelocations(atom_index);
-
- coff.navs.getPtr(func.owner_nav).?.section = coff.text_section_index.?;
-
- var aw: std.Io.Writer.Allocating = .init(gpa);
- defer aw.deinit();
-
- codegen.emitFunction(
- &coff.base,
- pt,
- zcu.navSrcLoc(nav_index),
- func_index,
- coff.getAtom(atom_index).getSymbolIndex().?,
- mir,
- &aw.writer,
- .none,
- ) catch |err| switch (err) {
- error.WriteFailed => return error.OutOfMemory,
- else => |e| return e,
- };
-
- try coff.updateNavCode(pt, nav_index, aw.written(), .FUNCTION);
-
- // Exports will be updated by `Zcu.processExports` after the update.
-}
-
-const LowerConstResult = union(enum) {
- ok: Atom.Index,
- fail: *Zcu.ErrorMsg,
-};
-
-fn lowerConst(
- coff: *Coff,
- pt: Zcu.PerThread,
- name: []const u8,
- val: Value,
- required_alignment: InternPool.Alignment,
- sect_id: u16,
- src_loc: Zcu.LazySrcLoc,
-) !LowerConstResult {
- const gpa = coff.base.comp.gpa;
-
- var aw: std.Io.Writer.Allocating = .init(gpa);
- defer aw.deinit();
-
- const atom_index = try coff.createAtom();
- const sym = coff.getAtom(atom_index).getSymbolPtr(coff);
- try coff.setSymbolName(sym, name);
- sym.section_number = @as(coff_util.SectionNumber, @enumFromInt(sect_id + 1));
-
- try codegen.generateSymbol(&coff.base, pt, src_loc, val, &aw.writer, .{
- .atom_index = coff.getAtom(atom_index).getSymbolIndex().?,
- });
- const code = aw.written();
-
- const atom = coff.getAtomPtr(atom_index);
- atom.size = @intCast(code.len);
- atom.getSymbolPtr(coff).value = try coff.allocateAtom(
- atom_index,
- atom.size,
- @intCast(required_alignment.toByteUnits().?),
- );
- errdefer coff.freeAtom(atom_index);
-
- log.debug("allocated atom for {s} at 0x{x}", .{ name, atom.getSymbol(coff).value });
- log.debug(" (required alignment 0x{x})", .{required_alignment});
-
- try coff.writeAtom(atom_index, code, coff.base.comp.config.incremental);
-
- return .{ .ok = atom_index };
-}
-
-pub fn updateNav(
- coff: *Coff,
- pt: Zcu.PerThread,
- nav_index: InternPool.Nav.Index,
-) link.File.UpdateNavError!void {
- if (build_options.skip_non_native and builtin.object_format != .coff) {
- @panic("Attempted to compile for object format that was disabled by build configuration");
- }
- const tracy = trace(@src());
- defer tracy.end();
-
- const zcu = pt.zcu;
- const gpa = zcu.gpa;
- const ip = &zcu.intern_pool;
- const nav = ip.getNav(nav_index);
-
- const nav_val = zcu.navValue(nav_index);
- const nav_init = switch (ip.indexToKey(nav_val.toIntern())) {
- .func => return,
- .variable => |variable| Value.fromInterned(variable.init),
- .@"extern" => |@"extern"| {
- if (ip.isFunctionType(@"extern".ty)) return;
- // TODO make this part of getGlobalSymbol
- const name = nav.name.toSlice(ip);
- const lib_name = @"extern".lib_name.toSlice(ip);
- const global_index = try coff.getGlobalSymbol(name, lib_name);
- try coff.need_got_table.put(gpa, global_index, {});
- return;
- },
- else => nav_val,
- };
-
- if (nav_init.typeOf(zcu).hasRuntimeBits(zcu)) {
- const atom_index = try coff.getOrCreateAtomForNav(nav_index);
- coff.freeRelocations(atom_index);
- const atom = coff.getAtom(atom_index);
-
- coff.navs.getPtr(nav_index).?.section = coff.getNavOutputSection(nav_index);
-
- var aw: std.Io.Writer.Allocating = .init(gpa);
- defer aw.deinit();
-
- codegen.generateSymbol(
- &coff.base,
- pt,
- zcu.navSrcLoc(nav_index),
- nav_init,
- &aw.writer,
- .{ .atom_index = atom.getSymbolIndex().? },
- ) catch |err| switch (err) {
- error.WriteFailed => return error.OutOfMemory,
- else => |e| return e,
- };
-
- try coff.updateNavCode(pt, nav_index, aw.written(), .NULL);
- }
-
- // Exports will be updated by `Zcu.processExports` after the update.
-}
-
-fn updateLazySymbolAtom(
- coff: *Coff,
- pt: Zcu.PerThread,
- sym: link.File.LazySymbol,
- atom_index: Atom.Index,
- section_index: u16,
-) !void {
- const zcu = pt.zcu;
- const comp = coff.base.comp;
- const gpa = comp.gpa;
-
- var required_alignment: InternPool.Alignment = .none;
- var aw: std.Io.Writer.Allocating = .init(gpa);
- defer aw.deinit();
-
- const name = try allocPrint(gpa, "__lazy_{s}_{f}", .{
- @tagName(sym.kind),
- Type.fromInterned(sym.ty).fmt(pt),
- });
- defer gpa.free(name);
-
- const local_sym_index = coff.getAtomPtr(atom_index).getSymbolIndex().?;
-
- const src = Type.fromInterned(sym.ty).srcLocOrNull(zcu) orelse Zcu.LazySrcLoc.unneeded;
- try codegen.generateLazySymbol(
- &coff.base,
- pt,
- src,
- sym,
- &required_alignment,
- &aw.writer,
- .none,
- .{ .atom_index = local_sym_index },
- );
- const code = aw.written();
-
- const atom = coff.getAtomPtr(atom_index);
- const symbol = atom.getSymbolPtr(coff);
- try coff.setSymbolName(symbol, name);
- symbol.section_number = @enumFromInt(section_index + 1);
- symbol.type = .{ .complex_type = .NULL, .base_type = .NULL };
-
- const code_len: u32 = @intCast(code.len);
- const vaddr = try coff.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits() orelse 0));
- errdefer coff.freeAtom(atom_index);
-
- log.debug("allocated atom for {s} at 0x{x}", .{ name, vaddr });
- log.debug(" (required alignment 0x{x})", .{required_alignment});
-
- atom.size = code_len;
- symbol.value = vaddr;
-
- try coff.addGotEntry(.{ .sym_index = local_sym_index });
- try coff.writeAtom(atom_index, code, coff.base.comp.config.incremental);
-}
-
-pub fn getOrCreateAtomForLazySymbol(
- coff: *Coff,
- pt: Zcu.PerThread,
- lazy_sym: link.File.LazySymbol,
-) !Atom.Index {
- const gop = try coff.lazy_syms.getOrPut(pt.zcu.gpa, lazy_sym.ty);
- errdefer _ = if (!gop.found_existing) coff.lazy_syms.pop();
- if (!gop.found_existing) gop.value_ptr.* = .{};
- const atom_ptr, const state_ptr = switch (lazy_sym.kind) {
- .code => .{ &gop.value_ptr.text_atom, &gop.value_ptr.text_state },
- .const_data => .{ &gop.value_ptr.rdata_atom, &gop.value_ptr.rdata_state },
- };
- switch (state_ptr.*) {
- .unused => atom_ptr.* = try coff.createAtom(),
- .pending_flush => return atom_ptr.*,
- .flushed => {},
- }
- state_ptr.* = .pending_flush;
- const atom = atom_ptr.*;
- // anyerror needs to be deferred until flush
- if (lazy_sym.ty != .anyerror_type) try coff.updateLazySymbolAtom(pt, lazy_sym, atom, switch (lazy_sym.kind) {
- .code => coff.text_section_index.?,
- .const_data => coff.rdata_section_index.?,
- });
- return atom;
-}
-
-pub fn getOrCreateAtomForNav(coff: *Coff, nav_index: InternPool.Nav.Index) !Atom.Index {
- const gpa = coff.base.comp.gpa;
- const gop = try coff.navs.getOrPut(gpa, nav_index);
- if (!gop.found_existing) {
- gop.value_ptr.* = .{
- .atom = try coff.createAtom(),
- // If necessary, this will be modified by `updateNav` or `updateFunc`.
- .section = coff.rdata_section_index.?,
- .exports = .{},
- };
- }
- return gop.value_ptr.atom;
-}
-
-fn getNavOutputSection(coff: *Coff, nav_index: InternPool.Nav.Index) u16 {
- const zcu = coff.base.comp.zcu.?;
- const ip = &zcu.intern_pool;
- const nav = ip.getNav(nav_index);
- const ty = Type.fromInterned(nav.typeOf(ip));
- const zig_ty = ty.zigTypeTag(zcu);
- const val = Value.fromInterned(nav.status.fully_resolved.val);
- const index: u16 = blk: {
- if (val.isUndef(zcu)) {
- // TODO in release-fast and release-small, we should put undef in .bss
- break :blk coff.data_section_index.?;
- }
-
- switch (zig_ty) {
- // TODO: what if this is a function pointer?
- .@"fn" => break :blk coff.text_section_index.?,
- else => {
- if (val.getVariable(zcu)) |_| {
- break :blk coff.data_section_index.?;
- }
- break :blk coff.rdata_section_index.?;
- },
- }
- };
- return index;
-}
-
-fn updateNavCode(
- coff: *Coff,
- pt: Zcu.PerThread,
- nav_index: InternPool.Nav.Index,
- code: []u8,
- complex_type: coff_util.ComplexType,
-) link.File.UpdateNavError!void {
- const zcu = pt.zcu;
- const ip = &zcu.intern_pool;
- const nav = ip.getNav(nav_index);
-
- log.debug("updateNavCode {f} 0x{x}", .{ nav.fqn.fmt(ip), nav_index });
-
- const mod = zcu.navFileScope(nav_index).mod.?;
- const target = &mod.resolved_target.result;
- const required_alignment = switch (nav.status.fully_resolved.alignment) {
- .none => switch (mod.optimize_mode) {
- .Debug, .ReleaseSafe, .ReleaseFast => target_util.defaultFunctionAlignment(target),
- .ReleaseSmall => target_util.minFunctionAlignment(target),
- },
- else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
- };
-
- const nav_metadata = coff.navs.get(nav_index).?;
- const atom_index = nav_metadata.atom;
- const atom = coff.getAtom(atom_index);
- const sym_index = atom.getSymbolIndex().?;
- const sect_index = nav_metadata.section;
- const code_len: u32 = @intCast(code.len);
-
- if (atom.size != 0) {
- const sym = atom.getSymbolPtr(coff);
- try coff.setSymbolName(sym, nav.fqn.toSlice(ip));
- sym.section_number = @enumFromInt(sect_index + 1);
- sym.type = .{ .complex_type = complex_type, .base_type = .NULL };
-
- const capacity = atom.capacity(coff);
- const need_realloc = code.len > capacity or !required_alignment.check(sym.value);
- if (need_realloc) {
- const vaddr = coff.growAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits() orelse 0)) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- else => |e| return coff.base.cgFail(nav_index, "failed to grow atom: {s}", .{@errorName(e)}),
- };
- log.debug("growing {f} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), sym.value, vaddr });
-            log.debug(" (required alignment 0x{x})", .{required_alignment});
-
- if (vaddr != sym.value) {
- sym.value = vaddr;
- log.debug(" (updating GOT entry)", .{});
- const got_entry_index = coff.got_table.lookup.get(.{ .sym_index = sym_index }).?;
- coff.writeOffsetTableEntry(got_entry_index) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- else => |e| return coff.base.cgFail(nav_index, "failed to write offset table entry: {s}", .{@errorName(e)}),
- };
- coff.markRelocsDirtyByTarget(.{ .sym_index = sym_index });
- }
- } else if (code_len < atom.size) {
- coff.shrinkAtom(atom_index, code_len);
- }
- coff.getAtomPtr(atom_index).size = code_len;
- } else {
- const sym = atom.getSymbolPtr(coff);
- try coff.setSymbolName(sym, nav.fqn.toSlice(ip));
- sym.section_number = @enumFromInt(sect_index + 1);
- sym.type = .{ .complex_type = complex_type, .base_type = .NULL };
-
- const vaddr = coff.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits() orelse 0)) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- else => |e| return coff.base.cgFail(nav_index, "failed to allocate atom: {s}", .{@errorName(e)}),
- };
- errdefer coff.freeAtom(atom_index);
- log.debug("allocated atom for {f} at 0x{x}", .{ nav.fqn.fmt(ip), vaddr });
- coff.getAtomPtr(atom_index).size = code_len;
- sym.value = vaddr;
-
- coff.addGotEntry(.{ .sym_index = sym_index }) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- else => |e| return coff.base.cgFail(nav_index, "failed to add GOT entry: {s}", .{@errorName(e)}),
- };
- }
-
- coff.writeAtom(atom_index, code, coff.base.comp.config.incremental) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- else => |e| return coff.base.cgFail(nav_index, "failed to write atom: {s}", .{@errorName(e)}),
- };
-}
-
-pub fn freeNav(coff: *Coff, nav_index: InternPool.Nav.Index) void {
-    const gpa = coff.base.comp.gpa;
-
-    if (coff.navs.fetchOrderedRemove(nav_index)) |const_kv| {
- var kv = const_kv;
- coff.freeAtom(kv.value.atom);
- kv.value.exports.deinit(gpa);
- }
-}
-
-pub fn updateExports(
- coff: *Coff,
- pt: Zcu.PerThread,
- exported: Zcu.Exported,
- export_indices: []const Zcu.Export.Index,
-) link.File.UpdateExportsError!void {
- if (build_options.skip_non_native and builtin.object_format != .coff) {
- @panic("Attempted to compile for object format that was disabled by build configuration");
- }
-
- const zcu = pt.zcu;
- const gpa = zcu.gpa;
-
- const metadata = switch (exported) {
- .nav => |nav| blk: {
- _ = try coff.getOrCreateAtomForNav(nav);
- break :blk coff.navs.getPtr(nav).?;
- },
- .uav => |uav| coff.uavs.getPtr(uav) orelse blk: {
- const first_exp = export_indices[0].ptr(zcu);
- const res = try coff.lowerUav(pt, uav, .none, first_exp.src);
- switch (res) {
- .sym_index => {},
- .fail => |em| {
- // TODO maybe it's enough to return an error here and let Module.processExportsInner
- // handle the error?
- try zcu.failed_exports.ensureUnusedCapacity(zcu.gpa, 1);
- zcu.failed_exports.putAssumeCapacityNoClobber(export_indices[0], em);
- return;
- },
- }
- break :blk coff.uavs.getPtr(uav).?;
- },
- };
- const atom_index = metadata.atom;
- const atom = coff.getAtom(atom_index);
-
- for (export_indices) |export_idx| {
- const exp = export_idx.ptr(zcu);
- log.debug("adding new export '{f}'", .{exp.opts.name.fmt(&zcu.intern_pool)});
-
- if (exp.opts.section.toSlice(&zcu.intern_pool)) |section_name| {
- if (!mem.eql(u8, section_name, ".text")) {
- try zcu.failed_exports.putNoClobber(gpa, export_idx, try Zcu.ErrorMsg.create(
- gpa,
- exp.src,
- "Unimplemented: ExportOptions.section",
- .{},
- ));
- continue;
- }
- }
-
- if (exp.opts.linkage == .link_once) {
- try zcu.failed_exports.putNoClobber(gpa, export_idx, try Zcu.ErrorMsg.create(
- gpa,
- exp.src,
- "Unimplemented: GlobalLinkage.link_once",
- .{},
- ));
- continue;
- }
-
- const exp_name = exp.opts.name.toSlice(&zcu.intern_pool);
- const sym_index = metadata.getExport(coff, exp_name) orelse blk: {
- const sym_index = if (coff.getGlobalIndex(exp_name)) |global_index| ind: {
- const global = coff.globals.items[global_index];
- // TODO this is just plain wrong as it all should happen in a single `resolveSymbols`
-                // pass. This will go away once we abstract away Zig's incremental compilation into
- // its own module.
- if (global.file == null and coff.getSymbol(global).section_number == .UNDEFINED) {
- _ = coff.unresolved.swapRemove(global_index);
- break :ind global.sym_index;
- }
- break :ind try coff.allocateSymbol();
- } else try coff.allocateSymbol();
- try metadata.exports.append(gpa, sym_index);
- break :blk sym_index;
- };
- const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
- const sym = coff.getSymbolPtr(sym_loc);
- try coff.setSymbolName(sym, exp_name);
- sym.value = atom.getSymbol(coff).value;
- sym.section_number = @as(coff_util.SectionNumber, @enumFromInt(metadata.section + 1));
- sym.type = atom.getSymbol(coff).type;
-
- sym.storage_class = switch (exp.opts.linkage) {
- .internal => .EXTERNAL,
- .strong => .EXTERNAL,
- .weak => @panic("TODO WeakExternal"),
- else => unreachable,
- };
-
- try coff.resolveGlobalSymbol(sym_loc);
- }
-}
-
-pub fn deleteExport(
- coff: *Coff,
- exported: Zcu.Exported,
- name: InternPool.NullTerminatedString,
-) void {
- const metadata = switch (exported) {
- .nav => |nav| coff.navs.getPtr(nav),
- .uav => |uav| coff.uavs.getPtr(uav),
- } orelse return;
- const zcu = coff.base.comp.zcu.?;
- const name_slice = name.toSlice(&zcu.intern_pool);
- const sym_index = metadata.getExportPtr(coff, name_slice) orelse return;
-
- const gpa = coff.base.comp.gpa;
- const sym_loc = SymbolWithLoc{ .sym_index = sym_index.*, .file = null };
- const sym = coff.getSymbolPtr(sym_loc);
- log.debug("deleting export '{f}'", .{name.fmt(&zcu.intern_pool)});
- assert(sym.storage_class == .EXTERNAL and sym.section_number != .UNDEFINED);
- sym.* = .{
- .name = [_]u8{0} ** 8,
- .value = 0,
- .section_number = .UNDEFINED,
- .type = .{ .base_type = .NULL, .complex_type = .NULL },
- .storage_class = .NULL,
- .number_of_aux_symbols = 0,
- };
- coff.locals_free_list.append(gpa, sym_index.*) catch {};
-
- if (coff.resolver.fetchRemove(name_slice)) |entry| {
- defer gpa.free(entry.key);
- coff.globals_free_list.append(gpa, entry.value) catch {};
- coff.globals.items[entry.value] = .{
- .sym_index = 0,
- .file = null,
- };
- }
-
- sym_index.* = 0;
-}
-
-fn resolveGlobalSymbol(coff: *Coff, current: SymbolWithLoc) !void {
- const gpa = coff.base.comp.gpa;
- const sym = coff.getSymbol(current);
- const sym_name = coff.getSymbolName(current);
-
- const gop = try coff.getOrPutGlobalPtr(sym_name);
- if (!gop.found_existing) {
- gop.value_ptr.* = current;
- if (sym.section_number == .UNDEFINED) {
- try coff.unresolved.putNoClobber(gpa, coff.getGlobalIndex(sym_name).?, false);
- }
- return;
- }
-
- log.debug("TODO finish resolveGlobalSymbols implementation", .{});
-
- if (sym.section_number == .UNDEFINED) return;
-
- _ = coff.unresolved.swapRemove(coff.getGlobalIndex(sym_name).?);
-
- gop.value_ptr.* = current;
-}
-
-pub fn flush(
- coff: *Coff,
- arena: Allocator,
- tid: Zcu.PerThread.Id,
- prog_node: std.Progress.Node,
-) link.File.FlushError!void {
- const tracy = trace(@src());
- defer tracy.end();
-
- const comp = coff.base.comp;
- const diags = &comp.link_diags;
-
- switch (coff.base.comp.config.output_mode) {
- .Exe, .Obj => {},
- .Lib => return diags.fail("writing lib files not yet implemented for COFF", .{}),
- }
-
- const sub_prog_node = prog_node.start("COFF Flush", 0);
- defer sub_prog_node.end();
-
- return flushInner(coff, arena, tid) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.LinkFailure => return error.LinkFailure,
- else => |e| return diags.fail("COFF flush failed: {s}", .{@errorName(e)}),
- };
-}
-
-fn flushInner(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id) !void {
- _ = arena;
-
- const comp = coff.base.comp;
- const gpa = comp.gpa;
- const diags = &comp.link_diags;
-
- const pt: Zcu.PerThread = .activate(
- comp.zcu orelse return diags.fail("linking without zig source is not yet implemented", .{}),
- tid,
- );
- defer pt.deactivate();
-
- if (coff.lazy_syms.getPtr(.anyerror_type)) |metadata| {
- // Most lazy symbols can be updated on first use, but
- // anyerror needs to wait for everything to be flushed.
- if (metadata.text_state != .unused) try coff.updateLazySymbolAtom(
- pt,
- .{ .kind = .code, .ty = .anyerror_type },
- metadata.text_atom,
- coff.text_section_index.?,
- );
- if (metadata.rdata_state != .unused) try coff.updateLazySymbolAtom(
- pt,
- .{ .kind = .const_data, .ty = .anyerror_type },
- metadata.rdata_atom,
- coff.rdata_section_index.?,
- );
- }
- for (coff.lazy_syms.values()) |*metadata| {
- if (metadata.text_state != .unused) metadata.text_state = .flushed;
- if (metadata.rdata_state != .unused) metadata.rdata_state = .flushed;
- }
-
- {
- var it = coff.need_got_table.iterator();
- while (it.next()) |entry| {
- const global = coff.globals.items[entry.key_ptr.*];
- try coff.addGotEntry(global);
- }
- }
-
- while (coff.unresolved.pop()) |entry| {
- assert(entry.value);
- const global = coff.globals.items[entry.key];
- const sym = coff.getSymbol(global);
- const res = try coff.import_tables.getOrPut(gpa, sym.value);
- const itable = res.value_ptr;
- if (!res.found_existing) {
- itable.* = .{};
- }
- if (itable.lookup.contains(global)) continue;
-        // TODO: we could technically write the pointer placeholder for the to-be-bound import here,
- // but since this happens in flush, there is currently no point.
- _ = try itable.addImport(gpa, global);
- coff.imports_count_dirty = true;
- }
-
- try coff.writeImportTables();
-
- for (coff.relocs.keys(), coff.relocs.values()) |atom_index, relocs| {
- const needs_update = for (relocs.items) |reloc| {
- if (reloc.dirty) break true;
- } else false;
-
- if (!needs_update) continue;
-
- const atom = coff.getAtom(atom_index);
- const sym = atom.getSymbol(coff);
- const section = coff.sections.get(@intFromEnum(sym.section_number) - 1).header;
- const file_offset = section.pointer_to_raw_data + sym.value - section.virtual_address;
-
- var code = std.array_list.Managed(u8).init(gpa);
- defer code.deinit();
- try code.resize(math.cast(usize, atom.size) orelse return error.Overflow);
- assert(atom.size > 0);
-
- const amt = try coff.base.file.?.preadAll(code.items, file_offset);
- if (amt != code.items.len) return error.InputOutput;
-
- try coff.writeAtom(atom_index, code.items, true);
- }
-
- // Update GOT if it got moved in memory.
- if (coff.got_table_contents_dirty) {
- for (coff.got_table.entries.items, 0..) |entry, i| {
- if (!coff.got_table.lookup.contains(entry)) continue;
- // TODO: write all in one go rather than incrementally.
- try coff.writeOffsetTableEntry(i);
- }
- coff.got_table_contents_dirty = false;
- }
-
- try coff.writeBaseRelocations();
-
- if (coff.getEntryPoint()) |entry_sym_loc| {
- coff.entry_addr = coff.getSymbol(entry_sym_loc).value;
- }
-
- if (build_options.enable_logging) {
- coff.logSymtab();
- coff.logImportTables();
- }
-
- try coff.writeStrtab();
- try coff.writeDataDirectoriesHeaders();
- try coff.writeSectionHeaders();
-
- if (coff.entry_addr == null and comp.config.output_mode == .Exe) {
- log.debug("flushing. no_entry_point_found = true\n", .{});
- diags.flags.no_entry_point_found = true;
- } else {
- log.debug("flushing. no_entry_point_found = false\n", .{});
- diags.flags.no_entry_point_found = false;
- try coff.writeHeader();
- }
-
- assert(!coff.imports_count_dirty);
-
- // hack for stage2_x86_64 + coff
- if (comp.compiler_rt_dyn_lib) |crt_file| {
- const compiler_rt_sub_path = try std.fs.path.join(gpa, &.{
- std.fs.path.dirname(coff.base.emit.sub_path) orelse "",
- std.fs.path.basename(crt_file.full_object_path.sub_path),
- });
- defer gpa.free(compiler_rt_sub_path);
- try crt_file.full_object_path.root_dir.handle.copyFile(
- crt_file.full_object_path.sub_path,
- coff.base.emit.root_dir.handle,
- compiler_rt_sub_path,
- .{},
- );
- }
-}
-
-pub fn getNavVAddr(
- coff: *Coff,
- pt: Zcu.PerThread,
- nav_index: InternPool.Nav.Index,
- reloc_info: link.File.RelocInfo,
-) !u64 {
- const zcu = pt.zcu;
- const ip = &zcu.intern_pool;
- const nav = ip.getNav(nav_index);
- log.debug("getNavVAddr {f}({d})", .{ nav.fqn.fmt(ip), nav_index });
- const sym_index = if (nav.getExtern(ip)) |e|
- try coff.getGlobalSymbol(nav.name.toSlice(ip), e.lib_name.toSlice(ip))
- else
- coff.getAtom(try coff.getOrCreateAtomForNav(nav_index)).getSymbolIndex().?;
- const atom_index = coff.getAtomIndexForSymbol(.{
- .sym_index = reloc_info.parent.atom_index,
- .file = null,
- }).?;
- const target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
- try coff.addRelocation(atom_index, .{
- .type = .direct,
- .target = target,
- .offset = @as(u32, @intCast(reloc_info.offset)),
- .addend = reloc_info.addend,
- .pcrel = false,
- .length = 3,
- });
- try coff.addBaseRelocation(atom_index, @as(u32, @intCast(reloc_info.offset)));
-
- return 0;
-}
-
-pub fn lowerUav(
- coff: *Coff,
- pt: Zcu.PerThread,
- uav: InternPool.Index,
- explicit_alignment: InternPool.Alignment,
- src_loc: Zcu.LazySrcLoc,
-) !codegen.SymbolResult {
- const zcu = pt.zcu;
- const gpa = zcu.gpa;
- const val = Value.fromInterned(uav);
- const uav_alignment = switch (explicit_alignment) {
- .none => val.typeOf(zcu).abiAlignment(zcu),
- else => explicit_alignment,
- };
- if (coff.uavs.get(uav)) |metadata| {
- const atom = coff.getAtom(metadata.atom);
- const existing_addr = atom.getSymbol(coff).value;
- if (uav_alignment.check(existing_addr))
- return .{ .sym_index = atom.getSymbolIndex().? };
- }
-
- var name_buf: [32]u8 = undefined;
- const name = std.fmt.bufPrint(&name_buf, "__anon_{d}", .{
- @intFromEnum(uav),
- }) catch unreachable;
- const res = coff.lowerConst(
- pt,
- name,
- val,
- uav_alignment,
- coff.rdata_section_index.?,
- src_loc,
- ) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- else => |e| return .{ .fail = try Zcu.ErrorMsg.create(
- gpa,
- src_loc,
- "lowerAnonDecl failed with error: {s}",
- .{@errorName(e)},
- ) },
- };
- const atom_index = switch (res) {
- .ok => |atom_index| atom_index,
- .fail => |em| return .{ .fail = em },
- };
- try coff.uavs.put(gpa, uav, .{
- .atom = atom_index,
- .section = coff.rdata_section_index.?,
- });
- return .{ .sym_index = coff.getAtom(atom_index).getSymbolIndex().? };
-}
-
-pub fn getUavVAddr(
- coff: *Coff,
- uav: InternPool.Index,
- reloc_info: link.File.RelocInfo,
-) !u64 {
- const this_atom_index = coff.uavs.get(uav).?.atom;
- const sym_index = coff.getAtom(this_atom_index).getSymbolIndex().?;
- const atom_index = coff.getAtomIndexForSymbol(.{
- .sym_index = reloc_info.parent.atom_index,
- .file = null,
- }).?;
- const target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
- try coff.addRelocation(atom_index, .{
- .type = .direct,
- .target = target,
- .offset = @as(u32, @intCast(reloc_info.offset)),
- .addend = reloc_info.addend,
- .pcrel = false,
- .length = 3,
- });
- try coff.addBaseRelocation(atom_index, @as(u32, @intCast(reloc_info.offset)));
-
- return 0;
-}
-
-pub fn getGlobalSymbol(coff: *Coff, name: []const u8, lib_name_name: ?[]const u8) !u32 {
- const gop = try coff.getOrPutGlobalPtr(name);
- const global_index = coff.getGlobalIndex(name).?;
-
- if (gop.found_existing) {
- return global_index;
- }
-
- const sym_index = try coff.allocateSymbol();
- const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
- gop.value_ptr.* = sym_loc;
-
- const gpa = coff.base.comp.gpa;
- const sym = coff.getSymbolPtr(sym_loc);
- try coff.setSymbolName(sym, name);
- sym.storage_class = .EXTERNAL;
-
- if (lib_name_name) |lib_name| {
- // We repurpose the 'value' of the Symbol struct to store an offset into
- // temporary string table where we will store the library name hint.
- sym.value = try coff.temp_strtab.insert(gpa, lib_name);
- }
-
- try coff.unresolved.putNoClobber(gpa, global_index, true);
-
- return global_index;
-}
-
-pub fn updateLineNumber(coff: *Coff, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) !void {
- _ = coff;
- _ = pt;
- _ = ti_id;
- log.debug("TODO implement updateLineNumber", .{});
-}
-
-/// TODO: note if we need to rewrite base relocations by dirtying any of the entries in the global table
-/// TODO: note that .ABSOLUTE is used as padding within each block; we could use this fact to do
-/// incremental updates and writes into the table instead of doing it all at once
-fn writeBaseRelocations(coff: *Coff) !void {
- const gpa = coff.base.comp.gpa;
-
- var page_table = std.AutoHashMap(u32, std.array_list.Managed(coff_util.BaseRelocation)).init(gpa);
- defer {
- var it = page_table.valueIterator();
- while (it.next()) |inner| {
- inner.deinit();
- }
- page_table.deinit();
- }
-
- {
- var it = coff.base_relocs.iterator();
- while (it.next()) |entry| {
- const atom_index = entry.key_ptr.*;
- const atom = coff.getAtom(atom_index);
- const sym = atom.getSymbol(coff);
- const offsets = entry.value_ptr.*;
-
- for (offsets.items) |offset| {
- const rva = sym.value + offset;
- const page = mem.alignBackward(u32, rva, coff.page_size);
- const gop = try page_table.getOrPut(page);
- if (!gop.found_existing) {
- gop.value_ptr.* = std.array_list.Managed(coff_util.BaseRelocation).init(gpa);
- }
- try gop.value_ptr.append(.{
- .offset = @as(u12, @intCast(rva - page)),
- .type = .DIR64,
- });
- }
- }
-
- {
- const header = &coff.sections.items(.header)[coff.got_section_index.?];
- for (coff.got_table.entries.items, 0..) |entry, index| {
- if (!coff.got_table.lookup.contains(entry)) continue;
-
- const sym = coff.getSymbol(entry);
- if (sym.section_number == .UNDEFINED) continue;
-
- const rva = @as(u32, @intCast(header.virtual_address + index * coff.ptr_width.size()));
- const page = mem.alignBackward(u32, rva, coff.page_size);
- const gop = try page_table.getOrPut(page);
- if (!gop.found_existing) {
- gop.value_ptr.* = std.array_list.Managed(coff_util.BaseRelocation).init(gpa);
- }
- try gop.value_ptr.append(.{
- .offset = @as(u12, @intCast(rva - page)),
- .type = .DIR64,
- });
- }
- }
- }
-
- // Sort pages by address.
- var pages = try std.array_list.Managed(u32).initCapacity(gpa, page_table.count());
- defer pages.deinit();
- {
- var it = page_table.keyIterator();
- while (it.next()) |page| {
- pages.appendAssumeCapacity(page.*);
- }
- }
- mem.sort(u32, pages.items, {}, std.sort.asc(u32));
-
- var buffer = std.array_list.Managed(u8).init(gpa);
- defer buffer.deinit();
-
- for (pages.items) |page| {
- const entries = page_table.getPtr(page).?;
- // Pad to the required 4-byte alignment
- if (!mem.isAlignedGeneric(
- usize,
- entries.items.len * @sizeOf(coff_util.BaseRelocation),
- @sizeOf(u32),
- )) {
- try entries.append(.{
- .offset = 0,
- .type = .ABSOLUTE,
- });
- }
-
- const block_size = @as(
- u32,
- @intCast(entries.items.len * @sizeOf(coff_util.BaseRelocation) + @sizeOf(coff_util.BaseRelocationDirectoryEntry)),
- );
- try buffer.ensureUnusedCapacity(block_size);
- buffer.appendSliceAssumeCapacity(mem.asBytes(&coff_util.BaseRelocationDirectoryEntry{
- .page_rva = page,
- .block_size = block_size,
- }));
- buffer.appendSliceAssumeCapacity(mem.sliceAsBytes(entries.items));
- }
-
- const header = &coff.sections.items(.header)[coff.reloc_section_index.?];
- const needed_size = @as(u32, @intCast(buffer.items.len));
- try coff.growSection(coff.reloc_section_index.?, needed_size);
-
- try coff.pwriteAll(buffer.items, header.pointer_to_raw_data);
-
- coff.data_directories[@intFromEnum(coff_util.DirectoryEntry.BASERELOC)] = .{
- .virtual_address = header.virtual_address,
- .size = needed_size,
- };
-}
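-
-// A worked example of the block layout emitted above (illustrative values,
-// not taken from a real image): a page at RVA 0x1000 with relocations at
-// RVAs 0x1008 and 0x1010 produces
-//
-//   BaseRelocationDirectoryEntry{ .page_rva = 0x1000, .block_size = 12 }
-//   BaseRelocation{ .offset = 0x008, .type = .DIR64 }
-//   BaseRelocation{ .offset = 0x010, .type = .DIR64 }
-//
-// block_size = 8-byte header + 2 entries * 2 bytes = 12, which is already
-// 4-byte aligned, so no .ABSOLUTE padding entry is appended.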
-
-fn writeImportTables(coff: *Coff) !void {
- if (coff.idata_section_index == null) return;
- if (!coff.imports_count_dirty) return;
-
- const gpa = coff.base.comp.gpa;
-
- const ext = ".dll";
- const header = &coff.sections.items(.header)[coff.idata_section_index.?];
-
- // Calculate needed size
- var iat_size: u32 = 0;
- var dir_table_size: u32 = @sizeOf(coff_util.ImportDirectoryEntry); // sentinel
- var lookup_table_size: u32 = 0;
- var names_table_size: u32 = 0;
- var dll_names_size: u32 = 0;
- for (coff.import_tables.keys(), 0..) |off, i| {
- const lib_name = coff.temp_strtab.getAssumeExists(off);
- const itable = coff.import_tables.values()[i];
- iat_size += itable.size() + 8;
- dir_table_size += @sizeOf(coff_util.ImportDirectoryEntry);
- lookup_table_size += @as(u32, @intCast(itable.entries.items.len + 1)) * @sizeOf(coff_util.ImportLookupEntry64.ByName);
- for (itable.entries.items) |entry| {
- const sym_name = coff.getSymbolName(entry);
- names_table_size += 2 + mem.alignForward(u32, @as(u32, @intCast(sym_name.len + 1)), 2);
- }
- dll_names_size += @as(u32, @intCast(lib_name.len + ext.len + 1));
- }
-
- const needed_size = iat_size + dir_table_size + lookup_table_size + names_table_size + dll_names_size;
- try coff.growSection(coff.idata_section_index.?, needed_size);
-
- // Do the actual writes
- var buffer = std.array_list.Managed(u8).init(gpa);
- defer buffer.deinit();
- try buffer.ensureTotalCapacityPrecise(needed_size);
- buffer.resize(needed_size) catch unreachable;
-
- const dir_header_size = @sizeOf(coff_util.ImportDirectoryEntry);
- const lookup_entry_size = @sizeOf(coff_util.ImportLookupEntry64.ByName);
-
- var iat_offset: u32 = 0;
- var dir_table_offset = iat_size;
- var lookup_table_offset = dir_table_offset + dir_table_size;
- var names_table_offset = lookup_table_offset + lookup_table_size;
- var dll_names_offset = names_table_offset + names_table_size;
- for (coff.import_tables.keys(), 0..) |off, i| {
- const lib_name = coff.temp_strtab.getAssumeExists(off);
- const itable = coff.import_tables.values()[i];
-
- // Lookup table header
- const lookup_header = coff_util.ImportDirectoryEntry{
- .import_lookup_table_rva = header.virtual_address + lookup_table_offset,
- .time_date_stamp = 0,
- .forwarder_chain = 0,
- .name_rva = header.virtual_address + dll_names_offset,
- .import_address_table_rva = header.virtual_address + iat_offset,
- };
- @memcpy(buffer.items[dir_table_offset..][0..@sizeOf(coff_util.ImportDirectoryEntry)], mem.asBytes(&lookup_header));
- dir_table_offset += dir_header_size;
-
- for (itable.entries.items) |entry| {
- const import_name = coff.getSymbolName(entry);
-
- // IAT and lookup table entry
- const lookup = coff_util.ImportLookupEntry64.ByName{ .name_table_rva = @as(u31, @intCast(header.virtual_address + names_table_offset)) };
- @memcpy(
- buffer.items[iat_offset..][0..@sizeOf(coff_util.ImportLookupEntry64.ByName)],
- mem.asBytes(&lookup),
- );
- iat_offset += lookup_entry_size;
- @memcpy(
- buffer.items[lookup_table_offset..][0..@sizeOf(coff_util.ImportLookupEntry64.ByName)],
- mem.asBytes(&lookup),
- );
- lookup_table_offset += lookup_entry_size;
-
- // Names table entry
- mem.writeInt(u16, buffer.items[names_table_offset..][0..2], 0, .little); // Hint set to 0 until we learn how to parse DLLs
- names_table_offset += 2;
- @memcpy(buffer.items[names_table_offset..][0..import_name.len], import_name);
- names_table_offset += @as(u32, @intCast(import_name.len));
- buffer.items[names_table_offset] = 0;
- names_table_offset += 1;
- if (!mem.isAlignedGeneric(usize, names_table_offset, @sizeOf(u16))) {
- buffer.items[names_table_offset] = 0;
- names_table_offset += 1;
- }
- }
-
- // IAT sentinel
- mem.writeInt(u64, buffer.items[iat_offset..][0..lookup_entry_size], 0, .little);
- iat_offset += 8;
-
- // Lookup table sentinel
- @memcpy(
- buffer.items[lookup_table_offset..][0..@sizeOf(coff_util.ImportLookupEntry64.ByName)],
- mem.asBytes(&coff_util.ImportLookupEntry64.ByName{ .name_table_rva = 0 }),
- );
- lookup_table_offset += lookup_entry_size;
-
- // DLL name
- @memcpy(buffer.items[dll_names_offset..][0..lib_name.len], lib_name);
- dll_names_offset += @as(u32, @intCast(lib_name.len));
- @memcpy(buffer.items[dll_names_offset..][0..ext.len], ext);
- dll_names_offset += @as(u32, @intCast(ext.len));
- buffer.items[dll_names_offset] = 0;
- dll_names_offset += 1;
- }
-
- // Sentinel
- const lookup_header = coff_util.ImportDirectoryEntry{
- .import_lookup_table_rva = 0,
- .time_date_stamp = 0,
- .forwarder_chain = 0,
- .name_rva = 0,
- .import_address_table_rva = 0,
- };
- @memcpy(
- buffer.items[dir_table_offset..][0..@sizeOf(coff_util.ImportDirectoryEntry)],
- mem.asBytes(&lookup_header),
- );
- dir_table_offset += dir_header_size;
-
- assert(dll_names_offset == needed_size);
-
- try coff.pwriteAll(buffer.items, header.pointer_to_raw_data);
-
- coff.data_directories[@intFromEnum(coff_util.DirectoryEntry.IMPORT)] = .{
- .virtual_address = header.virtual_address + iat_size,
- .size = dir_table_size,
- };
- coff.data_directories[@intFromEnum(coff_util.DirectoryEntry.IAT)] = .{
- .virtual_address = header.virtual_address,
- .size = iat_size,
- };
-
- coff.imports_count_dirty = false;
-}
-
-fn writeStrtab(coff: *Coff) !void {
- if (coff.strtab_offset == null) return;
-
- const comp = coff.base.comp;
- const gpa = comp.gpa;
- const diags = &comp.link_diags;
- const allocated_size = coff.allocatedSize(coff.strtab_offset.?);
- const needed_size: u32 = @intCast(coff.strtab.buffer.items.len);
-
- if (needed_size > allocated_size) {
- // Clear the stale offset first so findFreeSpace does not treat the old
- // strtab location as still occupied.
- coff.strtab_offset = null;
- coff.strtab_offset = @intCast(coff.findFreeSpace(needed_size, @alignOf(u32)));
- }
-
- log.debug("writing strtab from 0x{x} to 0x{x}", .{ coff.strtab_offset.?, coff.strtab_offset.? + needed_size });
-
- var buffer = std.array_list.Managed(u8).init(gpa);
- defer buffer.deinit();
- try buffer.ensureTotalCapacityPrecise(needed_size);
- buffer.appendSliceAssumeCapacity(coff.strtab.buffer.items);
- // Trick: we do not commit the size of the strtab to the strtab buffer itself;
- // instead, we write the strtab length into the temporary buffer that is flushed to the file.
- mem.writeInt(u32, buffer.items[0..4], @as(u32, @intCast(coff.strtab.buffer.items.len)), .little);
-
- coff.pwriteAll(buffer.items, coff.strtab_offset.?) catch |err| {
- return diags.fail("failed to write: {s}", .{@errorName(err)});
- };
-}
-
-fn writeSectionHeaders(coff: *Coff) !void {
- const offset = coff.getSectionHeadersOffset();
- try coff.pwriteAll(@ptrCast(coff.sections.items(.header)), offset);
-}
-
-fn writeDataDirectoriesHeaders(coff: *Coff) !void {
- const offset = coff.getDataDirectoryHeadersOffset();
- try coff.pwriteAll(@ptrCast(&coff.data_directories), offset);
-}
-
-fn writeHeader(coff: *Coff) !void {
- const target = &coff.base.comp.root_mod.resolved_target.result;
- const gpa = coff.base.comp.gpa;
- var buffer: std.Io.Writer.Allocating = .init(gpa);
- defer buffer.deinit();
- const writer = &buffer.writer;
-
- try buffer.ensureTotalCapacity(coff.getSizeOfHeaders());
- writer.writeAll(&msdos_stub) catch unreachable;
- mem.writeInt(u32, buffer.writer.buffer[0x3c..][0..4], msdos_stub.len, .little);
-
- writer.writeAll("PE\x00\x00") catch unreachable;
- var flags = coff_util.CoffHeaderFlags{
- .EXECUTABLE_IMAGE = 1,
- .DEBUG_STRIPPED = 1, // TODO
- };
- switch (coff.ptr_width) {
- .p32 => flags.@"32BIT_MACHINE" = 1,
- .p64 => flags.LARGE_ADDRESS_AWARE = 1,
- }
- if (coff.base.comp.config.output_mode == .Lib and coff.base.comp.config.link_mode == .dynamic) {
- flags.DLL = 1;
- }
-
- const timestamp = if (coff.repro) 0 else std.time.timestamp();
- const size_of_optional_header = @as(u16, @intCast(coff.getOptionalHeaderSize() + coff.getDataDirectoryHeadersSize()));
- var coff_header = coff_util.CoffHeader{
- .machine = target.toCoffMachine(),
- .number_of_sections = @as(u16, @intCast(coff.sections.slice().len)), // TODO what if we prune a section
- .time_date_stamp = @as(u32, @truncate(@as(u64, @bitCast(timestamp)))),
- .pointer_to_symbol_table = coff.strtab_offset orelse 0,
- .number_of_symbols = 0,
- .size_of_optional_header = size_of_optional_header,
- .flags = flags,
- };
-
- writer.writeAll(mem.asBytes(&coff_header)) catch unreachable;
-
- const dll_flags: coff_util.DllFlags = .{
- .HIGH_ENTROPY_VA = 1, // TODO do we want to permit non-PIE builds at all?
- .DYNAMIC_BASE = 1,
- .TERMINAL_SERVER_AWARE = 1, // We are not a legacy app
- .NX_COMPAT = 1, // We are compatible with Data Execution Prevention
- };
- const subsystem: coff_util.Subsystem = .WINDOWS_CUI;
- const size_of_image: u32 = coff.getSizeOfImage();
- const size_of_headers: u32 = mem.alignForward(u32, coff.getSizeOfHeaders(), default_file_alignment);
- const base_of_code = coff.sections.get(coff.text_section_index.?).header.virtual_address;
- const base_of_data = coff.sections.get(coff.data_section_index.?).header.virtual_address;
-
- var size_of_code: u32 = 0;
- var size_of_initialized_data: u32 = 0;
- var size_of_uninitialized_data: u32 = 0;
- for (coff.sections.items(.header)) |header| {
- if (header.flags.CNT_CODE == 1) {
- size_of_code += header.size_of_raw_data;
- }
- if (header.flags.CNT_INITIALIZED_DATA == 1) {
- size_of_initialized_data += header.size_of_raw_data;
- }
- if (header.flags.CNT_UNINITIALIZED_DATA == 1) {
- size_of_uninitialized_data += header.size_of_raw_data;
- }
- }
-
- switch (coff.ptr_width) {
- .p32 => {
- var opt_header = coff_util.OptionalHeaderPE32{
- .magic = coff_util.IMAGE_NT_OPTIONAL_HDR32_MAGIC,
- .major_linker_version = 0,
- .minor_linker_version = 0,
- .size_of_code = size_of_code,
- .size_of_initialized_data = size_of_initialized_data,
- .size_of_uninitialized_data = size_of_uninitialized_data,
- .address_of_entry_point = coff.entry_addr orelse 0,
- .base_of_code = base_of_code,
- .base_of_data = base_of_data,
- .image_base = @intCast(coff.image_base),
- .section_alignment = coff.page_size,
- .file_alignment = default_file_alignment,
- .major_operating_system_version = 6,
- .minor_operating_system_version = 0,
- .major_image_version = 0,
- .minor_image_version = 0,
- .major_subsystem_version = @intCast(coff.major_subsystem_version),
- .minor_subsystem_version = @intCast(coff.minor_subsystem_version),
- .win32_version_value = 0,
- .size_of_image = size_of_image,
- .size_of_headers = size_of_headers,
- .checksum = 0,
- .subsystem = subsystem,
- .dll_flags = dll_flags,
- .size_of_stack_reserve = default_size_of_stack_reserve,
- .size_of_stack_commit = default_size_of_stack_commit,
- .size_of_heap_reserve = default_size_of_heap_reserve,
- .size_of_heap_commit = default_size_of_heap_commit,
- .loader_flags = 0,
- .number_of_rva_and_sizes = @intCast(coff.data_directories.len),
- };
- writer.writeAll(mem.asBytes(&opt_header)) catch unreachable;
- },
- .p64 => {
- var opt_header = coff_util.OptionalHeaderPE64{
- .magic = coff_util.IMAGE_NT_OPTIONAL_HDR64_MAGIC,
- .major_linker_version = 0,
- .minor_linker_version = 0,
- .size_of_code = size_of_code,
- .size_of_initialized_data = size_of_initialized_data,
- .size_of_uninitialized_data = size_of_uninitialized_data,
- .address_of_entry_point = coff.entry_addr orelse 0,
- .base_of_code = base_of_code,
- .image_base = coff.image_base,
- .section_alignment = coff.page_size,
- .file_alignment = default_file_alignment,
- .major_operating_system_version = 6,
- .minor_operating_system_version = 0,
- .major_image_version = 0,
- .minor_image_version = 0,
- .major_subsystem_version = coff.major_subsystem_version,
- .minor_subsystem_version = coff.minor_subsystem_version,
- .win32_version_value = 0,
- .size_of_image = size_of_image,
- .size_of_headers = size_of_headers,
- .checksum = 0,
- .subsystem = subsystem,
- .dll_flags = dll_flags,
- .size_of_stack_reserve = default_size_of_stack_reserve,
- .size_of_stack_commit = default_size_of_stack_commit,
- .size_of_heap_reserve = default_size_of_heap_reserve,
- .size_of_heap_commit = default_size_of_heap_commit,
- .loader_flags = 0,
- .number_of_rva_and_sizes = @intCast(coff.data_directories.len),
- };
- writer.writeAll(mem.asBytes(&opt_header)) catch unreachable;
- },
- }
-
- try coff.pwriteAll(buffer.written(), 0);
-}
-
-pub fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
- return actual_size +| (actual_size / ideal_factor);
-}
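-
-// Illustrative arithmetic (the divisor `ideal_factor` is a constant defined
-// elsewhere in this file): with a hypothetical ideal_factor of 4,
-// padToIdeal(0x1000) returns 0x1000 +| 0x400 = 0x1400. The saturating `+|`
-// clamps at the maximum value of the operand type instead of overflowing.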
-
-fn detectAllocCollision(coff: *Coff, start: u32, size: u32) ?u32 {
- const headers_size = @max(coff.getSizeOfHeaders(), coff.page_size);
- if (start < headers_size)
- return headers_size;
-
- const end = start + padToIdeal(size);
-
- if (coff.strtab_offset) |off| {
- const tight_size = @as(u32, @intCast(coff.strtab.buffer.items.len));
- const increased_size = padToIdeal(tight_size);
- const test_end = off + increased_size;
- if (end > off and start < test_end) {
- return test_end;
- }
- }
-
- for (coff.sections.items(.header)) |header| {
- const tight_size = header.size_of_raw_data;
- const increased_size = padToIdeal(tight_size);
- const test_end = header.pointer_to_raw_data + increased_size;
- if (end > header.pointer_to_raw_data and start < test_end) {
- return test_end;
- }
- }
-
- return null;
-}
-
-fn allocatedSize(coff: *Coff, start: u32) u32 {
- if (start == 0)
- return 0;
- var min_pos: u32 = std.math.maxInt(u32);
- if (coff.strtab_offset) |off| {
- if (off > start and off < min_pos) min_pos = off;
- }
- for (coff.sections.items(.header)) |header| {
- if (header.pointer_to_raw_data <= start) continue;
- if (header.pointer_to_raw_data < min_pos) min_pos = header.pointer_to_raw_data;
- }
- return min_pos - start;
-}
-
-fn findFreeSpace(coff: *Coff, object_size: u32, min_alignment: u32) u32 {
- var start: u32 = 0;
- while (coff.detectAllocCollision(start, object_size)) |item_end| {
- start = mem.alignForward(u32, item_end, min_alignment);
- }
- return start;
-}
-
-fn allocatedVirtualSize(coff: *Coff, start: u32) u32 {
- if (start == 0)
- return 0;
- var min_pos: u32 = std.math.maxInt(u32);
- for (coff.sections.items(.header)) |header| {
- if (header.virtual_address <= start) continue;
- if (header.virtual_address < min_pos) min_pos = header.virtual_address;
- }
- return min_pos - start;
-}
-
-fn getSizeOfHeaders(coff: Coff) u32 {
- const msdos_hdr_size = msdos_stub.len + 4;
- return @as(u32, @intCast(msdos_hdr_size + @sizeOf(coff_util.CoffHeader) + coff.getOptionalHeaderSize() +
- coff.getDataDirectoryHeadersSize() + coff.getSectionHeadersSize()));
-}
-
-fn getOptionalHeaderSize(coff: Coff) u32 {
- return switch (coff.ptr_width) {
- .p32 => @as(u32, @intCast(@sizeOf(coff_util.OptionalHeaderPE32))),
- .p64 => @as(u32, @intCast(@sizeOf(coff_util.OptionalHeaderPE64))),
- };
-}
-
-fn getDataDirectoryHeadersSize(coff: Coff) u32 {
- return @as(u32, @intCast(coff.data_directories.len * @sizeOf(coff_util.ImageDataDirectory)));
-}
-
-fn getSectionHeadersSize(coff: Coff) u32 {
- return @as(u32, @intCast(coff.sections.slice().len * @sizeOf(coff_util.SectionHeader)));
-}
-
-fn getDataDirectoryHeadersOffset(coff: Coff) u32 {
- const msdos_hdr_size = msdos_stub.len + 4;
- return @as(u32, @intCast(msdos_hdr_size + @sizeOf(coff_util.CoffHeader) + coff.getOptionalHeaderSize()));
-}
-
-fn getSectionHeadersOffset(coff: Coff) u32 {
- return coff.getDataDirectoryHeadersOffset() + coff.getDataDirectoryHeadersSize();
-}
-
-fn getSizeOfImage(coff: Coff) u32 {
- var image_size: u32 = mem.alignForward(u32, coff.getSizeOfHeaders(), coff.page_size);
- for (coff.sections.items(.header)) |header| {
- image_size += mem.alignForward(u32, header.virtual_size, coff.page_size);
- }
- return image_size;
-}
-
-/// Returns the symbol location corresponding to the configured entry point (if any).
-pub fn getEntryPoint(coff: Coff) ?SymbolWithLoc {
- const comp = coff.base.comp;
-
- // TODO This is incomplete.
- // The entry symbol name depends on the subsystem as well as the set of
- // public symbol names from linked objects.
- // See LinkerDriver::findDefaultEntry from the LLD project for the flow chart.
- const entry_name = switch (coff.entry) {
- .disabled => return null,
- .default => switch (comp.config.output_mode) {
- .Exe => "wWinMainCRTStartup",
- .Obj, .Lib => return null,
- },
- .enabled => "wWinMainCRTStartup",
- .named => |name| name,
- };
- const global_index = coff.resolver.get(entry_name) orelse return null;
- return coff.globals.items[global_index];
-}
-
-/// Returns pointer-to-symbol described by `sym_loc` descriptor.
-pub fn getSymbolPtr(coff: *Coff, sym_loc: SymbolWithLoc) *coff_util.Symbol {
- assert(sym_loc.file == null); // TODO linking object files
- return &coff.locals.items[sym_loc.sym_index];
-}
-
-/// Returns symbol described by `sym_loc` descriptor.
-pub fn getSymbol(coff: *const Coff, sym_loc: SymbolWithLoc) *const coff_util.Symbol {
- assert(sym_loc.file == null); // TODO linking object files
- return &coff.locals.items[sym_loc.sym_index];
-}
-
-/// Returns name of the symbol described by `sym_loc` descriptor.
-pub fn getSymbolName(coff: *const Coff, sym_loc: SymbolWithLoc) []const u8 {
- assert(sym_loc.file == null); // TODO linking object files
- const sym = coff.getSymbol(sym_loc);
- const offset = sym.getNameOffset() orelse return sym.getName().?;
- return coff.strtab.get(offset).?;
-}
-
-/// Returns pointer to the global entry for `name` if one exists.
-pub fn getGlobalPtr(coff: *Coff, name: []const u8) ?*SymbolWithLoc {
- const global_index = coff.resolver.get(name) orelse return null;
- return &coff.globals.items[global_index];
-}
-
-/// Returns the global entry for `name` if one exists.
-pub fn getGlobal(coff: *const Coff, name: []const u8) ?SymbolWithLoc {
- const global_index = coff.resolver.get(name) orelse return null;
- return coff.globals.items[global_index];
-}
-
-/// Returns the index of the global entry for `name` if one exists.
-pub fn getGlobalIndex(coff: *const Coff, name: []const u8) ?u32 {
- return coff.resolver.get(name);
-}
-
-/// Returns global entry at `index`.
-pub fn getGlobalByIndex(coff: *const Coff, index: u32) SymbolWithLoc {
- assert(index < coff.globals.items.len);
- return coff.globals.items[index];
-}
-
-const GetOrPutGlobalPtrResult = struct {
- found_existing: bool,
- value_ptr: *SymbolWithLoc,
-};
-
-/// Return pointer to the global entry for `name` if one exists.
-/// Puts a new global entry for `name` if one doesn't exist, and
-/// returns a pointer to it.
-pub fn getOrPutGlobalPtr(coff: *Coff, name: []const u8) !GetOrPutGlobalPtrResult {
- if (coff.getGlobalPtr(name)) |ptr| {
- return GetOrPutGlobalPtrResult{ .found_existing = true, .value_ptr = ptr };
- }
- const gpa = coff.base.comp.gpa;
- const global_index = try coff.allocateGlobal();
- const global_name = try gpa.dupe(u8, name);
- _ = try coff.resolver.put(gpa, global_name, global_index);
- const ptr = &coff.globals.items[global_index];
- return GetOrPutGlobalPtrResult{ .found_existing = false, .value_ptr = ptr };
-}
-
-pub fn getAtom(coff: *const Coff, atom_index: Atom.Index) Atom {
- assert(atom_index < coff.atoms.items.len);
- return coff.atoms.items[atom_index];
-}
-
-pub fn getAtomPtr(coff: *Coff, atom_index: Atom.Index) *Atom {
- assert(atom_index < coff.atoms.items.len);
- return &coff.atoms.items[atom_index];
-}
-
-/// Returns atom if there is an atom referenced by the symbol described by `sym_loc` descriptor.
-/// Returns null on failure.
-pub fn getAtomIndexForSymbol(coff: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
- assert(sym_loc.file == null); // TODO linking with object files
- return coff.atom_by_index_table.get(sym_loc.sym_index);
-}
-
-fn setSectionName(coff: *Coff, header: *coff_util.SectionHeader, name: []const u8) !void {
- if (name.len <= 8) {
- @memcpy(header.name[0..name.len], name);
- @memset(header.name[name.len..], 0);
- return;
- }
- const gpa = coff.base.comp.gpa;
- const offset = try coff.strtab.insert(gpa, name);
- const name_offset = fmt.bufPrint(&header.name, "/{d}", .{offset}) catch unreachable;
- @memset(header.name[name_offset.len..], 0);
-}
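-
-// For example, a name like ".debug_frame" (12 bytes) cannot fit in the fixed
-// 8-byte name field, so per the COFF long-name convention it is stored as a
-// slash followed by the decimal string table offset, e.g. "/4".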
-
-fn getSectionName(coff: *const Coff, header: *const coff_util.SectionHeader) []const u8 {
- if (header.getName()) |name| {
- return name;
- }
- const offset = header.getNameOffset().?;
- return coff.strtab.get(offset).?;
-}
-
-fn setSymbolName(coff: *Coff, symbol: *coff_util.Symbol, name: []const u8) !void {
- if (name.len <= 8) {
- @memcpy(symbol.name[0..name.len], name);
- @memset(symbol.name[name.len..], 0);
- return;
- }
- const gpa = coff.base.comp.gpa;
- const offset = try coff.strtab.insert(gpa, name);
- @memset(symbol.name[0..4], 0);
- mem.writeInt(u32, symbol.name[4..8], offset, .little);
-}
-
-fn logSymAttributes(sym: *const coff_util.Symbol, buf: *[4]u8) []const u8 {
- @memset(buf[0..4], '_');
- switch (sym.section_number) {
- .UNDEFINED => {
- buf[3] = 'u';
- switch (sym.storage_class) {
- .EXTERNAL => buf[1] = 'e',
- .WEAK_EXTERNAL => buf[1] = 'w',
- .NULL => {},
- else => unreachable,
- }
- },
- .ABSOLUTE => unreachable, // handle ABSOLUTE
- .DEBUG => unreachable,
- else => {
- buf[0] = 's';
- switch (sym.storage_class) {
- .EXTERNAL => buf[1] = 'e',
- .WEAK_EXTERNAL => buf[1] = 'w',
- .NULL => {},
- else => unreachable,
- }
- },
- }
- return buf[0..];
-}
-
-fn logSymtab(coff: *Coff) void {
- var buf: [4]u8 = undefined;
-
- log.debug("symtab:", .{});
- log.debug(" object(null)", .{});
- for (coff.locals.items, 0..) |*sym, sym_id| {
- const where = if (sym.section_number == .UNDEFINED) "ord" else "sect";
- const def_index: u16 = switch (sym.section_number) {
- .UNDEFINED => 0, // TODO
- .ABSOLUTE => unreachable, // TODO
- .DEBUG => unreachable, // TODO
- else => @intFromEnum(sym.section_number),
- };
- log.debug(" %{d}: {s} @{x} in {s}({d}), {s}", .{
- sym_id,
- coff.getSymbolName(.{ .sym_index = @as(u32, @intCast(sym_id)), .file = null }),
- sym.value,
- where,
- def_index,
- logSymAttributes(sym, &buf),
- });
- }
-
- log.debug("globals table:", .{});
- for (coff.globals.items) |sym_loc| {
- const sym_name = coff.getSymbolName(sym_loc);
- log.debug(" {s} => %{d} in object({?d})", .{ sym_name, sym_loc.sym_index, sym_loc.file });
- }
-
- log.debug("GOT entries:", .{});
- log.debug("{f}", .{coff.got_table});
-}
-
-fn logSections(coff: *Coff) void {
- log.debug("sections:", .{});
- for (coff.sections.items(.header)) |*header| {
- log.debug(" {s}: VM({x}, {x}) FILE({x}, {x})", .{
- coff.getSectionName(header),
- header.virtual_address,
- header.virtual_address + header.virtual_size,
- header.pointer_to_raw_data,
- header.pointer_to_raw_data + header.size_of_raw_data,
- });
- }
-}
-
-fn logImportTables(coff: *const Coff) void {
- log.debug("import tables:", .{});
- for (coff.import_tables.keys(), 0..) |off, i| {
- const itable = coff.import_tables.values()[i];
- log.debug("{f}", .{itable.fmtDebug(.{
- .coff = coff,
- .index = i,
- .name_off = off,
- })});
- }
-}
-
-pub const Atom = struct {
- /// Each decl always gets a local symbol with the fully qualified name.
- /// The vaddr and size are found here directly.
- /// The file offset is found by computing the vaddr offset from the section vaddr
- /// the symbol references, and adding that to the file offset of the section.
- /// If this field is 0, it means the codegen size = 0 and there is no symbol or
- /// offset table entry.
- sym_index: u32,
-
- /// null means symbol defined by Zig source.
- file: ?u32,
-
- /// Size of the atom
- size: u32,
-
- /// Points to the previous and next neighbors, based on the `text_offset`.
- /// This can be used to find, for example, the capacity of this `Atom`.
- prev_index: ?Index,
- next_index: ?Index,
-
- const Index = u32;
-
- pub fn getSymbolIndex(atom: Atom) ?u32 {
- if (atom.sym_index == 0) return null;
- return atom.sym_index;
- }
-
- /// Returns symbol referencing this atom.
- fn getSymbol(atom: Atom, coff: *const Coff) *const coff_util.Symbol {
- const sym_index = atom.getSymbolIndex().?;
- return coff.getSymbol(.{
- .sym_index = sym_index,
- .file = atom.file,
- });
- }
-
- /// Returns pointer-to-symbol referencing this atom.
- fn getSymbolPtr(atom: Atom, coff: *Coff) *coff_util.Symbol {
- const sym_index = atom.getSymbolIndex().?;
- return coff.getSymbolPtr(.{
- .sym_index = sym_index,
- .file = atom.file,
- });
- }
-
- fn getSymbolWithLoc(atom: Atom) SymbolWithLoc {
- const sym_index = atom.getSymbolIndex().?;
- return .{ .sym_index = sym_index, .file = atom.file };
- }
-
- /// Returns the name of this atom.
- fn getName(atom: Atom, coff: *const Coff) []const u8 {
- const sym_index = atom.getSymbolIndex().?;
- return coff.getSymbolName(.{
- .sym_index = sym_index,
- .file = atom.file,
- });
- }
-
- /// Returns how much room there is to grow in virtual address space.
- fn capacity(atom: Atom, coff: *const Coff) u32 {
- const atom_sym = atom.getSymbol(coff);
- if (atom.next_index) |next_index| {
- const next = coff.getAtom(next_index);
- const next_sym = next.getSymbol(coff);
- return next_sym.value - atom_sym.value;
- } else {
- // We are the last atom.
- // The capacity is limited only by virtual address space.
- return std.math.maxInt(u32) - atom_sym.value;
- }
- }
-
- fn freeListEligible(atom: Atom, coff: *const Coff) bool {
- // No need to keep a free list node for the last atom.
- const next_index = atom.next_index orelse return false;
- const next = coff.getAtom(next_index);
- const atom_sym = atom.getSymbol(coff);
- const next_sym = next.getSymbol(coff);
- const cap = next_sym.value - atom_sym.value;
- const ideal_cap = padToIdeal(atom.size);
- if (cap <= ideal_cap) return false;
- const surplus = cap - ideal_cap;
- return surplus >= min_text_capacity;
- }
-};
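-
-// Worked example with illustrative numbers: if this atom's symbol sits at
-// 0x1000 and the next atom's at 0x1400, capacity() returns 0x400. With
-// atom.size = 0x100, freeListEligible() keeps a free-list node only when
-// 0x400 - padToIdeal(0x100) >= min_text_capacity (a constant defined
-// elsewhere in this file).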
-
-pub const Relocation = struct {
- type: enum {
- // x86, x86_64
- /// RIP-relative displacement to a GOT pointer
- got,
- /// RIP-relative displacement to an import pointer
- import,
-
- // aarch64
- /// PC-relative distance to target page in GOT section
- got_page,
- /// Offset to a GOT pointer relative to the start of a page in GOT section
- got_pageoff,
- /// PC-relative distance to target page in a section (e.g., .rdata)
- page,
- /// Offset to a pointer relative to the start of a page in a section (e.g., .rdata)
- pageoff,
- /// PC-relative distance to target page in an import section
- import_page,
- /// Offset to a pointer relative to the start of a page in an import section (e.g., .rdata)
- import_pageoff,
-
- // common
- /// Absolute pointer value
- direct,
- },
- target: SymbolWithLoc,
- offset: u32,
- addend: u32,
- pcrel: bool,
- length: u2,
- dirty: bool = true,
-
- /// Returns true if and only if the reloc can be resolved.
- fn isResolvable(reloc: Relocation, coff: *Coff) bool {
- _ = reloc.getTargetAddress(coff) orelse return false;
- return true;
- }
-
- fn isGotIndirection(reloc: Relocation) bool {
- return switch (reloc.type) {
- .got, .got_page, .got_pageoff => true,
- else => false,
- };
- }
-
- /// Returns address of the target if any.
- fn getTargetAddress(reloc: Relocation, coff: *const Coff) ?u32 {
- switch (reloc.type) {
- .got, .got_page, .got_pageoff => {
- const got_index = coff.got_table.lookup.get(reloc.target) orelse return null;
- const header = coff.sections.items(.header)[coff.got_section_index.?];
- return header.virtual_address + got_index * coff.ptr_width.size();
- },
- .import, .import_page, .import_pageoff => {
- const sym = coff.getSymbol(reloc.target);
- const index = coff.import_tables.getIndex(sym.value) orelse return null;
- const itab = coff.import_tables.values()[index];
- return itab.getImportAddress(reloc.target, .{
- .coff = coff,
- .index = index,
- .name_off = sym.value,
- });
- },
- else => {
- const target_atom_index = coff.getAtomIndexForSymbol(reloc.target) orelse return null;
- const target_atom = coff.getAtom(target_atom_index);
- return target_atom.getSymbol(coff).value;
- },
- }
- }
-
- fn resolve(reloc: Relocation, atom_index: Atom.Index, code: []u8, image_base: u64, coff: *Coff) void {
- const atom = coff.getAtom(atom_index);
- const source_sym = atom.getSymbol(coff);
- const source_vaddr = source_sym.value + reloc.offset;
-
- const target_vaddr = reloc.getTargetAddress(coff).?; // Oops, you didn't check if the relocation can be resolved with isResolvable().
- const target_vaddr_with_addend = target_vaddr + reloc.addend;
-
- log.debug(" ({x}: [() => 0x{x} ({s})) ({s}) ", .{
- source_vaddr,
- target_vaddr_with_addend,
- coff.getSymbolName(reloc.target),
- @tagName(reloc.type),
- });
-
- const ctx: Context = .{
- .source_vaddr = source_vaddr,
- .target_vaddr = target_vaddr_with_addend,
- .image_base = image_base,
- .code = code,
- .ptr_width = coff.ptr_width,
- };
-
- const target = &coff.base.comp.root_mod.resolved_target.result;
- switch (target.cpu.arch) {
- .aarch64 => reloc.resolveAarch64(ctx),
- .x86, .x86_64 => reloc.resolveX86(ctx),
- else => unreachable, // unhandled target architecture
- }
- }
-
- const Context = struct {
- source_vaddr: u32,
- target_vaddr: u32,
- image_base: u64,
- code: []u8,
- ptr_width: PtrWidth,
- };
-
- fn resolveAarch64(reloc: Relocation, ctx: Context) void {
- const Instruction = aarch64_util.encoding.Instruction;
- var buffer = ctx.code[reloc.offset..];
- switch (reloc.type) {
- .got_page, .import_page, .page => {
- const source_page = @as(i32, @intCast(ctx.source_vaddr >> 12));
- const target_page = @as(i32, @intCast(ctx.target_vaddr >> 12));
- const pages: i21 = @intCast(target_page - source_page);
- var inst: Instruction = .read(buffer[0..Instruction.size]);
- inst.data_processing_immediate.pc_relative_addressing.group.immhi = @intCast(pages >> 2);
- inst.data_processing_immediate.pc_relative_addressing.group.immlo = @truncate(@as(u21, @bitCast(pages)));
- inst.write(buffer[0..Instruction.size]);
- },
- .got_pageoff, .import_pageoff, .pageoff => {
- assert(!reloc.pcrel);
-
- const narrowed: u12 = @truncate(@as(u64, @intCast(ctx.target_vaddr)));
- var inst: Instruction = .read(buffer[0..Instruction.size]);
- switch (inst.decode()) {
- else => unreachable,
- .data_processing_immediate => inst.data_processing_immediate.add_subtract_immediate.group.imm12 = narrowed,
- .load_store => |load_store| inst.load_store.register_unsigned_immediate.group.imm12 =
- switch (load_store.register_unsigned_immediate.decode()) {
- .integer => |integer| @shrExact(narrowed, @intFromEnum(integer.group.size)),
- .vector => |vector| @shrExact(narrowed, @intFromEnum(vector.group.opc1.decode(vector.group.size))),
- },
- }
- inst.write(buffer[0..Instruction.size]);
- },
- .direct => {
- assert(!reloc.pcrel);
- switch (reloc.length) {
- 2 => mem.writeInt(
- u32,
- buffer[0..4],
- @as(u32, @truncate(ctx.target_vaddr + ctx.image_base)),
- .little,
- ),
- 3 => mem.writeInt(u64, buffer[0..8], ctx.target_vaddr + ctx.image_base, .little),
- else => unreachable,
- }
- },
-
- .got => unreachable,
- .import => unreachable,
- }
- }
-
- fn resolveX86(reloc: Relocation, ctx: Context) void {
- var buffer = ctx.code[reloc.offset..];
- switch (reloc.type) {
- .got_page => unreachable,
- .got_pageoff => unreachable,
- .page => unreachable,
- .pageoff => unreachable,
- .import_page => unreachable,
- .import_pageoff => unreachable,
-
- .got, .import => {
- assert(reloc.pcrel);
- const disp = @as(i32, @intCast(ctx.target_vaddr)) - @as(i32, @intCast(ctx.source_vaddr)) - 4;
- mem.writeInt(i32, buffer[0..4], disp, .little);
- },
- .direct => {
- if (reloc.pcrel) {
- const disp = @as(i32, @intCast(ctx.target_vaddr)) - @as(i32, @intCast(ctx.source_vaddr)) - 4;
- mem.writeInt(i32, buffer[0..4], disp, .little);
- } else switch (ctx.ptr_width) {
- .p32 => mem.writeInt(u32, buffer[0..4], @as(u32, @intCast(ctx.target_vaddr + ctx.image_base)), .little),
- .p64 => switch (reloc.length) {
- 2 => mem.writeInt(u32, buffer[0..4], @as(u32, @truncate(ctx.target_vaddr + ctx.image_base)), .little),
- 3 => mem.writeInt(u64, buffer[0..8], ctx.target_vaddr + ctx.image_base, .little),
- else => unreachable,
- },
- }
- },
- }
- }
-};
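-
-// Illustrative displacement math for the x86 cases above: a RIP-relative GOT
-// load whose 4-byte fixup lives at source_vaddr 0x2000 and whose GOT slot
-// sits at target_vaddr 0x3000 stores disp = 0x3000 - 0x2000 - 4 = 0xffc; the
-// -4 accounts for RIP already pointing past the displacement field.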
-
-pub fn addRelocation(coff: *Coff, atom_index: Atom.Index, reloc: Relocation) !void {
- const comp = coff.base.comp;
- const gpa = comp.gpa;
- log.debug(" (adding reloc of type {s} to target %{d})", .{ @tagName(reloc.type), reloc.target.sym_index });
- const gop = try coff.relocs.getOrPut(gpa, atom_index);
- if (!gop.found_existing) {
- gop.value_ptr.* = .{};
- }
- try gop.value_ptr.append(gpa, reloc);
-}
-
-fn addBaseRelocation(coff: *Coff, atom_index: Atom.Index, offset: u32) !void {
- const comp = coff.base.comp;
- const gpa = comp.gpa;
- log.debug(" (adding base relocation at offset 0x{x} in %{d})", .{
- offset,
- coff.getAtom(atom_index).getSymbolIndex().?,
- });
- const gop = try coff.base_relocs.getOrPut(gpa, atom_index);
- if (!gop.found_existing) {
- gop.value_ptr.* = .{};
- }
- try gop.value_ptr.append(gpa, offset);
-}
-
-fn freeRelocations(coff: *Coff, atom_index: Atom.Index) void {
- const comp = coff.base.comp;
- const gpa = comp.gpa;
- var removed_relocs = coff.relocs.fetchOrderedRemove(atom_index);
- if (removed_relocs) |*relocs| relocs.value.deinit(gpa);
- var removed_base_relocs = coff.base_relocs.fetchOrderedRemove(atom_index);
- if (removed_base_relocs) |*base_relocs| base_relocs.value.deinit(gpa);
-}
-
-/// Represents an import table in the .idata section where each contained pointer
-/// is to a symbol from the same DLL.
-///
-/// The layout of .idata section is as follows:
-///
-/// --- ADDR1: IAT (all import tables concatenated together)
-/// ptr
-/// ptr
-/// 0 sentinel
-/// ptr
-/// 0 sentinel
-/// --- ADDR2: headers
-/// ImportDirectoryEntry header
-/// ImportDirectoryEntry header
-/// sentinel
-/// --- ADDR3: lookup tables
-/// Lookup table
-/// 0 sentinel
-/// Lookup table
-/// 0 sentinel
-/// --- ADDR4: name hint tables
-/// hint-symname
-/// hint-symname
-/// --- ADDR5: DLL names
-/// DLL#1 name
-/// DLL#2 name
-/// --- END
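-///
-/// Illustrative arithmetic: with two DLLs importing 2 and 1 symbols
-/// respectively, DLL#1's IAT occupies two pointers plus a sentinel
-/// (3 * 8 bytes), so DLL#2's pointers begin at ADDR1 + 0x18 (this is the
-/// computation performed by getBaseAddress below).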
-const ImportTable = struct {
- entries: std.ArrayListUnmanaged(SymbolWithLoc) = .empty,
- free_list: std.ArrayListUnmanaged(u32) = .empty,
- lookup: std.AutoHashMapUnmanaged(SymbolWithLoc, u32) = .empty,
-
- fn deinit(itab: *ImportTable, allocator: Allocator) void {
- itab.entries.deinit(allocator);
- itab.free_list.deinit(allocator);
- itab.lookup.deinit(allocator);
- }
-
- /// Size of the import table, not including the sentinel.
- fn size(itab: ImportTable) u32 {
- return @as(u32, @intCast(itab.entries.items.len)) * @sizeOf(u64);
- }
-
- fn addImport(itab: *ImportTable, allocator: Allocator, target: SymbolWithLoc) !ImportIndex {
- try itab.entries.ensureUnusedCapacity(allocator, 1);
- const index: u32 = blk: {
- if (itab.free_list.pop()) |index| {
- log.debug(" (reusing import entry index {d})", .{index});
- break :blk index;
- } else {
- log.debug(" (allocating import entry at index {d})", .{itab.entries.items.len});
- const index = @as(u32, @intCast(itab.entries.items.len));
- _ = itab.entries.addOneAssumeCapacity();
- break :blk index;
- }
- };
- itab.entries.items[index] = target;
- try itab.lookup.putNoClobber(allocator, target, index);
- return index;
- }
-
- const Context = struct {
- coff: *const Coff,
- /// Index of this ImportTable in a global list of all tables.
- /// This is required in order to calculate the base vaddr of this ImportTable.
- index: usize,
- /// Offset into the string interning table of the DLL this ImportTable corresponds to.
- name_off: u32,
- };
-
- fn getBaseAddress(ctx: Context) u32 {
- const header = ctx.coff.sections.items(.header)[ctx.coff.idata_section_index.?];
- var addr = header.virtual_address;
- for (ctx.coff.import_tables.values(), 0..) |other_itab, i| {
- if (ctx.index == i) break;
- addr += @as(u32, @intCast(other_itab.entries.items.len * @sizeOf(u64))) + 8;
- }
- return addr;
- }
-
- fn getImportAddress(itab: *const ImportTable, target: SymbolWithLoc, ctx: Context) ?u32 {
- const index = itab.lookup.get(target) orelse return null;
- const base_vaddr = getBaseAddress(ctx);
- return base_vaddr + index * @sizeOf(u64);
- }
-
- const Format = struct {
- itab: ImportTable,
- ctx: Context,
-
- fn default(f: Format, writer: *std.Io.Writer) std.Io.Writer.Error!void {
- const lib_name = f.ctx.coff.temp_strtab.getAssumeExists(f.ctx.name_off);
- const base_vaddr = getBaseAddress(f.ctx);
- try writer.print("IAT({s}.dll) @{x}:", .{ lib_name, base_vaddr });
- for (f.itab.entries.items, 0..) |entry, i| {
- try writer.print("\n {d}@{?x} => {s}", .{
- i,
- f.itab.getImportAddress(entry, f.ctx),
- f.ctx.coff.getSymbolName(entry),
- });
- }
- }
- };
-
- fn fmtDebug(itab: ImportTable, ctx: Context) fmt.Alt(Format, Format.default) {
- return .{ .data = .{ .itab = itab, .ctx = ctx } };
- }
-
- const ImportIndex = u32;
-};
-
-fn pwriteAll(coff: *Coff, bytes: []const u8, offset: u64) error{LinkFailure}!void {
- const comp = coff.base.comp;
- const diags = &comp.link_diags;
- coff.base.file.?.pwriteAll(bytes, offset) catch |err| {
- return diags.fail("failed to write: {s}", .{@errorName(err)});
- };
-}
-
-/// This is the start of a Portable Executable (PE) file.
-/// It starts with a MS-DOS header followed by a MS-DOS stub program.
-/// This data does not change so we include it as follows in all binaries.
-///
-/// In this context,
-/// A "paragraph" is 16 bytes.
-/// A "page" is 512 bytes.
-/// A "long" is 4 bytes.
-/// A "word" is 2 bytes.
-const msdos_stub: [120]u8 = .{
- 'M', 'Z', // Magic number. Stands for Mark Zbikowski (designer of the MS-DOS executable format).
- 0x78, 0x00, // Number of bytes in the last page. This matches the size of this entire MS-DOS stub.
- 0x01, 0x00, // Number of pages.
- 0x00, 0x00, // Number of entries in the relocation table.
- 0x04, 0x00, // The number of paragraphs taken up by the header. 4 * 16 = 64, which matches the header size (all bytes before the MS-DOS stub program).
- 0x00, 0x00, // The number of paragraphs required by the program.
- 0x00, 0x00, // The number of paragraphs requested by the program.
- 0x00, 0x00, // Initial value for SS (relocatable segment address).
- 0x00, 0x00, // Initial value for SP.
- 0x00, 0x00, // Checksum.
- 0x00, 0x00, // Initial value for IP.
- 0x00, 0x00, // Initial value for CS (relocatable segment address).
- 0x40, 0x00, // Absolute offset to relocation table. 64 matches the header size (all bytes before the MS-DOS stub program).
- 0x00, 0x00, // Overlay number. Zero means this is the main executable.
-}
- // Reserved words.
- ++ .{ 0x00, 0x00 } ** 4
- // OEM-related fields.
- ++ .{
- 0x00, 0x00, // OEM identifier.
- 0x00, 0x00, // OEM information.
- }
- // Reserved words.
- ++ .{ 0x00, 0x00 } ** 10
- // Address of the PE header (a long). This matches the size of this entire MS-DOS stub, so that's the address of what's after this MS-DOS stub.
- ++ .{ 0x78, 0x00, 0x00, 0x00 }
- // What follows is a 16-bit x86 MS-DOS program of 7 instructions that prints the bytes after these instructions and then exits.
- ++ .{
- // Set the value of the data segment to the same value as the code segment.
- 0x0e, // push cs
- 0x1f, // pop ds
- // Set the DX register to the address of the message.
- // If you count all bytes of these 7 instructions you get 14, so that's the address of what's after these instructions.
- 0xba, 14, 0x00, // mov dx, 14
- // Set AH to the system call code for printing a message.
- 0xb4, 0x09, // mov ah, 0x09
- // Perform the system call to print the message.
- 0xcd, 0x21, // int 0x21
- // Set AH to 0x4c which is the system call code for exiting, and set AL to 0x01 which is the exit code.
- 0xb8, 0x01, 0x4c, // mov ax, 0x4c01
- // Perform the system call to exit the program with exit code 1.
- 0xcd, 0x21, // int 0x21
- }
- // Message to print.
- ++ "This program cannot be run in DOS mode.".*
- // Message terminators.
- ++ .{
- '$', // We do not pass a length to the print system call; the string is terminated by this character.
- 0x00, 0x00, // Terminating zero bytes.
- };
diff --git a/src/link/Coff2.zig b/src/link/Coff2.zig
new file mode 100644
index 0000000000..79d4b17505
--- /dev/null
+++ b/src/link/Coff2.zig
@@ -0,0 +1,2193 @@
+base: link.File,
+endian: std.builtin.Endian,
+mf: MappedFile,
+nodes: std.MultiArrayList(Node),
+import_table: ImportTable,
+strings: std.HashMapUnmanaged(
+ u32,
+ void,
+ std.hash_map.StringIndexContext,
+ std.hash_map.default_max_load_percentage,
+),
+string_bytes: std.ArrayList(u8),
+section_table: std.ArrayList(Symbol.Index),
+symbol_table: std.ArrayList(Symbol),
+globals: std.AutoArrayHashMapUnmanaged(GlobalName, Symbol.Index),
+global_pending_index: u32,
+navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, Symbol.Index),
+uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Symbol.Index),
+lazy: std.EnumArray(link.File.LazySymbol.Kind, struct {
+ map: std.AutoArrayHashMapUnmanaged(InternPool.Index, Symbol.Index),
+ pending_index: u32,
+}),
+pending_uavs: std.AutoArrayHashMapUnmanaged(Node.UavMapIndex, struct {
+ alignment: InternPool.Alignment,
+ src_loc: Zcu.LazySrcLoc,
+}),
+relocs: std.ArrayList(Reloc),
+/// This is hiding actual bugs with global symbols! Reconsider once they are implemented correctly.
+entry_hack: Symbol.Index,
+
+pub const default_file_alignment: u16 = 0x200;
+pub const default_size_of_stack_reserve: u32 = 0x1000000;
+pub const default_size_of_stack_commit: u32 = 0x1000;
+pub const default_size_of_heap_reserve: u32 = 0x100000;
+pub const default_size_of_heap_commit: u32 = 0x1000;
+
+/// This is the start of a Portable Executable (PE) file.
+/// It starts with a MS-DOS header followed by a MS-DOS stub program.
+/// This data does not change so we include it as follows in all binaries.
+///
+/// In this context,
+/// A "paragraph" is 16 bytes.
+/// A "page" is 512 bytes.
+/// A "long" is 4 bytes.
+/// A "word" is 2 bytes.
+pub const msdos_stub: [120]u8 = .{
+ 'M', 'Z', // Magic number. Stands for Mark Zbikowski (designer of the MS-DOS executable format).
+ 0x78, 0x00, // Number of bytes in the last page. This matches the size of this entire MS-DOS stub.
+ 0x01, 0x00, // Number of pages.
+ 0x00, 0x00, // Number of entries in the relocation table.
+ 0x04, 0x00, // The number of paragraphs taken up by the header. 4 * 16 = 64, which matches the header size (all bytes before the MS-DOS stub program).
+ 0x00, 0x00, // The number of paragraphs required by the program.
+ 0x00, 0x00, // The number of paragraphs requested by the program.
+ 0x00, 0x00, // Initial value for SS (relocatable segment address).
+ 0x00, 0x00, // Initial value for SP.
+ 0x00, 0x00, // Checksum.
+ 0x00, 0x00, // Initial value for IP.
+ 0x00, 0x00, // Initial value for CS (relocatable segment address).
+ 0x40, 0x00, // Absolute offset to relocation table. 64 matches the header size (all bytes before the MS-DOS stub program).
+ 0x00, 0x00, // Overlay number. Zero means this is the main executable.
+}
+ // Reserved words.
+ ++ .{ 0x00, 0x00 } ** 4
+ // OEM-related fields.
+ ++ .{
+ 0x00, 0x00, // OEM identifier.
+ 0x00, 0x00, // OEM information.
+ }
+ // Reserved words.
+ ++ .{ 0x00, 0x00 } ** 10
+ // Address of the PE header (a long). This matches the size of this entire MS-DOS stub, so that's the address of what's after this MS-DOS stub.
+ ++ .{ 0x78, 0x00, 0x00, 0x00 }
+ // What follows is a 16-bit x86 MS-DOS program of 7 instructions that prints the bytes after these instructions and then exits.
+ ++ .{
+ // Set the value of the data segment to the same value as the code segment.
+ 0x0e, // push cs
+ 0x1f, // pop ds
+ // Set the DX register to the address of the message.
+ // If you count all bytes of these 7 instructions you get 14, so that's the address of what's after these instructions.
+ 0xba, 14, 0x00, // mov dx, 14
+ // Set AH to the system call code for printing a message.
+ 0xb4, 0x09, // mov ah, 0x09
+ // Perform the system call to print the message.
+ 0xcd, 0x21, // int 0x21
+ // Set AH to 0x4c which is the system call code for exiting, and set AL to 0x01 which is the exit code.
+ 0xb8, 0x01, 0x4c, // mov ax, 0x4c01
+ // Perform the system call to exit the program with exit code 1.
+ 0xcd, 0x21, // int 0x21
+ }
+ // Message to print.
+ ++ "This program cannot be run in DOS mode.".*
+ // Message terminators.
+ ++ .{
+ '$', // We do not pass a length to the print system call; the string is terminated by this character.
+ 0x00, 0x00, // Terminating zero bytes.
+ };
+
+pub const Node = union(enum) {
+ file,
+ header,
+ signature,
+ coff_header,
+ optional_header,
+ data_directories,
+ section_table,
+ section: Symbol.Index,
+ import_directory_table,
+ import_lookup_table: u32,
+ import_address_table: u32,
+ import_hint_name_table: u32,
+ global: GlobalMapIndex,
+ nav: NavMapIndex,
+ uav: UavMapIndex,
+ lazy_code: LazyMapRef.Index(.code),
+ lazy_const_data: LazyMapRef.Index(.const_data),
+
+ pub const GlobalMapIndex = enum(u32) {
+ _,
+
+ pub fn globalName(gmi: GlobalMapIndex, coff: *const Coff) GlobalName {
+ return coff.globals.keys()[@intFromEnum(gmi)];
+ }
+
+ pub fn symbol(gmi: GlobalMapIndex, coff: *const Coff) Symbol.Index {
+ return coff.globals.values()[@intFromEnum(gmi)];
+ }
+ };
+
+ pub const NavMapIndex = enum(u32) {
+ _,
+
+ pub fn navIndex(nmi: NavMapIndex, coff: *const Coff) InternPool.Nav.Index {
+ return coff.navs.keys()[@intFromEnum(nmi)];
+ }
+
+ pub fn symbol(nmi: NavMapIndex, coff: *const Coff) Symbol.Index {
+ return coff.navs.values()[@intFromEnum(nmi)];
+ }
+ };
+
+ pub const UavMapIndex = enum(u32) {
+ _,
+
+ pub fn uavValue(umi: UavMapIndex, coff: *const Coff) InternPool.Index {
+ return coff.uavs.keys()[@intFromEnum(umi)];
+ }
+
+ pub fn symbol(umi: UavMapIndex, coff: *const Coff) Symbol.Index {
+ return coff.uavs.values()[@intFromEnum(umi)];
+ }
+ };
+
+ pub const LazyMapRef = struct {
+ kind: link.File.LazySymbol.Kind,
+ index: u32,
+
+ pub fn Index(comptime kind: link.File.LazySymbol.Kind) type {
+ return enum(u32) {
+ _,
+
+ pub fn ref(lmi: @This()) LazyMapRef {
+ return .{ .kind = kind, .index = @intFromEnum(lmi) };
+ }
+
+ pub fn lazySymbol(lmi: @This(), coff: *const Coff) link.File.LazySymbol {
+ return lmi.ref().lazySymbol(coff);
+ }
+
+ pub fn symbol(lmi: @This(), coff: *const Coff) Symbol.Index {
+ return lmi.ref().symbol(coff);
+ }
+ };
+ }
+
+ pub fn lazySymbol(lmr: LazyMapRef, coff: *const Coff) link.File.LazySymbol {
+ return .{ .kind = lmr.kind, .ty = coff.lazy.getPtrConst(lmr.kind).map.keys()[lmr.index] };
+ }
+
+ pub fn symbol(lmr: LazyMapRef, coff: *const Coff) Symbol.Index {
+ return coff.lazy.getPtrConst(lmr.kind).map.values()[lmr.index];
+ }
+ };
+
+ pub const Tag = @typeInfo(Node).@"union".tag_type.?;
+
+ const known_count = @typeInfo(@TypeOf(known)).@"struct".fields.len;
+ const known = known: {
+ const Known = enum {
+ file,
+ header,
+ signature,
+ coff_header,
+ optional_header,
+ data_directories,
+ section_table,
+ };
+ var mut_known: std.enums.EnumFieldStruct(Known, MappedFile.Node.Index, null) = undefined;
+ for (@typeInfo(Known).@"enum".fields) |field|
+ @field(mut_known, field.name) = @enumFromInt(field.value);
+ break :known mut_known;
+ };
+
+ comptime {
+ if (!std.debug.runtime_safety) std.debug.assert(@sizeOf(Node) == 8);
+ }
+};
+
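+/// Field order matches the PE optional header's data directory indices
+/// (export table = 0, import table = 1, ..., reserved = 15).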
+pub const DataDirectory = enum {
+ export_table,
+ import_table,
+ resource_table,
+ exception_table,
+ certificate_table,
+ base_relocation_table,
+ debug,
+ architecture,
+ global_ptr,
+ tls_table,
+ load_config_table,
+ bound_import,
+ import_address_table,
+ delay_import_descriptor,
+ clr_runtime_header,
+ reserved,
+};
+
+pub const ImportTable = struct {
+ directory_table_ni: MappedFile.Node.Index,
+ dlls: std.AutoArrayHashMapUnmanaged(void, Dll),
+
+ pub const Dll = struct {
+ import_lookup_table_ni: MappedFile.Node.Index,
+ import_address_table_si: Symbol.Index,
+ import_hint_name_table_ni: MappedFile.Node.Index,
+ len: u32,
+ hint_name_len: u32,
+ };
+
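+ // Allows lookups by bare library name (e.g. "kernel32") to match the
+ // stored hint/name bytes ("kernel32.dll\x00") without allocating.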
+ const Adapter = struct {
+ coff: *Coff,
+
+ pub fn eql(adapter: Adapter, lhs_key: []const u8, _: void, rhs_index: usize) bool {
+ const coff = adapter.coff;
+ const dll_name = coff.import_table.dlls.values()[rhs_index]
+ .import_hint_name_table_ni.sliceConst(&coff.mf);
+ return std.mem.startsWith(u8, dll_name, lhs_key) and
+ std.mem.startsWith(u8, dll_name[lhs_key.len..], ".dll\x00");
+ }
+
+ pub fn hash(_: Adapter, key: []const u8) u32 {
+ assert(std.mem.indexOfScalar(u8, key, 0) == null);
+ return std.array_hash_map.hashString(key);
+ }
+ };
+};
+
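+/// A NUL-terminated string interned in `string_bytes`, identified by its
+/// starting byte offset.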
+pub const String = enum(u32) {
+ _,
+
+ pub const Optional = enum(u32) {
+ none = std.math.maxInt(u32),
+ _,
+
+ pub fn unwrap(os: String.Optional) ?String {
+ return switch (os) {
+ else => |s| @enumFromInt(@intFromEnum(s)),
+ .none => null,
+ };
+ }
+
+ pub fn toSlice(os: String.Optional, coff: *Coff) ?[:0]const u8 {
+ return (os.unwrap() orelse return null).toSlice(coff);
+ }
+ };
+
+ pub fn toSlice(s: String, coff: *Coff) [:0]const u8 {
+ const slice = coff.string_bytes.items[@intFromEnum(s)..];
+ return slice[0..std.mem.indexOfScalar(u8, slice, 0).? :0];
+ }
+
+ pub fn toOptional(s: String) String.Optional {
+ return @enumFromInt(@intFromEnum(s));
+ }
+};
+
+pub const GlobalName = struct { name: String, lib_name: String.Optional };
+
+pub const Symbol = struct {
+ ni: MappedFile.Node.Index,
+ rva: u32,
+ size: u32,
+ /// Relocations contained within this symbol
+ loc_relocs: Reloc.Index,
+ /// Relocations targeting this symbol
+ target_relocs: Reloc.Index,
+ section_number: SectionNumber,
+ data_directory: ?DataDirectory,
+ unused0: u32 = 0,
+ unused1: u32 = 0,
+
+ pub const SectionNumber = enum(i16) {
+ UNDEFINED = 0,
+ ABSOLUTE = -1,
+ DEBUG = -2,
+ _,
+
+ fn toIndex(sn: SectionNumber) u15 {
+ return @intCast(@intFromEnum(sn) - 1);
+ }
+
+ pub fn symbol(sn: SectionNumber, coff: *const Coff) Symbol.Index {
+ return coff.section_table.items[sn.toIndex()];
+ }
+
+ pub fn header(sn: SectionNumber, coff: *Coff) *std.coff.SectionHeader {
+ return &coff.sectionTableSlice()[sn.toIndex()];
+ }
+ };
+
+ pub const Index = enum(u32) {
+ null,
+ data,
+ idata,
+ rdata,
+ text,
+ _,
+
+ const known_count = @typeInfo(Index).@"enum".fields.len;
+
+ pub fn get(si: Symbol.Index, coff: *Coff) *Symbol {
+ return &coff.symbol_table.items[@intFromEnum(si)];
+ }
+
+ pub fn node(si: Symbol.Index, coff: *Coff) MappedFile.Node.Index {
+ const ni = si.get(coff).ni;
+ assert(ni != .none);
+ return ni;
+ }
+
+ pub fn flushMoved(si: Symbol.Index, coff: *Coff) void {
+ const sym = si.get(coff);
+ sym.rva = coff.computeNodeRva(sym.ni);
+ if (si == coff.entry_hack)
+ coff.targetStore(&coff.optionalHeaderStandardPtr().address_of_entry_point, sym.rva);
+ si.applyLocationRelocs(coff);
+ si.applyTargetRelocs(coff);
+ }
+
+ pub fn applyLocationRelocs(si: Symbol.Index, coff: *Coff) void {
+ for (coff.relocs.items[@intFromEnum(si.get(coff).loc_relocs)..]) |*reloc| {
+ if (reloc.loc != si) break;
+ reloc.apply(coff);
+ }
+ }
+
+ pub fn applyTargetRelocs(si: Symbol.Index, coff: *Coff) void {
+ var ri = si.get(coff).target_relocs;
+ while (ri != .none) {
+ const reloc = ri.get(coff);
+ assert(reloc.target == si);
+ reloc.apply(coff);
+ ri = reloc.next;
+ }
+ }
+
+ pub fn deleteLocationRelocs(si: Symbol.Index, coff: *Coff) void {
+ const sym = si.get(coff);
+ for (coff.relocs.items[@intFromEnum(sym.loc_relocs)..]) |*reloc| {
+ if (reloc.loc != si) break;
+ reloc.delete(coff);
+ }
+ sym.loc_relocs = .none;
+ }
+ };
+
+ comptime {
+ if (!std.debug.runtime_safety) std.debug.assert(@sizeOf(Symbol) == 32);
+ }
+};
+
+pub const Reloc = extern struct {
+ type: Reloc.Type,
+ prev: Reloc.Index,
+ next: Reloc.Index,
+ loc: Symbol.Index,
+ target: Symbol.Index,
+ unused: u32,
+ offset: u64,
+ addend: i64,
+
+ pub const Type = extern union {
+ AMD64: std.coff.IMAGE.REL.AMD64,
+ ARM: std.coff.IMAGE.REL.ARM,
+ ARM64: std.coff.IMAGE.REL.ARM64,
+ SH: std.coff.IMAGE.REL.SH,
+ PPC: std.coff.IMAGE.REL.PPC,
+ I386: std.coff.IMAGE.REL.I386,
+ IA64: std.coff.IMAGE.REL.IA64,
+ MIPS: std.coff.IMAGE.REL.MIPS,
+ M32R: std.coff.IMAGE.REL.M32R,
+ };
+
+ pub const Index = enum(u32) {
+ none = std.math.maxInt(u32),
+ _,
+
+ pub fn get(ri: Reloc.Index, coff: *Coff) *Reloc {
+ return &coff.relocs.items[@intFromEnum(ri)];
+ }
+ };
+
+ pub fn apply(reloc: *const Reloc, coff: *Coff) void {
+ const loc_sym = reloc.loc.get(coff);
+ switch (loc_sym.ni) {
+ .none => return,
+ else => |ni| if (ni.hasMoved(&coff.mf)) return,
+ }
+ const target_sym = reloc.target.get(coff);
+ switch (target_sym.ni) {
+ .none => return,
+ else => |ni| if (ni.hasMoved(&coff.mf)) return,
+ }
+ const loc_slice = loc_sym.ni.slice(&coff.mf)[@intCast(reloc.offset)..];
+ const target_rva = target_sym.rva +% @as(u64, @bitCast(reloc.addend));
+ const target_endian = coff.targetEndian();
+ switch (coff.targetLoad(&coff.headerPtr().machine)) {
+ else => |machine| @panic(@tagName(machine)),
+ .AMD64 => switch (reloc.type.AMD64) {
+ else => |kind| @panic(@tagName(kind)),
+ .ABSOLUTE => {},
+ .ADDR64 => std.mem.writeInt(
+ u64,
+ loc_slice[0..8],
+ coff.optionalHeaderField(.image_base) + target_rva,
+ target_endian,
+ ),
+ .ADDR32 => std.mem.writeInt(
+ u32,
+ loc_slice[0..4],
+ @intCast(coff.optionalHeaderField(.image_base) + target_rva),
+ target_endian,
+ ),
+ .ADDR32NB => std.mem.writeInt(
+ u32,
+ loc_slice[0..4],
+ @intCast(target_rva),
+ target_endian,
+ ),
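+ // REL32_N is relative to the end of the instruction: the 32-bit
+ // field is followed by N more instruction bytes, hence offset + 4 + N.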
+ .REL32 => std.mem.writeInt(
+ i32,
+ loc_slice[0..4],
+ @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 4)))),
+ target_endian,
+ ),
+ .REL32_1 => std.mem.writeInt(
+ i32,
+ loc_slice[0..4],
+ @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 5)))),
+ target_endian,
+ ),
+ .REL32_2 => std.mem.writeInt(
+ i32,
+ loc_slice[0..4],
+ @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 6)))),
+ target_endian,
+ ),
+ .REL32_3 => std.mem.writeInt(
+ i32,
+ loc_slice[0..4],
+ @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 7)))),
+ target_endian,
+ ),
+ .REL32_4 => std.mem.writeInt(
+ i32,
+ loc_slice[0..4],
+ @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 8)))),
+ target_endian,
+ ),
+ .REL32_5 => std.mem.writeInt(
+ i32,
+ loc_slice[0..4],
+ @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 9)))),
+ target_endian,
+ ),
+ },
+ .I386 => switch (reloc.type.I386) {
+ else => |kind| @panic(@tagName(kind)),
+ .ABSOLUTE => {},
+ .DIR16 => std.mem.writeInt(
+ u16,
+ loc_slice[0..2],
+ @intCast(coff.optionalHeaderField(.image_base) + target_rva),
+ target_endian,
+ ),
+ .REL16 => std.mem.writeInt(
+ i16,
+ loc_slice[0..2],
+ @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 2)))),
+ target_endian,
+ ),
+ .DIR32 => std.mem.writeInt(
+ u32,
+ loc_slice[0..4],
+ @intCast(coff.optionalHeaderField(.image_base) + target_rva),
+ target_endian,
+ ),
+ .DIR32NB => std.mem.writeInt(
+ u32,
+ loc_slice[0..4],
+ @intCast(target_rva),
+ target_endian,
+ ),
+ .REL32 => std.mem.writeInt(
+ i32,
+ loc_slice[0..4],
+ @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 4)))),
+ target_endian,
+ ),
+ },
+ }
+ }
+
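+ // Unlinks the reloc from its target's doubly-linked chain. Relocations
+ // sharing a location are stored contiguously and freed as a group, so
+ // only the target-side links need fixing up.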
+ pub fn delete(reloc: *Reloc, coff: *Coff) void {
+ switch (reloc.prev) {
+ .none => {
+ const target = reloc.target.get(coff);
+ assert(target.target_relocs.get(coff) == reloc);
+ target.target_relocs = reloc.next;
+ },
+ else => |prev| prev.get(coff).next = reloc.next,
+ }
+ switch (reloc.next) {
+ .none => {},
+ else => |next| next.get(coff).prev = reloc.prev,
+ }
+ reloc.* = undefined;
+ }
+
+ comptime {
+ if (!std.debug.runtime_safety) std.debug.assert(@sizeOf(Reloc) == 40);
+ }
+};
+
+pub fn open(
+ arena: std.mem.Allocator,
+ comp: *Compilation,
+ path: std.Build.Cache.Path,
+ options: link.File.OpenOptions,
+) !*Coff {
+ return create(arena, comp, path, options);
+}
+pub fn createEmpty(
+ arena: std.mem.Allocator,
+ comp: *Compilation,
+ path: std.Build.Cache.Path,
+ options: link.File.OpenOptions,
+) !*Coff {
+ return create(arena, comp, path, options);
+}
+fn create(
+ arena: std.mem.Allocator,
+ comp: *Compilation,
+ path: std.Build.Cache.Path,
+ options: link.File.OpenOptions,
+) !*Coff {
+ const target = &comp.root_mod.resolved_target.result;
+ assert(target.ofmt == .coff);
+ const is_image = switch (comp.config.output_mode) {
+ .Exe => true,
+ .Lib => switch (comp.config.link_mode) {
+ .static => false,
+ .dynamic => true,
+ },
+ .Obj => false,
+ };
+ const machine = target.toCoffMachine();
+ const timestamp: u32 = if (options.repro) 0 else @truncate(@as(u64, @bitCast(std.time.timestamp())));
+ const major_subsystem_version = options.major_subsystem_version orelse 6;
+ const minor_subsystem_version = options.minor_subsystem_version orelse 0;
+ const magic: std.coff.OptionalHeader.Magic = switch (target.ptrBitWidth()) {
+ 0...32 => .PE32,
+ 33...64 => .@"PE32+",
+ else => return error.UnsupportedCOFFArchitecture,
+ };
+ const section_align: std.mem.Alignment = switch (machine) {
+ .AMD64, .I386 => @enumFromInt(12),
+ .SH3, .SH3DSP, .SH4, .SH5 => @enumFromInt(12),
+ .MIPS16, .MIPSFPU, .MIPSFPU16, .WCEMIPSV2 => @enumFromInt(12),
+ .POWERPC, .POWERPCFP => @enumFromInt(12),
+ .ALPHA, .ALPHA64 => @enumFromInt(13),
+ .IA64 => @enumFromInt(13),
+ .ARM => @enumFromInt(12),
+ else => return error.UnsupportedCOFFArchitecture,
+ };
+
+ const coff = try arena.create(Coff);
+ const file = try path.root_dir.handle.createFile(path.sub_path, .{
+ .read = true,
+ .mode = link.File.determineMode(comp.config.output_mode, comp.config.link_mode),
+ });
+ errdefer file.close();
+ coff.* = .{
+ .base = .{
+ .tag = .coff2,
+
+ .comp = comp,
+ .emit = path,
+
+ .file = file,
+ .gc_sections = false,
+ .print_gc_sections = false,
+ .build_id = .none,
+ .allow_shlib_undefined = false,
+ .stack_size = 0,
+ },
+ .endian = target.cpu.arch.endian(),
+ .mf = try .init(file, comp.gpa),
+ .nodes = .empty,
+ .import_table = .{
+ .directory_table_ni = .none,
+ .dlls = .empty,
+ },
+ .strings = .empty,
+ .string_bytes = .empty,
+ .section_table = .empty,
+ .symbol_table = .empty,
+ .globals = .empty,
+ .global_pending_index = 0,
+ .navs = .empty,
+ .uavs = .empty,
+ .lazy = .initFill(.{
+ .map = .empty,
+ .pending_index = 0,
+ }),
+ .pending_uavs = .empty,
+ .relocs = .empty,
+ .entry_hack = .null,
+ };
+ errdefer coff.deinit();
+
+ try coff.initHeaders(
+ is_image,
+ machine,
+ timestamp,
+ major_subsystem_version,
+ minor_subsystem_version,
+ magic,
+ section_align,
+ );
+ return coff;
+}
+
+pub fn deinit(coff: *Coff) void {
+ const gpa = coff.base.comp.gpa;
+ coff.mf.deinit(gpa);
+ coff.nodes.deinit(gpa);
+ coff.import_table.dlls.deinit(gpa);
+ coff.strings.deinit(gpa);
+ coff.string_bytes.deinit(gpa);
+ coff.section_table.deinit(gpa);
+ coff.symbol_table.deinit(gpa);
+ coff.globals.deinit(gpa);
+ coff.navs.deinit(gpa);
+ coff.uavs.deinit(gpa);
+ for (&coff.lazy.values) |*lazy| lazy.map.deinit(gpa);
+ coff.pending_uavs.deinit(gpa);
+ coff.relocs.deinit(gpa);
+ coff.* = undefined;
+}
+
+fn initHeaders(
+ coff: *Coff,
+ is_image: bool,
+ machine: std.coff.IMAGE.FILE.MACHINE,
+ timestamp: u32,
+ major_subsystem_version: u16,
+ minor_subsystem_version: u16,
+ magic: std.coff.OptionalHeader.Magic,
+ section_align: std.mem.Alignment,
+) !void {
+ const comp = coff.base.comp;
+ const gpa = comp.gpa;
+ const file_align: std.mem.Alignment = comptime .fromByteUnits(default_file_alignment);
+ const target_endian = coff.targetEndian();
+
+ const optional_header_size: u16 = if (is_image) switch (magic) {
+ _ => unreachable,
+ inline else => |ct_magic| @sizeOf(@field(std.coff.OptionalHeader, @tagName(ct_magic))),
+ } else 0;
+ const data_directories_len = @typeInfo(DataDirectory).@"enum".fields.len;
+ const data_directories_size: u16 = if (is_image)
+ @sizeOf(std.coff.ImageDataDirectory) * data_directories_len
+ else
+ 0;
+
+ try coff.nodes.ensureTotalCapacity(gpa, Node.known_count);
+ coff.nodes.appendAssumeCapacity(.file);
+
+ const header_ni = Node.known.header;
+ assert(header_ni == try coff.mf.addOnlyChildNode(gpa, .root, .{
+ .alignment = coff.mf.flags.block_size,
+ .fixed = true,
+ }));
+ coff.nodes.appendAssumeCapacity(.header);
+
+ const signature_ni = Node.known.signature;
+ assert(signature_ni == try coff.mf.addOnlyChildNode(gpa, header_ni, .{
+ .size = (if (is_image) msdos_stub.len else 0) + "PE\x00\x00".len,
+ .alignment = .@"4",
+ .fixed = true,
+ }));
+ coff.nodes.appendAssumeCapacity(.signature);
+ {
+ const signature_slice = signature_ni.slice(&coff.mf);
+ if (is_image) @memcpy(signature_slice[0..msdos_stub.len], &msdos_stub);
+ @memcpy(signature_slice[signature_slice.len - 4 ..], "PE\x00\x00");
+ }
+
+ const coff_header_ni = Node.known.coff_header;
+ assert(coff_header_ni == try coff.mf.addLastChildNode(gpa, header_ni, .{
+ .size = @sizeOf(std.coff.Header),
+ .alignment = .@"4",
+ .fixed = true,
+ }));
+ coff.nodes.appendAssumeCapacity(.coff_header);
+ {
+ const coff_header: *std.coff.Header = @ptrCast(@alignCast(coff_header_ni.slice(&coff.mf)));
+ coff_header.* = .{
+ .machine = machine,
+ .number_of_sections = 0,
+ .time_date_stamp = timestamp,
+ .pointer_to_symbol_table = 0,
+ .number_of_symbols = 0,
+ .size_of_optional_header = optional_header_size + data_directories_size,
+ .flags = .{
+ .RELOCS_STRIPPED = is_image,
+ .EXECUTABLE_IMAGE = is_image,
+ .DEBUG_STRIPPED = true,
+ .@"32BIT_MACHINE" = magic == .PE32,
+ .LARGE_ADDRESS_AWARE = magic == .@"PE32+",
+ .DLL = comp.config.output_mode == .Lib and comp.config.link_mode == .dynamic,
+ },
+ };
+ if (target_endian != native_endian) std.mem.byteSwapAllFields(std.coff.Header, coff_header);
+ }
+
+ const optional_header_ni = Node.known.optional_header;
+ assert(optional_header_ni == try coff.mf.addLastChildNode(gpa, header_ni, .{
+ .size = optional_header_size,
+ .alignment = .@"4",
+ .fixed = true,
+ }));
+ coff.nodes.appendAssumeCapacity(.optional_header);
+ if (is_image) switch (magic) {
+ _ => unreachable,
+ .PE32 => {
+ const optional_header: *std.coff.OptionalHeader.PE32 =
+ @ptrCast(@alignCast(optional_header_ni.slice(&coff.mf)));
+ optional_header.* = .{
+ .standard = .{
+ .magic = .PE32,
+ .major_linker_version = 0,
+ .minor_linker_version = 0,
+ .size_of_code = 0,
+ .size_of_initialized_data = 0,
+ .size_of_uninitialized_data = 0,
+ .address_of_entry_point = 0,
+ .base_of_code = 0,
+ },
+ .base_of_data = 0,
+ .image_base = switch (coff.base.comp.config.output_mode) {
+ .Exe => 0x400000,
+ .Lib => switch (coff.base.comp.config.link_mode) {
+ .static => 0,
+ .dynamic => 0x10000000,
+ },
+ .Obj => 0,
+ },
+ .section_alignment = @intCast(section_align.toByteUnits()),
+ .file_alignment = @intCast(file_align.toByteUnits()),
+ .major_operating_system_version = 6,
+ .minor_operating_system_version = 0,
+ .major_image_version = 0,
+ .minor_image_version = 0,
+ .major_subsystem_version = major_subsystem_version,
+ .minor_subsystem_version = minor_subsystem_version,
+ .win32_version_value = 0,
+ .size_of_image = 0,
+ .size_of_headers = 0,
+ .checksum = 0,
+ .subsystem = .WINDOWS_CUI,
+ .dll_flags = .{
+ .HIGH_ENTROPY_VA = true,
+ .DYNAMIC_BASE = true,
+ .TERMINAL_SERVER_AWARE = true,
+ .NX_COMPAT = true,
+ },
+ .size_of_stack_reserve = default_size_of_stack_reserve,
+ .size_of_stack_commit = default_size_of_stack_commit,
+ .size_of_heap_reserve = default_size_of_heap_reserve,
+ .size_of_heap_commit = default_size_of_heap_commit,
+ .loader_flags = 0,
+ .number_of_rva_and_sizes = data_directories_len,
+ };
+ if (target_endian != native_endian)
+ std.mem.byteSwapAllFields(std.coff.OptionalHeader.PE32, optional_header);
+ },
+ .@"PE32+" => {
+ const header: *std.coff.OptionalHeader.@"PE32+" =
+ @ptrCast(@alignCast(optional_header_ni.slice(&coff.mf)));
+ header.* = .{
+ .standard = .{
+ .magic = .@"PE32+",
+ .major_linker_version = 0,
+ .minor_linker_version = 0,
+ .size_of_code = 0,
+ .size_of_initialized_data = 0,
+ .size_of_uninitialized_data = 0,
+ .address_of_entry_point = 0,
+ .base_of_code = 0,
+ },
+ .image_base = switch (coff.base.comp.config.output_mode) {
+ .Exe => 0x140000000,
+ .Lib => switch (coff.base.comp.config.link_mode) {
+ .static => 0,
+ .dynamic => 0x180000000,
+ },
+ .Obj => 0,
+ },
+ .section_alignment = @intCast(section_align.toByteUnits()),
+ .file_alignment = @intCast(file_align.toByteUnits()),
+ .major_operating_system_version = 6,
+ .minor_operating_system_version = 0,
+ .major_image_version = 0,
+ .minor_image_version = 0,
+ .major_subsystem_version = major_subsystem_version,
+ .minor_subsystem_version = minor_subsystem_version,
+ .win32_version_value = 0,
+ .size_of_image = 0,
+ .size_of_headers = 0,
+ .checksum = 0,
+ .subsystem = .WINDOWS_CUI,
+ .dll_flags = .{
+ .HIGH_ENTROPY_VA = true,
+ .DYNAMIC_BASE = true,
+ .TERMINAL_SERVER_AWARE = true,
+ .NX_COMPAT = true,
+ },
+ .size_of_stack_reserve = default_size_of_stack_reserve,
+ .size_of_stack_commit = default_size_of_stack_commit,
+ .size_of_heap_reserve = default_size_of_heap_reserve,
+ .size_of_heap_commit = default_size_of_heap_commit,
+ .loader_flags = 0,
+ .number_of_rva_and_sizes = data_directories_len,
+ };
+ if (target_endian != native_endian)
+ std.mem.byteSwapAllFields(std.coff.OptionalHeader.@"PE32+", header);
+ },
+ };
+
+ const data_directories_ni = Node.known.data_directories;
+ assert(data_directories_ni == try coff.mf.addLastChildNode(gpa, header_ni, .{
+ .size = data_directories_size,
+ .alignment = .@"4",
+ .fixed = true,
+ }));
+ coff.nodes.appendAssumeCapacity(.data_directories);
+ {
+ const data_directories: *[data_directories_len]std.coff.ImageDataDirectory =
+ @ptrCast(@alignCast(data_directories_ni.slice(&coff.mf)));
+ @memset(data_directories, .{ .virtual_address = 0, .size = 0 });
+ if (target_endian != native_endian) for (data_directories) |*data_directory|
+ std.mem.byteSwapAllFields(std.coff.ImageDataDirectory, data_directory);
+ }
+
+ const section_table_ni = Node.known.section_table;
+ assert(section_table_ni == try coff.mf.addLastChildNode(gpa, header_ni, .{
+ .alignment = .@"4",
+ .fixed = true,
+ }));
+ coff.nodes.appendAssumeCapacity(.section_table);
+
+ assert(coff.nodes.len == Node.known_count);
+
+ try coff.symbol_table.ensureTotalCapacity(gpa, Symbol.Index.known_count);
+ coff.symbol_table.addOneAssumeCapacity().* = .{
+ .ni = .none,
+ .rva = 0,
+ .size = 0,
+ .loc_relocs = .none,
+ .target_relocs = .none,
+ .section_number = .UNDEFINED,
+ .data_directory = null,
+ };
+ assert(try coff.addSection(".data", null, .{
+ .CNT_INITIALIZED_DATA = true,
+ .MEM_READ = true,
+ .MEM_WRITE = true,
+ }) == .data);
+ assert(try coff.addSection(".idata", .import_table, .{
+ .CNT_INITIALIZED_DATA = true,
+ .MEM_READ = true,
+ }) == .idata);
+ assert(try coff.addSection(".rdata", null, .{
+ .CNT_INITIALIZED_DATA = true,
+ .MEM_READ = true,
+ }) == .rdata);
+ assert(try coff.addSection(".text", null, .{
+ .CNT_CODE = true,
+ .MEM_EXECUTE = true,
+ .MEM_READ = true,
+ }) == .text);
+ coff.import_table.directory_table_ni = try coff.mf.addLastChildNode(
+ gpa,
+ Symbol.Index.idata.node(coff),
+ .{
+ .alignment = .@"4",
+ .fixed = true,
+ },
+ );
+ coff.nodes.appendAssumeCapacity(.import_directory_table);
+ assert(coff.symbol_table.items.len == Symbol.Index.known_count);
+}
+
+fn getNode(coff: *const Coff, ni: MappedFile.Node.Index) Node {
+ return coff.nodes.get(@intFromEnum(ni));
+}
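+/// Walks up the tree accumulating parent offsets until the containing
+/// section is reached; the section symbol's cached RVA anchors the sum.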
+fn computeNodeRva(coff: *Coff, ni: MappedFile.Node.Index) u32 {
+ var section_offset: u32 = 0;
+ var parent_ni = ni;
+ while (true) {
+ assert(parent_ni != .none);
+ switch (coff.getNode(parent_ni)) {
+ else => {},
+ .section => |si| return si.get(coff).rva + section_offset,
+ }
+ const parent_offset, _ = parent_ni.location(&coff.mf).resolve(&coff.mf);
+ section_offset += @intCast(parent_offset);
+ parent_ni = parent_ni.parent(&coff.mf);
+ }
+}
+
+pub inline fn targetEndian(coff: *const Coff) std.builtin.Endian {
+ return coff.endian;
+}
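+/// Loads a value from the mapped image, converting from target to host
+/// endianness; supports integers, enums, and packed structs.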
+fn targetLoad(coff: *const Coff, ptr: anytype) @typeInfo(@TypeOf(ptr)).pointer.child {
+ const Child = @typeInfo(@TypeOf(ptr)).pointer.child;
+ return switch (@typeInfo(Child)) {
+ else => @compileError(@typeName(Child)),
+ .int => std.mem.toNative(Child, ptr.*, coff.targetEndian()),
+ .@"enum" => |@"enum"| @enumFromInt(coff.targetLoad(@as(*@"enum".tag_type, @ptrCast(ptr)))),
+ .@"struct" => |@"struct"| @bitCast(
+ coff.targetLoad(@as(*@"struct".backing_integer.?, @ptrCast(ptr))),
+ ),
+ };
+}
+fn targetStore(coff: *const Coff, ptr: anytype, val: @typeInfo(@TypeOf(ptr)).pointer.child) void {
+ const Child = @typeInfo(@TypeOf(ptr)).pointer.child;
+ return switch (@typeInfo(Child)) {
+ else => @compileError(@typeName(Child)),
+ .int => ptr.* = std.mem.nativeTo(Child, val, coff.targetEndian()),
+ .@"enum" => |@"enum"| coff.targetStore(
+ @as(*@"enum".tag_type, @ptrCast(ptr)),
+ @intFromEnum(val),
+ ),
+ .@"struct" => |@"struct"| coff.targetStore(
+ @as(*@"struct".backing_integer.?, @ptrCast(ptr)),
+ @bitCast(val),
+ ),
+ };
+}
+
+pub fn headerPtr(coff: *Coff) *std.coff.Header {
+ return @ptrCast(@alignCast(Node.known.coff_header.slice(&coff.mf)));
+}
+
+pub fn optionalHeaderStandardPtr(coff: *Coff) *std.coff.OptionalHeader {
+ return @ptrCast(@alignCast(
+ Node.known.optional_header.slice(&coff.mf)[0..@sizeOf(std.coff.OptionalHeader)],
+ ));
+}
+
+pub const OptionalHeaderPtr = union(std.coff.OptionalHeader.Magic) {
+ PE32: *std.coff.OptionalHeader.PE32,
+ @"PE32+": *std.coff.OptionalHeader.@"PE32+",
+};
+pub fn optionalHeaderPtr(coff: *Coff) OptionalHeaderPtr {
+ const slice = Node.known.optional_header.slice(&coff.mf);
+ return switch (coff.targetLoad(&coff.optionalHeaderStandardPtr().magic)) {
+ _ => unreachable,
+ inline else => |magic| @unionInit(
+ OptionalHeaderPtr,
+ @tagName(magic),
+ @ptrCast(@alignCast(slice)),
+ ),
+ };
+}
+pub fn optionalHeaderField(
+ coff: *Coff,
+ comptime field: std.meta.FieldEnum(std.coff.OptionalHeader.@"PE32+"),
+) @FieldType(std.coff.OptionalHeader.@"PE32+", @tagName(field)) {
+ return switch (coff.optionalHeaderPtr()) {
+ inline else => |optional_header| coff.targetLoad(&@field(optional_header, @tagName(field))),
+ };
+}
+
+pub fn dataDirectoriesSlice(coff: *Coff) []std.coff.ImageDataDirectory {
+ return @ptrCast(@alignCast(Node.known.data_directories.slice(&coff.mf)));
+}
+
+pub fn sectionTableSlice(coff: *Coff) []std.coff.SectionHeader {
+ return @ptrCast(@alignCast(Node.known.section_table.slice(&coff.mf)));
+}
+
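+/// Returns the index of the slot that the `defer` appends: the return value
+/// (the current length) is computed before the deferred append runs.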
+fn addSymbolAssumeCapacity(coff: *Coff) Symbol.Index {
+ defer coff.symbol_table.addOneAssumeCapacity().* = .{
+ .ni = .none,
+ .rva = 0,
+ .size = 0,
+ .loc_relocs = .none,
+ .target_relocs = .none,
+ .section_number = .UNDEFINED,
+ .data_directory = null,
+ };
+ return @enumFromInt(coff.symbol_table.items.len);
+}
+
+fn initSymbolAssumeCapacity(coff: *Coff) !Symbol.Index {
+ const si = coff.addSymbolAssumeCapacity();
+ return si;
+}
+
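+/// Interns `string` into `string_bytes`; capacity is reserved up front so the
+/// adapted getOrPut can append the bytes without failing partway through.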
+fn getOrPutString(coff: *Coff, string: []const u8) !String {
+ const gpa = coff.base.comp.gpa;
+ try coff.string_bytes.ensureUnusedCapacity(gpa, string.len + 1);
+ const gop = try coff.strings.getOrPutContextAdapted(
+ gpa,
+ string,
+ std.hash_map.StringIndexAdapter{ .bytes = &coff.string_bytes },
+ .{ .bytes = &coff.string_bytes },
+ );
+ if (!gop.found_existing) {
+ gop.key_ptr.* = @intCast(coff.string_bytes.items.len);
+ gop.value_ptr.* = {};
+ coff.string_bytes.appendSliceAssumeCapacity(string);
+ coff.string_bytes.appendAssumeCapacity(0);
+ }
+ return @enumFromInt(gop.key_ptr.*);
+}
+
+fn getOrPutOptionalString(coff: *Coff, string: ?[]const u8) !String.Optional {
+ return (try coff.getOrPutString(string orelse return .none)).toOptional();
+}
+
+pub fn globalSymbol(coff: *Coff, name: []const u8, lib_name: ?[]const u8) !Symbol.Index {
+ const gpa = coff.base.comp.gpa;
+ try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
+ const sym_gop = try coff.globals.getOrPut(gpa, .{
+ .name = try coff.getOrPutString(name),
+ .lib_name = try coff.getOrPutOptionalString(lib_name),
+ });
+ if (!sym_gop.found_existing) {
+ sym_gop.value_ptr.* = coff.addSymbolAssumeCapacity();
+ coff.base.comp.link_synth_prog_node.increaseEstimatedTotalItems(1);
+ }
+ return sym_gop.value_ptr.*;
+}
+
+fn navMapIndex(coff: *Coff, zcu: *Zcu, nav_index: InternPool.Nav.Index) !Node.NavMapIndex {
+ const gpa = zcu.gpa;
+ try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
+ const sym_gop = try coff.navs.getOrPut(gpa, nav_index);
+ if (!sym_gop.found_existing) sym_gop.value_ptr.* = coff.addSymbolAssumeCapacity();
+ return @enumFromInt(sym_gop.index);
+}
+pub fn navSymbol(coff: *Coff, zcu: *Zcu, nav_index: InternPool.Nav.Index) !Symbol.Index {
+ const ip = &zcu.intern_pool;
+ const nav = ip.getNav(nav_index);
+ if (nav.getExtern(ip)) |@"extern"| return coff.globalSymbol(
+ @"extern".name.toSlice(ip),
+ @"extern".lib_name.toSlice(ip),
+ );
+ const nmi = try coff.navMapIndex(zcu, nav_index);
+ return nmi.symbol(coff);
+}
+
+fn uavMapIndex(coff: *Coff, uav_val: InternPool.Index) !Node.UavMapIndex {
+ const gpa = coff.base.comp.gpa;
+ try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
+ const sym_gop = try coff.uavs.getOrPut(gpa, uav_val);
+ if (!sym_gop.found_existing) sym_gop.value_ptr.* = coff.addSymbolAssumeCapacity();
+ return @enumFromInt(sym_gop.index);
+}
+pub fn uavSymbol(coff: *Coff, uav_val: InternPool.Index) !Symbol.Index {
+ const umi = try coff.uavMapIndex(uav_val);
+ return umi.symbol(coff);
+}
+
+pub fn lazySymbol(coff: *Coff, lazy: link.File.LazySymbol) !Symbol.Index {
+ const gpa = coff.base.comp.gpa;
+ try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
+ const sym_gop = try coff.lazy.getPtr(lazy.kind).map.getOrPut(gpa, lazy.ty);
+ if (!sym_gop.found_existing) {
+ sym_gop.value_ptr.* = try coff.initSymbolAssumeCapacity();
+ coff.base.comp.link_synth_prog_node.increaseEstimatedTotalItems(1);
+ }
+ return sym_gop.value_ptr.*;
+}
+
+pub fn getNavVAddr(
+ coff: *Coff,
+ pt: Zcu.PerThread,
+ nav: InternPool.Nav.Index,
+ reloc_info: link.File.RelocInfo,
+) !u64 {
+ return coff.getVAddr(reloc_info, try coff.navSymbol(pt.zcu, nav));
+}
+
+pub fn getUavVAddr(
+ coff: *Coff,
+ uav: InternPool.Index,
+ reloc_info: link.File.RelocInfo,
+) !u64 {
+ return coff.getVAddr(reloc_info, try coff.uavSymbol(uav));
+}
+
+pub fn getVAddr(coff: *Coff, reloc_info: link.File.RelocInfo, target_si: Symbol.Index) !u64 {
+ try coff.addReloc(
+ @enumFromInt(reloc_info.parent.atom_index),
+ reloc_info.offset,
+ target_si,
+ reloc_info.addend,
+ switch (coff.targetLoad(&coff.headerPtr().machine)) {
+ else => unreachable,
+ .AMD64 => .{ .AMD64 = .ADDR64 },
+ .I386 => .{ .I386 = .DIR32 },
+ },
+ );
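+ // The returned address is provisional: if the target node later moves,
+ // flushMoved re-applies every relocation targeting it.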
+ return coff.optionalHeaderField(.image_base) + target_si.get(coff).rva;
+}
+
+fn addSection(
+ coff: *Coff,
+ name: []const u8,
+ maybe_data_directory: ?DataDirectory,
+ flags: std.coff.SectionHeader.Flags,
+) !Symbol.Index {
+ const gpa = coff.base.comp.gpa;
+ try coff.nodes.ensureUnusedCapacity(gpa, 1);
+ try coff.section_table.ensureUnusedCapacity(gpa, 1);
+ try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
+
+ const coff_header = coff.headerPtr();
+ const section_index = coff.targetLoad(&coff_header.number_of_sections);
+ const section_table_len = section_index + 1;
+ coff.targetStore(&coff_header.number_of_sections, section_table_len);
+ try Node.known.section_table.resize(
+ &coff.mf,
+ gpa,
+ @sizeOf(std.coff.SectionHeader) * section_table_len,
+ );
+ const ni = try coff.mf.addLastChildNode(gpa, .root, .{
+ .alignment = coff.mf.flags.block_size,
+ .moved = true,
+ .bubbles_moved = false,
+ });
+ const si = coff.addSymbolAssumeCapacity();
+ coff.section_table.appendAssumeCapacity(si);
+ coff.nodes.appendAssumeCapacity(.{ .section = si });
+ const section_table = coff.sectionTableSlice();
+ const virtual_size = coff.optionalHeaderField(.section_alignment);
+ const rva: u32 = switch (section_index) {
+ 0 => @intCast(Node.known.header.location(&coff.mf).resolve(&coff.mf)[1]),
+ else => coff.section_table.items[section_index - 1].get(coff).rva +
+ coff.targetLoad(&section_table[section_index - 1].virtual_size),
+ };
+ {
+ const sym = si.get(coff);
+ sym.ni = ni;
+ sym.rva = rva;
+ sym.section_number = @enumFromInt(section_table_len);
+ sym.data_directory = maybe_data_directory;
+ }
+ const section = &section_table[section_index];
+ section.* = .{
+ .name = undefined,
+ .virtual_size = virtual_size,
+ .virtual_address = rva,
+ .size_of_raw_data = 0,
+ .pointer_to_raw_data = 0,
+ .pointer_to_relocations = 0,
+ .pointer_to_linenumbers = 0,
+ .number_of_relocations = 0,
+ .number_of_linenumbers = 0,
+ .flags = flags,
+ };
+ @memcpy(section.name[0..name.len], name);
+ @memset(section.name[name.len..], 0);
+ if (coff.targetEndian() != native_endian)
+ std.mem.byteSwapAllFields(std.coff.SectionHeader, section);
+ if (maybe_data_directory) |data_directory|
+ coff.dataDirectoriesSlice()[@intFromEnum(data_directory)] = .{
+ .virtual_address = section.virtual_address,
+ .size = section.virtual_size,
+ };
+ switch (coff.optionalHeaderPtr()) {
+ inline else => |optional_header| coff.targetStore(
+ &optional_header.size_of_image,
+ @intCast(rva + virtual_size),
+ ),
+ }
+ return si;
+}
+
+pub fn addReloc(
+ coff: *Coff,
+ loc_si: Symbol.Index,
+ offset: u64,
+ target_si: Symbol.Index,
+ addend: i64,
+ @"type": Reloc.Type,
+) !void {
+ const gpa = coff.base.comp.gpa;
+ const target = target_si.get(coff);
+ const ri: Reloc.Index = @enumFromInt(coff.relocs.items.len);
+ (try coff.relocs.addOne(gpa)).* = .{
+ .type = @"type",
+ .prev = .none,
+ .next = target.target_relocs,
+ .loc = loc_si,
+ .target = target_si,
+ .unused = 0,
+ .offset = offset,
+ .addend = addend,
+ };
+ switch (target.target_relocs) {
+ .none => {},
+ else => |target_ri| target_ri.get(coff).prev = ri,
+ }
+ target.target_relocs = ri;
+}
+
+pub fn prelink(coff: *Coff, prog_node: std.Progress.Node) void {
+ _ = coff;
+ _ = prog_node;
+}
+
+pub fn updateNav(coff: *Coff, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void {
+ coff.updateNavInner(pt, nav_index) catch |err| switch (err) {
+ error.OutOfMemory,
+ error.Overflow,
+ error.RelocationNotByteAligned,
+ => |e| return e,
+ else => |e| return coff.base.cgFail(nav_index, "linker failed to update variable: {t}", .{e}),
+ };
+}
+fn updateNavInner(coff: *Coff, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void {
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
+ const ip = &zcu.intern_pool;
+
+ const nav = ip.getNav(nav_index);
+ const nav_val = nav.status.fully_resolved.val;
+ const nav_init, const is_threadlocal = switch (ip.indexToKey(nav_val)) {
+ else => .{ nav_val, false },
+ .variable => |variable| .{ variable.init, variable.is_threadlocal },
+ .@"extern" => return,
+ .func => .{ .none, false },
+ };
+ if (nav_init == .none or !Type.fromInterned(ip.typeOf(nav_init)).hasRuntimeBits(zcu)) return;
+
+ const nmi = try coff.navMapIndex(zcu, nav_index);
+ const si = nmi.symbol(coff);
+ const ni = ni: {
+ const sym = si.get(coff);
+ switch (sym.ni) {
+ .none => {
+ try coff.nodes.ensureUnusedCapacity(gpa, 1);
+ _ = is_threadlocal;
+ const ni = try coff.mf.addLastChildNode(gpa, Symbol.Index.data.node(coff), .{
+ .alignment = pt.navAlignment(nav_index).toStdMem(),
+ .moved = true,
+ });
+ coff.nodes.appendAssumeCapacity(.{ .nav = nmi });
+ sym.ni = ni;
+ sym.section_number = Symbol.Index.data.get(coff).section_number;
+ },
+ else => si.deleteLocationRelocs(coff),
+ }
+ assert(sym.loc_relocs == .none);
+ sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
+ break :ni sym.ni;
+ };
+
+ var nw: MappedFile.Node.Writer = undefined;
+ ni.writer(&coff.mf, gpa, &nw);
+ defer nw.deinit();
+ codegen.generateSymbol(
+ &coff.base,
+ pt,
+ zcu.navSrcLoc(nav_index),
+ .fromInterned(nav_init),
+ &nw.interface,
+ .{ .atom_index = @intFromEnum(si) },
+ ) catch |err| switch (err) {
+ error.WriteFailed => return error.OutOfMemory,
+ else => |e| return e,
+ };
+ si.get(coff).size = @intCast(nw.interface.end);
+ si.applyLocationRelocs(coff);
+}
+
+pub fn lowerUav(
+ coff: *Coff,
+ pt: Zcu.PerThread,
+ uav_val: InternPool.Index,
+ uav_align: InternPool.Alignment,
+ src_loc: Zcu.LazySrcLoc,
+) !codegen.SymbolResult {
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
+
+ try coff.pending_uavs.ensureUnusedCapacity(gpa, 1);
+ const umi = try coff.uavMapIndex(uav_val);
+ const si = umi.symbol(coff);
+ if (switch (si.get(coff).ni) {
+ .none => true,
+ else => |ni| uav_align.toStdMem().order(ni.alignment(&coff.mf)).compare(.gt),
+ }) {
+ const gop = coff.pending_uavs.getOrPutAssumeCapacity(umi);
+ if (gop.found_existing) {
+ gop.value_ptr.alignment = gop.value_ptr.alignment.max(uav_align);
+ } else {
+ gop.value_ptr.* = .{
+ .alignment = uav_align,
+ .src_loc = src_loc,
+ };
+ coff.base.comp.link_const_prog_node.increaseEstimatedTotalItems(1);
+ }
+ }
+ return .{ .sym_index = @intFromEnum(si) };
+}
+
+pub fn updateFunc(
+ coff: *Coff,
+ pt: Zcu.PerThread,
+ func_index: InternPool.Index,
+ mir: *const codegen.AnyMir,
+) !void {
+ coff.updateFuncInner(pt, func_index, mir) catch |err| switch (err) {
+ error.OutOfMemory,
+ error.Overflow,
+ error.RelocationNotByteAligned,
+ error.CodegenFail,
+ => |e| return e,
+ else => |e| return coff.base.cgFail(
+ pt.zcu.funcInfo(func_index).owner_nav,
+ "linker failed to update function: {s}",
+ .{@errorName(e)},
+ ),
+ };
+}
+fn updateFuncInner(
+ coff: *Coff,
+ pt: Zcu.PerThread,
+ func_index: InternPool.Index,
+ mir: *const codegen.AnyMir,
+) !void {
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
+ const ip = &zcu.intern_pool;
+ const func = zcu.funcInfo(func_index);
+ const nav = ip.getNav(func.owner_nav);
+
+ const nmi = try coff.navMapIndex(zcu, func.owner_nav);
+ const si = nmi.symbol(coff);
+ log.debug("updateFunc({f}) = {d}", .{ nav.fqn.fmt(ip), si });
+ const ni = ni: {
+ const sym = si.get(coff);
+ switch (sym.ni) {
+ .none => {
+ try coff.nodes.ensureUnusedCapacity(gpa, 1);
+ const mod = zcu.navFileScope(func.owner_nav).mod.?;
+ const target = &mod.resolved_target.result;
+ const ni = try coff.mf.addLastChildNode(gpa, Symbol.Index.text.node(coff), .{
+ .alignment = switch (nav.status.fully_resolved.alignment) {
+ .none => switch (mod.optimize_mode) {
+ .Debug,
+ .ReleaseSafe,
+ .ReleaseFast,
+ => target_util.defaultFunctionAlignment(target),
+ .ReleaseSmall => target_util.minFunctionAlignment(target),
+ },
+ else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
+ }.toStdMem(),
+ .moved = true,
+ });
+ coff.nodes.appendAssumeCapacity(.{ .nav = nmi });
+ sym.ni = ni;
+ sym.section_number = Symbol.Index.text.get(coff).section_number;
+ },
+ else => si.deleteLocationRelocs(coff),
+ }
+ assert(sym.loc_relocs == .none);
+ sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
+ break :ni sym.ni;
+ };
+
+ var nw: MappedFile.Node.Writer = undefined;
+ ni.writer(&coff.mf, gpa, &nw);
+ defer nw.deinit();
+ codegen.emitFunction(
+ &coff.base,
+ pt,
+ zcu.navSrcLoc(func.owner_nav),
+ func_index,
+ @intFromEnum(si),
+ mir,
+ &nw.interface,
+ .none,
+ ) catch |err| switch (err) {
+ error.WriteFailed => return nw.err.?,
+ else => |e| return e,
+ };
+ si.get(coff).size = @intCast(nw.interface.end);
+ si.applyLocationRelocs(coff);
+}
+
+pub fn updateErrorData(coff: *Coff, pt: Zcu.PerThread) !void {
+ coff.flushLazy(pt, .{
+ .kind = .const_data,
+ .index = @intCast(coff.lazy.getPtr(.const_data).map.getIndex(.anyerror_type) orelse return),
+ }) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.CodegenFail => return error.LinkFailure,
+ else => |e| return coff.base.comp.link_diags.fail("updateErrorData failed {t}", .{e}),
+ };
+}
+
+pub fn flush(
+ coff: *Coff,
+ arena: std.mem.Allocator,
+ tid: Zcu.PerThread.Id,
+ prog_node: std.Progress.Node,
+) !void {
+ _ = arena;
+ _ = prog_node;
+ while (try coff.idle(tid)) {}
+
+ // hack for stage2_x86_64 + coff
+ const comp = coff.base.comp;
+ if (comp.compiler_rt_dyn_lib) |crt_file| {
+ const gpa = comp.gpa;
+ const compiler_rt_sub_path = try std.fs.path.join(gpa, &.{
+ std.fs.path.dirname(coff.base.emit.sub_path) orelse "",
+ std.fs.path.basename(crt_file.full_object_path.sub_path),
+ });
+ defer gpa.free(compiler_rt_sub_path);
+ crt_file.full_object_path.root_dir.handle.copyFile(
+ crt_file.full_object_path.sub_path,
+ coff.base.emit.root_dir.handle,
+ compiler_rt_sub_path,
+ .{},
+ ) catch |err| switch (err) {
+ else => |e| return comp.link_diags.fail("Copy '{s}' failed: {s}", .{
+ compiler_rt_sub_path,
+ @errorName(e),
+ }),
+ };
+ }
+}
+
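+/// Performs at most one unit of deferred link work per call and reports
+/// whether more remains, so callers can drive it with
+/// `while (try coff.idle(tid)) {}`.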
+pub fn idle(coff: *Coff, tid: Zcu.PerThread.Id) !bool {
+ const comp = coff.base.comp;
+ task: {
+ while (coff.pending_uavs.pop()) |pending_uav| {
+ const sub_prog_node = coff.idleProgNode(
+ tid,
+ comp.link_const_prog_node,
+ .{ .uav = pending_uav.key },
+ );
+ defer sub_prog_node.end();
+ coff.flushUav(
+ .{ .zcu = coff.base.comp.zcu.?, .tid = tid },
+ pending_uav.key,
+ pending_uav.value.alignment,
+ pending_uav.value.src_loc,
+ ) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ else => |e| return coff.base.comp.link_diags.fail(
+ "linker failed to lower constant: {t}",
+ .{e},
+ ),
+ };
+ break :task;
+ }
+ if (coff.global_pending_index < coff.globals.count()) {
+ const pt: Zcu.PerThread = .{ .zcu = coff.base.comp.zcu.?, .tid = tid };
+ const gmi: Node.GlobalMapIndex = @enumFromInt(coff.global_pending_index);
+ coff.global_pending_index += 1;
+ const sub_prog_node = comp.link_synth_prog_node.start(
+ gmi.globalName(coff).name.toSlice(coff),
+ 0,
+ );
+ defer sub_prog_node.end();
+ coff.flushGlobal(pt, gmi) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ else => |e| return coff.base.comp.link_diags.fail(
+ "linker failed to lower constant: {t}",
+ .{e},
+ ),
+ };
+ break :task;
+ }
+ var lazy_it = coff.lazy.iterator();
+ while (lazy_it.next()) |lazy| if (lazy.value.pending_index < lazy.value.map.count()) {
+ const pt: Zcu.PerThread = .{ .zcu = coff.base.comp.zcu.?, .tid = tid };
+ const lmr: Node.LazyMapRef = .{ .kind = lazy.key, .index = lazy.value.pending_index };
+ lazy.value.pending_index += 1;
+ const kind = switch (lmr.kind) {
+ .code => "code",
+ .const_data => "data",
+ };
+ var name: [std.Progress.Node.max_name_len]u8 = undefined;
+ const sub_prog_node = comp.link_synth_prog_node.start(
+ std.fmt.bufPrint(&name, "lazy {s} for {f}", .{
+ kind,
+ Type.fromInterned(lmr.lazySymbol(coff).ty).fmt(pt),
+ }) catch &name,
+ 0,
+ );
+ defer sub_prog_node.end();
+ coff.flushLazy(pt, lmr) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ else => |e| return coff.base.comp.link_diags.fail(
+ "linker failed to lower lazy {s}: {t}",
+ .{ kind, e },
+ ),
+ };
+ break :task;
+ };
+ while (coff.mf.updates.pop()) |ni| {
+ const clean_moved = ni.cleanMoved(&coff.mf);
+ const clean_resized = ni.cleanResized(&coff.mf);
+ if (clean_moved or clean_resized) {
+ const sub_prog_node = coff.idleProgNode(tid, coff.mf.update_prog_node, coff.getNode(ni));
+ defer sub_prog_node.end();
+ if (clean_moved) try coff.flushMoved(ni);
+ if (clean_resized) try coff.flushResized(ni);
+ break :task;
+ } else coff.mf.update_prog_node.completeOne();
+ }
+ }
+ if (coff.pending_uavs.count() > 0) return true;
+ if (coff.global_pending_index < coff.globals.count()) return true;
+ for (&coff.lazy.values) |lazy| if (lazy.map.count() > lazy.pending_index) return true;
+ if (coff.mf.updates.items.len > 0) return true;
+ return false;
+}
+
+fn idleProgNode(
+ coff: *Coff,
+ tid: Zcu.PerThread.Id,
+ prog_node: std.Progress.Node,
+ node: Node,
+) std.Progress.Node {
+ var name: [std.Progress.Node.max_name_len]u8 = undefined;
+ return prog_node.start(name: switch (node) {
+ else => |tag| @tagName(tag),
+ .section => |si| std.mem.sliceTo(&si.get(coff).section_number.header(coff).name, 0),
+ .nav => |nmi| {
+ const ip = &coff.base.comp.zcu.?.intern_pool;
+ break :name ip.getNav(nmi.navIndex(coff)).fqn.toSlice(ip);
+ },
+ .uav => |umi| std.fmt.bufPrint(&name, "{f}", .{
+ Value.fromInterned(umi.uavValue(coff)).fmtValue(.{
+ .zcu = coff.base.comp.zcu.?,
+ .tid = tid,
+ }),
+ }) catch &name,
+ }, 0);
+}
+
+fn flushUav(
+ coff: *Coff,
+ pt: Zcu.PerThread,
+ umi: Node.UavMapIndex,
+ uav_align: InternPool.Alignment,
+ src_loc: Zcu.LazySrcLoc,
+) !void {
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
+
+ const uav_val = umi.uavValue(coff);
+ const si = umi.symbol(coff);
+ const ni = ni: {
+ const sym = si.get(coff);
+ switch (sym.ni) {
+ .none => {
+ try coff.nodes.ensureUnusedCapacity(gpa, 1);
+ const ni = try coff.mf.addLastChildNode(gpa, Symbol.Index.data.node(coff), .{
+ .alignment = uav_align.toStdMem(),
+ .moved = true,
+ });
+ coff.nodes.appendAssumeCapacity(.{ .uav = umi });
+ sym.ni = ni;
+ sym.section_number = Symbol.Index.data.get(coff).section_number;
+ },
+ else => {
+ if (sym.ni.alignment(&coff.mf).order(uav_align.toStdMem()).compare(.gte)) return;
+ si.deleteLocationRelocs(coff);
+ },
+ }
+ assert(sym.loc_relocs == .none);
+ sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
+ break :ni sym.ni;
+ };
+
+ var nw: MappedFile.Node.Writer = undefined;
+ ni.writer(&coff.mf, gpa, &nw);
+ defer nw.deinit();
+ codegen.generateSymbol(
+ &coff.base,
+ pt,
+ src_loc,
+ .fromInterned(uav_val),
+ &nw.interface,
+ .{ .atom_index = @intFromEnum(si) },
+ ) catch |err| switch (err) {
+ error.WriteFailed => return error.OutOfMemory,
+ else => |e| return e,
+ };
+ si.get(coff).size = @intCast(nw.interface.end);
+ si.applyLocationRelocs(coff);
+}
+
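+/// For an extern global that names a DLL, materializes the PE import
+/// machinery: a zero-terminated directory table with one entry per DLL,
+/// parallel import lookup and import address tables holding RVAs into the
+/// hint/name table, and a thunk in .text that jumps through the IAT slot,
+/// which the loader overwrites with the resolved address at load time.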
+fn flushGlobal(coff: *Coff, pt: Zcu.PerThread, gmi: Node.GlobalMapIndex) !void {
+ const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = zcu.gpa;
+ const gn = gmi.globalName(coff);
+ if (gn.lib_name.toSlice(coff)) |lib_name| {
+ const name = gn.name.toSlice(coff);
+ try coff.nodes.ensureUnusedCapacity(gpa, 4);
+ try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
+
+ const target_endian = coff.targetEndian();
+ const magic = coff.targetLoad(&coff.optionalHeaderStandardPtr().magic);
+ const addr_size: u64, const addr_align: std.mem.Alignment = switch (magic) {
+ _ => unreachable,
+ .PE32 => .{ 4, .@"4" },
+ .@"PE32+" => .{ 8, .@"8" },
+ };
+
+ const gop = try coff.import_table.dlls.getOrPutAdapted(
+ gpa,
+ lib_name,
+ ImportTable.Adapter{ .coff = coff },
+ );
+ const import_hint_name_align: std.mem.Alignment = .@"2";
+ if (!gop.found_existing) {
+ errdefer _ = coff.import_table.dlls.pop();
+ try coff.import_table.directory_table_ni.resize(
+ &coff.mf,
+ gpa,
+ @sizeOf(std.coff.ImportDirectoryEntry) * (gop.index + 2),
+ );
+ const import_hint_name_table_len =
+ import_hint_name_align.forward(lib_name.len + ".dll".len + 1);
+ const idata_section_ni = Symbol.Index.idata.node(coff);
+ const import_lookup_table_ni = try coff.mf.addLastChildNode(gpa, idata_section_ni, .{
+ .size = addr_size * 2,
+ .alignment = addr_align,
+ .moved = true,
+ });
+ const import_address_table_ni = try coff.mf.addLastChildNode(gpa, idata_section_ni, .{
+ .size = addr_size * 2,
+ .alignment = addr_align,
+ .moved = true,
+ });
+ const import_address_table_si = coff.addSymbolAssumeCapacity();
+ {
+ const import_address_table_sym = import_address_table_si.get(coff);
+ import_address_table_sym.ni = import_address_table_ni;
+ assert(import_address_table_sym.loc_relocs == .none);
+ import_address_table_sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
+ import_address_table_sym.section_number = Symbol.Index.idata.get(coff).section_number;
+ }
+ const import_hint_name_table_ni = try coff.mf.addLastChildNode(gpa, idata_section_ni, .{
+ .size = import_hint_name_table_len,
+ .alignment = import_hint_name_align,
+ .moved = true,
+ });
+ gop.value_ptr.* = .{
+ .import_lookup_table_ni = import_lookup_table_ni,
+ .import_address_table_si = import_address_table_si,
+ .import_hint_name_table_ni = import_hint_name_table_ni,
+ .len = 0,
+ .hint_name_len = @intCast(import_hint_name_table_len),
+ };
+ const import_hint_name_slice = import_hint_name_table_ni.slice(&coff.mf);
+ @memcpy(import_hint_name_slice[0..lib_name.len], lib_name);
+ @memcpy(import_hint_name_slice[lib_name.len..][0..".dll".len], ".dll");
+ @memset(import_hint_name_slice[lib_name.len + ".dll".len ..], 0);
+ coff.nodes.appendAssumeCapacity(.{ .import_lookup_table = @intCast(gop.index) });
+ coff.nodes.appendAssumeCapacity(.{ .import_address_table = @intCast(gop.index) });
+ coff.nodes.appendAssumeCapacity(.{ .import_hint_name_table = @intCast(gop.index) });
+
+ const import_directory_table: []std.coff.ImportDirectoryEntry =
+ @ptrCast(@alignCast(coff.import_table.directory_table_ni.slice(&coff.mf)));
+ import_directory_table[gop.index..][0..2].* = .{ .{
+ .import_lookup_table_rva = coff.computeNodeRva(import_lookup_table_ni),
+ .time_date_stamp = 0,
+ .forwarder_chain = 0,
+ .name_rva = coff.computeNodeRva(import_hint_name_table_ni),
+ .import_address_table_rva = coff.computeNodeRva(import_address_table_ni),
+ }, .{
+ .import_lookup_table_rva = 0,
+ .time_date_stamp = 0,
+ .forwarder_chain = 0,
+ .name_rva = 0,
+ .import_address_table_rva = 0,
+ } };
+ }
+ const import_symbol_index = gop.value_ptr.len;
+ gop.value_ptr.len = import_symbol_index + 1;
+ const new_symbol_table_size = addr_size * (import_symbol_index + 2);
+ const import_hint_name_index = gop.value_ptr.hint_name_len;
+ gop.value_ptr.hint_name_len = @intCast(
+ import_hint_name_align.forward(import_hint_name_index + 2 + name.len + 1),
+ );
+ try gop.value_ptr.import_lookup_table_ni.resize(&coff.mf, gpa, new_symbol_table_size);
+ const import_address_table_ni = gop.value_ptr.import_address_table_si.node(coff);
+ try import_address_table_ni.resize(&coff.mf, gpa, new_symbol_table_size);
+ try gop.value_ptr.import_hint_name_table_ni.resize(&coff.mf, gpa, gop.value_ptr.hint_name_len);
+ const import_lookup_slice = gop.value_ptr.import_lookup_table_ni.slice(&coff.mf);
+ const import_address_slice = import_address_table_ni.slice(&coff.mf);
+ const import_hint_name_slice = gop.value_ptr.import_hint_name_table_ni.slice(&coff.mf);
+ @memset(import_hint_name_slice[import_hint_name_index..][0..2], 0);
+ @memcpy(import_hint_name_slice[import_hint_name_index + 2 ..][0..name.len], name);
+ @memset(import_hint_name_slice[import_hint_name_index + 2 + name.len ..], 0);
+ const import_hint_name_rva =
+ coff.computeNodeRva(gop.value_ptr.import_hint_name_table_ni) + import_hint_name_index;
+ switch (magic) {
+ _ => unreachable,
+ inline .PE32, .@"PE32+" => |ct_magic| {
+ const Addr = switch (ct_magic) {
+ _ => comptime unreachable,
+ .PE32 => u32,
+ .@"PE32+" => u64,
+ };
+ const import_lookup_table: []Addr = @ptrCast(@alignCast(import_lookup_slice));
+ const import_address_table: []Addr = @ptrCast(@alignCast(import_address_slice));
+ const import_hint_name_rvas: [2]Addr = .{
+ std.mem.nativeTo(Addr, @intCast(import_hint_name_rva), target_endian),
+ std.mem.nativeTo(Addr, 0, target_endian),
+ };
+ import_lookup_table[import_symbol_index..][0..2].* = import_hint_name_rvas;
+ import_address_table[import_symbol_index..][0..2].* = import_hint_name_rvas;
+ },
+ }
+ const si = gmi.symbol(coff);
+ const sym = si.get(coff);
+ sym.section_number = Symbol.Index.text.get(coff).section_number;
+ assert(sym.loc_relocs == .none);
+ sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
+ switch (coff.targetLoad(&coff.headerPtr().machine)) {
+ else => |tag| @panic(@tagName(tag)),
+ .AMD64 => {
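+ // ff 25 encodes `jmp qword ptr [rip+disp32]`; the REL32 reloc added
+ // below points the disp32 at this import's IAT slot.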
+ const init = [_]u8{ 0xff, 0x25, 0x00, 0x00, 0x00, 0x00 };
+ const target = &comp.root_mod.resolved_target.result;
+ const ni = try coff.mf.addLastChildNode(gpa, Symbol.Index.text.node(coff), .{
+ .alignment = switch (comp.root_mod.optimize_mode) {
+ .Debug,
+ .ReleaseSafe,
+ .ReleaseFast,
+ => target_util.defaultFunctionAlignment(target),
+ .ReleaseSmall => target_util.minFunctionAlignment(target),
+ }.toStdMem(),
+ .size = init.len,
+ });
+ @memcpy(ni.slice(&coff.mf)[0..init.len], &init);
+ sym.ni = ni;
+ sym.size = init.len;
+ try coff.addReloc(
+ si,
+ init.len - 4,
+ gop.value_ptr.import_address_table_si,
+ @intCast(addr_size * import_symbol_index),
+ .{ .AMD64 = .REL32 },
+ );
+ },
+ }
+ coff.nodes.appendAssumeCapacity(.{ .global = gmi });
+ sym.rva = coff.computeNodeRva(sym.ni);
+ si.applyLocationRelocs(coff);
+ }
+}
+
+fn flushLazy(coff: *Coff, pt: Zcu.PerThread, lmr: Node.LazyMapRef) !void {
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
+
+ const lazy = lmr.lazySymbol(coff);
+ const si = lmr.symbol(coff);
+ const ni = ni: {
+ const sym = si.get(coff);
+ switch (sym.ni) {
+ .none => {
+ try coff.nodes.ensureUnusedCapacity(gpa, 1);
+ const sec_si: Symbol.Index = switch (lazy.kind) {
+ .code => .text,
+ .const_data => .rdata,
+ };
+ const ni = try coff.mf.addLastChildNode(gpa, sec_si.node(coff), .{ .moved = true });
+ coff.nodes.appendAssumeCapacity(switch (lazy.kind) {
+ .code => .{ .lazy_code = @enumFromInt(lmr.index) },
+ .const_data => .{ .lazy_const_data = @enumFromInt(lmr.index) },
+ });
+ sym.ni = ni;
+ sym.section_number = sec_si.get(coff).section_number;
+ },
+ else => si.deleteLocationRelocs(coff),
+ }
+ assert(sym.loc_relocs == .none);
+ sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
+ break :ni sym.ni;
+ };
+
+ var required_alignment: InternPool.Alignment = .none;
+ var nw: MappedFile.Node.Writer = undefined;
+ ni.writer(&coff.mf, gpa, &nw);
+ defer nw.deinit();
+ try codegen.generateLazySymbol(
+ &coff.base,
+ pt,
+ Type.fromInterned(lazy.ty).srcLocOrNull(pt.zcu) orelse .unneeded,
+ lazy,
+ &required_alignment,
+ &nw.interface,
+ .none,
+ .{ .atom_index = @intFromEnum(si) },
+ );
+ si.get(coff).size = @intCast(nw.interface.end);
+ si.applyLocationRelocs(coff);
+}
+
+fn flushMoved(coff: *Coff, ni: MappedFile.Node.Index) !void {
+ const node = coff.getNode(ni);
+ switch (node) {
+ else => |tag| @panic(@tagName(tag)),
+ .section => |si| return coff.targetStore(
+ &si.get(coff).section_number.header(coff).pointer_to_raw_data,
+ @intCast(ni.fileLocation(&coff.mf, false).offset),
+ ),
+ .import_directory_table => {},
+ .import_lookup_table => |import_directory_table_index| {
+ const import_directory_table: []std.coff.ImportDirectoryEntry =
+ @ptrCast(@alignCast(coff.import_table.directory_table_ni.slice(&coff.mf)));
+ const import_directory_entry = &import_directory_table[import_directory_table_index];
+ coff.targetStore(&import_directory_entry.import_lookup_table_rva, coff.computeNodeRva(ni));
+ },
+ .import_address_table => |import_directory_table_index| {
+ const import_directory_table: []std.coff.ImportDirectoryEntry =
+ @ptrCast(@alignCast(coff.import_table.directory_table_ni.slice(&coff.mf)));
+ const import_directory_entry = &import_directory_table[import_directory_table_index];
+ const import_address_table_si =
+ coff.import_table.dlls.values()[import_directory_table_index].import_address_table_si;
+ import_address_table_si.flushMoved(coff);
+ coff.targetStore(
+ &import_directory_entry.import_address_table_rva,
+ import_address_table_si.get(coff).rva,
+ );
+ },
+ .import_hint_name_table => |import_directory_table_index| {
+ const target_endian = coff.targetEndian();
+ const magic = coff.targetLoad(&coff.optionalHeaderStandardPtr().magic);
+ const import_directory_table: []std.coff.ImportDirectoryEntry =
+ @ptrCast(@alignCast(coff.import_table.directory_table_ni.slice(&coff.mf)));
+ const import_directory_entry = &import_directory_table[import_directory_table_index];
+ const import_hint_name_rva = coff.computeNodeRva(ni);
+ coff.targetStore(&import_directory_entry.name_rva, import_hint_name_rva);
+ const import_entry = &coff.import_table.dlls.values()[import_directory_table_index];
+ const import_lookup_slice = import_entry.import_lookup_table_ni.slice(&coff.mf);
+ const import_address_slice =
+ import_entry.import_address_table_si.node(coff).slice(&coff.mf);
+ const import_hint_name_slice = ni.slice(&coff.mf);
+ const import_hint_name_align = ni.alignment(&coff.mf);
+ var import_hint_name_index: u32 = 0;
+ for (0..import_entry.len) |import_symbol_index| {
+ import_hint_name_index = @intCast(import_hint_name_align.forward(
+ std.mem.indexOfScalarPos(
+ u8,
+ import_hint_name_slice,
+ import_hint_name_index,
+ 0,
+ ).? + 1,
+ ));
+ switch (magic) {
+ _ => unreachable,
+ inline .PE32, .@"PE32+" => |ct_magic| {
+ const Addr = switch (ct_magic) {
+ _ => comptime unreachable,
+ .PE32 => u32,
+ .@"PE32+" => u64,
+ };
+ const import_lookup_table: []Addr = @ptrCast(@alignCast(import_lookup_slice));
+ const import_address_table: []Addr = @ptrCast(@alignCast(import_address_slice));
+ const rva = std.mem.nativeTo(
+ Addr,
+ import_hint_name_rva + import_hint_name_index,
+ target_endian,
+ );
+ import_lookup_table[import_symbol_index] = rva;
+ import_address_table[import_symbol_index] = rva;
+ },
+ }
+ import_hint_name_index += 2;
+ }
+ },
+ inline .global,
+ .nav,
+ .uav,
+ .lazy_code,
+ .lazy_const_data,
+ => |mi| mi.symbol(coff).flushMoved(coff),
+ }
+ try ni.childrenMoved(coff.base.comp.gpa, &coff.mf);
+}
+
+fn flushResized(coff: *Coff, ni: MappedFile.Node.Index) !void {
+ _, const size = ni.location(&coff.mf).resolve(&coff.mf);
+ const node = coff.getNode(ni);
+ switch (node) {
+ else => |tag| @panic(@tagName(tag)),
+ .file => {},
+ .header => {
+ switch (coff.optionalHeaderPtr()) {
+ inline else => |optional_header| coff.targetStore(
+ &optional_header.size_of_headers,
+ @intCast(size),
+ ),
+ }
+ if (size > coff.section_table.items[0].get(coff).rva) try coff.virtualSlide(
+ 0,
+ std.mem.alignForward(
+ u32,
+ @intCast(size * 4),
+ coff.optionalHeaderField(.section_alignment),
+ ),
+ );
+ },
+ .section_table => {},
+ .section => |si| {
+ const sym = si.get(coff);
+ const section_table = coff.sectionTableSlice();
+ const section_index = sym.section_number.toIndex();
+ const section = &section_table[section_index];
+ coff.targetStore(&section.size_of_raw_data, @intCast(size));
+ if (size > coff.targetLoad(&section.virtual_size)) {
+ const virtual_size = std.mem.alignForward(
+ u32,
+ @intCast(size * 4),
+ coff.optionalHeaderField(.section_alignment),
+ );
+ coff.targetStore(&section.virtual_size, virtual_size);
+ if (sym.data_directory) |data_directory|
+ coff.dataDirectoriesSlice()[@intFromEnum(data_directory)].size =
+ section.virtual_size;
+ try coff.virtualSlide(section_index + 1, sym.rva + virtual_size);
+ }
+ },
+ .import_directory_table,
+ .import_lookup_table,
+ .import_address_table,
+ .import_hint_name_table,
+ .global,
+ .nav,
+ .uav,
+ .lazy_code,
+ .lazy_const_data,
+ => {},
+ }
+}
+
+fn virtualSlide(coff: *Coff, start_section_index: usize, start_rva: u32) !void {
+ const section_table = coff.sectionTableSlice();
+ var rva = start_rva;
+ for (
+ coff.section_table.items[start_section_index..],
+ section_table[start_section_index..],
+ ) |section_si, *section| {
+ const section_sym = section_si.get(coff);
+ section_sym.rva = rva;
+ coff.targetStore(&section.virtual_address, rva);
+ if (section_sym.data_directory) |data_directory|
+ coff.dataDirectoriesSlice()[@intFromEnum(data_directory)].virtual_address =
+ section.virtual_address;
+ try section_sym.ni.childrenMoved(coff.base.comp.gpa, &coff.mf);
+ rva += coff.targetLoad(&section.virtual_size);
+ }
+ switch (coff.optionalHeaderPtr()) {
+ inline else => |optional_header| coff.targetStore(
+ &optional_header.size_of_image,
+ @intCast(rva),
+ ),
+ }
+}
+
+pub fn updateExports(
+ coff: *Coff,
+ pt: Zcu.PerThread,
+ exported: Zcu.Exported,
+ export_indices: []const Zcu.Export.Index,
+) !void {
+ return coff.updateExportsInner(pt, exported, export_indices) catch |err| switch (err) {
+ error.OutOfMemory => error.OutOfMemory,
+ error.LinkFailure => error.AnalysisFail,
+ };
+}
+fn updateExportsInner(
+ coff: *Coff,
+ pt: Zcu.PerThread,
+ exported: Zcu.Exported,
+ export_indices: []const Zcu.Export.Index,
+) !void {
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
+ const ip = &zcu.intern_pool;
+
+ switch (exported) {
+ .nav => |nav| log.debug("updateExports({f})", .{ip.getNav(nav).fqn.fmt(ip)}),
+ .uav => |uav| log.debug("updateExports(@as({f}, {f}))", .{
+ Type.fromInterned(ip.typeOf(uav)).fmt(pt),
+ Value.fromInterned(uav).fmtValue(pt),
+ }),
+ }
+ try coff.symbol_table.ensureUnusedCapacity(gpa, export_indices.len);
+ const exported_si: Symbol.Index = switch (exported) {
+ .nav => |nav| try coff.navSymbol(zcu, nav),
+ .uav => |uav| @enumFromInt(switch (try coff.lowerUav(
+ pt,
+ uav,
+ Type.fromInterned(ip.typeOf(uav)).abiAlignment(zcu),
+ export_indices[0].ptr(zcu).src,
+ )) {
+ .sym_index => |si| si,
+ .fail => |em| {
+ defer em.destroy(gpa);
+ return coff.base.comp.link_diags.fail("{s}", .{em.msg});
+ },
+ }),
+ };
+ while (try coff.idle(pt.tid)) {}
+ const exported_ni = exported_si.node(coff);
+ const exported_sym = exported_si.get(coff);
+ for (export_indices) |export_index| {
+ const @"export" = export_index.ptr(zcu);
+ const export_si = try coff.globalSymbol(@"export".opts.name.toSlice(ip), null);
+ const export_sym = export_si.get(coff);
+ export_sym.ni = exported_ni;
+ export_sym.rva = exported_sym.rva;
+ export_sym.size = exported_sym.size;
+ export_sym.section_number = exported_sym.section_number;
+ export_si.applyTargetRelocs(coff);
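+ // Hack (see entry_hack): an exported wWinMainCRTStartup doubles as the
+ // image entry point.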
+ if (@"export".opts.name.eqlSlice("wWinMainCRTStartup", ip)) {
+ coff.entry_hack = exported_si;
+ coff.targetStore(
+ &coff.optionalHeaderStandardPtr().address_of_entry_point,
+ exported_sym.rva,
+ );
+ }
+ }
+}
+
+pub fn deleteExport(coff: *Coff, exported: Zcu.Exported, name: InternPool.NullTerminatedString) void {
+ _ = coff;
+ _ = exported;
+ _ = name;
+}
+
+pub fn dump(coff: *Coff, tid: Zcu.PerThread.Id) void {
+ const w = std.debug.lockStderrWriter(&.{});
+ defer std.debug.unlockStderrWriter();
+ coff.printNode(tid, w, .root, 0) catch {};
+}
+
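+/// Debug helper: recursively prints the node tree; leaf nodes are followed by
+/// a hex and ASCII dump of their file contents.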
+pub fn printNode(
+ coff: *Coff,
+ tid: Zcu.PerThread.Id,
+ w: *std.Io.Writer,
+ ni: MappedFile.Node.Index,
+ indent: usize,
+) !void {
+ const node = coff.getNode(ni);
+ try w.splatByteAll(' ', indent);
+ try w.writeAll(@tagName(node));
+ switch (node) {
+ else => {},
+ .section => |si| try w.print("({s})", .{
+ std.mem.sliceTo(&si.get(coff).section_number.header(coff).name, 0),
+ }),
+ .import_lookup_table,
+ .import_address_table,
+ .import_hint_name_table,
+ => |import_directory_table_index| try w.print("({s})", .{
+ std.mem.sliceTo(coff.import_table.dlls.values()[import_directory_table_index]
+ .import_hint_name_table_ni.sliceConst(&coff.mf), 0),
+ }),
+ .global => |gmi| {
+ const gn = gmi.globalName(coff);
+ try w.writeByte('(');
+ if (gn.lib_name.toSlice(coff)) |lib_name| try w.print("{s}.dll, ", .{lib_name});
+ try w.print("{s})", .{gn.name.toSlice(coff)});
+ },
+ .nav => |nmi| {
+ const zcu = coff.base.comp.zcu.?;
+ const ip = &zcu.intern_pool;
+ const nav = ip.getNav(nmi.navIndex(coff));
+ try w.print("({f}, {f})", .{
+ Type.fromInterned(nav.typeOf(ip)).fmt(.{ .zcu = zcu, .tid = tid }),
+ nav.fqn.fmt(ip),
+ });
+ },
+ .uav => |umi| {
+ const zcu = coff.base.comp.zcu.?;
+ const val: Value = .fromInterned(umi.uavValue(coff));
+ try w.print("({f}, {f})", .{
+ val.typeOf(zcu).fmt(.{ .zcu = zcu, .tid = tid }),
+ val.fmtValue(.{ .zcu = zcu, .tid = tid }),
+ });
+ },
+ inline .lazy_code, .lazy_const_data => |lmi| try w.print("({f})", .{
+ Type.fromInterned(lmi.lazySymbol(coff).ty).fmt(.{
+ .zcu = coff.base.comp.zcu.?,
+ .tid = tid,
+ }),
+ }),
+ }
+ {
+ const mf_node = &coff.mf.nodes.items[@intFromEnum(ni)];
+ const off, const size = mf_node.location().resolve(&coff.mf);
+ try w.print(" index={d} offset=0x{x} size=0x{x} align=0x{x}{s}{s}{s}{s}\n", .{
+ @intFromEnum(ni),
+ off,
+ size,
+ mf_node.flags.alignment.toByteUnits(),
+ if (mf_node.flags.fixed) " fixed" else "",
+ if (mf_node.flags.moved) " moved" else "",
+ if (mf_node.flags.resized) " resized" else "",
+ if (mf_node.flags.has_content) " has_content" else "",
+ });
+ }
+ var leaf = true;
+ var child_it = ni.children(&coff.mf);
+ while (child_it.next()) |child_ni| {
+ leaf = false;
+ try coff.printNode(tid, w, child_ni, indent + 1);
+ }
+ if (leaf) {
+ const file_loc = ni.fileLocation(&coff.mf, false);
+ if (file_loc.size == 0) return;
+ var address = file_loc.offset;
+ const line_len = 0x10;
+ var line_it = std.mem.window(
+ u8,
+ coff.mf.contents[@intCast(file_loc.offset)..][0..@intCast(file_loc.size)],
+ line_len,
+ line_len,
+ );
+ while (line_it.next()) |line_bytes| : (address += line_len) {
+ try w.splatByteAll(' ', indent + 1);
+ try w.print("{x:0>8} ", .{address});
+ for (line_bytes) |byte| try w.print("{x:0>2} ", .{byte});
+ try w.splatByteAll(' ', 3 * (line_len - line_bytes.len) + 1);
+ for (line_bytes) |byte| try w.writeByte(if (std.ascii.isPrint(byte)) byte else '.');
+ try w.writeByte('\n');
+ }
+ }
+}
+
+const assert = std.debug.assert;
+const builtin = @import("builtin");
+const codegen = @import("../codegen.zig");
+const Compilation = @import("../Compilation.zig");
+const Coff = @This();
+const InternPool = @import("../InternPool.zig");
+const link = @import("../link.zig");
+const log = std.log.scoped(.link);
+const MappedFile = @import("MappedFile.zig");
+const native_endian = builtin.cpu.arch.endian();
+const std = @import("std");
+const target_util = @import("../target.zig");
+const Type = @import("../Type.zig");
+const Value = @import("../Value.zig");
+const Zcu = @import("../Zcu.zig");
diff --git a/src/link/Elf2.zig b/src/link/Elf2.zig
index e43ebf2639..90e7dfbc67 100644
--- a/src/link/Elf2.zig
+++ b/src/link/Elf2.zig
@@ -11,7 +11,7 @@ lazy: std.EnumArray(link.File.LazySymbol.Kind, struct {
map: std.AutoArrayHashMapUnmanaged(InternPool.Index, Symbol.Index),
pending_index: u32,
}),
-pending_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, struct {
+pending_uavs: std.AutoArrayHashMapUnmanaged(Node.UavMapIndex, struct {
alignment: InternPool.Alignment,
src_loc: Zcu.LazySrcLoc,
}),
@@ -25,10 +25,65 @@ pub const Node = union(enum) {
shdr,
segment: u32,
section: Symbol.Index,
- nav: InternPool.Nav.Index,
- uav: InternPool.Index,
- lazy_code: InternPool.Index,
- lazy_const_data: InternPool.Index,
+ nav: NavMapIndex,
+ uav: UavMapIndex,
+ lazy_code: LazyMapRef.Index(.code),
+ lazy_const_data: LazyMapRef.Index(.const_data),
+
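+ /// Index into elf.navs, resolving to both the nav and its symbol.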
+ pub const NavMapIndex = enum(u32) {
+ _,
+
+ pub fn navIndex(nmi: NavMapIndex, elf: *const Elf) InternPool.Nav.Index {
+ return elf.navs.keys()[@intFromEnum(nmi)];
+ }
+
+ pub fn symbol(nmi: NavMapIndex, elf: *const Elf) Symbol.Index {
+ return elf.navs.values()[@intFromEnum(nmi)];
+ }
+ };
+
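+ /// Index into elf.uavs, resolving to both the uav value and its symbol.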
+ pub const UavMapIndex = enum(u32) {
+ _,
+
+ pub fn uavValue(umi: UavMapIndex, elf: *const Elf) InternPool.Index {
+ return elf.uavs.keys()[@intFromEnum(umi)];
+ }
+
+ pub fn symbol(umi: UavMapIndex, elf: *const Elf) Symbol.Index {
+ return elf.uavs.values()[@intFromEnum(umi)];
+ }
+ };
+
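+ /// Identifies a lazy symbol as a kind plus an index into that kind's map.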
+ pub const LazyMapRef = struct {
+ kind: link.File.LazySymbol.Kind,
+ index: u32,
+
+ pub fn Index(comptime kind: link.File.LazySymbol.Kind) type {
+ return enum(u32) {
+ _,
+
+ pub fn ref(lmi: @This()) LazyMapRef {
+ return .{ .kind = kind, .index = @intFromEnum(lmi) };
+ }
+
+ pub fn lazySymbol(lmi: @This(), elf: *const Elf) link.File.LazySymbol {
+ return lmi.ref().lazySymbol(elf);
+ }
+
+ pub fn symbol(lmi: @This(), elf: *const Elf) Symbol.Index {
+ return lmi.ref().symbol(elf);
+ }
+ };
+ }
+
+ pub fn lazySymbol(lmr: LazyMapRef, elf: *const Elf) link.File.LazySymbol {
+ return .{ .kind = lmr.kind, .ty = elf.lazy.getPtrConst(lmr.kind).map.keys()[lmr.index] };
+ }
+
+ pub fn symbol(lmr: LazyMapRef, elf: *const Elf) Symbol.Index {
+ return elf.lazy.getPtrConst(lmr.kind).map.values()[lmr.index];
+ }
+ };
pub const Tag = @typeInfo(Node).@"union".tag_type.?;
@@ -43,11 +98,7 @@ pub const Node = union(enum) {
seg_text,
seg_data,
};
- var mut_known: std.enums.EnumFieldStruct(
- Known,
- MappedFile.Node.Index,
- null,
- ) = undefined;
+ var mut_known: std.enums.EnumFieldStruct(Known, MappedFile.Node.Index, null) = undefined;
for (@typeInfo(Known).@"enum".fields) |field|
@field(mut_known, field.name) = @enumFromInt(field.value);
break :known mut_known;
@@ -223,10 +274,10 @@ pub const Reloc = extern struct {
addend: i64,
pub const Type = extern union {
- x86_64: std.elf.R_X86_64,
- aarch64: std.elf.R_AARCH64,
- riscv: std.elf.R_RISCV,
- ppc64: std.elf.R_PPC64,
+ X86_64: std.elf.R_X86_64,
+ AARCH64: std.elf.R_AARCH64,
+ RISCV: std.elf.R_RISCV,
+ PPC64: std.elf.R_PPC64,
};
pub const Index = enum(u32) {
@@ -239,7 +290,7 @@ pub const Reloc = extern struct {
};
pub fn apply(reloc: *const Reloc, elf: *Elf) void {
- const target_endian = elf.endian();
+ const target_endian = elf.targetEndian();
switch (reloc.loc.get(elf).ni) {
.none => return,
else => |ni| if (ni.hasMoved(&elf.mf)) return,
@@ -274,7 +325,7 @@ pub const Reloc = extern struct {
) +% @as(u64, @bitCast(reloc.addend));
switch (elf.ehdrField(.machine)) {
else => |machine| @panic(@tagName(machine)),
- .X86_64 => switch (reloc.type.x86_64) {
+ .X86_64 => switch (reloc.type.X86_64) {
else => |kind| @panic(@tagName(kind)),
.@"64" => std.mem.writeInt(
u64,
@@ -394,37 +445,7 @@ fn create(
},
.Obj => .REL,
};
- const machine: std.elf.EM = switch (target.cpu.arch) {
- .spirv32, .spirv64, .wasm32, .wasm64 => .NONE,
- .sparc => .SPARC,
- .x86 => .@"386",
- .m68k => .@"68K",
- .mips, .mipsel, .mips64, .mips64el => .MIPS,
- .powerpc, .powerpcle => .PPC,
- .powerpc64, .powerpc64le => .PPC64,
- .s390x => .S390,
- .arm, .armeb, .thumb, .thumbeb => .ARM,
- .hexagon => .SH,
- .sparc64 => .SPARCV9,
- .arc => .ARC,
- .x86_64 => .X86_64,
- .or1k => .OR1K,
- .xtensa => .XTENSA,
- .msp430 => .MSP430,
- .avr => .AVR,
- .nvptx, .nvptx64 => .CUDA,
- .kalimba => .CSR_KALIMBA,
- .aarch64, .aarch64_be => .AARCH64,
- .xcore => .XCORE,
- .amdgcn => .AMDGPU,
- .riscv32, .riscv32be, .riscv64, .riscv64be => .RISCV,
- .lanai => .LANAI,
- .bpfel, .bpfeb => .BPF,
- .ve => .VE,
- .csky => .CSKY,
- .loongarch32, .loongarch64 => .LOONGARCH,
- .propeller => if (target.cpu.has(.propeller, .p2)) .PROPELLER2 else .PROPELLER,
- };
+ const machine = target.toElfMachine();
const maybe_interp = switch (comp.config.output_mode) {
.Exe, .Lib => switch (comp.config.link_mode) {
.static => null,
@@ -479,7 +500,7 @@ fn create(
switch (class) {
.NONE, _ => unreachable,
- inline .@"32", .@"64" => |ct_class| try elf.initHeaders(
+ inline else => |ct_class| try elf.initHeaders(
ct_class,
data,
osabi,
@@ -567,30 +588,31 @@ fn initHeaders(
.fixed = true,
}));
elf.nodes.appendAssumeCapacity(.ehdr);
-
- const ehdr: *ElfN.Ehdr = @ptrCast(@alignCast(ehdr_ni.slice(&elf.mf)));
- const EI = std.elf.EI;
- @memcpy(ehdr.ident[0..std.elf.MAGIC.len], std.elf.MAGIC);
- ehdr.ident[EI.CLASS] = @intFromEnum(class);
- ehdr.ident[EI.DATA] = @intFromEnum(data);
- ehdr.ident[EI.VERSION] = 1;
- ehdr.ident[EI.OSABI] = @intFromEnum(osabi);
- ehdr.ident[EI.ABIVERSION] = 0;
- @memset(ehdr.ident[EI.PAD..], 0);
- ehdr.type = @"type";
- ehdr.machine = machine;
- ehdr.version = 1;
- ehdr.entry = 0;
- ehdr.phoff = 0;
- ehdr.shoff = 0;
- ehdr.flags = 0;
- ehdr.ehsize = @sizeOf(ElfN.Ehdr);
- ehdr.phentsize = @sizeOf(ElfN.Phdr);
- ehdr.phnum = @min(phnum, std.elf.PN_XNUM);
- ehdr.shentsize = @sizeOf(ElfN.Shdr);
- ehdr.shnum = 1;
- ehdr.shstrndx = 0;
- if (target_endian != native_endian) std.mem.byteSwapAllFields(ElfN.Ehdr, ehdr);
+ {
+ const ehdr: *ElfN.Ehdr = @ptrCast(@alignCast(ehdr_ni.slice(&elf.mf)));
+ const EI = std.elf.EI;
+ @memcpy(ehdr.ident[0..std.elf.MAGIC.len], std.elf.MAGIC);
+ ehdr.ident[EI.CLASS] = @intFromEnum(class);
+ ehdr.ident[EI.DATA] = @intFromEnum(data);
+ ehdr.ident[EI.VERSION] = 1;
+ ehdr.ident[EI.OSABI] = @intFromEnum(osabi);
+ ehdr.ident[EI.ABIVERSION] = 0;
+ @memset(ehdr.ident[EI.PAD..], 0);
+ ehdr.type = @"type";
+ ehdr.machine = machine;
+ ehdr.version = 1;
+ ehdr.entry = 0;
+ ehdr.phoff = 0;
+ ehdr.shoff = 0;
+ ehdr.flags = 0;
+ ehdr.ehsize = @sizeOf(ElfN.Ehdr);
+ ehdr.phentsize = @sizeOf(ElfN.Phdr);
+ ehdr.phnum = @min(phnum, std.elf.PN_XNUM);
+ ehdr.shentsize = @sizeOf(ElfN.Shdr);
+ ehdr.shnum = 1;
+ ehdr.shstrndx = 0;
+ if (target_endian != native_endian) std.mem.byteSwapAllFields(ElfN.Ehdr, ehdr);
+ }
const phdr_ni = Node.known.phdr;
assert(phdr_ni == try elf.mf.addLastChildNode(gpa, seg_rodata_ni, .{
@@ -750,7 +772,10 @@ fn initHeaders(
},
.shndx = std.elf.SHN_UNDEF,
};
- ehdr.shstrndx = ehdr.shnum;
+ {
+ const ehdr = @field(elf.ehdrPtr(), @tagName(class));
+ ehdr.shstrndx = ehdr.shnum;
+ }
assert(try elf.addSection(seg_rodata_ni, .{
.type = std.elf.SHT_STRTAB,
.addralign = elf.mf.flags.block_size,
@@ -821,6 +846,24 @@ fn getNode(elf: *Elf, ni: MappedFile.Node.Index) Node {
return elf.nodes.get(@intFromEnum(ni));
}
+pub fn identClass(elf: *Elf) std.elf.CLASS {
+ return @enumFromInt(elf.mf.contents[std.elf.EI.CLASS]);
+}
+
+pub fn identData(elf: *Elf) std.elf.DATA {
+ return @enumFromInt(elf.mf.contents[std.elf.EI.DATA]);
+}
+fn endianForData(data: std.elf.DATA) std.builtin.Endian {
+ return switch (data) {
+ .NONE, _ => unreachable,
+ .@"2LSB" => .little,
+ .@"2MSB" => .big,
+ };
+}
+pub fn targetEndian(elf: *Elf) std.builtin.Endian {
+ return endianForData(elf.identData());
+}
+
pub const EhdrPtr = union(std.elf.CLASS) {
NONE: noreturn,
@"32": *std.elf.Elf32.Ehdr,
@@ -830,7 +873,7 @@ pub fn ehdrPtr(elf: *Elf) EhdrPtr {
const slice = Node.known.ehdr.slice(&elf.mf);
return switch (elf.identClass()) {
.NONE, _ => unreachable,
- inline .@"32", .@"64" => |class| @unionInit(
+ inline else => |class| @unionInit(
EhdrPtr,
@tagName(class),
@ptrCast(@alignCast(slice)),
@@ -841,35 +884,15 @@ pub fn ehdrField(
elf: *Elf,
comptime field: enum { type, machine },
) @FieldType(std.elf.Elf32.Ehdr, @tagName(field)) {
- const Field = @FieldType(std.elf.Elf32.Ehdr, @tagName(field));
- comptime assert(@FieldType(std.elf.Elf64.Ehdr, @tagName(field)) == Field);
return @enumFromInt(std.mem.toNative(
- @typeInfo(Field).@"enum".tag_type,
+ @typeInfo(@FieldType(std.elf.Elf32.Ehdr, @tagName(field))).@"enum".tag_type,
@intFromEnum(switch (elf.ehdrPtr()) {
inline else => |ehdr| @field(ehdr, @tagName(field)),
}),
- elf.endian(),
+ elf.targetEndian(),
));
}
-pub fn identClass(elf: *Elf) std.elf.CLASS {
- return @enumFromInt(elf.mf.contents[std.elf.EI.CLASS]);
-}
-
-pub fn identData(elf: *Elf) std.elf.DATA {
- return @enumFromInt(elf.mf.contents[std.elf.EI.DATA]);
-}
-fn endianForData(data: std.elf.DATA) std.builtin.Endian {
- return switch (data) {
- .NONE, _ => unreachable,
- .@"2LSB" => .little,
- .@"2MSB" => .big,
- };
-}
-pub fn endian(elf: *Elf) std.builtin.Endian {
- return endianForData(elf.identData());
-}
-
fn baseAddrForType(@"type": std.elf.ET) u64 {
return switch (@"type") {
else => 0,
@@ -889,7 +912,7 @@ pub fn phdrSlice(elf: *Elf) PhdrSlice {
const slice = Node.known.phdr.slice(&elf.mf);
return switch (elf.identClass()) {
.NONE, _ => unreachable,
- inline .@"32", .@"64" => |class| @unionInit(
+ inline else => |class| @unionInit(
PhdrSlice,
@tagName(class),
@ptrCast(@alignCast(slice)),
@@ -906,7 +929,7 @@ pub fn shdrSlice(elf: *Elf) ShdrSlice {
const slice = Node.known.shdr.slice(&elf.mf);
return switch (elf.identClass()) {
.NONE, _ => unreachable,
- inline .@"32", .@"64" => |class| @unionInit(
+ inline else => |class| @unionInit(
ShdrSlice,
@tagName(class),
@ptrCast(@alignCast(slice)),
@@ -923,7 +946,7 @@ pub fn symSlice(elf: *Elf) SymSlice {
const slice = Symbol.Index.symtab.node(elf).slice(&elf.mf);
return switch (elf.identClass()) {
.NONE, _ => unreachable,
- inline .@"32", .@"64" => |class| @unionInit(
+ inline else => |class| @unionInit(
SymSlice,
@tagName(class),
@ptrCast(@alignCast(slice)),
@@ -942,7 +965,7 @@ pub fn symPtr(elf: *Elf, si: Symbol.Index) SymPtr {
};
}
-fn addSymbolAssumeCapacity(elf: *Elf) !Symbol.Index {
+fn addSymbolAssumeCapacity(elf: *Elf) Symbol.Index {
defer elf.symtab.addOneAssumeCapacity().* = .{
.ni = .none,
.loc_relocs = .none,
@@ -953,30 +976,27 @@ fn addSymbolAssumeCapacity(elf: *Elf) !Symbol.Index {
}
fn initSymbolAssumeCapacity(elf: *Elf, opts: Symbol.Index.InitOptions) !Symbol.Index {
- const si = try elf.addSymbolAssumeCapacity();
+ const si = elf.addSymbolAssumeCapacity();
try si.init(elf, opts);
return si;
}
-pub fn globalSymbol(
- elf: *Elf,
- opts: struct {
- name: []const u8,
- type: std.elf.STT,
- bind: std.elf.STB = .GLOBAL,
- visibility: std.elf.STV = .DEFAULT,
- },
-) !Symbol.Index {
+pub fn globalSymbol(elf: *Elf, opts: struct {
+ name: []const u8,
+ type: std.elf.STT,
+ bind: std.elf.STB = .GLOBAL,
+ visibility: std.elf.STV = .DEFAULT,
+}) !Symbol.Index {
const gpa = elf.base.comp.gpa;
try elf.symtab.ensureUnusedCapacity(gpa, 1);
- const sym_gop = try elf.globals.getOrPut(gpa, try elf.string(.strtab, opts.name));
- if (!sym_gop.found_existing) sym_gop.value_ptr.* = try elf.initSymbolAssumeCapacity(.{
+ const global_gop = try elf.globals.getOrPut(gpa, try elf.string(.strtab, opts.name));
+ if (!global_gop.found_existing) global_gop.value_ptr.* = try elf.initSymbolAssumeCapacity(.{
.name = opts.name,
.type = opts.type,
.bind = opts.bind,
.visibility = opts.visibility,
});
- return sym_gop.value_ptr.*;
+ return global_gop.value_ptr.*;
}
fn navType(
@@ -1008,10 +1028,21 @@ fn navType(
},
};
}
-pub fn navSymbol(elf: *Elf, zcu: *Zcu, nav_index: InternPool.Nav.Index) !Symbol.Index {
+fn navMapIndex(elf: *Elf, zcu: *Zcu, nav_index: InternPool.Nav.Index) !Node.NavMapIndex {
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const nav = ip.getNav(nav_index);
+ try elf.symtab.ensureUnusedCapacity(gpa, 1);
+ const nav_gop = try elf.navs.getOrPut(gpa, nav_index);
+ if (!nav_gop.found_existing) nav_gop.value_ptr.* = try elf.initSymbolAssumeCapacity(.{
+ .name = nav.fqn.toSlice(ip),
+ .type = navType(ip, nav.status, elf.base.comp.config.any_non_single_threaded),
+ });
+ return @enumFromInt(nav_gop.index);
+}
+pub fn navSymbol(elf: *Elf, zcu: *Zcu, nav_index: InternPool.Nav.Index) !Symbol.Index {
+ const ip = &zcu.intern_pool;
+ const nav = ip.getNav(nav_index);
if (nav.getExtern(ip)) |@"extern"| return elf.globalSymbol(.{
.name = @"extern".name.toSlice(ip),
.type = navType(ip, nav.status, elf.base.comp.config.any_non_single_threaded),
@@ -1027,40 +1058,37 @@ pub fn navSymbol(elf: *Elf, zcu: *Zcu, nav_index: InternPool.Nav.Index) !Symbol.
.protected => .PROTECTED,
},
});
- try elf.symtab.ensureUnusedCapacity(gpa, 1);
- const sym_gop = try elf.navs.getOrPut(gpa, nav_index);
- if (!sym_gop.found_existing) {
- sym_gop.value_ptr.* = try elf.initSymbolAssumeCapacity(.{
- .name = nav.fqn.toSlice(ip),
- .type = navType(ip, nav.status, elf.base.comp.config.any_non_single_threaded),
- });
- }
- return sym_gop.value_ptr.*;
+ const nmi = try elf.navMapIndex(zcu, nav_index);
+ return nmi.symbol(elf);
}
-pub fn uavSymbol(elf: *Elf, uav_val: InternPool.Index) !Symbol.Index {
+fn uavMapIndex(elf: *Elf, uav_val: InternPool.Index) !Node.UavMapIndex {
const gpa = elf.base.comp.gpa;
try elf.symtab.ensureUnusedCapacity(gpa, 1);
- const sym_gop = try elf.uavs.getOrPut(gpa, uav_val);
- if (!sym_gop.found_existing)
- sym_gop.value_ptr.* = try elf.initSymbolAssumeCapacity(.{ .type = .OBJECT });
- return sym_gop.value_ptr.*;
+ const uav_gop = try elf.uavs.getOrPut(gpa, uav_val);
+ if (!uav_gop.found_existing)
+ uav_gop.value_ptr.* = try elf.initSymbolAssumeCapacity(.{ .type = .OBJECT });
+ return @enumFromInt(uav_gop.index);
+}
+pub fn uavSymbol(elf: *Elf, uav_val: InternPool.Index) !Symbol.Index {
+ const umi = try elf.uavMapIndex(uav_val);
+ return umi.symbol(elf);
}
pub fn lazySymbol(elf: *Elf, lazy: link.File.LazySymbol) !Symbol.Index {
const gpa = elf.base.comp.gpa;
try elf.symtab.ensureUnusedCapacity(gpa, 1);
- const sym_gop = try elf.lazy.getPtr(lazy.kind).map.getOrPut(gpa, lazy.ty);
- if (!sym_gop.found_existing) {
- sym_gop.value_ptr.* = try elf.initSymbolAssumeCapacity(.{
+ const lazy_gop = try elf.lazy.getPtr(lazy.kind).map.getOrPut(gpa, lazy.ty);
+ if (!lazy_gop.found_existing) {
+ lazy_gop.value_ptr.* = try elf.initSymbolAssumeCapacity(.{
.type = switch (lazy.kind) {
.code => .FUNC,
.const_data => .OBJECT,
},
});
- elf.base.comp.link_lazy_prog_node.increaseEstimatedTotalItems(1);
+ elf.base.comp.link_synth_prog_node.increaseEstimatedTotalItems(1);
}
- return sym_gop.value_ptr.*;
+ return lazy_gop.value_ptr.*;
}
pub fn getNavVAddr(
@@ -1088,7 +1116,7 @@ pub fn getVAddr(elf: *Elf, reloc_info: link.File.RelocInfo, target_si: Symbol.In
reloc_info.addend,
switch (elf.ehdrField(.machine)) {
else => unreachable,
- .X86_64 => .{ .x86_64 = switch (elf.identClass()) {
+ .X86_64 => .{ .X86_64 = switch (elf.identClass()) {
.NONE, _ => unreachable,
.@"32" => .@"32",
.@"64" => .@"64",
@@ -1107,7 +1135,7 @@ fn addSection(elf: *Elf, segment_ni: MappedFile.Node.Index, opts: struct {
entsize: std.elf.Word = 0,
}) !Symbol.Index {
const gpa = elf.base.comp.gpa;
- const target_endian = elf.endian();
+ const target_endian = elf.targetEndian();
try elf.nodes.ensureUnusedCapacity(gpa, 1);
try elf.symtab.ensureUnusedCapacity(gpa, 1);
@@ -1127,7 +1155,7 @@ fn addSection(elf: *Elf, segment_ni: MappedFile.Node.Index, opts: struct {
.size = opts.size,
.moved = true,
});
- const si = try elf.addSymbolAssumeCapacity();
+ const si = elf.addSymbolAssumeCapacity();
elf.nodes.appendAssumeCapacity(.{ .section = si });
si.get(elf).ni = ni;
try si.init(elf, .{
@@ -1160,7 +1188,7 @@ fn addSection(elf: *Elf, segment_ni: MappedFile.Node.Index, opts: struct {
fn renameSection(elf: *Elf, si: Symbol.Index, name: []const u8) !void {
const strtab_entry = try elf.string(.strtab, name);
const shstrtab_entry = try elf.string(.shstrtab, name);
- const target_endian = elf.endian();
+ const target_endian = elf.targetEndian();
switch (elf.shdrSlice()) {
inline else => |shdr, class| {
const sym = @field(elf.symPtr(si), @tagName(class));
@@ -1173,7 +1201,7 @@ fn renameSection(elf: *Elf, si: Symbol.Index, name: []const u8) !void {
}
fn linkSections(elf: *Elf, si: Symbol.Index, link_si: Symbol.Index) !void {
- const target_endian = elf.endian();
+ const target_endian = elf.targetEndian();
switch (elf.shdrSlice()) {
inline else => |shdr, class| {
const sym = @field(elf.symPtr(si), @tagName(class));
@@ -1184,7 +1212,7 @@ fn linkSections(elf: *Elf, si: Symbol.Index, link_si: Symbol.Index) !void {
}
fn sectionName(elf: *Elf, si: Symbol.Index) [:0]const u8 {
- const target_endian = elf.endian();
+ const target_endian = elf.targetEndian();
const name = Symbol.Index.shstrtab.node(elf).slice(&elf.mf)[name: switch (elf.shdrSlice()) {
inline else => |shndx, class| {
const sym = @field(elf.symPtr(si), @tagName(class));
@@ -1263,7 +1291,8 @@ fn updateNavInner(elf: *Elf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index)
};
if (nav_init == .none or !Type.fromInterned(ip.typeOf(nav_init)).hasRuntimeBits(zcu)) return;
- const si = try elf.navSymbol(zcu, nav_index);
+ const nmi = try elf.navMapIndex(zcu, nav_index);
+ const si = nmi.symbol(elf);
const ni = ni: {
const sym = si.get(elf);
switch (sym.ni) {
@@ -1275,7 +1304,7 @@ fn updateNavInner(elf: *Elf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index)
.alignment = pt.navAlignment(nav_index).toStdMem(),
.moved = true,
});
- elf.nodes.appendAssumeCapacity(.{ .nav = nav_index });
+ elf.nodes.appendAssumeCapacity(.{ .nav = nmi });
sym.ni = ni;
switch (elf.symPtr(si)) {
inline else => |sym_ptr, class| sym_ptr.shndx =
@@ -1289,28 +1318,24 @@ fn updateNavInner(elf: *Elf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index)
break :ni sym.ni;
};
- const size = size: {
- var nw: MappedFile.Node.Writer = undefined;
- ni.writer(&elf.mf, gpa, &nw);
- defer nw.deinit();
- codegen.generateSymbol(
- &elf.base,
- pt,
- zcu.navSrcLoc(nav_index),
- .fromInterned(nav_init),
- &nw.interface,
- .{ .atom_index = @intFromEnum(si) },
- ) catch |err| switch (err) {
- error.WriteFailed => return error.OutOfMemory,
- else => |e| return e,
- };
- break :size nw.interface.end;
+ var nw: MappedFile.Node.Writer = undefined;
+ ni.writer(&elf.mf, gpa, &nw);
+ defer nw.deinit();
+ codegen.generateSymbol(
+ &elf.base,
+ pt,
+ zcu.navSrcLoc(nav_index),
+ .fromInterned(nav_init),
+ &nw.interface,
+ .{ .atom_index = @intFromEnum(si) },
+ ) catch |err| switch (err) {
+ error.WriteFailed => return error.OutOfMemory,
+ else => |e| return e,
};
-
- const target_endian = elf.endian();
+ const target_endian = elf.targetEndian();
switch (elf.symPtr(si)) {
inline else => |sym| sym.size =
- std.mem.nativeTo(@TypeOf(sym.size), @intCast(size), target_endian),
+ std.mem.nativeTo(@TypeOf(sym.size), @intCast(nw.interface.end), target_endian),
}
si.applyLocationRelocs(elf);
}
@@ -1326,7 +1351,7 @@ pub fn lowerUav(
const gpa = zcu.gpa;
try elf.pending_uavs.ensureUnusedCapacity(gpa, 1);
- const si = elf.uavSymbol(uav_val) catch |err| switch (err) {
+ const umi = elf.uavMapIndex(uav_val) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| return .{ .fail = try Zcu.ErrorMsg.create(
gpa,
@@ -1335,11 +1360,12 @@ pub fn lowerUav(
.{@errorName(e)},
) },
};
+ const si = umi.symbol(elf);
if (switch (si.get(elf).ni) {
.none => true,
else => |ni| uav_align.toStdMem().order(ni.alignment(&elf.mf)).compare(.gt),
}) {
- const gop = elf.pending_uavs.getOrPutAssumeCapacity(uav_val);
+ const gop = elf.pending_uavs.getOrPutAssumeCapacity(umi);
if (gop.found_existing) {
gop.value_ptr.alignment = gop.value_ptr.alignment.max(uav_align);
} else {
@@ -1347,7 +1373,7 @@ pub fn lowerUav(
.alignment = uav_align,
.src_loc = src_loc,
};
- elf.base.comp.link_uav_prog_node.increaseEstimatedTotalItems(1);
+ elf.base.comp.link_const_prog_node.increaseEstimatedTotalItems(1);
}
}
return .{ .sym_index = @intFromEnum(si) };
@@ -1384,7 +1410,8 @@ fn updateFuncInner(
const func = zcu.funcInfo(func_index);
const nav = ip.getNav(func.owner_nav);
- const si = try elf.navSymbol(zcu, func.owner_nav);
+ const nmi = try elf.navMapIndex(zcu, func.owner_nav);
+ const si = nmi.symbol(elf);
log.debug("updateFunc({f}) = {d}", .{ nav.fqn.fmt(ip), si });
const ni = ni: {
const sym = si.get(elf);
@@ -1406,7 +1433,7 @@ fn updateFuncInner(
}.toStdMem(),
.moved = true,
});
- elf.nodes.appendAssumeCapacity(.{ .nav = func.owner_nav });
+ elf.nodes.appendAssumeCapacity(.{ .nav = nmi });
sym.ni = ni;
switch (elf.symPtr(si)) {
inline else => |sym_ptr, class| sym_ptr.shndx =
@@ -1420,37 +1447,35 @@ fn updateFuncInner(
break :ni sym.ni;
};
- const size = size: {
- var nw: MappedFile.Node.Writer = undefined;
- ni.writer(&elf.mf, gpa, &nw);
- defer nw.deinit();
- codegen.emitFunction(
- &elf.base,
- pt,
- zcu.navSrcLoc(func.owner_nav),
- func_index,
- @intFromEnum(si),
- mir,
- &nw.interface,
- .none,
- ) catch |err| switch (err) {
- error.WriteFailed => return nw.err.?,
- else => |e| return e,
- };
- break :size nw.interface.end;
+ var nw: MappedFile.Node.Writer = undefined;
+ ni.writer(&elf.mf, gpa, &nw);
+ defer nw.deinit();
+ codegen.emitFunction(
+ &elf.base,
+ pt,
+ zcu.navSrcLoc(func.owner_nav),
+ func_index,
+ @intFromEnum(si),
+ mir,
+ &nw.interface,
+ .none,
+ ) catch |err| switch (err) {
+ error.WriteFailed => return nw.err.?,
+ else => |e| return e,
};
-
- const target_endian = elf.endian();
+ const target_endian = elf.targetEndian();
switch (elf.symPtr(si)) {
inline else => |sym| sym.size =
- std.mem.nativeTo(@TypeOf(sym.size), @intCast(size), target_endian),
+ std.mem.nativeTo(@TypeOf(sym.size), @intCast(nw.interface.end), target_endian),
}
si.applyLocationRelocs(elf);
}
pub fn updateErrorData(elf: *Elf, pt: Zcu.PerThread) !void {
- const si = elf.lazy.getPtr(.const_data).map.get(.anyerror_type) orelse return;
- elf.flushLazy(pt, .{ .kind = .const_data, .ty = .anyerror_type }, si) catch |err| switch (err) {
+ elf.flushLazy(pt, .{
+ .kind = .const_data,
+ .index = @intCast(elf.lazy.getPtr(.const_data).map.getIndex(.anyerror_type) orelse return),
+ }) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.CodegenFail => return error.LinkFailure,
else => |e| return elf.base.comp.link_diags.fail("updateErrorData failed {t}", .{e}),
@@ -1472,14 +1497,13 @@ pub fn idle(elf: *Elf, tid: Zcu.PerThread.Id) !bool {
const comp = elf.base.comp;
task: {
while (elf.pending_uavs.pop()) |pending_uav| {
- const sub_prog_node =
- elf.idleProgNode(
- tid,
- comp.link_uav_prog_node,
- .{ .uav = pending_uav.key },
- );
+ const sub_prog_node = elf.idleProgNode(
+ tid,
+ comp.link_const_prog_node,
+ .{ .uav = pending_uav.key },
+ );
defer sub_prog_node.end();
- break :task elf.flushUav(
+ elf.flushUav(
.{ .zcu = elf.base.comp.zcu.?, .tid = tid },
pending_uav.key,
pending_uav.value.alignment,
@@ -1491,37 +1515,34 @@ pub fn idle(elf: *Elf, tid: Zcu.PerThread.Id) !bool {
.{e},
),
};
+ break :task;
}
var lazy_it = elf.lazy.iterator();
- while (lazy_it.next()) |lazy| for (
- lazy.value.map.keys()[lazy.value.pending_index..],
- lazy.value.map.values()[lazy.value.pending_index..],
- ) |ty, si| {
- lazy.value.pending_index += 1;
+ while (lazy_it.next()) |lazy| if (lazy.value.pending_index < lazy.value.map.count()) {
const pt: Zcu.PerThread = .{ .zcu = elf.base.comp.zcu.?, .tid = tid };
- const kind = switch (lazy.key) {
+ const lmr: Node.LazyMapRef = .{ .kind = lazy.key, .index = lazy.value.pending_index };
+ lazy.value.pending_index += 1;
+ const kind = switch (lmr.kind) {
.code => "code",
.const_data => "data",
};
var name: [std.Progress.Node.max_name_len]u8 = undefined;
- const sub_prog_node = comp.link_lazy_prog_node.start(
+ const sub_prog_node = comp.link_synth_prog_node.start(
std.fmt.bufPrint(&name, "lazy {s} for {f}", .{
kind,
- Type.fromInterned(ty).fmt(pt),
+ Type.fromInterned(lmr.lazySymbol(elf).ty).fmt(pt),
}) catch &name,
0,
);
defer sub_prog_node.end();
- break :task elf.flushLazy(pt, .{
- .kind = lazy.key,
- .ty = ty,
- }, si) catch |err| switch (err) {
+ elf.flushLazy(pt, lmr) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| return elf.base.comp.link_diags.fail(
"linker failed to lower lazy {s}: {t}",
.{ kind, e },
),
};
+ break :task;
};
while (elf.mf.updates.pop()) |ni| {
const clean_moved = ni.cleanMoved(&elf.mf);
@@ -1551,12 +1572,12 @@ fn idleProgNode(
return prog_node.start(name: switch (node) {
else => |tag| @tagName(tag),
.section => |si| elf.sectionName(si),
- .nav => |nav| {
+ .nav => |nmi| {
const ip = &elf.base.comp.zcu.?.intern_pool;
- break :name ip.getNav(nav).fqn.toSlice(ip);
+ break :name ip.getNav(nmi.navIndex(elf)).fqn.toSlice(ip);
},
- .uav => |uav| std.fmt.bufPrint(&name, "{f}", .{
- Value.fromInterned(uav).fmtValue(.{ .zcu = elf.base.comp.zcu.?, .tid = tid }),
+ .uav => |umi| std.fmt.bufPrint(&name, "{f}", .{
+ Value.fromInterned(umi.uavValue(elf)).fmtValue(.{ .zcu = elf.base.comp.zcu.?, .tid = tid }),
}) catch &name,
}, 0);
}
@@ -1564,14 +1585,15 @@ fn idleProgNode(
fn flushUav(
elf: *Elf,
pt: Zcu.PerThread,
- uav_val: InternPool.Index,
+ umi: Node.UavMapIndex,
uav_align: InternPool.Alignment,
src_loc: Zcu.LazySrcLoc,
) !void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
- const si = try elf.uavSymbol(uav_val);
+ const uav_val = umi.uavValue(elf);
+ const si = umi.symbol(elf);
const ni = ni: {
const sym = si.get(elf);
switch (sym.ni) {
@@ -1581,7 +1603,7 @@ fn flushUav(
.alignment = uav_align.toStdMem(),
.moved = true,
});
- elf.nodes.appendAssumeCapacity(.{ .uav = uav_val });
+ elf.nodes.appendAssumeCapacity(.{ .uav = umi });
sym.ni = ni;
switch (elf.symPtr(si)) {
inline else => |sym_ptr, class| sym_ptr.shndx =
@@ -1598,36 +1620,34 @@ fn flushUav(
break :ni sym.ni;
};
- const size = size: {
- var nw: MappedFile.Node.Writer = undefined;
- ni.writer(&elf.mf, gpa, &nw);
- defer nw.deinit();
- codegen.generateSymbol(
- &elf.base,
- pt,
- src_loc,
- .fromInterned(uav_val),
- &nw.interface,
- .{ .atom_index = @intFromEnum(si) },
- ) catch |err| switch (err) {
- error.WriteFailed => return error.OutOfMemory,
- else => |e| return e,
- };
- break :size nw.interface.end;
+ var nw: MappedFile.Node.Writer = undefined;
+ ni.writer(&elf.mf, gpa, &nw);
+ defer nw.deinit();
+ codegen.generateSymbol(
+ &elf.base,
+ pt,
+ src_loc,
+ .fromInterned(uav_val),
+ &nw.interface,
+ .{ .atom_index = @intFromEnum(si) },
+ ) catch |err| switch (err) {
+ error.WriteFailed => return error.OutOfMemory,
+ else => |e| return e,
};
-
- const target_endian = elf.endian();
+ const target_endian = elf.targetEndian();
switch (elf.symPtr(si)) {
inline else => |sym| sym.size =
- std.mem.nativeTo(@TypeOf(sym.size), @intCast(size), target_endian),
+ std.mem.nativeTo(@TypeOf(sym.size), @intCast(nw.interface.end), target_endian),
}
si.applyLocationRelocs(elf);
}
-fn flushLazy(elf: *Elf, pt: Zcu.PerThread, lazy: link.File.LazySymbol, si: Symbol.Index) !void {
+fn flushLazy(elf: *Elf, pt: Zcu.PerThread, lmr: Node.LazyMapRef) !void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
+ const lazy = lmr.lazySymbol(elf);
+ const si = lmr.symbol(elf);
const ni = ni: {
const sym = si.get(elf);
switch (sym.ni) {
@@ -1639,8 +1659,8 @@ fn flushLazy(elf: *Elf, pt: Zcu.PerThread, lazy: link.File.LazySymbol, si: Symbo
};
const ni = try elf.mf.addLastChildNode(gpa, sec_si.node(elf), .{ .moved = true });
elf.nodes.appendAssumeCapacity(switch (lazy.kind) {
- .code => .{ .lazy_code = lazy.ty },
- .const_data => .{ .lazy_const_data = lazy.ty },
+ .code => .{ .lazy_code = @enumFromInt(lmr.index) },
+ .const_data => .{ .lazy_const_data = @enumFromInt(lmr.index) },
});
sym.ni = ni;
switch (elf.symPtr(si)) {
@@ -1655,34 +1675,30 @@ fn flushLazy(elf: *Elf, pt: Zcu.PerThread, lazy: link.File.LazySymbol, si: Symbo
break :ni sym.ni;
};
- const size = size: {
- var required_alignment: InternPool.Alignment = .none;
- var nw: MappedFile.Node.Writer = undefined;
- ni.writer(&elf.mf, gpa, &nw);
- defer nw.deinit();
- try codegen.generateLazySymbol(
- &elf.base,
- pt,
- Type.fromInterned(lazy.ty).srcLocOrNull(pt.zcu) orelse .unneeded,
- lazy,
- &required_alignment,
- &nw.interface,
- .none,
- .{ .atom_index = @intFromEnum(si) },
- );
- break :size nw.interface.end;
- };
-
- const target_endian = elf.endian();
+ var required_alignment: InternPool.Alignment = .none;
+ var nw: MappedFile.Node.Writer = undefined;
+ ni.writer(&elf.mf, gpa, &nw);
+ defer nw.deinit();
+ try codegen.generateLazySymbol(
+ &elf.base,
+ pt,
+ Type.fromInterned(lazy.ty).srcLocOrNull(pt.zcu) orelse .unneeded,
+ lazy,
+ &required_alignment,
+ &nw.interface,
+ .none,
+ .{ .atom_index = @intFromEnum(si) },
+ );
+ const target_endian = elf.targetEndian();
switch (elf.symPtr(si)) {
inline else => |sym| sym.size =
- std.mem.nativeTo(@TypeOf(sym.size), @intCast(size), target_endian),
+ std.mem.nativeTo(@TypeOf(sym.size), @intCast(nw.interface.end), target_endian),
}
si.applyLocationRelocs(elf);
}
fn flushMoved(elf: *Elf, ni: MappedFile.Node.Index) !void {
- const target_endian = elf.endian();
+ const target_endian = elf.targetEndian();
const file_offset = ni.fileLocation(&elf.mf, false).offset;
const node = elf.getNode(ni);
switch (node) {
@@ -1738,11 +1754,8 @@ fn flushMoved(elf: *Elf, ni: MappedFile.Node.Index) !void {
.nav, .uav, .lazy_code, .lazy_const_data => {
const si = switch (node) {
else => unreachable,
- .nav => |nav| elf.navs.get(nav),
- .uav => |uav| elf.uavs.get(uav),
- .lazy_code => |ty| elf.lazy.getPtr(.code).map.get(ty),
- .lazy_const_data => |ty| elf.lazy.getPtr(.const_data).map.get(ty),
- }.?;
+ inline .nav, .uav, .lazy_code, .lazy_const_data => |mi| mi.symbol(elf),
+ };
switch (elf.shdrSlice()) {
inline else => |shdr, class| {
const sym = @field(elf.symPtr(si), @tagName(class));
@@ -1773,7 +1786,7 @@ fn flushMoved(elf: *Elf, ni: MappedFile.Node.Index) !void {
}
fn flushResized(elf: *Elf, ni: MappedFile.Node.Index) !void {
- const target_endian = elf.endian();
+ const target_endian = elf.targetEndian();
_, const size = ni.location(&elf.mf).resolve(&elf.mf);
const node = elf.getNode(ni);
switch (node) {
@@ -1957,65 +1970,74 @@ pub fn printNode(
indent: usize,
) !void {
const node = elf.getNode(ni);
- const mf_node = &elf.mf.nodes.items[@intFromEnum(ni)];
- const off, const size = mf_node.location().resolve(&elf.mf);
try w.splatByteAll(' ', indent);
try w.writeAll(@tagName(node));
switch (node) {
else => {},
.section => |si| try w.print("({s})", .{elf.sectionName(si)}),
- .nav => |nav_index| {
+ .nav => |nmi| {
const zcu = elf.base.comp.zcu.?;
const ip = &zcu.intern_pool;
- const nav = ip.getNav(nav_index);
+ const nav = ip.getNav(nmi.navIndex(elf));
try w.print("({f}, {f})", .{
Type.fromInterned(nav.typeOf(ip)).fmt(.{ .zcu = zcu, .tid = tid }),
nav.fqn.fmt(ip),
});
},
- .uav => |uav| {
+ .uav => |umi| {
const zcu = elf.base.comp.zcu.?;
- const val: Value = .fromInterned(uav);
+ const val: Value = .fromInterned(umi.uavValue(elf));
try w.print("({f}, {f})", .{
val.typeOf(zcu).fmt(.{ .zcu = zcu, .tid = tid }),
val.fmtValue(.{ .zcu = zcu, .tid = tid }),
});
},
+ inline .lazy_code, .lazy_const_data => |lmi| try w.print("({f})", .{
+ Type.fromInterned(lmi.lazySymbol(elf).ty).fmt(.{
+ .zcu = elf.base.comp.zcu.?,
+ .tid = tid,
+ }),
+ }),
}
- try w.print(" index={d} offset=0x{x} size=0x{x} align=0x{x}{s}{s}{s}{s}\n", .{
- @intFromEnum(ni),
- off,
- size,
- mf_node.flags.alignment.toByteUnits(),
- if (mf_node.flags.fixed) " fixed" else "",
- if (mf_node.flags.moved) " moved" else "",
- if (mf_node.flags.resized) " resized" else "",
- if (mf_node.flags.has_content) " has_content" else "",
- });
- var child_ni = mf_node.first;
- switch (child_ni) {
- .none => {
- const file_loc = ni.fileLocation(&elf.mf, false);
- if (file_loc.size == 0) return;
- var address = file_loc.offset;
- const line_len = 0x10;
- var line_it = std.mem.window(
- u8,
- elf.mf.contents[@intCast(file_loc.offset)..][0..@intCast(file_loc.size)],
- line_len,
- line_len,
- );
- while (line_it.next()) |line_bytes| : (address += line_len) {
- try w.splatByteAll(' ', indent + 1);
- try w.print("{x:0>8}", .{address});
- for (line_bytes) |byte| try w.print(" {x:0>2}", .{byte});
- try w.writeByte('\n');
- }
- },
- else => while (child_ni != .none) {
- try elf.printNode(tid, w, child_ni, indent + 1);
- child_ni = elf.mf.nodes.items[@intFromEnum(child_ni)].next;
- },
+ {
+ const mf_node = &elf.mf.nodes.items[@intFromEnum(ni)];
+ const off, const size = mf_node.location().resolve(&elf.mf);
+ try w.print(" index={d} offset=0x{x} size=0x{x} align=0x{x}{s}{s}{s}{s}\n", .{
+ @intFromEnum(ni),
+ off,
+ size,
+ mf_node.flags.alignment.toByteUnits(),
+ if (mf_node.flags.fixed) " fixed" else "",
+ if (mf_node.flags.moved) " moved" else "",
+ if (mf_node.flags.resized) " resized" else "",
+ if (mf_node.flags.has_content) " has_content" else "",
+ });
+ }
+ var leaf = true;
+ var child_it = ni.children(&elf.mf);
+ while (child_it.next()) |child_ni| {
+ leaf = false;
+ try elf.printNode(tid, w, child_ni, indent + 1);
+ }
+ if (leaf) {
+ const file_loc = ni.fileLocation(&elf.mf, false);
+ if (file_loc.size == 0) return;
+ var address = file_loc.offset;
+ const line_len = 0x10;
+ var line_it = std.mem.window(
+ u8,
+ elf.mf.contents[@intCast(file_loc.offset)..][0..@intCast(file_loc.size)],
+ line_len,
+ line_len,
+ );
+ while (line_it.next()) |line_bytes| : (address += line_len) {
+ try w.splatByteAll(' ', indent + 1);
+ try w.print("{x:0>8} ", .{address});
+ for (line_bytes) |byte| try w.print("{x:0>2} ", .{byte});
+ try w.splatByteAll(' ', 3 * (line_len - line_bytes.len) + 1);
+ for (line_bytes) |byte| try w.writeByte(if (std.ascii.isPrint(byte)) byte else '.');
+ try w.writeByte('\n');
+ }
}
}
diff --git a/src/link/MappedFile.zig b/src/link/MappedFile.zig
index cd100f1496..c44f1f68fb 100644
--- a/src/link/MappedFile.zig
+++ b/src/link/MappedFile.zig
@@ -34,17 +34,28 @@ pub fn init(file: std.fs.File, gpa: std.mem.Allocator) !MappedFile {
.writers = .{},
};
errdefer mf.deinit(gpa);
- const size: u64, const blksize = if (is_windows)
- .{ try windows.GetFileSizeEx(file.handle), 1 }
- else stat: {
+ const size: u64, const block_size = stat: {
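+ // On Windows, derive the block size from the system page size and
+ // allocation granularity, falling back to page_size_max if the query fails.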
+ if (is_windows) {
+ var sbi: windows.SYSTEM_BASIC_INFORMATION = undefined;
+ break :stat .{
+ try windows.GetFileSizeEx(file.handle),
+ switch (windows.ntdll.NtQuerySystemInformation(
+ .SystemBasicInformation,
+ &sbi,
+ @sizeOf(windows.SYSTEM_BASIC_INFORMATION),
+ null,
+ )) {
+ .SUCCESS => @max(sbi.PageSize, sbi.AllocationGranularity),
+ else => std.heap.page_size_max,
+ },
+ };
+ }
const stat = try std.posix.fstat(mf.file.handle);
if (!std.posix.S.ISREG(stat.mode)) return error.PathAlreadyExists;
- break :stat .{ @bitCast(stat.size), stat.blksize };
+ break :stat .{ @bitCast(stat.size), @max(std.heap.pageSize(), stat.blksize) };
};
mf.flags = .{
- .block_size = .fromByteUnits(
- std.math.ceilPowerOfTwoAssert(usize, @max(std.heap.pageSize(), blksize)),
- ),
+ .block_size = .fromByteUnits(std.math.ceilPowerOfTwoAssert(usize, block_size)),
.copy_file_range_unsupported = false,
.fallocate_insert_range_unsupported = false,
.fallocate_punch_hole_unsupported = false,
@@ -90,9 +101,11 @@ pub const Node = extern struct {
resized: bool,
/// Whether this node might contain non-zero bytes.
has_content: bool,
+ /// Whether moved events bubble down through this node: when unset,
+ /// hasMoved() stops its upward walk here, so moves of this node or of its
+ /// ancestors are not observed from this node or its descendants.
+ bubbles_moved: bool,
unused: @Type(.{ .int = .{
.signedness = .unsigned,
- .bits = 32 - @bitSizeOf(std.mem.Alignment) - 5,
+ .bits = 32 - @bitSizeOf(std.mem.Alignment) - 6,
} }) = 0,
};
@@ -136,6 +149,25 @@ pub const Node = extern struct {
return &mf.nodes.items[@intFromEnum(ni)];
}
+ pub fn parent(ni: Node.Index, mf: *const MappedFile) Node.Index {
+ return ni.get(mf).parent;
+ }
+
+ pub const ChildIterator = struct {
+ mf: *const MappedFile,
+ ni: Node.Index,
+
+ pub fn next(it: *ChildIterator) ?Node.Index {
+ const ni = it.ni;
+ if (ni == .none) return null;
+ it.ni = ni.get(it.mf).next;
+ return ni;
+ }
+ };
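+ /// Returns an iterator over this node's direct children, in order. A usage
+ /// sketch: `var it = ni.children(&mf); while (it.next()) |child| { ... }`.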
+ pub fn children(ni: Node.Index, mf: *const MappedFile) ChildIterator {
+ return .{ .mf = mf, .ni = ni.get(mf).first };
+ }
+
pub fn childrenMoved(ni: Node.Index, gpa: std.mem.Allocator, mf: *MappedFile) !void {
var child_ni = ni.get(mf).last;
while (child_ni != .none) {
@@ -147,9 +179,10 @@ pub const Node = extern struct {
pub fn hasMoved(ni: Node.Index, mf: *const MappedFile) bool {
var parent_ni = ni;
while (parent_ni != Node.Index.root) {
- const parent = parent_ni.get(mf);
- if (parent.flags.moved) return true;
- parent_ni = parent.parent;
+ const parent_node = parent_ni.get(mf);
+ if (!parent_node.flags.bubbles_moved) break;
+ if (parent_node.flags.moved) return true;
+ parent_ni = parent_node.parent;
}
return false;
}
@@ -163,12 +196,7 @@ pub const Node = extern struct {
return node_moved.*;
}
fn movedAssumeCapacity(ni: Node.Index, mf: *MappedFile) void {
- var parent_ni = ni;
- while (parent_ni != Node.Index.root) {
- const parent_node = parent_ni.get(mf);
- if (parent_node.flags.moved) return;
- parent_ni = parent_node.parent;
- }
+ if (ni.hasMoved(mf)) return;
const node = ni.get(mf);
node.flags.moved = true;
if (node.flags.resized) return;
@@ -242,10 +270,10 @@ pub const Node = extern struct {
var offset, const size = ni.location(mf).resolve(mf);
var parent_ni = ni;
while (true) {
- const parent = parent_ni.get(mf);
- if (set_has_content) parent.flags.has_content = true;
+ const parent_node = parent_ni.get(mf);
+ if (set_has_content) parent_node.flags.has_content = true;
if (parent_ni == .none) break;
- parent_ni = parent.parent;
+ parent_ni = parent_node.parent;
offset += parent_ni.location(mf).resolve(mf)[0];
}
return .{ .offset = offset, .size = size };
@@ -449,6 +477,7 @@ fn addNode(mf: *MappedFile, gpa: std.mem.Allocator, opts: struct {
.moved = true,
.resized = true,
.has_content = false,
+ .bubbles_moved = opts.add_node.bubbles_moved,
},
.location_payload = location_payload,
};
@@ -471,6 +500,7 @@ pub const AddNodeOptions = struct {
fixed: bool = false,
moved: bool = false,
resized: bool = false,
+ bubbles_moved: bool = true,
};
pub fn addOnlyChildNode(