diff options
| author | Andrew Kelley <andrew@ziglang.org> | 2023-08-24 20:43:43 -0700 |
|---|---|---|
| committer | Andrew Kelley <andrew@ziglang.org> | 2023-09-21 14:48:40 -0700 |
| commit | accd5701c251c2741479fe08e56c8271c444f021 (patch) | |
| tree | 78871f150609687a9210063e90f8f4eb53997c38 /src/link | |
| parent | 0345d7866347c9066b0646f9e46be9a068dcfaa3 (diff) | |
| download | zig-accd5701c251c2741479fe08e56c8271c444f021.tar.gz zig-accd5701c251c2741479fe08e56c8271c444f021.zip | |
compiler: move struct types into InternPool proper
Structs were previously using `SegmentedList` to be given indexes, but
were not actually backed by the InternPool arrays.
After this, the only remaining uses of `SegmentedList` in the compiler
are `Module.Decl` and `Module.Namespace`. Once those last two are
migrated to become backed by InternPool arrays as well, we can introduce
state serialization via writing these arrays to disk all at once.
Unfortunately there are a lot of source code locations that touch the
struct type API, so this commit is still work-in-progress. Once I get it
compiling and passing the test suite, I can provide some interesting
data points such as how it affected the InternPool memory size and
performance comparison against master branch.
I also couldn't resist migrating over a bunch of alignment API over to
use the log2 Alignment type rather than a mishmash of u32 and u64 byte
units with 0 meaning something implicitly different and special at every
location. Turns out you can do all the math you need directly on the
log2 representation of alignments.
Diffstat (limited to 'src/link')
| -rw-r--r-- | src/link/Coff.zig | 8 | ||||
| -rw-r--r-- | src/link/Dwarf.zig | 28 | ||||
| -rw-r--r-- | src/link/Elf.zig | 52 | ||||
| -rw-r--r-- | src/link/Elf/Atom.zig | 17 | ||||
| -rw-r--r-- | src/link/Elf/Object.zig | 7 | ||||
| -rw-r--r-- | src/link/MachO.zig | 35 | ||||
| -rw-r--r-- | src/link/MachO/Atom.zig | 4 | ||||
| -rw-r--r-- | src/link/MachO/Object.zig | 20 | ||||
| -rw-r--r-- | src/link/MachO/thunks.zig | 11 | ||||
| -rw-r--r-- | src/link/MachO/zld.zig | 9 | ||||
| -rw-r--r-- | src/link/Plan9.zig | 2 | ||||
| -rw-r--r-- | src/link/Wasm.zig | 44 | ||||
| -rw-r--r-- | src/link/Wasm/Atom.zig | 4 | ||||
| -rw-r--r-- | src/link/Wasm/Object.zig | 16 | ||||
| -rw-r--r-- | src/link/Wasm/types.zig | 4 |
15 files changed, 133 insertions, 128 deletions
diff --git a/src/link/Coff.zig b/src/link/Coff.zig index f86269fd52..40e33db2bd 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1118,7 +1118,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In }, }; - const required_alignment = tv.ty.abiAlignment(mod); + const required_alignment: u32 = @intCast(tv.ty.abiAlignment(mod).toByteUnits(0)); const atom = self.getAtomPtr(atom_index); atom.size = @as(u32, @intCast(code.len)); atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, required_alignment); @@ -1196,7 +1196,7 @@ fn updateLazySymbolAtom( const gpa = self.base.allocator; const mod = self.base.options.module.?; - var required_alignment: u32 = undefined; + var required_alignment: InternPool.Alignment = .none; var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); @@ -1240,7 +1240,7 @@ fn updateLazySymbolAtom( symbol.section_number = @as(coff.SectionNumber, @enumFromInt(section_index + 1)); symbol.type = .{ .complex_type = .NULL, .base_type = .NULL }; - const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment); + const vaddr = try self.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits(0))); errdefer self.freeAtom(atom_index); log.debug("allocated atom for {s} at 0x{x}", .{ name, vaddr }); @@ -1322,7 +1322,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, comple const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); log.debug("updateDeclCode {s}{*}", .{ decl_name, decl }); - const required_alignment = decl.getAlignment(mod); + const required_alignment: u32 = @intCast(decl.getAlignment(mod).toByteUnits(0)); const decl_metadata = self.decls.get(decl_index).?; const atom_index = decl_metadata.atom; diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 52d6550bcb..70654662b4 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -341,23 +341,22 @@ pub const DeclState = struct 
{ try leb128.writeULEB128(dbg_info_buffer.writer(), field_off); } }, - .struct_type => |struct_type| s: { - const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :s; + .struct_type => |struct_type| { // DW.AT.name, DW.FORM.string try ty.print(dbg_info_buffer.writer(), mod); try dbg_info_buffer.append(0); - if (struct_obj.layout == .Packed) { + if (struct_type.layout == .Packed) { log.debug("TODO implement .debug_info for packed structs", .{}); break :blk; } for ( - struct_obj.fields.keys(), - struct_obj.fields.values(), - 0.., - ) |field_name_ip, field, field_index| { - if (!field.ty.hasRuntimeBits(mod)) continue; + struct_type.field_names.get(ip), + struct_type.field_types.get(ip), + struct_type.offsets.get(ip), + ) |field_name_ip, field_ty, field_off| { + if (!field_ty.toType().hasRuntimeBits(mod)) continue; const field_name = ip.stringToSlice(field_name_ip); // DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2); @@ -368,9 +367,8 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, field.ty, @as(u32, @intCast(index))); + try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @intCast(index)); // DW.AT.data_member_location, DW.FORM.udata - const field_off = ty.structFieldOffset(field_index, mod); try leb128.writeULEB128(dbg_info_buffer.writer(), field_off); } }, @@ -416,8 +414,8 @@ pub const DeclState = struct { .Union => { const union_obj = mod.typeToUnion(ty).?; const layout = mod.getUnionLayout(union_obj); - const payload_offset = if (layout.tag_align >= layout.payload_align) layout.tag_size else 0; - const tag_offset = if (layout.tag_align >= layout.payload_align) 0 else layout.payload_size; + const payload_offset = if (layout.tag_align.compare(.gte, layout.payload_align)) layout.tag_size else 0; + const tag_offset = if (layout.tag_align.compare(.gte, layout.payload_align)) 0 else 
layout.payload_size; // TODO this is temporary to match current state of unions in Zig - we don't yet have // safety checks implemented meaning the implicit tag is not yet stored and generated // for untagged unions. @@ -496,11 +494,11 @@ pub const DeclState = struct { .ErrorUnion => { const error_ty = ty.errorUnionSet(mod); const payload_ty = ty.errorUnionPayload(mod); - const payload_align = if (payload_ty.isNoReturn(mod)) 0 else payload_ty.abiAlignment(mod); + const payload_align = if (payload_ty.isNoReturn(mod)) .none else payload_ty.abiAlignment(mod); const error_align = Type.anyerror.abiAlignment(mod); const abi_size = ty.abiSize(mod); - const payload_off = if (error_align >= payload_align) Type.anyerror.abiSize(mod) else 0; - const error_off = if (error_align >= payload_align) 0 else payload_ty.abiSize(mod); + const payload_off = if (error_align.compare(.gte, payload_align)) Type.anyerror.abiSize(mod) else 0; + const error_off = if (error_align.compare(.gte, payload_align)) 0 else payload_ty.abiSize(mod); // DW.AT.structure_type try dbg_info_buffer.append(@intFromEnum(AbbrevKind.struct_type)); diff --git a/src/link/Elf.zig b/src/link/Elf.zig index b12d24745b..c752f15ff2 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -409,7 +409,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { const image_base = self.calcImageBase(); if (self.phdr_table_index == null) { - self.phdr_table_index = @as(u16, @intCast(self.phdrs.items.len)); + self.phdr_table_index = @intCast(self.phdrs.items.len); const p_align: u16 = switch (self.ptr_width) { .p32 => @alignOf(elf.Elf32_Phdr), .p64 => @alignOf(elf.Elf64_Phdr), @@ -428,7 +428,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.phdr_table_load_index == null) { - self.phdr_table_load_index = @as(u16, @intCast(self.phdrs.items.len)); + self.phdr_table_load_index = @intCast(self.phdrs.items.len); // TODO Same as for GOT try self.phdrs.append(gpa, .{ .p_type = elf.PT_LOAD, @@ -444,7 +444,7 @@ pub fn 
populateMissingMetadata(self: *Elf) !void { } if (self.phdr_load_re_index == null) { - self.phdr_load_re_index = @as(u16, @intCast(self.phdrs.items.len)); + self.phdr_load_re_index = @intCast(self.phdrs.items.len); const file_size = self.base.options.program_code_size_hint; const p_align = self.page_size; const off = self.findFreeSpace(file_size, p_align); @@ -465,7 +465,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.phdr_got_index == null) { - self.phdr_got_index = @as(u16, @intCast(self.phdrs.items.len)); + self.phdr_got_index = @intCast(self.phdrs.items.len); const file_size = @as(u64, ptr_size) * self.base.options.symbol_count_hint; // We really only need ptr alignment but since we are using PROGBITS, linux requires // page align. @@ -490,7 +490,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.phdr_load_ro_index == null) { - self.phdr_load_ro_index = @as(u16, @intCast(self.phdrs.items.len)); + self.phdr_load_ro_index = @intCast(self.phdrs.items.len); // TODO Find a hint about how much data need to be in rodata ? const file_size = 1024; // Same reason as for GOT @@ -513,7 +513,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.phdr_load_rw_index == null) { - self.phdr_load_rw_index = @as(u16, @intCast(self.phdrs.items.len)); + self.phdr_load_rw_index = @intCast(self.phdrs.items.len); // TODO Find a hint about how much data need to be in data ? 
const file_size = 1024; // Same reason as for GOT @@ -536,7 +536,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.phdr_load_zerofill_index == null) { - self.phdr_load_zerofill_index = @as(u16, @intCast(self.phdrs.items.len)); + self.phdr_load_zerofill_index = @intCast(self.phdrs.items.len); const p_align = if (self.base.options.target.os.tag == .linux) self.page_size else @as(u16, ptr_size); const off = self.phdrs.items[self.phdr_load_rw_index.?].p_offset; log.debug("found PT_LOAD zerofill free space 0x{x} to 0x{x}", .{ off, off }); @@ -556,7 +556,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.shstrtab_section_index == null) { - self.shstrtab_section_index = @as(u16, @intCast(self.shdrs.items.len)); + self.shstrtab_section_index = @intCast(self.shdrs.items.len); assert(self.shstrtab.buffer.items.len == 0); try self.shstrtab.buffer.append(gpa, 0); // need a 0 at position 0 const off = self.findFreeSpace(self.shstrtab.buffer.items.len, 1); @@ -578,7 +578,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.strtab_section_index == null) { - self.strtab_section_index = @as(u16, @intCast(self.shdrs.items.len)); + self.strtab_section_index = @intCast(self.shdrs.items.len); assert(self.strtab.buffer.items.len == 0); try self.strtab.buffer.append(gpa, 0); // need a 0 at position 0 const off = self.findFreeSpace(self.strtab.buffer.items.len, 1); @@ -600,7 +600,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.text_section_index == null) { - self.text_section_index = @as(u16, @intCast(self.shdrs.items.len)); + self.text_section_index = @intCast(self.shdrs.items.len); const phdr = &self.phdrs.items[self.phdr_load_re_index.?]; try self.shdrs.append(gpa, .{ .sh_name = try self.shstrtab.insert(gpa, ".text"), @@ -620,7 +620,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.got_section_index == null) { - self.got_section_index = @as(u16, @intCast(self.shdrs.items.len)); + self.got_section_index = 
@intCast(self.shdrs.items.len); const phdr = &self.phdrs.items[self.phdr_got_index.?]; try self.shdrs.append(gpa, .{ .sh_name = try self.shstrtab.insert(gpa, ".got"), @@ -639,7 +639,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.rodata_section_index == null) { - self.rodata_section_index = @as(u16, @intCast(self.shdrs.items.len)); + self.rodata_section_index = @intCast(self.shdrs.items.len); const phdr = &self.phdrs.items[self.phdr_load_ro_index.?]; try self.shdrs.append(gpa, .{ .sh_name = try self.shstrtab.insert(gpa, ".rodata"), @@ -659,7 +659,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.data_section_index == null) { - self.data_section_index = @as(u16, @intCast(self.shdrs.items.len)); + self.data_section_index = @intCast(self.shdrs.items.len); const phdr = &self.phdrs.items[self.phdr_load_rw_index.?]; try self.shdrs.append(gpa, .{ .sh_name = try self.shstrtab.insert(gpa, ".data"), @@ -679,7 +679,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.bss_section_index == null) { - self.bss_section_index = @as(u16, @intCast(self.shdrs.items.len)); + self.bss_section_index = @intCast(self.shdrs.items.len); const phdr = &self.phdrs.items[self.phdr_load_zerofill_index.?]; try self.shdrs.append(gpa, .{ .sh_name = try self.shstrtab.insert(gpa, ".bss"), @@ -699,7 +699,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.symtab_section_index == null) { - self.symtab_section_index = @as(u16, @intCast(self.shdrs.items.len)); + self.symtab_section_index = @intCast(self.shdrs.items.len); const min_align: u16 = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym); const each_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym); const file_size = self.base.options.symbol_count_hint * each_size; @@ -714,7 +714,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { .sh_size = file_size, // The section header index of the associated string table. 
.sh_link = self.strtab_section_index.?, - .sh_info = @as(u32, @intCast(self.symbols.items.len)), + .sh_info = @intCast(self.symbols.items.len), .sh_addralign = min_align, .sh_entsize = each_size, }); @@ -723,7 +723,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { if (self.dwarf) |*dw| { if (self.debug_str_section_index == null) { - self.debug_str_section_index = @as(u16, @intCast(self.shdrs.items.len)); + self.debug_str_section_index = @intCast(self.shdrs.items.len); assert(dw.strtab.buffer.items.len == 0); try dw.strtab.buffer.append(gpa, 0); try self.shdrs.append(gpa, .{ @@ -743,7 +743,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.debug_info_section_index == null) { - self.debug_info_section_index = @as(u16, @intCast(self.shdrs.items.len)); + self.debug_info_section_index = @intCast(self.shdrs.items.len); const file_size_hint = 200; const p_align = 1; const off = self.findFreeSpace(file_size_hint, p_align); @@ -768,7 +768,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.debug_abbrev_section_index == null) { - self.debug_abbrev_section_index = @as(u16, @intCast(self.shdrs.items.len)); + self.debug_abbrev_section_index = @intCast(self.shdrs.items.len); const file_size_hint = 128; const p_align = 1; const off = self.findFreeSpace(file_size_hint, p_align); @@ -793,7 +793,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.debug_aranges_section_index == null) { - self.debug_aranges_section_index = @as(u16, @intCast(self.shdrs.items.len)); + self.debug_aranges_section_index = @intCast(self.shdrs.items.len); const file_size_hint = 160; const p_align = 16; const off = self.findFreeSpace(file_size_hint, p_align); @@ -818,7 +818,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.debug_line_section_index == null) { - self.debug_line_section_index = @as(u16, @intCast(self.shdrs.items.len)); + self.debug_line_section_index = @intCast(self.shdrs.items.len); const file_size_hint = 250; const p_align = 
1; const off = self.findFreeSpace(file_size_hint, p_align); @@ -2666,12 +2666,12 @@ fn updateDeclCode( const old_size = atom_ptr.size; const old_vaddr = atom_ptr.value; - atom_ptr.alignment = math.log2_int(u64, required_alignment); + atom_ptr.alignment = required_alignment; atom_ptr.size = code.len; if (old_size > 0 and self.base.child_pid == null) { const capacity = atom_ptr.capacity(self); - const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, sym.value, required_alignment); + const need_realloc = code.len > capacity or !required_alignment.check(sym.value); if (need_realloc) { try atom_ptr.grow(self); log.debug("growing {s} from 0x{x} to 0x{x}", .{ decl_name, old_vaddr, atom_ptr.value }); @@ -2869,7 +2869,7 @@ fn updateLazySymbol(self: *Elf, sym: link.File.LazySymbol, symbol_index: Symbol. const mod = self.base.options.module.?; const zig_module = self.file(self.zig_module_index.?).?.zig_module; - var required_alignment: u32 = undefined; + var required_alignment: InternPool.Alignment = .none; var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); @@ -2918,7 +2918,7 @@ fn updateLazySymbol(self: *Elf, sym: link.File.LazySymbol, symbol_index: Symbol. 
const atom_ptr = local_sym.atom(self).?; atom_ptr.alive = true; atom_ptr.name_offset = name_str_index; - atom_ptr.alignment = math.log2_int(u64, required_alignment); + atom_ptr.alignment = required_alignment; atom_ptr.size = code.len; try atom_ptr.allocate(self); @@ -2995,7 +2995,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module const atom_ptr = local_sym.atom(self).?; atom_ptr.alive = true; atom_ptr.name_offset = name_str_index; - atom_ptr.alignment = math.log2_int(u64, required_alignment); + atom_ptr.alignment = required_alignment; atom_ptr.size = code.len; try atom_ptr.allocate(self); diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig index 073536fbaa..0223751d06 100644 --- a/src/link/Elf/Atom.zig +++ b/src/link/Elf/Atom.zig @@ -11,7 +11,7 @@ file_index: File.Index = 0, size: u64 = 0, /// Alignment of this atom as a power of two. -alignment: u8 = 0, +alignment: Alignment = .@"1", /// Index of the input section. input_section_index: Index = 0, @@ -42,6 +42,8 @@ fde_end: u32 = 0, prev_index: Index = 0, next_index: Index = 0, +pub const Alignment = @import("../../InternPool.zig").Alignment; + pub fn name(self: Atom, elf_file: *Elf) []const u8 { return elf_file.strtab.getAssumeExists(self.name_offset); } @@ -112,7 +114,6 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void { const free_list = &meta.free_list; const last_atom_index = &meta.last_atom_index; const new_atom_ideal_capacity = Elf.padToIdeal(self.size); - const alignment = try std.math.powi(u64, 2, self.alignment); // We use these to indicate our intention to update metadata, placing the new atom, // and possibly removing a free list node. 
@@ -136,7 +137,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void { const ideal_capacity_end_vaddr = std.math.add(u64, big_atom.value, ideal_capacity) catch ideal_capacity; const capacity_end_vaddr = big_atom.value + cap; const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity; - const new_start_vaddr = std.mem.alignBackward(u64, new_start_vaddr_unaligned, alignment); + const new_start_vaddr = self.alignment.backward(new_start_vaddr_unaligned); if (new_start_vaddr < ideal_capacity_end_vaddr) { // Additional bookkeeping here to notice if this free list node // should be deleted because the block that it points to has grown to take up @@ -163,7 +164,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void { } else if (elf_file.atom(last_atom_index.*)) |last| { const ideal_capacity = Elf.padToIdeal(last.size); const ideal_capacity_end_vaddr = last.value + ideal_capacity; - const new_start_vaddr = std.mem.alignForward(u64, ideal_capacity_end_vaddr, alignment); + const new_start_vaddr = self.alignment.forward(ideal_capacity_end_vaddr); // Set up the metadata to be updated, after errors are no longer possible. atom_placement = last.atom_index; break :blk new_start_vaddr; @@ -192,7 +193,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void { elf_file.debug_aranges_section_dirty = true; } } - shdr.sh_addralign = @max(shdr.sh_addralign, alignment); + shdr.sh_addralign = @max(shdr.sh_addralign, self.alignment.toByteUnitsOptional().?); // This function can also reallocate an atom. 
// In this case we need to "unplug" it from its previous location before @@ -224,10 +225,8 @@ pub fn shrink(self: *Atom, elf_file: *Elf) void { } pub fn grow(self: *Atom, elf_file: *Elf) !void { - const alignment = try std.math.powi(u64, 2, self.alignment); - const align_ok = std.mem.alignBackward(u64, self.value, alignment) == self.value; - const need_realloc = !align_ok or self.size > self.capacity(elf_file); - if (need_realloc) try self.allocate(elf_file); + if (!self.alignment.check(self.value) or self.size > self.capacity(elf_file)) + try self.allocate(elf_file); } pub fn free(self: *Atom, elf_file: *Elf) void { diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index 32c96b8d95..36fb531fa9 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -181,10 +181,10 @@ fn addAtom(self: *Object, shdr: elf.Elf64_Shdr, shndx: u16, name: [:0]const u8, const data = try self.shdrContents(shndx); const chdr = @as(*align(1) const elf.Elf64_Chdr, @ptrCast(data.ptr)).*; atom.size = chdr.ch_size; - atom.alignment = math.log2_int(u64, chdr.ch_addralign); + atom.alignment = Alignment.fromNonzeroByteUnits(chdr.ch_addralign); } else { atom.size = shdr.sh_size; - atom.alignment = math.log2_int(u64, shdr.sh_addralign); + atom.alignment = Alignment.fromNonzeroByteUnits(shdr.sh_addralign); } } @@ -571,7 +571,7 @@ pub fn convertCommonSymbols(self: *Object, elf_file: *Elf) !void { atom.file = self.index; atom.size = this_sym.st_size; const alignment = this_sym.st_value; - atom.alignment = math.log2_int(u64, alignment); + atom.alignment = Alignment.fromNonzeroByteUnits(alignment); var sh_flags: u32 = elf.SHF_ALLOC | elf.SHF_WRITE; if (is_tls) sh_flags |= elf.SHF_TLS; @@ -870,3 +870,4 @@ const Fde = eh_frame.Fde; const File = @import("file.zig").File; const StringTable = @import("../strtab.zig").StringTable; const Symbol = @import("Symbol.zig"); +const Alignment = Atom.Alignment; diff --git a/src/link/MachO.zig b/src/link/MachO.zig index ea5d5011f6..735a36a9df 
100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1425,7 +1425,7 @@ pub fn allocateSpecialSymbols(self: *MachO) !void { const CreateAtomOpts = struct { size: u64 = 0, - alignment: u32 = 0, + alignment: Alignment = .@"1", }; pub fn createAtom(self: *MachO, sym_index: u32, opts: CreateAtomOpts) !Atom.Index { @@ -1473,7 +1473,7 @@ pub fn createTentativeDefAtoms(self: *MachO) !void { const atom_index = try self.createAtom(global.sym_index, .{ .size = size, - .alignment = alignment, + .alignment = @enumFromInt(alignment), }); const atom = self.getAtomPtr(atom_index); atom.file = global.file; @@ -1493,7 +1493,7 @@ pub fn createDyldPrivateAtom(self: *MachO) !void { const sym_index = try self.allocateSymbol(); const atom_index = try self.createAtom(sym_index, .{ .size = @sizeOf(u64), - .alignment = 3, + .alignment = .@"8", }); try self.atom_by_index_table.putNoClobber(self.base.allocator, sym_index, atom_index); @@ -1510,7 +1510,7 @@ pub fn createDyldPrivateAtom(self: *MachO) !void { switch (self.mode) { .zld => self.addAtomToSection(atom_index), .incremental => { - sym.n_value = try self.allocateAtom(atom_index, atom.size, @alignOf(u64)); + sym.n_value = try self.allocateAtom(atom_index, atom.size, .@"8"); log.debug("allocated dyld_private atom at 0x{x}", .{sym.n_value}); var buffer: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64); try self.writeAtom(atom_index, &buffer); @@ -1521,7 +1521,7 @@ pub fn createDyldPrivateAtom(self: *MachO) !void { fn createThreadLocalDescriptorAtom(self: *MachO, sym_name: []const u8, target: SymbolWithLoc) !Atom.Index { const gpa = self.base.allocator; const size = 3 * @sizeOf(u64); - const required_alignment: u32 = 1; + const required_alignment: Alignment = .@"1"; const sym_index = try self.allocateSymbol(); const atom_index = try self.createAtom(sym_index, .{}); try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index); @@ -2030,10 +2030,10 @@ fn shrinkAtom(self: *MachO, atom_index: Atom.Index, new_block_size: u64) void { 
// capacity, insert a free list node for it. } -fn growAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 { +fn growAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: Alignment) !u64 { const atom = self.getAtom(atom_index); const sym = atom.getSymbol(self); - const align_ok = mem.alignBackward(u64, sym.n_value, alignment) == sym.n_value; + const align_ok = alignment.check(sym.n_value); const need_realloc = !align_ok or new_atom_size > atom.capacity(self); if (!need_realloc) return sym.n_value; return self.allocateAtom(atom_index, new_atom_size, alignment); @@ -2350,7 +2350,7 @@ fn updateLazySymbolAtom( const gpa = self.base.allocator; const mod = self.base.options.module.?; - var required_alignment: u32 = undefined; + var required_alignment: Alignment = .none; var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); @@ -2617,7 +2617,7 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []u8) !u64 sym.n_desc = 0; const capacity = atom.capacity(self); - const need_realloc = code_len > capacity or !mem.isAlignedGeneric(u64, sym.n_value, required_alignment); + const need_realloc = code_len > capacity or !required_alignment.check(sym.n_value); if (need_realloc) { const vaddr = try self.growAtom(atom_index, code_len, required_alignment); @@ -3204,7 +3204,7 @@ pub fn addAtomToSection(self: *MachO, atom_index: Atom.Index) void { self.sections.set(sym.n_sect - 1, section); } -fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 { +fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: Alignment) !u64 { const tracy = trace(@src()); defer tracy.end(); @@ -3247,7 +3247,7 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm const ideal_capacity_end_vaddr = math.add(u64, sym.n_value, ideal_capacity) catch ideal_capacity; const capacity_end_vaddr = sym.n_value + capacity; const 
new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity; - const new_start_vaddr = mem.alignBackward(u64, new_start_vaddr_unaligned, alignment); + const new_start_vaddr = alignment.backward(new_start_vaddr_unaligned); if (new_start_vaddr < ideal_capacity_end_vaddr) { // Additional bookkeeping here to notice if this free list node // should be deleted because the atom that it points to has grown to take up @@ -3276,11 +3276,11 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm const last_symbol = last.getSymbol(self); const ideal_capacity = if (requires_padding) padToIdeal(last.size) else last.size; const ideal_capacity_end_vaddr = last_symbol.n_value + ideal_capacity; - const new_start_vaddr = mem.alignForward(u64, ideal_capacity_end_vaddr, alignment); + const new_start_vaddr = alignment.forward(ideal_capacity_end_vaddr); atom_placement = last_index; break :blk new_start_vaddr; } else { - break :blk mem.alignForward(u64, segment.vmaddr, alignment); + break :blk alignment.forward(segment.vmaddr); } }; @@ -3295,10 +3295,8 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm self.segment_table_dirty = true; } - const align_pow = @as(u32, @intCast(math.log2(alignment))); - if (header.@"align" < align_pow) { - header.@"align" = align_pow; - } + assert(alignment != .none); + header.@"align" = @max(header.@"align", @intFromEnum(alignment)); self.getAtomPtr(atom_index).size = new_atom_size; if (atom.prev_index) |prev_index| { @@ -3338,7 +3336,7 @@ pub fn getGlobalSymbol(self: *MachO, name: []const u8, lib_name: ?[]const u8) !u pub fn writeSegmentHeaders(self: *MachO, writer: anytype) !void { for (self.segments.items, 0..)
|seg, i| { - const indexes = self.getSectionIndexes(@as(u8, @intCast(i))); + const indexes = self.getSectionIndexes(@intCast(i)); var out_seg = seg; out_seg.cmdsize = @sizeOf(macho.segment_command_64); out_seg.nsects = 0; @@ -5526,6 +5524,7 @@ const Trie = @import("MachO/Trie.zig"); const Type = @import("../type.zig").Type; const TypedValue = @import("../TypedValue.zig"); const Value = @import("../value.zig").Value; +const Alignment = Atom.Alignment; pub const DebugSymbols = @import("MachO/DebugSymbols.zig"); pub const Bind = @import("MachO/dyld_info/bind.zig").Bind(*const MachO, SymbolWithLoc); diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig index 16d318ba2c..a548c4e538 100644 --- a/src/link/MachO/Atom.zig +++ b/src/link/MachO/Atom.zig @@ -28,13 +28,15 @@ size: u64 = 0, /// Alignment of this atom as a power of 2. /// For instance, aligmment of 0 should be read as 2^0 = 1 byte aligned. -alignment: u32 = 0, +alignment: Alignment = .@"1", /// Points to the previous and next neighbours /// TODO use the same trick as with symbols: reserve index 0 as null atom next_index: ?Index = null, prev_index: ?Index = null, +pub const Alignment = @import("../../InternPool.zig").Alignment; + pub const Index = u32; pub const Binding = struct { diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig index ab12ede5d7..b0b87c8c34 100644 --- a/src/link/MachO/Object.zig +++ b/src/link/MachO/Object.zig @@ -382,7 +382,7 @@ pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) ! const out_sect_id = (try Atom.getOutputSection(macho_file, sect)) orelse continue; if (sect.size == 0) continue; - const sect_id = @as(u8, @intCast(id)); + const sect_id: u8 = @intCast(id); const sym_index = self.getSectionAliasSymbolIndex(sect_id); const atom_index = try self.createAtomFromSubsection( macho_file, @@ -391,7 +391,7 @@ pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) ! 
sym_index, 1, sect.size, - sect.@"align", + Alignment.fromLog2Units(sect.@"align"), out_sect_id, ); macho_file.addAtomToSection(atom_index); @@ -470,7 +470,7 @@ pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) ! sym_index, 1, atom_size, - sect.@"align", + Alignment.fromLog2Units(sect.@"align"), out_sect_id, ); if (!sect.isZerofill()) { @@ -494,10 +494,10 @@ pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) ! else sect.addr + sect.size - addr; - const atom_align = if (addr > 0) + const atom_align = Alignment.fromLog2Units(if (addr > 0) @min(@ctz(addr), sect.@"align") else - sect.@"align"; + sect.@"align"); const atom_index = try self.createAtomFromSubsection( macho_file, @@ -532,7 +532,7 @@ pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) ! sect_start_index, sect_loc.len, sect.size, - sect.@"align", + Alignment.fromLog2Units(sect.@"align"), out_sect_id, ); if (!sect.isZerofill()) { @@ -551,11 +551,14 @@ fn createAtomFromSubsection( inner_sym_index: u32, inner_nsyms_trailing: u32, size: u64, - alignment: u32, + alignment: Alignment, out_sect_id: u8, ) !Atom.Index { const gpa = macho_file.base.allocator; - const atom_index = try macho_file.createAtom(sym_index, .{ .size = size, .alignment = alignment }); + const atom_index = try macho_file.createAtom(sym_index, .{ + .size = size, + .alignment = alignment, + }); const atom = macho_file.getAtomPtr(atom_index); atom.inner_sym_index = inner_sym_index; atom.inner_nsyms_trailing = inner_nsyms_trailing; @@ -1115,3 +1118,4 @@ const MachO = @import("../MachO.zig"); const Platform = @import("load_commands.zig").Platform; const SymbolWithLoc = MachO.SymbolWithLoc; const UnwindInfo = @import("UnwindInfo.zig"); +const Alignment = Atom.Alignment; diff --git a/src/link/MachO/thunks.zig b/src/link/MachO/thunks.zig index 2ee47478f4..75f20bbf6d 100644 --- a/src/link/MachO/thunks.zig +++ b/src/link/MachO/thunks.zig @@ -104,7 +104,7 @@ pub fn 
createThunks(macho_file: *MachO, sect_id: u8) !void { while (true) { const atom = macho_file.getAtom(group_end); - offset = mem.alignForward(u64, offset, try math.powi(u32, 2, atom.alignment)); + offset = atom.alignment.forward(offset); const sym = macho_file.getSymbolPtr(atom.getSymbolWithLoc()); sym.n_value = offset; @@ -112,7 +112,7 @@ pub fn createThunks(macho_file: *MachO, sect_id: u8) !void { macho_file.logAtom(group_end, log); - header.@"align" = @max(header.@"align", atom.alignment); + header.@"align" = @max(header.@"align", atom.alignment.toLog2Units()); allocated.putAssumeCapacityNoClobber(group_end, {}); @@ -196,7 +196,7 @@ fn allocateThunk( macho_file.logAtom(atom_index, log); - header.@"align" = @max(header.@"align", atom.alignment); + header.@"align" = @max(header.@"align", atom.alignment.toLog2Units()); if (end_atom_index == atom_index) break; @@ -326,7 +326,10 @@ fn isReachable( fn createThunkAtom(macho_file: *MachO) !Atom.Index { const sym_index = try macho_file.allocateSymbol(); - const atom_index = try macho_file.createAtom(sym_index, .{ .size = @sizeOf(u32) * 3, .alignment = 2 }); + const atom_index = try macho_file.createAtom(sym_index, .{ + .size = @sizeOf(u32) * 3, + .alignment = .@"4", + }); const sym = macho_file.getSymbolPtr(.{ .sym_index = sym_index }); sym.n_type = macho.N_SECT; sym.n_sect = macho_file.text_section_index.? 
+ 1; diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig index 6c8ce03a72..1c1301e2b5 100644 --- a/src/link/MachO/zld.zig +++ b/src/link/MachO/zld.zig @@ -985,19 +985,16 @@ fn calcSectionSizes(macho_file: *MachO) !void { while (true) { const atom = macho_file.getAtom(atom_index); - const atom_alignment = try math.powi(u32, 2, atom.alignment); - const atom_offset = mem.alignForward(u64, header.size, atom_alignment); + const atom_offset = atom.alignment.forward(header.size); const padding = atom_offset - header.size; const sym = macho_file.getSymbolPtr(atom.getSymbolWithLoc()); sym.n_value = atom_offset; header.size += padding + atom.size; - header.@"align" = @max(header.@"align", atom.alignment); + header.@"align" = @max(header.@"align", atom.alignment.toLog2Units()); - if (atom.next_index) |next_index| { - atom_index = next_index; - } else break; + atom_index = atom.next_index orelse break; } } diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index d88816f912..98f5b37a82 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -1106,7 +1106,7 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind const gpa = self.base.allocator; const mod = self.base.options.module.?; - var required_alignment: u32 = undefined; + var required_alignment: InternPool.Alignment = .none; var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index b26d6b3b2a..a59527359c 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -187,8 +187,10 @@ debug_pubtypes_atom: ?Atom.Index = null, /// rather than by the linker. 
synthetic_functions: std.ArrayListUnmanaged(Atom.Index) = .{}, +pub const Alignment = types.Alignment; + pub const Segment = struct { - alignment: u32, + alignment: Alignment, size: u32, offset: u32, flags: u32, @@ -1490,7 +1492,7 @@ fn finishUpdateDecl(wasm: *Wasm, decl_index: Module.Decl.Index, code: []const u8 try atom.code.appendSlice(wasm.base.allocator, code); try wasm.resolved_symbols.put(wasm.base.allocator, atom.symbolLoc(), {}); - atom.size = @as(u32, @intCast(code.len)); + atom.size = @intCast(code.len); if (code.len == 0) return; atom.alignment = decl.getAlignment(mod); } @@ -2050,7 +2052,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void { }; const segment: *Segment = &wasm.segments.items[final_index]; - segment.alignment = @max(segment.alignment, atom.alignment); + segment.alignment = segment.alignment.max(atom.alignment); try wasm.appendAtomAtIndex(final_index, atom_index); } @@ -2121,7 +2123,7 @@ fn allocateAtoms(wasm: *Wasm) !void { } } } - offset = std.mem.alignForward(u32, offset, atom.alignment); + offset = @intCast(atom.alignment.forward(offset)); atom.offset = offset; log.debug("Atom '{s}' allocated from 0x{x:0>8} to 0x{x:0>8} size={d}", .{ symbol_loc.getName(wasm), @@ -2132,7 +2134,7 @@ fn allocateAtoms(wasm: *Wasm) !void { offset += atom.size; atom_index = atom.prev orelse break; } - segment.size = std.mem.alignForward(u32, offset, segment.alignment); + segment.size = @intCast(segment.alignment.forward(offset)); } } @@ -2351,7 +2353,7 @@ fn createSyntheticFunction( .offset = 0, .sym_index = loc.index, .file = null, - .alignment = 1, + .alignment = .@"1", .next = null, .prev = null, .code = function_body.moveToUnmanaged(), @@ -2382,11 +2384,11 @@ pub fn createFunction( const atom_index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len)); const atom = try wasm.managed_atoms.addOne(wasm.base.allocator); atom.* = .{ - .size = @as(u32, @intCast(function_body.items.len)), + .size = @intCast(function_body.items.len), 
.offset = 0, .sym_index = loc.index, .file = null, - .alignment = 1, + .alignment = .@"1", .next = null, .prev = null, .code = function_body.moveToUnmanaged(), @@ -2734,8 +2736,8 @@ fn setupMemory(wasm: *Wasm) !void { const page_size = std.wasm.page_size; // 64kb // Use the user-provided stack size or else we use 1MB by default const stack_size = wasm.base.options.stack_size_override orelse page_size * 16; - const stack_alignment = 16; // wasm's stack alignment as specified by tool-convention - const heap_alignment = 16; // wasm's heap alignment as specified by tool-convention + const stack_alignment: Alignment = .@"16"; // wasm's stack alignment as specified by tool-convention + const heap_alignment: Alignment = .@"16"; // wasm's heap alignment as specified by tool-convention // Always place the stack at the start by default // unless the user specified the global-base flag @@ -2748,7 +2750,7 @@ fn setupMemory(wasm: *Wasm) !void { const is_obj = wasm.base.options.output_mode == .Obj; if (place_stack_first and !is_obj) { - memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment); + memory_ptr = stack_alignment.forward(memory_ptr); memory_ptr += stack_size; // We always put the stack pointer global at index 0 wasm.wasm_globals.items[0].init.i32_const = @as(i32, @bitCast(@as(u32, @intCast(memory_ptr)))); @@ -2758,7 +2760,7 @@ fn setupMemory(wasm: *Wasm) !void { var data_seg_it = wasm.data_segments.iterator(); while (data_seg_it.next()) |entry| { const segment = &wasm.segments.items[entry.value_ptr.*]; - memory_ptr = std.mem.alignForward(u64, memory_ptr, segment.alignment); + memory_ptr = segment.alignment.forward(memory_ptr); // set TLS-related symbols if (mem.eql(u8, entry.key_ptr.*, ".tdata")) { @@ -2768,7 +2770,7 @@ fn setupMemory(wasm: *Wasm) !void { } if (wasm.findGlobalSymbol("__tls_align")) |loc| { const sym = loc.getSymbol(wasm); - wasm.wasm_globals.items[sym.index - wasm.imported_globals_count].init.i32_const = @intCast(segment.alignment); + 
wasm.wasm_globals.items[sym.index - wasm.imported_globals_count].init.i32_const = @intCast(segment.alignment.toByteUnitsOptional().?); } if (wasm.findGlobalSymbol("__tls_base")) |loc| { const sym = loc.getSymbol(wasm); @@ -2795,7 +2797,7 @@ fn setupMemory(wasm: *Wasm) !void { } if (!place_stack_first and !is_obj) { - memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment); + memory_ptr = stack_alignment.forward(memory_ptr); memory_ptr += stack_size; wasm.wasm_globals.items[0].init.i32_const = @as(i32, @bitCast(@as(u32, @intCast(memory_ptr)))); } @@ -2804,7 +2806,7 @@ fn setupMemory(wasm: *Wasm) !void { // We must set its virtual address so it can be used in relocations. if (wasm.findGlobalSymbol("__heap_base")) |loc| { const symbol = loc.getSymbol(wasm); - symbol.virtual_address = @as(u32, @intCast(mem.alignForward(u64, memory_ptr, heap_alignment))); + symbol.virtual_address = @intCast(heap_alignment.forward(memory_ptr)); } // Setup the max amount of pages @@ -2879,7 +2881,7 @@ pub fn getMatchingSegment(wasm: *Wasm, object_index: u16, relocatable_index: u32 flags |= @intFromEnum(Segment.Flag.WASM_DATA_SEGMENT_IS_PASSIVE); } try wasm.segments.append(wasm.base.allocator, .{ - .alignment = 1, + .alignment = .@"1", .size = 0, .offset = 0, .flags = flags, @@ -2954,7 +2956,7 @@ pub fn getMatchingSegment(wasm: *Wasm, object_index: u16, relocatable_index: u32 /// Appends a new segment with default field values fn appendDummySegment(wasm: *Wasm) !void { try wasm.segments.append(wasm.base.allocator, .{ - .alignment = 1, + .alignment = .@"1", .size = 0, .offset = 0, .flags = 0, @@ -3011,7 +3013,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void { // the pointers into the list using addends which are appended to the relocation. 
const names_atom_index = try wasm.createAtom(); const names_atom = wasm.getAtomPtr(names_atom_index); - names_atom.alignment = 1; + names_atom.alignment = .@"1"; const sym_name = try wasm.string_table.put(wasm.base.allocator, "__zig_err_names"); const names_symbol = &wasm.symbols.items[names_atom.sym_index]; names_symbol.* = .{ @@ -3085,7 +3087,7 @@ pub fn createDebugSectionForIndex(wasm: *Wasm, index: *?u32, name: []const u8) ! .flags = @intFromEnum(Symbol.Flag.WASM_SYM_BINDING_LOCAL), }; - atom.alignment = 1; // debug sections are always 1-byte-aligned + atom.alignment = .@"1"; // debug sections are always 1-byte-aligned return atom_index; } @@ -4724,12 +4726,12 @@ fn emitSegmentInfo(wasm: *Wasm, binary_bytes: *std.ArrayList(u8)) !void { for (wasm.segment_info.values()) |segment_info| { log.debug("Emit segment: {s} align({d}) flags({b})", .{ segment_info.name, - @ctz(segment_info.alignment), + segment_info.alignment, segment_info.flags, }); try leb.writeULEB128(writer, @as(u32, @intCast(segment_info.name.len))); try writer.writeAll(segment_info.name); - try leb.writeULEB128(writer, @ctz(segment_info.alignment)); + try leb.writeULEB128(writer, segment_info.alignment.toLog2Units()); try leb.writeULEB128(writer, segment_info.flags); } diff --git a/src/link/Wasm/Atom.zig b/src/link/Wasm/Atom.zig index 8987893ed7..348b9cf8bd 100644 --- a/src/link/Wasm/Atom.zig +++ b/src/link/Wasm/Atom.zig @@ -19,7 +19,7 @@ relocs: std.ArrayListUnmanaged(types.Relocation) = .{}, /// Contains the binary data of an atom, which can be non-relocated code: std.ArrayListUnmanaged(u8) = .{}, /// For code this is 1, for data this is set to the highest value of all segments -alignment: u32, +alignment: Wasm.Alignment, /// Offset into the section where the atom lives, this already accounts /// for alignment. 
offset: u32, @@ -43,7 +43,7 @@ pub const Index = u32; /// Represents a default empty wasm `Atom` pub const empty: Atom = .{ - .alignment = 1, + .alignment = .@"1", .file = null, .next = null, .offset = 0, diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig index 6feec26aea..dbe369280e 100644 --- a/src/link/Wasm/Object.zig +++ b/src/link/Wasm/Object.zig @@ -8,6 +8,7 @@ const types = @import("types.zig"); const std = @import("std"); const Wasm = @import("../Wasm.zig"); const Symbol = @import("Symbol.zig"); +const Alignment = types.Alignment; const Allocator = std.mem.Allocator; const leb = std.leb; @@ -88,12 +89,9 @@ const RelocatableData = struct { /// meta data of the given object file. /// NOTE: Alignment is encoded as a power of 2, so we shift the symbol's /// alignment to retrieve the natural alignment. - pub fn getAlignment(relocatable_data: RelocatableData, object: *const Object) u32 { - if (relocatable_data.type != .data) return 1; - const data_alignment = object.segment_info[relocatable_data.index].alignment; - if (data_alignment == 0) return 1; - // Decode from power of 2 to natural alignment - return @as(u32, 1) << @as(u5, @intCast(data_alignment)); + pub fn getAlignment(relocatable_data: RelocatableData, object: *const Object) Alignment { + if (relocatable_data.type != .data) return .@"1"; + return object.segment_info[relocatable_data.index].alignment; } /// Returns the symbol kind that corresponds to the relocatable section @@ -671,7 +669,7 @@ fn Parser(comptime ReaderType: type) type { try reader.readNoEof(name); segment.* = .{ .name = name, - .alignment = try leb.readULEB128(u32, reader), + .alignment = @enumFromInt(try leb.readULEB128(u32, reader)), .flags = try leb.readULEB128(u32, reader), }; log.debug("Found segment: {s} align({d}) flags({b})", .{ @@ -919,7 +917,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b continue; // found unknown section, so skip parsing into atom as we do not know how to 
handle it. }; - const atom_index = @as(Atom.Index, @intCast(wasm_bin.managed_atoms.items.len)); + const atom_index: Atom.Index = @intCast(wasm_bin.managed_atoms.items.len); const atom = try wasm_bin.managed_atoms.addOne(gpa); atom.* = Atom.empty; atom.file = object_index; @@ -984,7 +982,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b const segment: *Wasm.Segment = &wasm_bin.segments.items[final_index]; if (relocatable_data.type == .data) { //code section and debug sections are 1-byte aligned - segment.alignment = @max(segment.alignment, atom.alignment); + segment.alignment = segment.alignment.max(atom.alignment); } try wasm_bin.appendAtomAtIndex(final_index, atom_index); diff --git a/src/link/Wasm/types.zig b/src/link/Wasm/types.zig index cce5cdef49..ebb2ddf895 100644 --- a/src/link/Wasm/types.zig +++ b/src/link/Wasm/types.zig @@ -109,11 +109,13 @@ pub const SubsectionType = enum(u8) { WASM_SYMBOL_TABLE = 8, }; +pub const Alignment = @import("../../InternPool.zig").Alignment; + pub const Segment = struct { /// Segment's name, encoded as UTF-8 bytes. name: []const u8, /// The required alignment of the segment, encoded as a power of 2 - alignment: u32, + alignment: Alignment, /// Bitfield containing flags for a segment flags: u32, |
