| author | mlugg <mlugg@mlugg.co.uk> | 2023-06-22 18:46:56 +0100 |
|---|---|---|
| committer | Andrew Kelley <andrew@ziglang.org> | 2023-06-24 16:56:39 -0700 |
| commit | f26dda21171e26f44aeec8c59a75bbb3331eeb2e (patch) | |
| tree | c935248861ae2693b314f2c8bc78fe38d9961b6d /src/link/Coff.zig | |
| parent | 447ca4e3fff021f471b748187b53f0a4744ad0bc (diff) | |
all: migrate code to new cast builtin syntax
Most of this migration was performed automatically with `zig fmt`. There
were a few exceptions that I had to fix manually (a short sketch of the rewrite pattern follows the list):
* `@alignCast` and `@addrSpaceCast` cannot be automatically rewritten
* `@truncate`'s fixup is incorrect for vectors
* Test cases are not formatted, and their error locations change
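
For reference, the rewrite that `zig fmt` applies is purely mechanical: the result type moves out of the builtin's argument list and into an enclosing `@as`, since the cast builtins now infer their result type from context. Below is a minimal sketch of the before/after shapes; the identifiers and values are illustrative, not taken from this diff:

```zig
const std = @import("std");

pub fn main() void {
    const len: usize = 300;

    // Old (pre-0.11) syntax, removed by this migration:
    //   const small = @intCast(u16, len);
    // New syntax: @intCast infers its result type, so @as supplies it
    // explicitly where nothing else would.
    const small = @as(u16, @intCast(len));

    // @truncate follows the same shape for scalar integers.
    const low_byte = @as(u8, @truncate(len));

    // For vector operands the automatic fixup is incorrect (per the commit
    // message above): the result type must be the full vector type, not the
    // scalar element type that @as(u8, @truncate(v)) would name.
    const v: @Vector(4, u16) = .{ 256, 257, 258, 259 };
    const t: @Vector(4, u8) = @truncate(v);

    std.debug.print("{d} {d} {d}\n", .{ small, low_byte, t[0] });
}
```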
Diffstat (limited to 'src/link/Coff.zig')
| -rw-r--r-- | src/link/Coff.zig | 110 |

1 file changed, 55 insertions(+), 55 deletions(-)
```diff
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index e3fcc941eb..a724d4023a 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -358,7 +358,7 @@ fn populateMissingMetadata(self: *Coff) !void {
     });
 
     if (self.text_section_index == null) {
-        const file_size = @intCast(u32, self.base.options.program_code_size_hint);
+        const file_size = @as(u32, @intCast(self.base.options.program_code_size_hint));
         self.text_section_index = try self.allocateSection(".text", file_size, .{
             .CNT_CODE = 1,
             .MEM_EXECUTE = 1,
@@ -367,7 +367,7 @@ fn populateMissingMetadata(self: *Coff) !void {
     }
 
     if (self.got_section_index == null) {
-        const file_size = @intCast(u32, self.base.options.symbol_count_hint) * self.ptr_width.size();
+        const file_size = @as(u32, @intCast(self.base.options.symbol_count_hint)) * self.ptr_width.size();
         self.got_section_index = try self.allocateSection(".got", file_size, .{
             .CNT_INITIALIZED_DATA = 1,
             .MEM_READ = 1,
@@ -392,7 +392,7 @@ fn populateMissingMetadata(self: *Coff) !void {
     }
 
     if (self.idata_section_index == null) {
-        const file_size = @intCast(u32, self.base.options.symbol_count_hint) * self.ptr_width.size();
+        const file_size = @as(u32, @intCast(self.base.options.symbol_count_hint)) * self.ptr_width.size();
         self.idata_section_index = try self.allocateSection(".idata", file_size, .{
             .CNT_INITIALIZED_DATA = 1,
             .MEM_READ = 1,
@@ -400,7 +400,7 @@ fn populateMissingMetadata(self: *Coff) !void {
     }
 
     if (self.reloc_section_index == null) {
-        const file_size = @intCast(u32, self.base.options.symbol_count_hint) * @sizeOf(coff.BaseRelocation);
+        const file_size = @as(u32, @intCast(self.base.options.symbol_count_hint)) * @sizeOf(coff.BaseRelocation);
         self.reloc_section_index = try self.allocateSection(".reloc", file_size, .{
             .CNT_INITIALIZED_DATA = 1,
             .MEM_DISCARDABLE = 1,
@@ -409,7 +409,7 @@ fn populateMissingMetadata(self: *Coff) !void {
     }
 
     if (self.strtab_offset == null) {
-        const file_size = @intCast(u32, self.strtab.len());
+        const file_size = @as(u32, @intCast(self.strtab.len()));
         self.strtab_offset = self.findFreeSpace(file_size, @alignOf(u32)); // 4bytes aligned seems like a good idea here
         log.debug("found strtab free space 0x{x} to 0x{x}", .{ self.strtab_offset.?, self.strtab_offset.? + file_size });
     }
@@ -430,7 +430,7 @@ fn populateMissingMetadata(self: *Coff) !void {
 }
 
 fn allocateSection(self: *Coff, name: []const u8, size: u32, flags: coff.SectionHeaderFlags) !u16 {
-    const index = @intCast(u16, self.sections.slice().len);
+    const index = @as(u16, @intCast(self.sections.slice().len));
     const off = self.findFreeSpace(size, default_file_alignment);
     // Memory is always allocated in sequence
     // TODO: investigate if we can allocate .text last; this way it would never need to grow in memory!
@@ -652,7 +652,7 @@ pub fn allocateSymbol(self: *Coff) !u32 {
             break :blk index;
         } else {
             log.debug(" (allocating symbol index {d})", .{self.locals.items.len});
-            const index = @intCast(u32, self.locals.items.len);
+            const index = @as(u32, @intCast(self.locals.items.len));
             _ = self.locals.addOneAssumeCapacity();
             break :blk index;
         }
@@ -680,7 +680,7 @@ fn allocateGlobal(self: *Coff) !u32 {
             break :blk index;
         } else {
             log.debug(" (allocating global index {d})", .{self.globals.items.len});
-            const index = @intCast(u32, self.globals.items.len);
+            const index = @as(u32, @intCast(self.globals.items.len));
             _ = self.globals.addOneAssumeCapacity();
             break :blk index;
         }
@@ -704,7 +704,7 @@ fn addGotEntry(self: *Coff, target: SymbolWithLoc) !void {
 
 pub fn createAtom(self: *Coff) !Atom.Index {
     const gpa = self.base.allocator;
-    const atom_index = @intCast(Atom.Index, self.atoms.items.len);
+    const atom_index = @as(Atom.Index, @intCast(self.atoms.items.len));
     const atom = try self.atoms.addOne(gpa);
     const sym_index = try self.allocateSymbol();
     try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
@@ -776,7 +776,7 @@ fn writeAtom(self: *Coff, atom_index: Atom.Index, code: []u8) !void {
         self.resolveRelocs(atom_index, relocs.items, mem_code, slide);
 
         const vaddr = sym.value + slide;
-        const pvaddr = @ptrFromInt(*anyopaque, vaddr);
+        const pvaddr = @as(*anyopaque, @ptrFromInt(vaddr));
 
         log.debug("writing to memory at address {x}", .{vaddr});
 
@@ -830,7 +830,7 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
     const sect_id = self.got_section_index.?;
 
     if (self.got_table_count_dirty) {
-        const needed_size = @intCast(u32, self.got_table.entries.items.len * self.ptr_width.size());
+        const needed_size = @as(u32, @intCast(self.got_table.entries.items.len * self.ptr_width.size()));
         try self.growSection(sect_id, needed_size);
         self.got_table_count_dirty = false;
     }
@@ -847,7 +847,7 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
     switch (self.ptr_width) {
         .p32 => {
             var buf: [4]u8 = undefined;
-            mem.writeIntLittle(u32, &buf, @intCast(u32, entry_value + self.getImageBase()));
+            mem.writeIntLittle(u32, &buf, @as(u32, @intCast(entry_value + self.getImageBase())));
             try self.base.file.?.pwriteAll(&buf, file_offset);
         },
         .p64 => {
@@ -862,7 +862,7 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
         const gpa = self.base.allocator;
        const slide = @intFromPtr(self.hot_state.loaded_base_address.?);
         const actual_vmaddr = vmaddr + slide;
-        const pvaddr = @ptrFromInt(*anyopaque, actual_vmaddr);
+        const pvaddr = @as(*anyopaque, @ptrFromInt(actual_vmaddr));
         log.debug("writing GOT entry to memory at address {x}", .{actual_vmaddr});
         if (build_options.enable_logging) {
             switch (self.ptr_width) {
@@ -880,7 +880,7 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
         switch (self.ptr_width) {
             .p32 => {
                 var buf: [4]u8 = undefined;
-                mem.writeIntLittle(u32, &buf, @intCast(u32, entry_value + slide));
+                mem.writeIntLittle(u32, &buf, @as(u32, @intCast(entry_value + slide)));
                 writeMem(handle, pvaddr, &buf) catch |err| {
                     log.warn("writing to protected memory failed with error: {s}", .{@errorName(err)});
                 };
@@ -1107,7 +1107,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
         const atom = self.getAtom(atom_index);
         const sym = atom.getSymbolPtr(self);
         try self.setSymbolName(sym, sym_name);
-        sym.section_number = @enumFromInt(coff.SectionNumber, self.rdata_section_index.? + 1);
+        sym.section_number = @as(coff.SectionNumber, @enumFromInt(self.rdata_section_index.? + 1));
     }
 
     const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), tv, &code_buffer, .none, .{
@@ -1125,7 +1125,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
 
     const required_alignment = tv.ty.abiAlignment(mod);
     const atom = self.getAtomPtr(atom_index);
-    atom.size = @intCast(u32, code.len);
+    atom.size = @as(u32, @intCast(code.len));
     atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, required_alignment);
     errdefer self.freeAtom(atom_index);
 
@@ -1241,10 +1241,10 @@ fn updateLazySymbolAtom(
         },
     };
 
-    const code_len = @intCast(u32, code.len);
+    const code_len = @as(u32, @intCast(code.len));
     const symbol = atom.getSymbolPtr(self);
     try self.setSymbolName(symbol, name);
-    symbol.section_number = @enumFromInt(coff.SectionNumber, section_index + 1);
+    symbol.section_number = @as(coff.SectionNumber, @enumFromInt(section_index + 1));
     symbol.type = .{ .complex_type = .NULL, .base_type = .NULL };
 
     const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
@@ -1336,12 +1336,12 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, comple
     const atom = self.getAtom(atom_index);
     const sym_index = atom.getSymbolIndex().?;
     const sect_index = decl_metadata.section;
-    const code_len = @intCast(u32, code.len);
+    const code_len = @as(u32, @intCast(code.len));
 
     if (atom.size != 0) {
         const sym = atom.getSymbolPtr(self);
         try self.setSymbolName(sym, decl_name);
-        sym.section_number = @enumFromInt(coff.SectionNumber, sect_index + 1);
+        sym.section_number = @as(coff.SectionNumber, @enumFromInt(sect_index + 1));
         sym.type = .{ .complex_type = complex_type, .base_type = .NULL };
 
         const capacity = atom.capacity(self);
@@ -1365,7 +1365,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, comple
     } else {
         const sym = atom.getSymbolPtr(self);
         try self.setSymbolName(sym, decl_name);
-        sym.section_number = @enumFromInt(coff.SectionNumber, sect_index + 1);
+        sym.section_number = @as(coff.SectionNumber, @enumFromInt(sect_index + 1));
         sym.type = .{ .complex_type = complex_type, .base_type = .NULL };
 
         const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
@@ -1502,7 +1502,7 @@ pub fn updateDeclExports(
         const sym = self.getSymbolPtr(sym_loc);
         try self.setSymbolName(sym, mod.intern_pool.stringToSlice(exp.opts.name));
         sym.value = decl_sym.value;
-        sym.section_number = @enumFromInt(coff.SectionNumber, self.text_section_index.? + 1);
+        sym.section_number = @as(coff.SectionNumber, @enumFromInt(self.text_section_index.? + 1));
         sym.type = .{ .complex_type = .FUNCTION, .base_type = .NULL };
 
         switch (exp.opts.linkage) {
@@ -1728,12 +1728,12 @@ pub fn getDeclVAddr(self: *Coff, decl_index: Module.Decl.Index, reloc_info: link
     try Atom.addRelocation(self, atom_index, .{
         .type = .direct,
         .target = target,
-        .offset = @intCast(u32, reloc_info.offset),
+        .offset = @as(u32, @intCast(reloc_info.offset)),
         .addend = reloc_info.addend,
         .pcrel = false,
         .length = 3,
     });
-    try Atom.addBaseRelocation(self, atom_index, @intCast(u32, reloc_info.offset));
+    try Atom.addBaseRelocation(self, atom_index, @as(u32, @intCast(reloc_info.offset)));
 
     return 0;
 }
@@ -1804,7 +1804,7 @@ fn writeBaseRelocations(self: *Coff) !void {
                     gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa);
                 }
                 try gop.value_ptr.append(.{
-                    .offset = @intCast(u12, rva - page),
+                    .offset = @as(u12, @intCast(rva - page)),
                     .type = .DIR64,
                 });
             }
@@ -1818,14 +1818,14 @@ fn writeBaseRelocations(self: *Coff) !void {
             const sym = self.getSymbol(entry);
             if (sym.section_number == .UNDEFINED) continue;
 
-            const rva = @intCast(u32, header.virtual_address + index * self.ptr_width.size());
+            const rva = @as(u32, @intCast(header.virtual_address + index * self.ptr_width.size()));
             const page = mem.alignBackward(u32, rva, self.page_size);
             const gop = try page_table.getOrPut(page);
             if (!gop.found_existing) {
                 gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa);
             }
             try gop.value_ptr.append(.{
-                .offset = @intCast(u12, rva - page),
+                .offset = @as(u12, @intCast(rva - page)),
                 .type = .DIR64,
             });
         }
@@ -1860,9 +1860,9 @@ fn writeBaseRelocations(self: *Coff) !void {
             });
         }
 
-        const block_size = @intCast(
+        const block_size = @as(
             u32,
-            entries.items.len * @sizeOf(coff.BaseRelocation) + @sizeOf(coff.BaseRelocationDirectoryEntry),
+            @intCast(entries.items.len * @sizeOf(coff.BaseRelocation) + @sizeOf(coff.BaseRelocationDirectoryEntry)),
         );
         try buffer.ensureUnusedCapacity(block_size);
         buffer.appendSliceAssumeCapacity(mem.asBytes(&coff.BaseRelocationDirectoryEntry{
@@ -1873,7 +1873,7 @@ fn writeBaseRelocations(self: *Coff) !void {
     }
 
     const header = &self.sections.items(.header)[self.reloc_section_index.?];
-    const needed_size = @intCast(u32, buffer.items.len);
+    const needed_size = @as(u32, @intCast(buffer.items.len));
     try self.growSection(self.reloc_section_index.?, needed_size);
 
     try self.base.file.?.pwriteAll(buffer.items, header.pointer_to_raw_data);
@@ -1904,12 +1904,12 @@ fn writeImportTables(self: *Coff) !void {
         const itable = self.import_tables.values()[i];
         iat_size += itable.size() + 8;
         dir_table_size += @sizeOf(coff.ImportDirectoryEntry);
-        lookup_table_size += @intCast(u32, itable.entries.items.len + 1) * @sizeOf(coff.ImportLookupEntry64.ByName);
+        lookup_table_size += @as(u32, @intCast(itable.entries.items.len + 1)) * @sizeOf(coff.ImportLookupEntry64.ByName);
         for (itable.entries.items) |entry| {
             const sym_name = self.getSymbolName(entry);
-            names_table_size += 2 + mem.alignForward(u32, @intCast(u32, sym_name.len + 1), 2);
+            names_table_size += 2 + mem.alignForward(u32, @as(u32, @intCast(sym_name.len + 1)), 2);
         }
-        dll_names_size += @intCast(u32, lib_name.len + ext.len + 1);
+        dll_names_size += @as(u32, @intCast(lib_name.len + ext.len + 1));
     }
 
     const needed_size = iat_size + dir_table_size + lookup_table_size + names_table_size + dll_names_size;
@@ -1948,7 +1948,7 @@ fn writeImportTables(self: *Coff) !void {
             const import_name = self.getSymbolName(entry);
 
             // IAT and lookup table entry
-            const lookup = coff.ImportLookupEntry64.ByName{ .name_table_rva = @intCast(u31, header.virtual_address + names_table_offset) };
+            const lookup = coff.ImportLookupEntry64.ByName{ .name_table_rva = @as(u31, @intCast(header.virtual_address + names_table_offset)) };
             @memcpy(
                 buffer.items[iat_offset..][0..@sizeOf(coff.ImportLookupEntry64.ByName)],
                 mem.asBytes(&lookup),
@@ -1964,7 +1964,7 @@ fn writeImportTables(self: *Coff) !void {
             mem.writeIntLittle(u16, buffer.items[names_table_offset..][0..2], 0); // Hint set to 0 until we learn how to parse DLLs
             names_table_offset += 2;
             @memcpy(buffer.items[names_table_offset..][0..import_name.len], import_name);
-            names_table_offset += @intCast(u32, import_name.len);
+            names_table_offset += @as(u32, @intCast(import_name.len));
             buffer.items[names_table_offset] = 0;
             names_table_offset += 1;
             if (!mem.isAlignedGeneric(usize, names_table_offset, @sizeOf(u16))) {
@@ -1986,9 +1986,9 @@ fn writeImportTables(self: *Coff) !void {
 
         // DLL name
         @memcpy(buffer.items[dll_names_offset..][0..lib_name.len], lib_name);
-        dll_names_offset += @intCast(u32, lib_name.len);
+        dll_names_offset += @as(u32, @intCast(lib_name.len));
         @memcpy(buffer.items[dll_names_offset..][0..ext.len], ext);
-        dll_names_offset += @intCast(u32, ext.len);
+        dll_names_offset += @as(u32, @intCast(ext.len));
         buffer.items[dll_names_offset] = 0;
         dll_names_offset += 1;
     }
@@ -2027,11 +2027,11 @@ fn writeStrtab(self: *Coff) !void {
     if (self.strtab_offset == null) return;
 
     const allocated_size = self.allocatedSize(self.strtab_offset.?);
-    const needed_size = @intCast(u32, self.strtab.len());
+    const needed_size = @as(u32, @intCast(self.strtab.len()));
 
     if (needed_size > allocated_size) {
         self.strtab_offset = null;
-        self.strtab_offset = @intCast(u32, self.findFreeSpace(needed_size, @alignOf(u32)));
+        self.strtab_offset = @as(u32, @intCast(self.findFreeSpace(needed_size, @alignOf(u32))));
     }
 
     log.debug("writing strtab from 0x{x} to 0x{x}", .{ self.strtab_offset.?, self.strtab_offset.? + needed_size });
@@ -2042,7 +2042,7 @@ fn writeStrtab(self: *Coff) !void {
     buffer.appendSliceAssumeCapacity(self.strtab.items());
     // Here, we do a trick in that we do not commit the size of the strtab to strtab buffer, instead
     // we write the length of the strtab to a temporary buffer that goes to file.
-    mem.writeIntLittle(u32, buffer.items[0..4], @intCast(u32, self.strtab.len()));
+    mem.writeIntLittle(u32, buffer.items[0..4], @as(u32, @intCast(self.strtab.len())));
 
     try self.base.file.?.pwriteAll(buffer.items, self.strtab_offset.?);
 }
@@ -2081,11 +2081,11 @@ fn writeHeader(self: *Coff) !void {
     }
 
     const timestamp = std.time.timestamp();
-    const size_of_optional_header = @intCast(u16, self.getOptionalHeaderSize() + self.getDataDirectoryHeadersSize());
+    const size_of_optional_header = @as(u16, @intCast(self.getOptionalHeaderSize() + self.getDataDirectoryHeadersSize()));
     var coff_header = coff.CoffHeader{
         .machine = coff.MachineType.fromTargetCpuArch(self.base.options.target.cpu.arch),
-        .number_of_sections = @intCast(u16, self.sections.slice().len), // TODO what if we prune a section
-        .time_date_stamp = @truncate(u32, @bitCast(u64, timestamp)),
+        .number_of_sections = @as(u16, @intCast(self.sections.slice().len)), // TODO what if we prune a section
+        .time_date_stamp = @as(u32, @truncate(@as(u64, @bitCast(timestamp)))),
         .pointer_to_symbol_table = self.strtab_offset orelse 0,
         .number_of_symbols = 0,
         .size_of_optional_header = size_of_optional_header,
@@ -2135,7 +2135,7 @@ fn writeHeader(self: *Coff) !void {
                 .address_of_entry_point = self.entry_addr orelse 0,
                 .base_of_code = base_of_code,
                 .base_of_data = base_of_data,
-                .image_base = @intCast(u32, image_base),
+                .image_base = @as(u32, @intCast(image_base)),
                 .section_alignment = self.page_size,
                 .file_alignment = default_file_alignment,
                 .major_operating_system_version = 6,
@@ -2155,7 +2155,7 @@ fn writeHeader(self: *Coff) !void {
                 .size_of_heap_reserve = default_size_of_heap_reserve,
                 .size_of_heap_commit = default_size_of_heap_commit,
                 .loader_flags = 0,
-                .number_of_rva_and_sizes = @intCast(u32, self.data_directories.len),
+                .number_of_rva_and_sizes = @as(u32, @intCast(self.data_directories.len)),
             };
             writer.writeAll(mem.asBytes(&opt_header)) catch unreachable;
         },
@@ -2189,7 +2189,7 @@ fn writeHeader(self: *Coff) !void {
                 .size_of_heap_reserve = default_size_of_heap_reserve,
                 .size_of_heap_commit = default_size_of_heap_commit,
                 .loader_flags = 0,
-                .number_of_rva_and_sizes = @intCast(u32, self.data_directories.len),
+                .number_of_rva_and_sizes = @as(u32, @intCast(self.data_directories.len)),
             };
             writer.writeAll(mem.asBytes(&opt_header)) catch unreachable;
         },
@@ -2210,7 +2210,7 @@ fn detectAllocCollision(self: *Coff, start: u32, size: u32) ?u32 {
     const end = start + padToIdeal(size);
 
     if (self.strtab_offset) |off| {
-        const tight_size = @intCast(u32, self.strtab.len());
+        const tight_size = @as(u32, @intCast(self.strtab.len()));
         const increased_size = padToIdeal(tight_size);
         const test_end = off + increased_size;
         if (end > off and start < test_end) {
@@ -2265,28 +2265,28 @@ fn allocatedVirtualSize(self: *Coff, start: u32) u32 {
 
 inline fn getSizeOfHeaders(self: Coff) u32 {
     const msdos_hdr_size = msdos_stub.len + 4;
-    return @intCast(u32, msdos_hdr_size + @sizeOf(coff.CoffHeader) + self.getOptionalHeaderSize() +
-        self.getDataDirectoryHeadersSize() + self.getSectionHeadersSize());
+    return @as(u32, @intCast(msdos_hdr_size + @sizeOf(coff.CoffHeader) + self.getOptionalHeaderSize() +
+        self.getDataDirectoryHeadersSize() + self.getSectionHeadersSize()));
 }
 
 inline fn getOptionalHeaderSize(self: Coff) u32 {
     return switch (self.ptr_width) {
-        .p32 => @intCast(u32, @sizeOf(coff.OptionalHeaderPE32)),
-        .p64 => @intCast(u32, @sizeOf(coff.OptionalHeaderPE64)),
+        .p32 => @as(u32, @intCast(@sizeOf(coff.OptionalHeaderPE32))),
+        .p64 => @as(u32, @intCast(@sizeOf(coff.OptionalHeaderPE64))),
     };
 }
 
 inline fn getDataDirectoryHeadersSize(self: Coff) u32 {
-    return @intCast(u32, self.data_directories.len * @sizeOf(coff.ImageDataDirectory));
+    return @as(u32, @intCast(self.data_directories.len * @sizeOf(coff.ImageDataDirectory)));
}
 
 inline fn getSectionHeadersSize(self: Coff) u32 {
-    return @intCast(u32, self.sections.slice().len * @sizeOf(coff.SectionHeader));
+    return @as(u32, @intCast(self.sections.slice().len * @sizeOf(coff.SectionHeader)));
 }
 
 inline fn getDataDirectoryHeadersOffset(self: Coff) u32 {
     const msdos_hdr_size = msdos_stub.len + 4;
-    return @intCast(u32, msdos_hdr_size + @sizeOf(coff.CoffHeader) + self.getOptionalHeaderSize());
+    return @as(u32, @intCast(msdos_hdr_size + @sizeOf(coff.CoffHeader) + self.getOptionalHeaderSize()));
 }
 
 inline fn getSectionHeadersOffset(self: Coff) u32 {
@@ -2473,7 +2473,7 @@ fn logSymtab(self: *Coff) void {
         };
         log.debug(" %{d}: {?s} @{x} in {s}({d}), {s}", .{
             sym_id,
-            self.getSymbolName(.{ .sym_index = @intCast(u32, sym_id), .file = null }),
+            self.getSymbolName(.{ .sym_index = @as(u32, @intCast(sym_id)), .file = null }),
             sym.value,
             where,
             def_index,
```
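
A side note on the `time_date_stamp` hunk at source line 2081: casts that previously nested two-argument builtins now read as an inside-out chain of single-argument ones. A small stand-alone sketch of that exact conversion, taking the `i64` timestamp down to the low 32 bits as the linker stores it:

```zig
const std = @import("std");

pub fn main() void {
    // std.time.timestamp() returns a signed i64.
    const timestamp: i64 = std.time.timestamp();

    // Reinterpret the bits as unsigned, then keep the low 32 bits,
    // the same chain the diff uses for CoffHeader.time_date_stamp.
    const stamp = @as(u32, @truncate(@as(u64, @bitCast(timestamp))));

    std.debug.print("time_date_stamp: {d}\n", .{stamp});
}
```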
