author     Andrew Kelley <andrew@ziglang.org>    2023-06-24 16:58:19 -0700
committer  GitHub <noreply@github.com>           2023-06-24 16:58:19 -0700
commit     146b79af153bbd5dafda0ba12a040385c7fc58f8 (patch)
tree       67e3db8b444d65c667e314770fc983a7fc8ba293 /src/link
parent     13853bef0df3c90633021850cc6d6abaeea03282 (diff)
parent     21ac0beb436f49fe49c6982a872f2dc48e4bea5e (diff)
Merge pull request #16163 from mlugg/feat/builtins-infer-dest-ty
Infer destination type of cast builtins using result type
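
This merge mechanically migrates every two-argument cast builtin to the new one-argument form, whose destination type is inferred from the result type. A minimal sketch of the pattern (hypothetical names and values; the hunks below show the real occurrences):

    const some_len: usize = 797; // hypothetical value

    // Old form: destination type passed as the first argument.
    //   const a = @intCast(u32, some_len);
    // New form: @intCast takes only the operand; the destination type
    // comes from the result type, supplied here explicitly via @as:
    const a = @as(u32, @intCast(some_len));

    // Where a result type is already present, no @as wrapper is needed:
    const b: u32 = @intCast(some_len);

The `@as(T, @intCast(...))` spelling used throughout this diff preserves the old explicit destination type exactly, which keeps the rewrite purely mechanical.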
Diffstat (limited to 'src/link')
-rw-r--r--  src/link/C.zig                        8
-rw-r--r--  src/link/Coff.zig                   110
-rw-r--r--  src/link/Coff/ImportTable.zig         6
-rw-r--r--  src/link/Coff/Relocation.zig         24
-rw-r--r--  src/link/Dwarf.zig                  116
-rw-r--r--  src/link/Elf.zig                    104
-rw-r--r--  src/link/MachO.zig                   98
-rw-r--r--  src/link/MachO/Archive.zig            2
-rw-r--r--  src/link/MachO/CodeSignature.zig     18
-rw-r--r--  src/link/MachO/DebugSymbols.zig      42
-rw-r--r--  src/link/MachO/DwarfInfo.zig          8
-rw-r--r--  src/link/MachO/Dylib.zig             12
-rw-r--r--  src/link/MachO/Object.zig            64
-rw-r--r--  src/link/MachO/Relocation.zig        46
-rw-r--r--  src/link/MachO/Trie.zig               2
-rw-r--r--  src/link/MachO/UnwindInfo.zig       108
-rw-r--r--  src/link/MachO/ZldAtom.zig          120
-rw-r--r--  src/link/MachO/dead_strip.zig        24
-rw-r--r--  src/link/MachO/dyld_info/Rebase.zig  10
-rw-r--r--  src/link/MachO/dyld_info/bind.zig    36
-rw-r--r--  src/link/MachO/eh_frame.zig          72
-rw-r--r--  src/link/MachO/load_commands.zig     24
-rw-r--r--  src/link/MachO/thunks.zig            12
-rw-r--r--  src/link/MachO/zld.zig              144
-rw-r--r--  src/link/Plan9.zig                   44
-rw-r--r--  src/link/Wasm.zig                   276
-rw-r--r--  src/link/Wasm/Atom.zig               18
-rw-r--r--  src/link/Wasm/Object.zig             32
-rw-r--r--  src/link/Wasm/types.zig               2
-rw-r--r--  src/link/strtab.zig                   6
-rw-r--r--  src/link/table_section.zig            2
-rw-r--r--  src/link/tapi/Tokenizer.zig           4
32 files changed, 797 insertions, 797 deletions
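
The same rewrite covers the other cast builtins touched below: @truncate, @bitCast, @enumFromInt, and @ptrFromInt. A hedged sketch mirroring lines that appear in these hunks (function and parameter names are illustrative):

    const std = @import("std");

    fn example(vaddr: usize) void {
        const timestamp = std.time.timestamp();
        // Old: @truncate(u32, @bitCast(u64, timestamp))
        const stamp = @as(u32, @truncate(@as(u64, @bitCast(timestamp))));
        // Old: @ptrFromInt(*anyopaque, vaddr)
        const pvaddr = @as(*anyopaque, @ptrFromInt(vaddr));
        _ = stamp;
        _ = pvaddr;
    }
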
diff --git a/src/link/C.zig b/src/link/C.zig
index 9a42daa061..e3f8653852 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -292,7 +292,7 @@ pub fn flushModule(self: *C, _: *Compilation, prog_node: *std.Progress.Node) !vo
{
var export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
defer export_names.deinit(gpa);
- try export_names.ensureTotalCapacity(gpa, @intCast(u32, module.decl_exports.entries.len));
+ try export_names.ensureTotalCapacity(gpa, @as(u32, @intCast(module.decl_exports.entries.len)));
for (module.decl_exports.values()) |exports| for (exports.items) |@"export"|
try export_names.put(gpa, @"export".opts.name, {});
@@ -426,7 +426,7 @@ fn flushCTypes(
return ctx.ctypes_map[idx - codegen.CType.Tag.no_payload_count];
}
};
- const decl_idx = @intCast(codegen.CType.Index, codegen.CType.Tag.no_payload_count + decl_i);
+ const decl_idx = @as(codegen.CType.Index, @intCast(codegen.CType.Tag.no_payload_count + decl_i));
const ctx = Context{
.arena = global_ctypes.arena.allocator(),
.ctypes_map = f.ctypes_map.items,
@@ -437,7 +437,7 @@ fn flushCTypes(
.store = &global_ctypes.set,
});
const global_idx =
- @intCast(codegen.CType.Index, codegen.CType.Tag.no_payload_count + gop.index);
+ @as(codegen.CType.Index, @intCast(codegen.CType.Tag.no_payload_count + gop.index));
f.ctypes_map.appendAssumeCapacity(global_idx);
if (!gop.found_existing) {
errdefer _ = global_ctypes.set.map.pop();
@@ -538,7 +538,7 @@ fn flushLazyFn(self: *C, db: *DeclBlock, lazy_fn: codegen.LazyFnMap.Entry) Flush
fn flushLazyFns(self: *C, f: *Flush, lazy_fns: codegen.LazyFnMap) FlushDeclError!void {
const gpa = self.base.allocator;
- try f.lazy_fns.ensureUnusedCapacity(gpa, @intCast(Flush.LazyFns.Size, lazy_fns.count()));
+ try f.lazy_fns.ensureUnusedCapacity(gpa, @as(Flush.LazyFns.Size, @intCast(lazy_fns.count())));
var it = lazy_fns.iterator();
while (it.next()) |entry| {
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index e3fcc941eb..a724d4023a 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -358,7 +358,7 @@ fn populateMissingMetadata(self: *Coff) !void {
});
if (self.text_section_index == null) {
- const file_size = @intCast(u32, self.base.options.program_code_size_hint);
+ const file_size = @as(u32, @intCast(self.base.options.program_code_size_hint));
self.text_section_index = try self.allocateSection(".text", file_size, .{
.CNT_CODE = 1,
.MEM_EXECUTE = 1,
@@ -367,7 +367,7 @@ fn populateMissingMetadata(self: *Coff) !void {
}
if (self.got_section_index == null) {
- const file_size = @intCast(u32, self.base.options.symbol_count_hint) * self.ptr_width.size();
+ const file_size = @as(u32, @intCast(self.base.options.symbol_count_hint)) * self.ptr_width.size();
self.got_section_index = try self.allocateSection(".got", file_size, .{
.CNT_INITIALIZED_DATA = 1,
.MEM_READ = 1,
@@ -392,7 +392,7 @@ fn populateMissingMetadata(self: *Coff) !void {
}
if (self.idata_section_index == null) {
- const file_size = @intCast(u32, self.base.options.symbol_count_hint) * self.ptr_width.size();
+ const file_size = @as(u32, @intCast(self.base.options.symbol_count_hint)) * self.ptr_width.size();
self.idata_section_index = try self.allocateSection(".idata", file_size, .{
.CNT_INITIALIZED_DATA = 1,
.MEM_READ = 1,
@@ -400,7 +400,7 @@ fn populateMissingMetadata(self: *Coff) !void {
}
if (self.reloc_section_index == null) {
- const file_size = @intCast(u32, self.base.options.symbol_count_hint) * @sizeOf(coff.BaseRelocation);
+ const file_size = @as(u32, @intCast(self.base.options.symbol_count_hint)) * @sizeOf(coff.BaseRelocation);
self.reloc_section_index = try self.allocateSection(".reloc", file_size, .{
.CNT_INITIALIZED_DATA = 1,
.MEM_DISCARDABLE = 1,
@@ -409,7 +409,7 @@ fn populateMissingMetadata(self: *Coff) !void {
}
if (self.strtab_offset == null) {
- const file_size = @intCast(u32, self.strtab.len());
+ const file_size = @as(u32, @intCast(self.strtab.len()));
self.strtab_offset = self.findFreeSpace(file_size, @alignOf(u32)); // 4bytes aligned seems like a good idea here
log.debug("found strtab free space 0x{x} to 0x{x}", .{ self.strtab_offset.?, self.strtab_offset.? + file_size });
}
@@ -430,7 +430,7 @@ fn populateMissingMetadata(self: *Coff) !void {
}
fn allocateSection(self: *Coff, name: []const u8, size: u32, flags: coff.SectionHeaderFlags) !u16 {
- const index = @intCast(u16, self.sections.slice().len);
+ const index = @as(u16, @intCast(self.sections.slice().len));
const off = self.findFreeSpace(size, default_file_alignment);
// Memory is always allocated in sequence
// TODO: investigate if we can allocate .text last; this way it would never need to grow in memory!
@@ -652,7 +652,7 @@ pub fn allocateSymbol(self: *Coff) !u32 {
break :blk index;
} else {
log.debug(" (allocating symbol index {d})", .{self.locals.items.len});
- const index = @intCast(u32, self.locals.items.len);
+ const index = @as(u32, @intCast(self.locals.items.len));
_ = self.locals.addOneAssumeCapacity();
break :blk index;
}
@@ -680,7 +680,7 @@ fn allocateGlobal(self: *Coff) !u32 {
break :blk index;
} else {
log.debug(" (allocating global index {d})", .{self.globals.items.len});
- const index = @intCast(u32, self.globals.items.len);
+ const index = @as(u32, @intCast(self.globals.items.len));
_ = self.globals.addOneAssumeCapacity();
break :blk index;
}
@@ -704,7 +704,7 @@ fn addGotEntry(self: *Coff, target: SymbolWithLoc) !void {
pub fn createAtom(self: *Coff) !Atom.Index {
const gpa = self.base.allocator;
- const atom_index = @intCast(Atom.Index, self.atoms.items.len);
+ const atom_index = @as(Atom.Index, @intCast(self.atoms.items.len));
const atom = try self.atoms.addOne(gpa);
const sym_index = try self.allocateSymbol();
try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
@@ -776,7 +776,7 @@ fn writeAtom(self: *Coff, atom_index: Atom.Index, code: []u8) !void {
self.resolveRelocs(atom_index, relocs.items, mem_code, slide);
const vaddr = sym.value + slide;
- const pvaddr = @ptrFromInt(*anyopaque, vaddr);
+ const pvaddr = @as(*anyopaque, @ptrFromInt(vaddr));
log.debug("writing to memory at address {x}", .{vaddr});
@@ -830,7 +830,7 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
const sect_id = self.got_section_index.?;
if (self.got_table_count_dirty) {
- const needed_size = @intCast(u32, self.got_table.entries.items.len * self.ptr_width.size());
+ const needed_size = @as(u32, @intCast(self.got_table.entries.items.len * self.ptr_width.size()));
try self.growSection(sect_id, needed_size);
self.got_table_count_dirty = false;
}
@@ -847,7 +847,7 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
switch (self.ptr_width) {
.p32 => {
var buf: [4]u8 = undefined;
- mem.writeIntLittle(u32, &buf, @intCast(u32, entry_value + self.getImageBase()));
+ mem.writeIntLittle(u32, &buf, @as(u32, @intCast(entry_value + self.getImageBase())));
try self.base.file.?.pwriteAll(&buf, file_offset);
},
.p64 => {
@@ -862,7 +862,7 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
const gpa = self.base.allocator;
const slide = @intFromPtr(self.hot_state.loaded_base_address.?);
const actual_vmaddr = vmaddr + slide;
- const pvaddr = @ptrFromInt(*anyopaque, actual_vmaddr);
+ const pvaddr = @as(*anyopaque, @ptrFromInt(actual_vmaddr));
log.debug("writing GOT entry to memory at address {x}", .{actual_vmaddr});
if (build_options.enable_logging) {
switch (self.ptr_width) {
@@ -880,7 +880,7 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
switch (self.ptr_width) {
.p32 => {
var buf: [4]u8 = undefined;
- mem.writeIntLittle(u32, &buf, @intCast(u32, entry_value + slide));
+ mem.writeIntLittle(u32, &buf, @as(u32, @intCast(entry_value + slide)));
writeMem(handle, pvaddr, &buf) catch |err| {
log.warn("writing to protected memory failed with error: {s}", .{@errorName(err)});
};
@@ -1107,7 +1107,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
const atom = self.getAtom(atom_index);
const sym = atom.getSymbolPtr(self);
try self.setSymbolName(sym, sym_name);
- sym.section_number = @enumFromInt(coff.SectionNumber, self.rdata_section_index.? + 1);
+ sym.section_number = @as(coff.SectionNumber, @enumFromInt(self.rdata_section_index.? + 1));
}
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), tv, &code_buffer, .none, .{
@@ -1125,7 +1125,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
const required_alignment = tv.ty.abiAlignment(mod);
const atom = self.getAtomPtr(atom_index);
- atom.size = @intCast(u32, code.len);
+ atom.size = @as(u32, @intCast(code.len));
atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, required_alignment);
errdefer self.freeAtom(atom_index);
@@ -1241,10 +1241,10 @@ fn updateLazySymbolAtom(
},
};
- const code_len = @intCast(u32, code.len);
+ const code_len = @as(u32, @intCast(code.len));
const symbol = atom.getSymbolPtr(self);
try self.setSymbolName(symbol, name);
- symbol.section_number = @enumFromInt(coff.SectionNumber, section_index + 1);
+ symbol.section_number = @as(coff.SectionNumber, @enumFromInt(section_index + 1));
symbol.type = .{ .complex_type = .NULL, .base_type = .NULL };
const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
@@ -1336,12 +1336,12 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, comple
const atom = self.getAtom(atom_index);
const sym_index = atom.getSymbolIndex().?;
const sect_index = decl_metadata.section;
- const code_len = @intCast(u32, code.len);
+ const code_len = @as(u32, @intCast(code.len));
if (atom.size != 0) {
const sym = atom.getSymbolPtr(self);
try self.setSymbolName(sym, decl_name);
- sym.section_number = @enumFromInt(coff.SectionNumber, sect_index + 1);
+ sym.section_number = @as(coff.SectionNumber, @enumFromInt(sect_index + 1));
sym.type = .{ .complex_type = complex_type, .base_type = .NULL };
const capacity = atom.capacity(self);
@@ -1365,7 +1365,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, comple
} else {
const sym = atom.getSymbolPtr(self);
try self.setSymbolName(sym, decl_name);
- sym.section_number = @enumFromInt(coff.SectionNumber, sect_index + 1);
+ sym.section_number = @as(coff.SectionNumber, @enumFromInt(sect_index + 1));
sym.type = .{ .complex_type = complex_type, .base_type = .NULL };
const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
@@ -1502,7 +1502,7 @@ pub fn updateDeclExports(
const sym = self.getSymbolPtr(sym_loc);
try self.setSymbolName(sym, mod.intern_pool.stringToSlice(exp.opts.name));
sym.value = decl_sym.value;
- sym.section_number = @enumFromInt(coff.SectionNumber, self.text_section_index.? + 1);
+ sym.section_number = @as(coff.SectionNumber, @enumFromInt(self.text_section_index.? + 1));
sym.type = .{ .complex_type = .FUNCTION, .base_type = .NULL };
switch (exp.opts.linkage) {
@@ -1728,12 +1728,12 @@ pub fn getDeclVAddr(self: *Coff, decl_index: Module.Decl.Index, reloc_info: link
try Atom.addRelocation(self, atom_index, .{
.type = .direct,
.target = target,
- .offset = @intCast(u32, reloc_info.offset),
+ .offset = @as(u32, @intCast(reloc_info.offset)),
.addend = reloc_info.addend,
.pcrel = false,
.length = 3,
});
- try Atom.addBaseRelocation(self, atom_index, @intCast(u32, reloc_info.offset));
+ try Atom.addBaseRelocation(self, atom_index, @as(u32, @intCast(reloc_info.offset)));
return 0;
}
@@ -1804,7 +1804,7 @@ fn writeBaseRelocations(self: *Coff) !void {
gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa);
}
try gop.value_ptr.append(.{
- .offset = @intCast(u12, rva - page),
+ .offset = @as(u12, @intCast(rva - page)),
.type = .DIR64,
});
}
@@ -1818,14 +1818,14 @@ fn writeBaseRelocations(self: *Coff) !void {
const sym = self.getSymbol(entry);
if (sym.section_number == .UNDEFINED) continue;
- const rva = @intCast(u32, header.virtual_address + index * self.ptr_width.size());
+ const rva = @as(u32, @intCast(header.virtual_address + index * self.ptr_width.size()));
const page = mem.alignBackward(u32, rva, self.page_size);
const gop = try page_table.getOrPut(page);
if (!gop.found_existing) {
gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa);
}
try gop.value_ptr.append(.{
- .offset = @intCast(u12, rva - page),
+ .offset = @as(u12, @intCast(rva - page)),
.type = .DIR64,
});
}
@@ -1860,9 +1860,9 @@ fn writeBaseRelocations(self: *Coff) !void {
});
}
- const block_size = @intCast(
+ const block_size = @as(
u32,
- entries.items.len * @sizeOf(coff.BaseRelocation) + @sizeOf(coff.BaseRelocationDirectoryEntry),
+ @intCast(entries.items.len * @sizeOf(coff.BaseRelocation) + @sizeOf(coff.BaseRelocationDirectoryEntry)),
);
try buffer.ensureUnusedCapacity(block_size);
buffer.appendSliceAssumeCapacity(mem.asBytes(&coff.BaseRelocationDirectoryEntry{
@@ -1873,7 +1873,7 @@ fn writeBaseRelocations(self: *Coff) !void {
}
const header = &self.sections.items(.header)[self.reloc_section_index.?];
- const needed_size = @intCast(u32, buffer.items.len);
+ const needed_size = @as(u32, @intCast(buffer.items.len));
try self.growSection(self.reloc_section_index.?, needed_size);
try self.base.file.?.pwriteAll(buffer.items, header.pointer_to_raw_data);
@@ -1904,12 +1904,12 @@ fn writeImportTables(self: *Coff) !void {
const itable = self.import_tables.values()[i];
iat_size += itable.size() + 8;
dir_table_size += @sizeOf(coff.ImportDirectoryEntry);
- lookup_table_size += @intCast(u32, itable.entries.items.len + 1) * @sizeOf(coff.ImportLookupEntry64.ByName);
+ lookup_table_size += @as(u32, @intCast(itable.entries.items.len + 1)) * @sizeOf(coff.ImportLookupEntry64.ByName);
for (itable.entries.items) |entry| {
const sym_name = self.getSymbolName(entry);
- names_table_size += 2 + mem.alignForward(u32, @intCast(u32, sym_name.len + 1), 2);
+ names_table_size += 2 + mem.alignForward(u32, @as(u32, @intCast(sym_name.len + 1)), 2);
}
- dll_names_size += @intCast(u32, lib_name.len + ext.len + 1);
+ dll_names_size += @as(u32, @intCast(lib_name.len + ext.len + 1));
}
const needed_size = iat_size + dir_table_size + lookup_table_size + names_table_size + dll_names_size;
@@ -1948,7 +1948,7 @@ fn writeImportTables(self: *Coff) !void {
const import_name = self.getSymbolName(entry);
// IAT and lookup table entry
- const lookup = coff.ImportLookupEntry64.ByName{ .name_table_rva = @intCast(u31, header.virtual_address + names_table_offset) };
+ const lookup = coff.ImportLookupEntry64.ByName{ .name_table_rva = @as(u31, @intCast(header.virtual_address + names_table_offset)) };
@memcpy(
buffer.items[iat_offset..][0..@sizeOf(coff.ImportLookupEntry64.ByName)],
mem.asBytes(&lookup),
@@ -1964,7 +1964,7 @@ fn writeImportTables(self: *Coff) !void {
mem.writeIntLittle(u16, buffer.items[names_table_offset..][0..2], 0); // Hint set to 0 until we learn how to parse DLLs
names_table_offset += 2;
@memcpy(buffer.items[names_table_offset..][0..import_name.len], import_name);
- names_table_offset += @intCast(u32, import_name.len);
+ names_table_offset += @as(u32, @intCast(import_name.len));
buffer.items[names_table_offset] = 0;
names_table_offset += 1;
if (!mem.isAlignedGeneric(usize, names_table_offset, @sizeOf(u16))) {
@@ -1986,9 +1986,9 @@ fn writeImportTables(self: *Coff) !void {
// DLL name
@memcpy(buffer.items[dll_names_offset..][0..lib_name.len], lib_name);
- dll_names_offset += @intCast(u32, lib_name.len);
+ dll_names_offset += @as(u32, @intCast(lib_name.len));
@memcpy(buffer.items[dll_names_offset..][0..ext.len], ext);
- dll_names_offset += @intCast(u32, ext.len);
+ dll_names_offset += @as(u32, @intCast(ext.len));
buffer.items[dll_names_offset] = 0;
dll_names_offset += 1;
}
@@ -2027,11 +2027,11 @@ fn writeStrtab(self: *Coff) !void {
if (self.strtab_offset == null) return;
const allocated_size = self.allocatedSize(self.strtab_offset.?);
- const needed_size = @intCast(u32, self.strtab.len());
+ const needed_size = @as(u32, @intCast(self.strtab.len()));
if (needed_size > allocated_size) {
self.strtab_offset = null;
- self.strtab_offset = @intCast(u32, self.findFreeSpace(needed_size, @alignOf(u32)));
+ self.strtab_offset = @as(u32, @intCast(self.findFreeSpace(needed_size, @alignOf(u32))));
}
log.debug("writing strtab from 0x{x} to 0x{x}", .{ self.strtab_offset.?, self.strtab_offset.? + needed_size });
@@ -2042,7 +2042,7 @@ fn writeStrtab(self: *Coff) !void {
buffer.appendSliceAssumeCapacity(self.strtab.items());
// Here, we do a trick in that we do not commit the size of the strtab to strtab buffer, instead
// we write the length of the strtab to a temporary buffer that goes to file.
- mem.writeIntLittle(u32, buffer.items[0..4], @intCast(u32, self.strtab.len()));
+ mem.writeIntLittle(u32, buffer.items[0..4], @as(u32, @intCast(self.strtab.len())));
try self.base.file.?.pwriteAll(buffer.items, self.strtab_offset.?);
}
@@ -2081,11 +2081,11 @@ fn writeHeader(self: *Coff) !void {
}
const timestamp = std.time.timestamp();
- const size_of_optional_header = @intCast(u16, self.getOptionalHeaderSize() + self.getDataDirectoryHeadersSize());
+ const size_of_optional_header = @as(u16, @intCast(self.getOptionalHeaderSize() + self.getDataDirectoryHeadersSize()));
var coff_header = coff.CoffHeader{
.machine = coff.MachineType.fromTargetCpuArch(self.base.options.target.cpu.arch),
- .number_of_sections = @intCast(u16, self.sections.slice().len), // TODO what if we prune a section
- .time_date_stamp = @truncate(u32, @bitCast(u64, timestamp)),
+ .number_of_sections = @as(u16, @intCast(self.sections.slice().len)), // TODO what if we prune a section
+ .time_date_stamp = @as(u32, @truncate(@as(u64, @bitCast(timestamp)))),
.pointer_to_symbol_table = self.strtab_offset orelse 0,
.number_of_symbols = 0,
.size_of_optional_header = size_of_optional_header,
@@ -2135,7 +2135,7 @@ fn writeHeader(self: *Coff) !void {
.address_of_entry_point = self.entry_addr orelse 0,
.base_of_code = base_of_code,
.base_of_data = base_of_data,
- .image_base = @intCast(u32, image_base),
+ .image_base = @as(u32, @intCast(image_base)),
.section_alignment = self.page_size,
.file_alignment = default_file_alignment,
.major_operating_system_version = 6,
@@ -2155,7 +2155,7 @@ fn writeHeader(self: *Coff) !void {
.size_of_heap_reserve = default_size_of_heap_reserve,
.size_of_heap_commit = default_size_of_heap_commit,
.loader_flags = 0,
- .number_of_rva_and_sizes = @intCast(u32, self.data_directories.len),
+ .number_of_rva_and_sizes = @as(u32, @intCast(self.data_directories.len)),
};
writer.writeAll(mem.asBytes(&opt_header)) catch unreachable;
},
@@ -2189,7 +2189,7 @@ fn writeHeader(self: *Coff) !void {
.size_of_heap_reserve = default_size_of_heap_reserve,
.size_of_heap_commit = default_size_of_heap_commit,
.loader_flags = 0,
- .number_of_rva_and_sizes = @intCast(u32, self.data_directories.len),
+ .number_of_rva_and_sizes = @as(u32, @intCast(self.data_directories.len)),
};
writer.writeAll(mem.asBytes(&opt_header)) catch unreachable;
},
@@ -2210,7 +2210,7 @@ fn detectAllocCollision(self: *Coff, start: u32, size: u32) ?u32 {
const end = start + padToIdeal(size);
if (self.strtab_offset) |off| {
- const tight_size = @intCast(u32, self.strtab.len());
+ const tight_size = @as(u32, @intCast(self.strtab.len()));
const increased_size = padToIdeal(tight_size);
const test_end = off + increased_size;
if (end > off and start < test_end) {
@@ -2265,28 +2265,28 @@ fn allocatedVirtualSize(self: *Coff, start: u32) u32 {
inline fn getSizeOfHeaders(self: Coff) u32 {
const msdos_hdr_size = msdos_stub.len + 4;
- return @intCast(u32, msdos_hdr_size + @sizeOf(coff.CoffHeader) + self.getOptionalHeaderSize() +
- self.getDataDirectoryHeadersSize() + self.getSectionHeadersSize());
+ return @as(u32, @intCast(msdos_hdr_size + @sizeOf(coff.CoffHeader) + self.getOptionalHeaderSize() +
+ self.getDataDirectoryHeadersSize() + self.getSectionHeadersSize()));
}
inline fn getOptionalHeaderSize(self: Coff) u32 {
return switch (self.ptr_width) {
- .p32 => @intCast(u32, @sizeOf(coff.OptionalHeaderPE32)),
- .p64 => @intCast(u32, @sizeOf(coff.OptionalHeaderPE64)),
+ .p32 => @as(u32, @intCast(@sizeOf(coff.OptionalHeaderPE32))),
+ .p64 => @as(u32, @intCast(@sizeOf(coff.OptionalHeaderPE64))),
};
}
inline fn getDataDirectoryHeadersSize(self: Coff) u32 {
- return @intCast(u32, self.data_directories.len * @sizeOf(coff.ImageDataDirectory));
+ return @as(u32, @intCast(self.data_directories.len * @sizeOf(coff.ImageDataDirectory)));
}
inline fn getSectionHeadersSize(self: Coff) u32 {
- return @intCast(u32, self.sections.slice().len * @sizeOf(coff.SectionHeader));
+ return @as(u32, @intCast(self.sections.slice().len * @sizeOf(coff.SectionHeader)));
}
inline fn getDataDirectoryHeadersOffset(self: Coff) u32 {
const msdos_hdr_size = msdos_stub.len + 4;
- return @intCast(u32, msdos_hdr_size + @sizeOf(coff.CoffHeader) + self.getOptionalHeaderSize());
+ return @as(u32, @intCast(msdos_hdr_size + @sizeOf(coff.CoffHeader) + self.getOptionalHeaderSize()));
}
inline fn getSectionHeadersOffset(self: Coff) u32 {
@@ -2473,7 +2473,7 @@ fn logSymtab(self: *Coff) void {
};
log.debug(" %{d}: {?s} @{x} in {s}({d}), {s}", .{
sym_id,
- self.getSymbolName(.{ .sym_index = @intCast(u32, sym_id), .file = null }),
+ self.getSymbolName(.{ .sym_index = @as(u32, @intCast(sym_id)), .file = null }),
sym.value,
where,
def_index,
diff --git a/src/link/Coff/ImportTable.zig b/src/link/Coff/ImportTable.zig
index c3ba77e855..c25851fe72 100644
--- a/src/link/Coff/ImportTable.zig
+++ b/src/link/Coff/ImportTable.zig
@@ -38,7 +38,7 @@ pub fn deinit(itab: *ImportTable, allocator: Allocator) void {
/// Size of the import table does not include the sentinel.
pub fn size(itab: ImportTable) u32 {
- return @intCast(u32, itab.entries.items.len) * @sizeOf(u64);
+ return @as(u32, @intCast(itab.entries.items.len)) * @sizeOf(u64);
}
pub fn addImport(itab: *ImportTable, allocator: Allocator, target: SymbolWithLoc) !ImportIndex {
@@ -49,7 +49,7 @@ pub fn addImport(itab: *ImportTable, allocator: Allocator, target: SymbolWithLoc
break :blk index;
} else {
log.debug(" (allocating import entry at index {d})", .{itab.entries.items.len});
- const index = @intCast(u32, itab.entries.items.len);
+ const index = @as(u32, @intCast(itab.entries.items.len));
_ = itab.entries.addOneAssumeCapacity();
break :blk index;
}
@@ -73,7 +73,7 @@ fn getBaseAddress(ctx: Context) u32 {
var addr = header.virtual_address;
for (ctx.coff_file.import_tables.values(), 0..) |other_itab, i| {
if (ctx.index == i) break;
- addr += @intCast(u32, other_itab.entries.items.len * @sizeOf(u64)) + 8;
+ addr += @as(u32, @intCast(other_itab.entries.items.len * @sizeOf(u64))) + 8;
}
return addr;
}
diff --git a/src/link/Coff/Relocation.zig b/src/link/Coff/Relocation.zig
index 10d4eed92b..ded7483667 100644
--- a/src/link/Coff/Relocation.zig
+++ b/src/link/Coff/Relocation.zig
@@ -126,23 +126,23 @@ fn resolveAarch64(self: Relocation, ctx: Context) void {
var buffer = ctx.code[self.offset..];
switch (self.type) {
.got_page, .import_page, .page => {
- const source_page = @intCast(i32, ctx.source_vaddr >> 12);
- const target_page = @intCast(i32, ctx.target_vaddr >> 12);
- const pages = @bitCast(u21, @intCast(i21, target_page - source_page));
+ const source_page = @as(i32, @intCast(ctx.source_vaddr >> 12));
+ const target_page = @as(i32, @intCast(ctx.target_vaddr >> 12));
+ const pages = @as(u21, @bitCast(@as(i21, @intCast(target_page - source_page))));
var inst = aarch64.Instruction{
.pc_relative_address = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.pc_relative_address,
), buffer[0..4]),
};
- inst.pc_relative_address.immhi = @truncate(u19, pages >> 2);
- inst.pc_relative_address.immlo = @truncate(u2, pages);
+ inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2));
+ inst.pc_relative_address.immlo = @as(u2, @truncate(pages));
mem.writeIntLittle(u32, buffer[0..4], inst.toU32());
},
.got_pageoff, .import_pageoff, .pageoff => {
assert(!self.pcrel);
- const narrowed = @truncate(u12, @intCast(u64, ctx.target_vaddr));
+ const narrowed = @as(u12, @truncate(@as(u64, @intCast(ctx.target_vaddr))));
if (isArithmeticOp(buffer[0..4])) {
var inst = aarch64.Instruction{
.add_subtract_immediate = mem.bytesToValue(meta.TagPayload(
@@ -182,7 +182,7 @@ fn resolveAarch64(self: Relocation, ctx: Context) void {
2 => mem.writeIntLittle(
u32,
buffer[0..4],
- @truncate(u32, ctx.target_vaddr + ctx.image_base),
+ @as(u32, @truncate(ctx.target_vaddr + ctx.image_base)),
),
3 => mem.writeIntLittle(u64, buffer[0..8], ctx.target_vaddr + ctx.image_base),
else => unreachable,
@@ -206,17 +206,17 @@ fn resolveX86(self: Relocation, ctx: Context) void {
.got, .import => {
assert(self.pcrel);
- const disp = @intCast(i32, ctx.target_vaddr) - @intCast(i32, ctx.source_vaddr) - 4;
+ const disp = @as(i32, @intCast(ctx.target_vaddr)) - @as(i32, @intCast(ctx.source_vaddr)) - 4;
mem.writeIntLittle(i32, buffer[0..4], disp);
},
.direct => {
if (self.pcrel) {
- const disp = @intCast(i32, ctx.target_vaddr) - @intCast(i32, ctx.source_vaddr) - 4;
+ const disp = @as(i32, @intCast(ctx.target_vaddr)) - @as(i32, @intCast(ctx.source_vaddr)) - 4;
mem.writeIntLittle(i32, buffer[0..4], disp);
} else switch (ctx.ptr_width) {
- .p32 => mem.writeIntLittle(u32, buffer[0..4], @intCast(u32, ctx.target_vaddr + ctx.image_base)),
+ .p32 => mem.writeIntLittle(u32, buffer[0..4], @as(u32, @intCast(ctx.target_vaddr + ctx.image_base))),
.p64 => switch (self.length) {
- 2 => mem.writeIntLittle(u32, buffer[0..4], @truncate(u32, ctx.target_vaddr + ctx.image_base)),
+ 2 => mem.writeIntLittle(u32, buffer[0..4], @as(u32, @truncate(ctx.target_vaddr + ctx.image_base))),
3 => mem.writeIntLittle(u64, buffer[0..8], ctx.target_vaddr + ctx.image_base),
else => unreachable,
},
@@ -226,6 +226,6 @@ fn resolveX86(self: Relocation, ctx: Context) void {
}
inline fn isArithmeticOp(inst: *const [4]u8) bool {
- const group_decode = @truncate(u5, inst[3]);
+ const group_decode = @as(u5, @truncate(inst[3]));
return ((group_decode >> 2) == 4);
}
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 14be46b621..499855b330 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -138,7 +138,7 @@ pub const DeclState = struct {
/// which we use as our target of the relocation.
fn addTypeRelocGlobal(self: *DeclState, atom_index: Atom.Index, ty: Type, offset: u32) !void {
const resolv = self.abbrev_resolver.get(ty.toIntern()) orelse blk: {
- const sym_index = @intCast(u32, self.abbrev_table.items.len);
+ const sym_index = @as(u32, @intCast(self.abbrev_table.items.len));
try self.abbrev_table.append(self.gpa, .{
.atom_index = atom_index,
.type = ty,
@@ -225,7 +225,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom_index, Type.bool, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, Type.bool, @as(u32, @intCast(index)));
// DW.AT.data_member_location, DW.FORM.udata
try dbg_info_buffer.ensureUnusedCapacity(6);
dbg_info_buffer.appendAssumeCapacity(0);
@@ -237,7 +237,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, payload_ty, @as(u32, @intCast(index)));
// DW.AT.data_member_location, DW.FORM.udata
const offset = abi_size - payload_ty.abiSize(mod);
try leb128.writeULEB128(dbg_info_buffer.writer(), offset);
@@ -249,7 +249,7 @@ pub const DeclState = struct {
if (ty.isSlice(mod)) {
// Slices are structs: struct { .ptr = *, .len = N }
const ptr_bits = target.ptrBitWidth();
- const ptr_bytes = @intCast(u8, @divExact(ptr_bits, 8));
+ const ptr_bytes = @as(u8, @intCast(@divExact(ptr_bits, 8)));
// DW.AT.structure_type
try dbg_info_buffer.ensureUnusedCapacity(2);
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevKind.struct_type));
@@ -267,7 +267,7 @@ pub const DeclState = struct {
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
const ptr_ty = ty.slicePtrFieldType(mod);
- try self.addTypeRelocGlobal(atom_index, ptr_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, ptr_ty, @as(u32, @intCast(index)));
// DW.AT.data_member_location, DW.FORM.udata
try dbg_info_buffer.ensureUnusedCapacity(6);
dbg_info_buffer.appendAssumeCapacity(0);
@@ -279,7 +279,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, Type.usize, @as(u32, @intCast(index)));
// DW.AT.data_member_location, DW.FORM.udata
try dbg_info_buffer.ensureUnusedCapacity(2);
dbg_info_buffer.appendAssumeCapacity(ptr_bytes);
@@ -291,7 +291,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @as(u32, @intCast(index)));
}
},
.Array => {
@@ -302,13 +302,13 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @as(u32, @intCast(index)));
// DW.AT.subrange_type
try dbg_info_buffer.append(@intFromEnum(AbbrevKind.array_dim));
// DW.AT.type, DW.FORM.ref4
index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, Type.usize, @as(u32, @intCast(index)));
// DW.AT.count, DW.FORM.udata
const len = ty.arrayLenIncludingSentinel(mod);
try leb128.writeULEB128(dbg_info_buffer.writer(), len);
@@ -334,7 +334,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @as(u32, @intCast(index)));
// DW.AT.data_member_location, DW.FORM.udata
const field_off = ty.structFieldOffset(field_index, mod);
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
@@ -367,7 +367,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, field.ty, @as(u32, @intCast(index)));
// DW.AT.data_member_location, DW.FORM.udata
const field_off = ty.structFieldOffset(field_index, mod);
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
@@ -404,7 +404,7 @@ pub const DeclState = struct {
// TODO do not assume a 64bit enum value - could be bigger.
// See https://github.com/ziglang/zig/issues/645
const field_int_val = try value.toValue().intFromEnum(ty, mod);
- break :value @bitCast(u64, field_int_val.toSignedInt(mod));
+ break :value @as(u64, @bitCast(field_int_val.toSignedInt(mod)));
};
mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), value, target_endian);
}
@@ -439,7 +439,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const inner_union_index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(inner_union_index + 4);
- try self.addTypeRelocLocal(atom_index, @intCast(u32, inner_union_index), 5);
+ try self.addTypeRelocLocal(atom_index, @as(u32, @intCast(inner_union_index)), 5);
// DW.AT.data_member_location, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), payload_offset);
}
@@ -468,7 +468,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, field.ty, @as(u32, @intCast(index)));
// DW.AT.data_member_location, DW.FORM.udata
try dbg_info_buffer.append(0);
}
@@ -485,7 +485,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom_index, union_obj.tag_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, union_obj.tag_ty, @as(u32, @intCast(index)));
// DW.AT.data_member_location, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), tag_offset);
@@ -521,7 +521,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, payload_ty, @as(u32, @intCast(index)));
// DW.AT.data_member_location, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), payload_off);
}
@@ -536,7 +536,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom_index, error_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, error_ty, @as(u32, @intCast(index)));
// DW.AT.data_member_location, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), error_off);
}
@@ -640,7 +640,7 @@ pub const DeclState = struct {
try dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
const index = dbg_info.items.len;
try dbg_info.resize(index + 4); // dw.at.type, dw.form.ref4
- try self.addTypeRelocGlobal(atom_index, ty, @intCast(u32, index)); // DW.AT.type, DW.FORM.ref4
+ try self.addTypeRelocGlobal(atom_index, ty, @as(u32, @intCast(index))); // DW.AT.type, DW.FORM.ref4
dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
}
@@ -723,20 +723,20 @@ pub const DeclState = struct {
.memory,
.linker_load,
=> {
- const ptr_width = @intCast(u8, @divExact(target.ptrBitWidth(), 8));
+ const ptr_width = @as(u8, @intCast(@divExact(target.ptrBitWidth(), 8)));
try dbg_info.ensureUnusedCapacity(2 + ptr_width);
dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
1 + ptr_width + @intFromBool(is_ptr),
DW.OP.addr, // literal address
});
- const offset = @intCast(u32, dbg_info.items.len);
+ const offset = @as(u32, @intCast(dbg_info.items.len));
const addr = switch (loc) {
.memory => |x| x,
else => 0,
};
switch (ptr_width) {
0...4 => {
- try dbg_info.writer().writeInt(u32, @intCast(u32, addr), endian);
+ try dbg_info.writer().writeInt(u32, @as(u32, @intCast(addr)), endian);
},
5...8 => {
try dbg_info.writer().writeInt(u64, addr, endian);
@@ -765,19 +765,19 @@ pub const DeclState = struct {
if (child_ty.isSignedInt(mod)) DW.OP.consts else DW.OP.constu,
});
if (child_ty.isSignedInt(mod)) {
- try leb128.writeILEB128(dbg_info.writer(), @bitCast(i64, x));
+ try leb128.writeILEB128(dbg_info.writer(), @as(i64, @bitCast(x)));
} else {
try leb128.writeULEB128(dbg_info.writer(), x);
}
try dbg_info.append(DW.OP.stack_value);
- dbg_info.items[fixup] += @intCast(u8, dbg_info.items.len - fixup - 2);
+ dbg_info.items[fixup] += @as(u8, @intCast(dbg_info.items.len - fixup - 2));
},
.undef => {
// DW.AT.location, DW.FORM.exprloc
// uleb128(exprloc_len)
// DW.OP.implicit_value uleb128(len_of_bytes) bytes
- const abi_size = @intCast(u32, child_ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(child_ty.abiSize(mod)));
var implicit_value_len = std.ArrayList(u8).init(self.gpa);
defer implicit_value_len.deinit();
try leb128.writeULEB128(implicit_value_len.writer(), abi_size);
@@ -807,7 +807,7 @@ pub const DeclState = struct {
try dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
const index = dbg_info.items.len;
try dbg_info.resize(index + 4); // dw.at.type, dw.form.ref4
- try self.addTypeRelocGlobal(atom_index, child_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, child_ty, @as(u32, @intCast(index)));
dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
}
@@ -963,7 +963,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
func.lbrace_line,
func.rbrace_line,
});
- const line = @intCast(u28, decl.src_line + func.lbrace_line);
+ const line = @as(u28, @intCast(decl.src_line + func.lbrace_line));
const ptr_width_bytes = self.ptrWidthBytes();
dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{
@@ -1013,7 +1013,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
dbg_info_buffer.items.len += 4; // DW.AT.high_pc, DW.FORM.data4
//
if (fn_ret_has_bits) {
- try decl_state.addTypeRelocGlobal(di_atom_index, fn_ret_type, @intCast(u32, dbg_info_buffer.items.len));
+ try decl_state.addTypeRelocGlobal(di_atom_index, fn_ret_type, @as(u32, @intCast(dbg_info_buffer.items.len)));
dbg_info_buffer.items.len += 4; // DW.AT.type, DW.FORM.ref4
}
@@ -1055,11 +1055,11 @@ pub fn commitDeclState(
.p32 => {
{
const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..4];
- mem.writeInt(u32, ptr, @intCast(u32, sym_addr), target_endian);
+ mem.writeInt(u32, ptr, @as(u32, @intCast(sym_addr)), target_endian);
}
{
const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..4];
- mem.writeInt(u32, ptr, @intCast(u32, sym_addr), target_endian);
+ mem.writeInt(u32, ptr, @as(u32, @intCast(sym_addr)), target_endian);
}
},
.p64 => {
@@ -1079,7 +1079,7 @@ pub fn commitDeclState(
sym_size,
});
const ptr = dbg_info_buffer.items[self.getRelocDbgInfoSubprogramHighPC()..][0..4];
- mem.writeInt(u32, ptr, @intCast(u32, sym_size), target_endian);
+ mem.writeInt(u32, ptr, @as(u32, @intCast(sym_size)), target_endian);
}
try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS.extended_op, 1, DW.LNE.end_sequence });
@@ -1091,7 +1091,7 @@ pub fn commitDeclState(
// probably need to edit that logic too.
const src_fn_index = self.src_fn_decls.get(decl_index).?;
const src_fn = self.getAtomPtr(.src_fn, src_fn_index);
- src_fn.len = @intCast(u32, dbg_line_buffer.items.len);
+ src_fn.len = @as(u32, @intCast(dbg_line_buffer.items.len));
if (self.src_fn_last_index) |last_index| blk: {
if (src_fn_index == last_index) break :blk;
@@ -1254,12 +1254,12 @@ pub fn commitDeclState(
};
if (deferred) continue;
- symbol.offset = @intCast(u32, dbg_info_buffer.items.len);
+ symbol.offset = @as(u32, @intCast(dbg_info_buffer.items.len));
try decl_state.addDbgInfoType(mod, di_atom_index, ty);
}
}
- try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(u32, dbg_info_buffer.items.len));
+ try self.updateDeclDebugInfoAllocation(di_atom_index, @as(u32, @intCast(dbg_info_buffer.items.len)));
while (decl_state.abbrev_relocs.popOrNull()) |reloc| {
if (reloc.target) |target| {
@@ -1402,7 +1402,7 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, atom_index: Atom.Index, len: u32)
self.di_atom_first_index = atom_index;
self.di_atom_last_index = atom_index;
- atom.off = @intCast(u32, padToIdeal(self.dbgInfoHeaderBytes()));
+ atom.off = @as(u32, @intCast(padToIdeal(self.dbgInfoHeaderBytes())));
}
}
@@ -1513,7 +1513,7 @@ pub fn updateDeclLineNumber(self: *Dwarf, mod: *Module, decl_index: Module.Decl.
func.lbrace_line,
func.rbrace_line,
});
- const line = @intCast(u28, decl.src_line + func.lbrace_line);
+ const line = @as(u28, @intCast(decl.src_line + func.lbrace_line));
var data: [4]u8 = undefined;
leb128.writeUnsignedFixed(4, &data, line);
@@ -1791,10 +1791,10 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u
const dbg_info_end = self.getDebugInfoEnd().? + 1;
const init_len = dbg_info_end - after_init_len;
if (self.bin_file.tag == .macho) {
- mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, init_len));
+ mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(init_len)));
} else switch (self.ptr_width) {
.p32 => {
- mem.writeInt(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, init_len), target_endian);
+ mem.writeInt(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(init_len)), target_endian);
},
.p64 => {
di_buf.appendNTimesAssumeCapacity(0xff, 4);
@@ -1804,11 +1804,11 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u
mem.writeInt(u16, di_buf.addManyAsArrayAssumeCapacity(2), 4, target_endian); // DWARF version
const abbrev_offset = self.abbrev_table_offset.?;
if (self.bin_file.tag == .macho) {
- mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, abbrev_offset));
+ mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(abbrev_offset)));
di_buf.appendAssumeCapacity(8); // address size
} else switch (self.ptr_width) {
.p32 => {
- mem.writeInt(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, abbrev_offset), target_endian);
+ mem.writeInt(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(abbrev_offset)), target_endian);
di_buf.appendAssumeCapacity(4); // address size
},
.p64 => {
@@ -1828,9 +1828,9 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u
mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), 0); // DW.AT.stmt_list, DW.FORM.sec_offset
mem.writeIntLittle(u64, di_buf.addManyAsArrayAssumeCapacity(8), low_pc);
mem.writeIntLittle(u64, di_buf.addManyAsArrayAssumeCapacity(8), high_pc);
- mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, name_strp));
- mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, comp_dir_strp));
- mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, producer_strp));
+ mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(name_strp)));
+ mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(comp_dir_strp)));
+ mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(producer_strp)));
} else {
self.writeAddrAssumeCapacity(&di_buf, 0); // DW.AT.stmt_list, DW.FORM.sec_offset
self.writeAddrAssumeCapacity(&di_buf, low_pc);
@@ -1885,7 +1885,7 @@ fn resolveCompilationDir(module: *Module, buffer: *[std.fs.MAX_PATH_BYTES]u8) []
fn writeAddrAssumeCapacity(self: *Dwarf, buf: *std.ArrayList(u8), addr: u64) void {
const target_endian = self.target.cpu.arch.endian();
switch (self.ptr_width) {
- .p32 => mem.writeInt(u32, buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, addr), target_endian),
+ .p32 => mem.writeInt(u32, buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(addr)), target_endian),
.p64 => mem.writeInt(u64, buf.addManyAsArrayAssumeCapacity(8), addr, target_endian),
}
}
@@ -2152,10 +2152,10 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void {
// Go back and populate the initial length.
const init_len = di_buf.items.len - after_init_len;
if (self.bin_file.tag == .macho) {
- mem.writeIntLittle(u32, di_buf.items[init_len_index..][0..4], @intCast(u32, init_len));
+ mem.writeIntLittle(u32, di_buf.items[init_len_index..][0..4], @as(u32, @intCast(init_len)));
} else switch (self.ptr_width) {
.p32 => {
- mem.writeInt(u32, di_buf.items[init_len_index..][0..4], @intCast(u32, init_len), target_endian);
+ mem.writeInt(u32, di_buf.items[init_len_index..][0..4], @as(u32, @intCast(init_len)), target_endian);
},
.p64 => {
// initial length - length of the .debug_aranges contribution for this compilation unit,
@@ -2165,7 +2165,7 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void {
},
}
- const needed_size = @intCast(u32, di_buf.items.len);
+ const needed_size = @as(u32, @intCast(di_buf.items.len));
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
@@ -2293,7 +2293,7 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
di_buf.appendSliceAssumeCapacity(file);
di_buf.appendSliceAssumeCapacity(&[_]u8{
0, // null byte for the relative path name
- @intCast(u8, dir_index), // directory_index
+ @as(u8, @intCast(dir_index)), // directory_index
0, // mtime (TODO supply this)
0, // file size bytes (TODO supply this)
});
@@ -2304,11 +2304,11 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
switch (self.bin_file.tag) {
.macho => {
- mem.writeIntLittle(u32, di_buf.items[before_header_len..][0..4], @intCast(u32, header_len));
+ mem.writeIntLittle(u32, di_buf.items[before_header_len..][0..4], @as(u32, @intCast(header_len)));
},
else => switch (self.ptr_width) {
.p32 => {
- mem.writeInt(u32, di_buf.items[before_header_len..][0..4], @intCast(u32, header_len), target_endian);
+ mem.writeInt(u32, di_buf.items[before_header_len..][0..4], @as(u32, @intCast(header_len)), target_endian);
},
.p64 => {
mem.writeInt(u64, di_buf.items[before_header_len..][0..8], header_len, target_endian);
@@ -2348,7 +2348,7 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
.macho => {
const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
const sect_index = d_sym.debug_line_section_index.?;
- const needed_size = @intCast(u32, d_sym.getSection(sect_index).size + delta);
+ const needed_size = @as(u32, @intCast(d_sym.getSection(sect_index).size + delta));
try d_sym.growSection(sect_index, needed_size, true);
const file_pos = d_sym.getSection(sect_index).offset + first_fn.off;
@@ -2384,11 +2384,11 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
const init_len = self.getDebugLineProgramEnd().? - before_init_len - init_len_size;
switch (self.bin_file.tag) {
.macho => {
- mem.writeIntLittle(u32, di_buf.items[before_init_len..][0..4], @intCast(u32, init_len));
+ mem.writeIntLittle(u32, di_buf.items[before_init_len..][0..4], @as(u32, @intCast(init_len)));
},
else => switch (self.ptr_width) {
.p32 => {
- mem.writeInt(u32, di_buf.items[before_init_len..][0..4], @intCast(u32, init_len), target_endian);
+ mem.writeInt(u32, di_buf.items[before_init_len..][0..4], @as(u32, @intCast(init_len)), target_endian);
},
.p64 => {
mem.writeInt(u64, di_buf.items[before_init_len + 4 ..][0..8], init_len, target_endian);
@@ -2477,7 +2477,7 @@ fn dbgLineNeededHeaderBytes(self: Dwarf, dirs: []const []const u8, files: []cons
}
size += 1; // file names sentinel
- return @intCast(u32, size);
+ return @as(u32, @intCast(size));
}
/// The reloc offset for the line offset of a function from the previous function's line.
@@ -2516,7 +2516,7 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
const di_atom_index = try self.createAtom(.di_atom);
log.debug("updateDeclDebugInfoAllocation in flushModule", .{});
- try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(u32, dbg_info_buffer.items.len));
+ try self.updateDeclDebugInfoAllocation(di_atom_index, @as(u32, @intCast(dbg_info_buffer.items.len)));
log.debug("writeDeclDebugInfo in flushModule", .{});
try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items);
@@ -2581,7 +2581,7 @@ fn addDIFile(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) !u28 {
else => unreachable,
}
}
- return @intCast(u28, gop.index + 1);
+ return @as(u28, @intCast(gop.index + 1));
}
fn genIncludeDirsAndFileNames(self: *Dwarf, arena: Allocator) !struct {
@@ -2614,7 +2614,7 @@ fn genIncludeDirsAndFileNames(self: *Dwarf, arena: Allocator) !struct {
const dir_index: u28 = blk: {
const dirs_gop = dirs.getOrPutAssumeCapacity(dir_path);
- break :blk @intCast(u28, dirs_gop.index + 1);
+ break :blk @as(u28, @intCast(dirs_gop.index + 1));
};
files_dir_indexes.appendAssumeCapacity(dir_index);
@@ -2679,12 +2679,12 @@ fn createAtom(self: *Dwarf, comptime kind: Kind) !Atom.Index {
const index = blk: {
switch (kind) {
.src_fn => {
- const index = @intCast(Atom.Index, self.src_fns.items.len);
+ const index = @as(Atom.Index, @intCast(self.src_fns.items.len));
_ = try self.src_fns.addOne(self.allocator);
break :blk index;
},
.di_atom => {
- const index = @intCast(Atom.Index, self.di_atoms.items.len);
+ const index = @as(Atom.Index, @intCast(self.di_atoms.items.len));
_ = try self.di_atoms.addOne(self.allocator);
break :blk index;
},
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 283bd9ccca..8d08b73d6a 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -455,7 +455,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
const ptr_size: u8 = self.ptrWidthBytes();
if (self.phdr_table_index == null) {
- self.phdr_table_index = @intCast(u16, self.program_headers.items.len);
+ self.phdr_table_index = @as(u16, @intCast(self.program_headers.items.len));
const p_align: u16 = switch (self.ptr_width) {
.p32 => @alignOf(elf.Elf32_Phdr),
.p64 => @alignOf(elf.Elf64_Phdr),
@@ -474,7 +474,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.phdr_table_load_index == null) {
- self.phdr_table_load_index = @intCast(u16, self.program_headers.items.len);
+ self.phdr_table_load_index = @as(u16, @intCast(self.program_headers.items.len));
// TODO Same as for GOT
const phdr_addr: u64 = if (self.base.options.target.ptrBitWidth() >= 32) 0x1000000 else 0x1000;
const p_align = self.page_size;
@@ -492,7 +492,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.phdr_load_re_index == null) {
- self.phdr_load_re_index = @intCast(u16, self.program_headers.items.len);
+ self.phdr_load_re_index = @as(u16, @intCast(self.program_headers.items.len));
const file_size = self.base.options.program_code_size_hint;
const p_align = self.page_size;
const off = self.findFreeSpace(file_size, p_align);
@@ -513,7 +513,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.phdr_got_index == null) {
- self.phdr_got_index = @intCast(u16, self.program_headers.items.len);
+ self.phdr_got_index = @as(u16, @intCast(self.program_headers.items.len));
const file_size = @as(u64, ptr_size) * self.base.options.symbol_count_hint;
// We really only need ptr alignment but since we are using PROGBITS, linux requires
// page align.
@@ -538,7 +538,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.phdr_load_ro_index == null) {
- self.phdr_load_ro_index = @intCast(u16, self.program_headers.items.len);
+ self.phdr_load_ro_index = @as(u16, @intCast(self.program_headers.items.len));
// TODO Find a hint about how much data need to be in rodata ?
const file_size = 1024;
// Same reason as for GOT
@@ -561,7 +561,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.phdr_load_rw_index == null) {
- self.phdr_load_rw_index = @intCast(u16, self.program_headers.items.len);
+ self.phdr_load_rw_index = @as(u16, @intCast(self.program_headers.items.len));
// TODO Find a hint about how much data need to be in data ?
const file_size = 1024;
// Same reason as for GOT
@@ -584,7 +584,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.shstrtab_index == null) {
- self.shstrtab_index = @intCast(u16, self.sections.slice().len);
+ self.shstrtab_index = @as(u16, @intCast(self.sections.slice().len));
assert(self.shstrtab.buffer.items.len == 0);
try self.shstrtab.buffer.append(gpa, 0); // need a 0 at position 0
const off = self.findFreeSpace(self.shstrtab.buffer.items.len, 1);
@@ -609,7 +609,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.text_section_index == null) {
- self.text_section_index = @intCast(u16, self.sections.slice().len);
+ self.text_section_index = @as(u16, @intCast(self.sections.slice().len));
const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
try self.sections.append(gpa, .{
@@ -631,7 +631,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.got_section_index == null) {
- self.got_section_index = @intCast(u16, self.sections.slice().len);
+ self.got_section_index = @as(u16, @intCast(self.sections.slice().len));
const phdr = &self.program_headers.items[self.phdr_got_index.?];
try self.sections.append(gpa, .{
@@ -653,7 +653,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.rodata_section_index == null) {
- self.rodata_section_index = @intCast(u16, self.sections.slice().len);
+ self.rodata_section_index = @as(u16, @intCast(self.sections.slice().len));
const phdr = &self.program_headers.items[self.phdr_load_ro_index.?];
try self.sections.append(gpa, .{
@@ -675,7 +675,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.data_section_index == null) {
- self.data_section_index = @intCast(u16, self.sections.slice().len);
+ self.data_section_index = @as(u16, @intCast(self.sections.slice().len));
const phdr = &self.program_headers.items[self.phdr_load_rw_index.?];
try self.sections.append(gpa, .{
@@ -697,7 +697,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.symtab_section_index == null) {
- self.symtab_section_index = @intCast(u16, self.sections.slice().len);
+ self.symtab_section_index = @as(u16, @intCast(self.sections.slice().len));
const min_align: u16 = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym);
const each_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym);
const file_size = self.base.options.symbol_count_hint * each_size;
@@ -714,7 +714,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.sh_size = file_size,
// The section header index of the associated string table.
.sh_link = self.shstrtab_index.?,
- .sh_info = @intCast(u32, self.local_symbols.items.len),
+ .sh_info = @as(u32, @intCast(self.local_symbols.items.len)),
.sh_addralign = min_align,
.sh_entsize = each_size,
},
@@ -726,7 +726,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
if (self.dwarf) |*dw| {
if (self.debug_str_section_index == null) {
- self.debug_str_section_index = @intCast(u16, self.sections.slice().len);
+ self.debug_str_section_index = @as(u16, @intCast(self.sections.slice().len));
assert(dw.strtab.buffer.items.len == 0);
try dw.strtab.buffer.append(gpa, 0);
try self.sections.append(gpa, .{
@@ -749,7 +749,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.debug_info_section_index == null) {
- self.debug_info_section_index = @intCast(u16, self.sections.slice().len);
+ self.debug_info_section_index = @as(u16, @intCast(self.sections.slice().len));
const file_size_hint = 200;
const p_align = 1;
@@ -778,7 +778,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.debug_abbrev_section_index == null) {
- self.debug_abbrev_section_index = @intCast(u16, self.sections.slice().len);
+ self.debug_abbrev_section_index = @as(u16, @intCast(self.sections.slice().len));
const file_size_hint = 128;
const p_align = 1;
@@ -807,7 +807,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.debug_aranges_section_index == null) {
- self.debug_aranges_section_index = @intCast(u16, self.sections.slice().len);
+ self.debug_aranges_section_index = @as(u16, @intCast(self.sections.slice().len));
const file_size_hint = 160;
const p_align = 16;
@@ -836,7 +836,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.debug_line_section_index == null) {
- self.debug_line_section_index = @intCast(u16, self.sections.slice().len);
+ self.debug_line_section_index = @as(u16, @intCast(self.sections.slice().len));
const file_size_hint = 250;
const p_align = 1;
@@ -1100,7 +1100,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
});
switch (self.ptr_width) {
- .p32 => try self.base.file.?.pwriteAll(mem.asBytes(&@intCast(u32, target_vaddr)), file_offset),
+ .p32 => try self.base.file.?.pwriteAll(mem.asBytes(&@as(u32, @intCast(target_vaddr))), file_offset),
.p64 => try self.base.file.?.pwriteAll(mem.asBytes(&target_vaddr), file_offset),
}
@@ -1170,7 +1170,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
if (needed_size > allocated_size) {
phdr_table.p_offset = 0; // free the space
- phdr_table.p_offset = self.findFreeSpace(needed_size, @intCast(u32, phdr_table.p_align));
+ phdr_table.p_offset = self.findFreeSpace(needed_size, @as(u32, @intCast(phdr_table.p_align)));
}
phdr_table_load.p_offset = mem.alignBackward(u64, phdr_table.p_offset, phdr_table_load.p_align);
@@ -2004,7 +2004,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
fn writeDwarfAddrAssumeCapacity(self: *Elf, buf: *std.ArrayList(u8), addr: u64) void {
const target_endian = self.base.options.target.cpu.arch.endian();
switch (self.ptr_width) {
- .p32 => mem.writeInt(u32, buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, addr), target_endian),
+ .p32 => mem.writeInt(u32, buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(addr)), target_endian),
.p64 => mem.writeInt(u64, buf.addManyAsArrayAssumeCapacity(8), addr, target_endian),
}
}
@@ -2064,15 +2064,15 @@ fn writeElfHeader(self: *Elf) !void {
const phdr_table_offset = self.program_headers.items[self.phdr_table_index.?].p_offset;
switch (self.ptr_width) {
.p32 => {
- mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, e_entry), endian);
+ mem.writeInt(u32, hdr_buf[index..][0..4], @as(u32, @intCast(e_entry)), endian);
index += 4;
// e_phoff
- mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, phdr_table_offset), endian);
+ mem.writeInt(u32, hdr_buf[index..][0..4], @as(u32, @intCast(phdr_table_offset)), endian);
index += 4;
// e_shoff
- mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, self.shdr_table_offset.?), endian);
+ mem.writeInt(u32, hdr_buf[index..][0..4], @as(u32, @intCast(self.shdr_table_offset.?)), endian);
index += 4;
},
.p64 => {
@@ -2108,7 +2108,7 @@ fn writeElfHeader(self: *Elf) !void {
mem.writeInt(u16, hdr_buf[index..][0..2], e_phentsize, endian);
index += 2;
- const e_phnum = @intCast(u16, self.program_headers.items.len);
+ const e_phnum = @as(u16, @intCast(self.program_headers.items.len));
mem.writeInt(u16, hdr_buf[index..][0..2], e_phnum, endian);
index += 2;
@@ -2119,7 +2119,7 @@ fn writeElfHeader(self: *Elf) !void {
mem.writeInt(u16, hdr_buf[index..][0..2], e_shentsize, endian);
index += 2;
- const e_shnum = @intCast(u16, self.sections.slice().len);
+ const e_shnum = @as(u16, @intCast(self.sections.slice().len));
mem.writeInt(u16, hdr_buf[index..][0..2], e_shnum, endian);
index += 2;
@@ -2223,7 +2223,7 @@ fn growAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignment:
pub fn createAtom(self: *Elf) !Atom.Index {
const gpa = self.base.allocator;
- const atom_index = @intCast(Atom.Index, self.atoms.items.len);
+ const atom_index = @as(Atom.Index, @intCast(self.atoms.items.len));
const atom = try self.atoms.addOne(gpa);
const local_sym_index = try self.allocateLocalSymbol();
try self.atom_by_index_table.putNoClobber(gpa, local_sym_index, atom_index);
@@ -2367,7 +2367,7 @@ pub fn allocateLocalSymbol(self: *Elf) !u32 {
break :blk index;
} else {
log.debug(" (allocating symbol index {d})", .{self.local_symbols.items.len});
- const index = @intCast(u32, self.local_symbols.items.len);
+ const index = @as(u32, @intCast(self.local_symbols.items.len));
_ = self.local_symbols.addOneAssumeCapacity();
break :blk index;
}
@@ -2557,7 +2557,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s
.iov_len = code.len,
}};
var remote_vec: [1]std.os.iovec_const = .{.{
- .iov_base = @ptrFromInt([*]u8, @intCast(usize, local_sym.st_value)),
+ .iov_base = @as([*]u8, @ptrFromInt(@as(usize, @intCast(local_sym.st_value)))),
.iov_len = code.len,
}};
const rc = std.os.linux.process_vm_writev(pid, &code_vec, &remote_vec, 0);
@@ -2910,7 +2910,7 @@ pub fn updateDeclExports(
continue;
},
};
- const stt_bits: u8 = @truncate(u4, decl_sym.st_info);
+ const stt_bits: u8 = @as(u4, @truncate(decl_sym.st_info));
if (decl_metadata.getExport(self, exp_name)) |i| {
const sym = &self.global_symbols.items[i];
sym.* = .{
@@ -2926,7 +2926,7 @@ pub fn updateDeclExports(
_ = self.global_symbols.addOneAssumeCapacity();
break :blk self.global_symbols.items.len - 1;
};
- try decl_metadata.exports.append(gpa, @intCast(u32, i));
+ try decl_metadata.exports.append(gpa, @as(u32, @intCast(i)));
self.global_symbols.items[i] = .{
.st_name = try self.shstrtab.insert(gpa, exp_name),
.st_info = (stb_bits << 4) | stt_bits,
@@ -3030,12 +3030,12 @@ fn writeOffsetTableEntry(self: *Elf, index: @TypeOf(self.got_table).Index) !void
switch (entry_size) {
2 => {
var buf: [2]u8 = undefined;
- mem.writeInt(u16, &buf, @intCast(u16, got_value), endian);
+ mem.writeInt(u16, &buf, @as(u16, @intCast(got_value)), endian);
try self.base.file.?.pwriteAll(&buf, off);
},
4 => {
var buf: [4]u8 = undefined;
- mem.writeInt(u32, &buf, @intCast(u32, got_value), endian);
+ mem.writeInt(u32, &buf, @as(u32, @intCast(got_value)), endian);
try self.base.file.?.pwriteAll(&buf, off);
},
8 => {
@@ -3051,7 +3051,7 @@ fn writeOffsetTableEntry(self: *Elf, index: @TypeOf(self.got_table).Index) !void
.iov_len = buf.len,
}};
var remote_vec: [1]std.os.iovec_const = .{.{
- .iov_base = @ptrFromInt([*]u8, @intCast(usize, vaddr)),
+ .iov_base = @as([*]u8, @ptrFromInt(@as(usize, @intCast(vaddr)))),
.iov_len = buf.len,
}};
const rc = std.os.linux.process_vm_writev(pid, &local_vec, &remote_vec, 0);
@@ -3086,7 +3086,7 @@ fn writeSymbol(self: *Elf, index: usize) !void {
};
const needed_size = (self.local_symbols.items.len + self.global_symbols.items.len) * sym_size;
try self.growNonAllocSection(self.symtab_section_index.?, needed_size, sym_align, true);
- syms_sect.sh_info = @intCast(u32, self.local_symbols.items.len);
+ syms_sect.sh_info = @as(u32, @intCast(self.local_symbols.items.len));
}
const foreign_endian = self.base.options.target.cpu.arch.endian() != builtin.cpu.arch.endian();
const off = switch (self.ptr_width) {
@@ -3101,8 +3101,8 @@ fn writeSymbol(self: *Elf, index: usize) !void {
var sym = [1]elf.Elf32_Sym{
.{
.st_name = local.st_name,
- .st_value = @intCast(u32, local.st_value),
- .st_size = @intCast(u32, local.st_size),
+ .st_value = @as(u32, @intCast(local.st_value)),
+ .st_size = @as(u32, @intCast(local.st_size)),
.st_info = local.st_info,
.st_other = local.st_other,
.st_shndx = local.st_shndx,
@@ -3148,8 +3148,8 @@ fn writeAllGlobalSymbols(self: *Elf) !void {
const global = self.global_symbols.items[i];
sym.* = .{
.st_name = global.st_name,
- .st_value = @intCast(u32, global.st_value),
- .st_size = @intCast(u32, global.st_size),
+ .st_value = @as(u32, @intCast(global.st_value)),
+ .st_size = @as(u32, @intCast(global.st_size)),
.st_info = global.st_info,
.st_other = global.st_other,
.st_shndx = global.st_shndx,
@@ -3194,19 +3194,19 @@ fn ptrWidthBytes(self: Elf) u8 {
/// Does not necessarily match `ptrWidthBytes` for example can be 2 bytes
/// in a 32-bit ELF file.
fn archPtrWidthBytes(self: Elf) u8 {
- return @intCast(u8, self.base.options.target.ptrBitWidth() / 8);
+ return @as(u8, @intCast(self.base.options.target.ptrBitWidth() / 8));
}
fn progHeaderTo32(phdr: elf.Elf64_Phdr) elf.Elf32_Phdr {
return .{
.p_type = phdr.p_type,
.p_flags = phdr.p_flags,
- .p_offset = @intCast(u32, phdr.p_offset),
- .p_vaddr = @intCast(u32, phdr.p_vaddr),
- .p_paddr = @intCast(u32, phdr.p_paddr),
- .p_filesz = @intCast(u32, phdr.p_filesz),
- .p_memsz = @intCast(u32, phdr.p_memsz),
- .p_align = @intCast(u32, phdr.p_align),
+ .p_offset = @as(u32, @intCast(phdr.p_offset)),
+ .p_vaddr = @as(u32, @intCast(phdr.p_vaddr)),
+ .p_paddr = @as(u32, @intCast(phdr.p_paddr)),
+ .p_filesz = @as(u32, @intCast(phdr.p_filesz)),
+ .p_memsz = @as(u32, @intCast(phdr.p_memsz)),
+ .p_align = @as(u32, @intCast(phdr.p_align)),
};
}
@@ -3214,14 +3214,14 @@ fn sectHeaderTo32(shdr: elf.Elf64_Shdr) elf.Elf32_Shdr {
return .{
.sh_name = shdr.sh_name,
.sh_type = shdr.sh_type,
- .sh_flags = @intCast(u32, shdr.sh_flags),
- .sh_addr = @intCast(u32, shdr.sh_addr),
- .sh_offset = @intCast(u32, shdr.sh_offset),
- .sh_size = @intCast(u32, shdr.sh_size),
+ .sh_flags = @as(u32, @intCast(shdr.sh_flags)),
+ .sh_addr = @as(u32, @intCast(shdr.sh_addr)),
+ .sh_offset = @as(u32, @intCast(shdr.sh_offset)),
+ .sh_size = @as(u32, @intCast(shdr.sh_size)),
.sh_link = shdr.sh_link,
.sh_info = shdr.sh_info,
- .sh_addralign = @intCast(u32, shdr.sh_addralign),
- .sh_entsize = @intCast(u32, shdr.sh_entsize),
+ .sh_addralign = @as(u32, @intCast(shdr.sh_addralign)),
+ .sh_entsize = @as(u32, @intCast(shdr.sh_entsize)),
};
}
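
Every Elf.zig hunk above is the same mechanical rewrite: the cast builtins no longer take a destination type as their first argument, so @intCast(u16, x) becomes @as(u16, @intCast(x)), with @as supplying the result type the builtin now infers. A minimal sketch of the new semantics (the function and test are illustrative, not taken from the diff):

const std = @import("std");

fn sectionIndex(len: usize) u16 {
    // With a result type already present, no @as wrapper is needed:
    // @intCast infers u16 from the declaration's type annotation.
    const index: u16 = @intCast(len);
    return index;
}

test "intCast infers its destination type" {
    try std.testing.expectEqual(@as(u16, 3), sectionIndex(3));
}
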
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index c91d18b0f7..80195a454d 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -741,7 +741,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
};
const sym = self.getSymbol(global);
try lc_writer.writeStruct(macho.entry_point_command{
- .entryoff = @intCast(u32, sym.n_value - seg.vmaddr),
+ .entryoff = @as(u32, @intCast(sym.n_value - seg.vmaddr)),
.stacksize = self.base.options.stack_size_override orelse 0,
});
},
@@ -757,7 +757,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
});
try load_commands.writeBuildVersionLC(&self.base.options, lc_writer);
- const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + @intCast(u32, lc_buffer.items.len);
+ const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + @as(u32, @intCast(lc_buffer.items.len));
try lc_writer.writeStruct(self.uuid_cmd);
try load_commands.writeLoadDylibLCs(self.dylibs.items, self.referenced_dylibs.keys(), lc_writer);
@@ -768,7 +768,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
const ncmds = load_commands.calcNumOfLCs(lc_buffer.items);
try self.base.file.?.pwriteAll(lc_buffer.items, @sizeOf(macho.mach_header_64));
- try self.writeHeader(ncmds, @intCast(u32, lc_buffer.items.len));
+ try self.writeHeader(ncmds, @as(u32, @intCast(lc_buffer.items.len)));
try self.writeUuid(comp, uuid_cmd_offset, requires_codesig);
if (codesig) |*csig| {
@@ -992,7 +992,7 @@ pub fn parseDylib(
const contents = try file.readToEndAllocOptions(gpa, file_size, file_size, @alignOf(u64), null);
defer gpa.free(contents);
- const dylib_id = @intCast(u16, self.dylibs.items.len);
+ const dylib_id = @as(u16, @intCast(self.dylibs.items.len));
var dylib = Dylib{ .weak = opts.weak };
dylib.parseFromBinary(
@@ -1412,7 +1412,7 @@ pub fn allocateSpecialSymbols(self: *MachO) !void {
pub fn createAtom(self: *MachO) !Atom.Index {
const gpa = self.base.allocator;
- const atom_index = @intCast(Atom.Index, self.atoms.items.len);
+ const atom_index = @as(Atom.Index, @intCast(self.atoms.items.len));
const atom = try self.atoms.addOne(gpa);
const sym_index = try self.allocateSymbol();
try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
@@ -1588,14 +1588,14 @@ fn resolveSymbolsInDylibs(self: *MachO, actions: *std.ArrayList(ResolveAction))
for (self.dylibs.items, 0..) |dylib, id| {
if (!dylib.symbols.contains(sym_name)) continue;
- const dylib_id = @intCast(u16, id);
+ const dylib_id = @as(u16, @intCast(id));
if (!self.referenced_dylibs.contains(dylib_id)) {
try self.referenced_dylibs.putNoClobber(gpa, dylib_id, {});
}
const ordinal = self.referenced_dylibs.getIndex(dylib_id) orelse unreachable;
sym.n_type |= macho.N_EXT;
- sym.n_desc = @intCast(u16, ordinal + 1) * macho.N_SYMBOL_RESOLVER;
+ sym.n_desc = @as(u16, @intCast(ordinal + 1)) * macho.N_SYMBOL_RESOLVER;
if (dylib.weak) {
sym.n_desc |= macho.N_WEAK_REF;
@@ -1789,7 +1789,7 @@ fn allocateSymbol(self: *MachO) !u32 {
break :blk index;
} else {
log.debug(" (allocating symbol index {d})", .{self.locals.items.len});
- const index = @intCast(u32, self.locals.items.len);
+ const index = @as(u32, @intCast(self.locals.items.len));
_ = self.locals.addOneAssumeCapacity();
break :blk index;
}
@@ -1815,7 +1815,7 @@ fn allocateGlobal(self: *MachO) !u32 {
break :blk index;
} else {
log.debug(" (allocating symbol index {d})", .{self.globals.items.len});
- const index = @intCast(u32, self.globals.items.len);
+ const index = @as(u32, @intCast(self.globals.items.len));
_ = self.globals.addOneAssumeCapacity();
break :blk index;
}
@@ -2563,12 +2563,12 @@ pub fn getDeclVAddr(self: *MachO, decl_index: Module.Decl.Index, reloc_info: Fil
try Atom.addRelocation(self, atom_index, .{
.type = .unsigned,
.target = .{ .sym_index = sym_index, .file = null },
- .offset = @intCast(u32, reloc_info.offset),
+ .offset = @as(u32, @intCast(reloc_info.offset)),
.addend = reloc_info.addend,
.pcrel = false,
.length = 3,
});
- try Atom.addRebase(self, atom_index, @intCast(u32, reloc_info.offset));
+ try Atom.addRebase(self, atom_index, @as(u32, @intCast(reloc_info.offset)));
return 0;
}
@@ -2582,7 +2582,7 @@ fn populateMissingMetadata(self: *MachO) !void {
if (self.pagezero_segment_cmd_index == null) {
if (pagezero_vmsize > 0) {
- self.pagezero_segment_cmd_index = @intCast(u8, self.segments.items.len);
+ self.pagezero_segment_cmd_index = @as(u8, @intCast(self.segments.items.len));
try self.segments.append(gpa, .{
.segname = makeStaticString("__PAGEZERO"),
.vmsize = pagezero_vmsize,
@@ -2593,7 +2593,7 @@ fn populateMissingMetadata(self: *MachO) !void {
if (self.header_segment_cmd_index == null) {
// The first __TEXT segment is immovable and covers MachO header and load commands.
- self.header_segment_cmd_index = @intCast(u8, self.segments.items.len);
+ self.header_segment_cmd_index = @as(u8, @intCast(self.segments.items.len));
const ideal_size = @max(self.base.options.headerpad_size orelse 0, default_headerpad_size);
const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), self.page_size);
@@ -2719,7 +2719,7 @@ fn populateMissingMetadata(self: *MachO) !void {
}
if (self.linkedit_segment_cmd_index == null) {
- self.linkedit_segment_cmd_index = @intCast(u8, self.segments.items.len);
+ self.linkedit_segment_cmd_index = @as(u8, @intCast(self.segments.items.len));
try self.segments.append(gpa, .{
.segname = makeStaticString("__LINKEDIT"),
@@ -2752,8 +2752,8 @@ fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts
const gpa = self.base.allocator;
// In incremental context, we create one section per segment pairing. This way,
// we can move the segment in raw file as we please.
- const segment_id = @intCast(u8, self.segments.items.len);
- const section_id = @intCast(u8, self.sections.slice().len);
+ const segment_id = @as(u8, @intCast(self.segments.items.len));
+ const section_id = @as(u8, @intCast(self.sections.slice().len));
const vmaddr = blk: {
const prev_segment = self.segments.items[segment_id - 1];
break :blk mem.alignForward(u64, prev_segment.vmaddr + prev_segment.vmsize, self.page_size);
@@ -2788,7 +2788,7 @@ fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts
.sectname = makeStaticString(sectname),
.segname = makeStaticString(segname),
.addr = mem.alignForward(u64, vmaddr, opts.alignment),
- .offset = mem.alignForward(u32, @intCast(u32, off), opts.alignment),
+ .offset = mem.alignForward(u32, @as(u32, @intCast(off)), opts.alignment),
.size = opts.size,
.@"align" = math.log2(opts.alignment),
.flags = opts.flags,
@@ -2832,7 +2832,7 @@ fn growSection(self: *MachO, sect_id: u8, needed_size: u64) !void {
current_size,
);
if (amt != current_size) return error.InputOutput;
- header.offset = @intCast(u32, new_offset);
+ header.offset = @as(u32, @intCast(new_offset));
segment.fileoff = new_offset;
}
@@ -2862,7 +2862,7 @@ fn growSectionVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void {
// TODO: enforce order by increasing VM addresses in self.sections container.
for (self.sections.items(.header)[sect_id + 1 ..], 0..) |*next_header, next_sect_id| {
- const index = @intCast(u8, sect_id + 1 + next_sect_id);
+ const index = @as(u8, @intCast(sect_id + 1 + next_sect_id));
const next_segment = self.getSegmentPtr(index);
next_header.addr += diff;
next_segment.vmaddr += diff;
@@ -2972,7 +2972,7 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm
self.segment_table_dirty = true;
}
- const align_pow = @intCast(u32, math.log2(alignment));
+ const align_pow = @as(u32, @intCast(math.log2(alignment)));
if (header.@"align" < align_pow) {
header.@"align" = align_pow;
}
@@ -3015,7 +3015,7 @@ pub fn getGlobalSymbol(self: *MachO, name: []const u8, lib_name: ?[]const u8) !u
fn writeSegmentHeaders(self: *MachO, writer: anytype) !void {
for (self.segments.items, 0..) |seg, i| {
- const indexes = self.getSectionIndexes(@intCast(u8, i));
+ const indexes = self.getSectionIndexes(@as(u8, @intCast(i)));
try writer.writeStruct(seg);
for (self.sections.items(.header)[indexes.start..indexes.end]) |header| {
try writer.writeStruct(header);
@@ -3029,7 +3029,7 @@ fn writeLinkeditSegmentData(self: *MachO) !void {
seg.vmsize = 0;
for (self.segments.items, 0..) |segment, id| {
- if (self.linkedit_segment_cmd_index.? == @intCast(u8, id)) continue;
+ if (self.linkedit_segment_cmd_index.? == @as(u8, @intCast(id))) continue;
if (seg.vmaddr < segment.vmaddr + segment.vmsize) {
seg.vmaddr = mem.alignForward(u64, segment.vmaddr + segment.vmsize, self.page_size);
}
@@ -3115,7 +3115,7 @@ fn collectBindDataFromTableSection(self: *MachO, sect_id: u8, bind: anytype, tab
log.debug(" | bind at {x}, import('{s}') in dylib({d})", .{
base_offset + offset,
self.getSymbolName(entry),
- @divTrunc(@bitCast(i16, bind_sym.n_desc), macho.N_SYMBOL_RESOLVER),
+ @divTrunc(@as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER),
});
if (bind_sym.weakRef()) {
log.debug(" | marking as weak ref ", .{});
@@ -3150,7 +3150,7 @@ fn collectBindData(self: *MachO, bind: anytype, raw_bindings: anytype) !void {
const bind_sym = self.getSymbol(binding.target);
const bind_sym_name = self.getSymbolName(binding.target);
const dylib_ordinal = @divTrunc(
- @bitCast(i16, bind_sym.n_desc),
+ @as(i16, @bitCast(bind_sym.n_desc)),
macho.N_SYMBOL_RESOLVER,
);
log.debug(" | bind at {x}, import('{s}') in dylib({d})", .{
@@ -3285,14 +3285,14 @@ fn writeDyldInfoData(self: *MachO) !void {
try self.base.file.?.pwriteAll(buffer, rebase_off);
try self.populateLazyBindOffsetsInStubHelper(lazy_bind);
- self.dyld_info_cmd.rebase_off = @intCast(u32, rebase_off);
- self.dyld_info_cmd.rebase_size = @intCast(u32, rebase_size_aligned);
- self.dyld_info_cmd.bind_off = @intCast(u32, bind_off);
- self.dyld_info_cmd.bind_size = @intCast(u32, bind_size_aligned);
- self.dyld_info_cmd.lazy_bind_off = @intCast(u32, lazy_bind_off);
- self.dyld_info_cmd.lazy_bind_size = @intCast(u32, lazy_bind_size_aligned);
- self.dyld_info_cmd.export_off = @intCast(u32, export_off);
- self.dyld_info_cmd.export_size = @intCast(u32, export_size_aligned);
+ self.dyld_info_cmd.rebase_off = @as(u32, @intCast(rebase_off));
+ self.dyld_info_cmd.rebase_size = @as(u32, @intCast(rebase_size_aligned));
+ self.dyld_info_cmd.bind_off = @as(u32, @intCast(bind_off));
+ self.dyld_info_cmd.bind_size = @as(u32, @intCast(bind_size_aligned));
+ self.dyld_info_cmd.lazy_bind_off = @as(u32, @intCast(lazy_bind_off));
+ self.dyld_info_cmd.lazy_bind_size = @as(u32, @intCast(lazy_bind_size_aligned));
+ self.dyld_info_cmd.export_off = @as(u32, @intCast(export_off));
+ self.dyld_info_cmd.export_size = @as(u32, @intCast(export_size_aligned));
}
fn populateLazyBindOffsetsInStubHelper(self: *MachO, lazy_bind: LazyBind) !void {
@@ -3337,7 +3337,7 @@ fn writeSymtab(self: *MachO) !SymtabCtx {
for (self.locals.items, 0..) |sym, sym_id| {
if (sym.n_strx == 0) continue; // no name, skip
- const sym_loc = SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = null };
+ const sym_loc = SymbolWithLoc{ .sym_index = @as(u32, @intCast(sym_id)), .file = null };
if (self.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip
if (self.getGlobal(self.getSymbolName(sym_loc)) != null) continue; // global symbol is either an export or import, skip
try locals.append(sym);
@@ -3363,16 +3363,16 @@ fn writeSymtab(self: *MachO) !SymtabCtx {
const sym = self.getSymbol(global);
if (sym.n_strx == 0) continue; // no name, skip
if (!sym.undf()) continue; // not an import, skip
- const new_index = @intCast(u32, imports.items.len);
+ const new_index = @as(u32, @intCast(imports.items.len));
var out_sym = sym;
out_sym.n_strx = try self.strtab.insert(gpa, self.getSymbolName(global));
try imports.append(out_sym);
try imports_table.putNoClobber(global, new_index);
}
- const nlocals = @intCast(u32, locals.items.len);
- const nexports = @intCast(u32, exports.items.len);
- const nimports = @intCast(u32, imports.items.len);
+ const nlocals = @as(u32, @intCast(locals.items.len));
+ const nexports = @as(u32, @intCast(exports.items.len));
+ const nimports = @as(u32, @intCast(imports.items.len));
const nsyms = nlocals + nexports + nimports;
const seg = self.getLinkeditSegmentPtr();
@@ -3392,7 +3392,7 @@ fn writeSymtab(self: *MachO) !SymtabCtx {
log.debug("writing symtab from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
try self.base.file.?.pwriteAll(buffer.items, offset);
- self.symtab_cmd.symoff = @intCast(u32, offset);
+ self.symtab_cmd.symoff = @as(u32, @intCast(offset));
self.symtab_cmd.nsyms = nsyms;
return SymtabCtx{
@@ -3421,8 +3421,8 @@ fn writeStrtab(self: *MachO) !void {
try self.base.file.?.pwriteAll(buffer, offset);
- self.symtab_cmd.stroff = @intCast(u32, offset);
- self.symtab_cmd.strsize = @intCast(u32, needed_size_aligned);
+ self.symtab_cmd.stroff = @as(u32, @intCast(offset));
+ self.symtab_cmd.strsize = @as(u32, @intCast(needed_size_aligned));
}
const SymtabCtx = struct {
@@ -3434,8 +3434,8 @@ const SymtabCtx = struct {
fn writeDysymtab(self: *MachO, ctx: SymtabCtx) !void {
const gpa = self.base.allocator;
- const nstubs = @intCast(u32, self.stub_table.lookup.count());
- const ngot_entries = @intCast(u32, self.got_table.lookup.count());
+ const nstubs = @as(u32, @intCast(self.stub_table.lookup.count()));
+ const ngot_entries = @as(u32, @intCast(self.got_table.lookup.count()));
const nindirectsyms = nstubs * 2 + ngot_entries;
const iextdefsym = ctx.nlocalsym;
const iundefsym = iextdefsym + ctx.nextdefsym;
@@ -3503,7 +3503,7 @@ fn writeDysymtab(self: *MachO, ctx: SymtabCtx) !void {
self.dysymtab_cmd.nextdefsym = ctx.nextdefsym;
self.dysymtab_cmd.iundefsym = iundefsym;
self.dysymtab_cmd.nundefsym = ctx.nundefsym;
- self.dysymtab_cmd.indirectsymoff = @intCast(u32, offset);
+ self.dysymtab_cmd.indirectsymoff = @as(u32, @intCast(offset));
self.dysymtab_cmd.nindirectsyms = nindirectsyms;
}
@@ -3530,8 +3530,8 @@ fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void {
// except for code signature data.
try self.base.file.?.pwriteAll(&[_]u8{0}, offset + needed_size - 1);
- self.codesig_cmd.dataoff = @intCast(u32, offset);
- self.codesig_cmd.datasize = @intCast(u32, needed_size);
+ self.codesig_cmd.dataoff = @as(u32, @intCast(offset));
+ self.codesig_cmd.datasize = @as(u32, @intCast(needed_size));
}
fn writeCodeSignature(self: *MachO, comp: *const Compilation, code_sig: *CodeSignature) !void {
@@ -3711,7 +3711,7 @@ pub fn makeStaticString(bytes: []const u8) [16]u8 {
fn getSegmentByName(self: MachO, segname: []const u8) ?u8 {
for (self.segments.items, 0..) |seg, i| {
- if (mem.eql(u8, segname, seg.segName())) return @intCast(u8, i);
+ if (mem.eql(u8, segname, seg.segName())) return @as(u8, @intCast(i));
} else return null;
}
@@ -3734,15 +3734,15 @@ pub fn getSectionByName(self: MachO, segname: []const u8, sectname: []const u8)
// TODO investigate caching with a hashmap
for (self.sections.items(.header), 0..) |header, i| {
if (mem.eql(u8, header.segName(), segname) and mem.eql(u8, header.sectName(), sectname))
- return @intCast(u8, i);
+ return @as(u8, @intCast(i));
} else return null;
}
pub fn getSectionIndexes(self: MachO, segment_index: u8) struct { start: u8, end: u8 } {
var start: u8 = 0;
const nsects = for (self.segments.items, 0..) |seg, i| {
- if (i == segment_index) break @intCast(u8, seg.nsects);
- start += @intCast(u8, seg.nsects);
+ if (i == segment_index) break @as(u8, @intCast(seg.nsects));
+ start += @as(u8, @intCast(seg.nsects));
} else 0;
return .{ .start = start, .end = start + nsects };
}
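
One MachO.zig pattern worth calling out: resolveSymbolsInDylibs packs the 1-based dylib ordinal into the u16 n_desc field by multiplying with N_SYMBOL_RESOLVER, and collectBindData recovers it by reinterpreting the field as i16 with @bitCast before dividing. A sketch of the round trip, assuming the Mach-O constant value N_SYMBOL_RESOLVER == 0x100:

const std = @import("std");

const N_SYMBOL_RESOLVER = 0x100;

fn packOrdinal(ordinal: usize) u16 {
    return @as(u16, @intCast(ordinal + 1)) * N_SYMBOL_RESOLVER;
}

fn unpackOrdinal(n_desc: u16) i16 {
    // @bitCast preserves the bit pattern; the i16 result type comes
    // from the @as wrapper, exactly as at the rewritten call sites.
    return @divTrunc(@as(i16, @bitCast(n_desc)), N_SYMBOL_RESOLVER);
}

test "dylib ordinal round-trips through n_desc" {
    try std.testing.expectEqual(@as(i16, 2), unpackOrdinal(packOrdinal(1)));
}
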
diff --git a/src/link/MachO/Archive.zig b/src/link/MachO/Archive.zig
index d222394ad5..5276bf041e 100644
--- a/src/link/MachO/Archive.zig
+++ b/src/link/MachO/Archive.zig
@@ -169,7 +169,7 @@ fn parseTableOfContents(self: *Archive, allocator: Allocator, reader: anytype) !
};
const object_offset = try symtab_reader.readIntLittle(u32);
- const sym_name = mem.sliceTo(@ptrCast([*:0]const u8, strtab.ptr + n_strx), 0);
+ const sym_name = mem.sliceTo(@as([*:0]const u8, @ptrCast(strtab.ptr + n_strx)), 0);
const owned_name = try allocator.dupe(u8, sym_name);
const res = try self.toc.getOrPut(allocator, owned_name);
defer if (res.found_existing) allocator.free(owned_name);
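
The Archive.zig change is the @ptrCast variant of the same rewrite: a raw string-table pointer becomes a sentinel-terminated many-pointer, which mem.sliceTo then walks up to the NUL. A standalone sketch with an inline string table:

const std = @import("std");
const mem = std.mem;

fn nameAt(strtab: []const u8, n_strx: usize) []const u8 {
    // The [*:0]const u8 result type is supplied by @as; @ptrCast
    // itself no longer names a destination type.
    return mem.sliceTo(@as([*:0]const u8, @ptrCast(strtab.ptr + n_strx)), 0);
}

test "sliceTo stops at the NUL terminator" {
    const strtab = "\x00_main\x00_start\x00";
    try std.testing.expectEqualStrings("_main", nameAt(strtab, 1));
    try std.testing.expectEqualStrings("_start", nameAt(strtab, 7));
}
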
diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig
index fcb4c16063..f527ca3581 100644
--- a/src/link/MachO/CodeSignature.zig
+++ b/src/link/MachO/CodeSignature.zig
@@ -72,7 +72,7 @@ const CodeDirectory = struct {
.hashSize = hash_size,
.hashType = macho.CS_HASHTYPE_SHA256,
.platform = 0,
- .pageSize = @truncate(u8, std.math.log2(page_size)),
+ .pageSize = @as(u8, @truncate(std.math.log2(page_size))),
.spare2 = 0,
.scatterOffset = 0,
.teamOffset = 0,
@@ -110,7 +110,7 @@ const CodeDirectory = struct {
fn size(self: CodeDirectory) u32 {
const code_slots = self.inner.nCodeSlots * hash_size;
const special_slots = self.inner.nSpecialSlots * hash_size;
- return @sizeOf(macho.CodeDirectory) + @intCast(u32, self.ident.len + 1 + special_slots + code_slots);
+ return @sizeOf(macho.CodeDirectory) + @as(u32, @intCast(self.ident.len + 1 + special_slots + code_slots));
}
fn write(self: CodeDirectory, writer: anytype) !void {
@@ -139,9 +139,9 @@ const CodeDirectory = struct {
try writer.writeAll(self.ident);
try writer.writeByte(0);
- var i: isize = @intCast(isize, self.inner.nSpecialSlots);
+ var i: isize = @as(isize, @intCast(self.inner.nSpecialSlots));
while (i > 0) : (i -= 1) {
- try writer.writeAll(&self.special_slots[@intCast(usize, i - 1)]);
+ try writer.writeAll(&self.special_slots[@as(usize, @intCast(i - 1))]);
}
for (self.code_slots.items) |slot| {
@@ -186,7 +186,7 @@ const Entitlements = struct {
}
fn size(self: Entitlements) u32 {
- return @intCast(u32, self.inner.len) + 2 * @sizeOf(u32);
+ return @as(u32, @intCast(self.inner.len)) + 2 * @sizeOf(u32);
}
fn write(self: Entitlements, writer: anytype) !void {
@@ -281,7 +281,7 @@ pub fn writeAdhocSignature(
self.code_directory.inner.execSegFlags = if (opts.output_mode == .Exe) macho.CS_EXECSEG_MAIN_BINARY else 0;
self.code_directory.inner.codeLimit = opts.file_size;
- const total_pages = @intCast(u32, mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size);
+ const total_pages = @as(u32, @intCast(mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size));
try self.code_directory.code_slots.ensureTotalCapacityPrecise(gpa, total_pages);
self.code_directory.code_slots.items.len = total_pages;
@@ -331,7 +331,7 @@ pub fn writeAdhocSignature(
}
self.code_directory.inner.hashOffset =
- @sizeOf(macho.CodeDirectory) + @intCast(u32, self.code_directory.ident.len + 1 + self.code_directory.inner.nSpecialSlots * hash_size);
+ @sizeOf(macho.CodeDirectory) + @as(u32, @intCast(self.code_directory.ident.len + 1 + self.code_directory.inner.nSpecialSlots * hash_size));
self.code_directory.inner.length = self.code_directory.size();
header.length += self.code_directory.size();
@@ -339,7 +339,7 @@ pub fn writeAdhocSignature(
try writer.writeIntBig(u32, header.length);
try writer.writeIntBig(u32, header.count);
- var offset: u32 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) * @intCast(u32, blobs.items.len);
+ var offset: u32 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) * @as(u32, @intCast(blobs.items.len));
for (blobs.items) |blob| {
try writer.writeIntBig(u32, blob.slotType());
try writer.writeIntBig(u32, offset);
@@ -383,7 +383,7 @@ pub fn estimateSize(self: CodeSignature, file_size: u64) u32 {
ssize += @sizeOf(macho.BlobIndex) + sig.size();
}
ssize += n_special_slots * hash_size;
- return @intCast(u32, mem.alignForward(u64, ssize, @sizeOf(u64)));
+ return @as(u32, @intCast(mem.alignForward(u64, ssize, @sizeOf(u64))));
}
pub fn clear(self: *CodeSignature, allocator: Allocator) void {
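
CodeSignature.zig mixes both narrowing builtins, and the distinction survives the rewrite: @truncate discards high bits and cannot fail, while @intCast asserts the value fits (checked in Debug and ReleaseSafe). A sketch of the difference, using a page-size computation like the one in CodeDirectory:

const std = @import("std");

test "truncate drops bits, intCast asserts fit" {
    const wide: u64 = 0x1234;
    const low: u8 = @truncate(wide); // keeps only the low 8 bits
    try std.testing.expectEqual(@as(u8, 0x34), low);

    const page_size: u64 = 4096;
    const log2_page: u8 = @intCast(std.math.log2(page_size)); // 12 fits in u8
    try std.testing.expectEqual(@as(u8, 12), log2_page);
}
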
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index fdb8c9c816..ade26de920 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -64,9 +64,9 @@ pub const Reloc = struct {
/// has been called to get a viable debug symbols output.
pub fn populateMissingMetadata(self: *DebugSymbols) !void {
if (self.dwarf_segment_cmd_index == null) {
- self.dwarf_segment_cmd_index = @intCast(u8, self.segments.items.len);
+ self.dwarf_segment_cmd_index = @as(u8, @intCast(self.segments.items.len));
- const off = @intCast(u64, self.page_size);
+ const off = @as(u64, @intCast(self.page_size));
const ideal_size: u16 = 200 + 128 + 160 + 250;
const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), self.page_size);
@@ -86,7 +86,7 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void {
try self.dwarf.strtab.buffer.append(self.allocator, 0);
self.debug_str_section_index = try self.allocateSection(
"__debug_str",
- @intCast(u32, self.dwarf.strtab.buffer.items.len),
+ @as(u32, @intCast(self.dwarf.strtab.buffer.items.len)),
0,
);
self.debug_string_table_dirty = true;
@@ -113,7 +113,7 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void {
}
if (self.linkedit_segment_cmd_index == null) {
- self.linkedit_segment_cmd_index = @intCast(u8, self.segments.items.len);
+ self.linkedit_segment_cmd_index = @as(u8, @intCast(self.segments.items.len));
try self.segments.append(self.allocator, .{
.segname = makeStaticString("__LINKEDIT"),
.maxprot = macho.PROT.READ,
@@ -128,7 +128,7 @@ fn allocateSection(self: *DebugSymbols, sectname: []const u8, size: u64, alignme
var sect = macho.section_64{
.sectname = makeStaticString(sectname),
.segname = segment.segname,
- .size = @intCast(u32, size),
+ .size = @as(u32, @intCast(size)),
.@"align" = alignment,
};
const alignment_pow_2 = try math.powi(u32, 2, alignment);
@@ -141,9 +141,9 @@ fn allocateSection(self: *DebugSymbols, sectname: []const u8, size: u64, alignme
off + size,
});
- sect.offset = @intCast(u32, off);
+ sect.offset = @as(u32, @intCast(off));
- const index = @intCast(u8, self.sections.items.len);
+ const index = @as(u8, @intCast(self.sections.items.len));
try self.sections.append(self.allocator, sect);
segment.cmdsize += @sizeOf(macho.section_64);
segment.nsects += 1;
@@ -176,7 +176,7 @@ pub fn growSection(self: *DebugSymbols, sect_index: u8, needed_size: u32, requir
if (amt != existing_size) return error.InputOutput;
}
- sect.offset = @intCast(u32, new_offset);
+ sect.offset = @as(u32, @intCast(new_offset));
}
sect.size = needed_size;
@@ -286,7 +286,7 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {
{
const sect_index = self.debug_str_section_index.?;
if (self.debug_string_table_dirty or self.dwarf.strtab.buffer.items.len != self.getSection(sect_index).size) {
- const needed_size = @intCast(u32, self.dwarf.strtab.buffer.items.len);
+ const needed_size = @as(u32, @intCast(self.dwarf.strtab.buffer.items.len));
try self.growSection(sect_index, needed_size, false);
try self.file.pwriteAll(self.dwarf.strtab.buffer.items, self.getSection(sect_index).offset);
self.debug_string_table_dirty = false;
@@ -307,7 +307,7 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {
const ncmds = load_commands.calcNumOfLCs(lc_buffer.items);
try self.file.pwriteAll(lc_buffer.items, @sizeOf(macho.mach_header_64));
- try self.writeHeader(macho_file, ncmds, @intCast(u32, lc_buffer.items.len));
+ try self.writeHeader(macho_file, ncmds, @as(u32, @intCast(lc_buffer.items.len)));
assert(!self.debug_abbrev_section_dirty);
assert(!self.debug_aranges_section_dirty);
@@ -378,7 +378,7 @@ fn writeSegmentHeaders(self: *DebugSymbols, macho_file: *MachO, writer: anytype)
// Write segment/section headers from the binary file first.
const end = macho_file.linkedit_segment_cmd_index.?;
for (macho_file.segments.items[0..end], 0..) |seg, i| {
- const indexes = macho_file.getSectionIndexes(@intCast(u8, i));
+ const indexes = macho_file.getSectionIndexes(@as(u8, @intCast(i)));
var out_seg = seg;
out_seg.fileoff = 0;
out_seg.filesize = 0;
@@ -407,7 +407,7 @@ fn writeSegmentHeaders(self: *DebugSymbols, macho_file: *MachO, writer: anytype)
}
// Next, commit DSYM's __LINKEDIT and __DWARF segments headers.
for (self.segments.items, 0..) |seg, i| {
- const indexes = self.getSectionIndexes(@intCast(u8, i));
+ const indexes = self.getSectionIndexes(@as(u8, @intCast(i)));
try writer.writeStruct(seg);
for (self.sections.items[indexes.start..indexes.end]) |header| {
try writer.writeStruct(header);
@@ -473,7 +473,7 @@ fn writeSymtab(self: *DebugSymbols, macho_file: *MachO) !void {
for (macho_file.locals.items, 0..) |sym, sym_id| {
if (sym.n_strx == 0) continue; // no name, skip
- const sym_loc = MachO.SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = null };
+ const sym_loc = MachO.SymbolWithLoc{ .sym_index = @as(u32, @intCast(sym_id)), .file = null };
if (macho_file.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip
if (macho_file.getGlobal(macho_file.getSymbolName(sym_loc)) != null) continue; // global symbol is either an export or import, skip
var out_sym = sym;
@@ -501,10 +501,10 @@ fn writeSymtab(self: *DebugSymbols, macho_file: *MachO) !void {
const needed_size = nsyms * @sizeOf(macho.nlist_64);
seg.filesize = offset + needed_size - seg.fileoff;
- self.symtab_cmd.symoff = @intCast(u32, offset);
- self.symtab_cmd.nsyms = @intCast(u32, nsyms);
+ self.symtab_cmd.symoff = @as(u32, @intCast(offset));
+ self.symtab_cmd.nsyms = @as(u32, @intCast(nsyms));
- const locals_off = @intCast(u32, offset);
+ const locals_off = @as(u32, @intCast(offset));
const locals_size = nlocals * @sizeOf(macho.nlist_64);
const exports_off = locals_off + locals_size;
const exports_size = nexports * @sizeOf(macho.nlist_64);
@@ -521,13 +521,13 @@ fn writeStrtab(self: *DebugSymbols) !void {
defer tracy.end();
const seg = &self.segments.items[self.linkedit_segment_cmd_index.?];
- const symtab_size = @intCast(u32, self.symtab_cmd.nsyms * @sizeOf(macho.nlist_64));
+ const symtab_size = @as(u32, @intCast(self.symtab_cmd.nsyms * @sizeOf(macho.nlist_64)));
const offset = mem.alignForward(u64, self.symtab_cmd.symoff + symtab_size, @alignOf(u64));
const needed_size = mem.alignForward(u64, self.strtab.buffer.items.len, @alignOf(u64));
seg.filesize = offset + needed_size - seg.fileoff;
- self.symtab_cmd.stroff = @intCast(u32, offset);
- self.symtab_cmd.strsize = @intCast(u32, needed_size);
+ self.symtab_cmd.stroff = @as(u32, @intCast(offset));
+ self.symtab_cmd.strsize = @as(u32, @intCast(needed_size));
log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
@@ -542,8 +542,8 @@ fn writeStrtab(self: *DebugSymbols) !void {
pub fn getSectionIndexes(self: *DebugSymbols, segment_index: u8) struct { start: u8, end: u8 } {
var start: u8 = 0;
const nsects = for (self.segments.items, 0..) |seg, i| {
- if (i == segment_index) break @intCast(u8, seg.nsects);
- start += @intCast(u8, seg.nsects);
+ if (i == segment_index) break @as(u8, @intCast(seg.nsects));
+ start += @as(u8, @intCast(seg.nsects));
} else 0;
return .{ .start = start, .end = start + nsects };
}
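
DebugSymbols.zig narrows many 64-bit file offsets into the u32 fields of Mach-O load commands; each offset is aligned first, then checked-narrowed. A sketch of the symtab/strtab layout arithmetic (the sizes are stand-ins, not taken from the diff):

const std = @import("std");
const mem = std.mem;

test "align a linkedit offset, then narrow it to u32" {
    const symoff: u64 = 0x4000;
    const nsyms: u64 = 3;
    const nlist_size: u64 = 16; // stand-in for @sizeOf(macho.nlist_64)
    const stroff = mem.alignForward(u64, symoff + nsyms * nlist_size, @alignOf(u64));
    const stroff_u32: u32 = @intCast(stroff); // panics in safe builds if it overflows u32
    try std.testing.expectEqual(@as(u32, 0x4030), stroff_u32);
}
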
diff --git a/src/link/MachO/DwarfInfo.zig b/src/link/MachO/DwarfInfo.zig
index 3218435734..07d98e8e94 100644
--- a/src/link/MachO/DwarfInfo.zig
+++ b/src/link/MachO/DwarfInfo.zig
@@ -70,7 +70,7 @@ pub fn genSubprogramLookupByName(
low_pc = addr;
}
if (try attr.getConstant(self)) |constant| {
- low_pc = @intCast(u64, constant);
+ low_pc = @as(u64, @intCast(constant));
}
},
dwarf.AT.high_pc => {
@@ -78,7 +78,7 @@ pub fn genSubprogramLookupByName(
high_pc = addr;
}
if (try attr.getConstant(self)) |constant| {
- high_pc = @intCast(u64, constant);
+ high_pc = @as(u64, @intCast(constant));
}
},
else => {},
@@ -261,7 +261,7 @@ pub const Attribute = struct {
switch (self.form) {
dwarf.FORM.string => {
- return mem.sliceTo(@ptrCast([*:0]const u8, debug_info.ptr), 0);
+ return mem.sliceTo(@as([*:0]const u8, @ptrCast(debug_info.ptr)), 0);
},
dwarf.FORM.strp => {
const off = if (cuh.is_64bit)
@@ -499,5 +499,5 @@ fn findAbbrevEntrySize(self: DwarfInfo, da_off: usize, da_len: usize, di_off: us
fn getString(self: DwarfInfo, off: u64) []const u8 {
assert(off < self.debug_str.len);
- return mem.sliceTo(@ptrCast([*:0]const u8, self.debug_str.ptr + @intCast(usize, off)), 0);
+ return mem.sliceTo(@as([*:0]const u8, @ptrCast(self.debug_str.ptr + @as(usize, @intCast(off)))), 0);
}
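
In DwarfInfo.zig the @intCast direction flips: DWARF constants arrive as signed i64, and narrowing them to u64 with @intCast also asserts non-negativity. With a result type on the declaration, the @as wrapper seen in the diff is not even needed:

const std = @import("std");

fn lowPcFromConstant(constant: i64) u64 {
    // Safety-checked: a negative constant trips a panic in Debug and
    // ReleaseSafe rather than silently wrapping.
    return @intCast(constant);
}

test "signed-to-unsigned intCast asserts non-negative" {
    try std.testing.expectEqual(@as(u64, 0x1000), lowPcFromConstant(0x1000));
}
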
diff --git a/src/link/MachO/Dylib.zig b/src/link/MachO/Dylib.zig
index 971706dae6..ee8f34f756 100644
--- a/src/link/MachO/Dylib.zig
+++ b/src/link/MachO/Dylib.zig
@@ -75,7 +75,7 @@ pub const Id = struct {
.int => |int| {
var out: u32 = 0;
const major = math.cast(u16, int) orelse return error.Overflow;
- out += @intCast(u32, major) << 16;
+ out += @as(u32, @intCast(major)) << 16;
return out;
},
.float => |float| {
@@ -106,9 +106,9 @@ pub const Id = struct {
out += try fmt.parseInt(u8, values[2], 10);
}
if (count > 1) {
- out += @intCast(u32, try fmt.parseInt(u8, values[1], 10)) << 8;
+ out += @as(u32, @intCast(try fmt.parseInt(u8, values[1], 10))) << 8;
}
- out += @intCast(u32, try fmt.parseInt(u16, values[0], 10)) << 16;
+ out += @as(u32, @intCast(try fmt.parseInt(u16, values[0], 10))) << 16;
return out;
}
@@ -164,11 +164,11 @@ pub fn parseFromBinary(
switch (cmd.cmd()) {
.SYMTAB => {
const symtab_cmd = cmd.cast(macho.symtab_command).?;
- const symtab = @ptrCast(
+ const symtab = @as(
[*]const macho.nlist_64,
// Alignment is guaranteed as a dylib is a final linked image and has to have sections
// properly aligned in order to be correctly loaded by the loader.
- @alignCast(@alignOf(macho.nlist_64), &data[symtab_cmd.symoff]),
+ @ptrCast(@alignCast(&data[symtab_cmd.symoff])),
)[0..symtab_cmd.nsyms];
const strtab = data[symtab_cmd.stroff..][0..symtab_cmd.strsize];
@@ -176,7 +176,7 @@ pub fn parseFromBinary(
const add_to_symtab = sym.ext() and (sym.sect() or sym.indr());
if (!add_to_symtab) continue;
- const sym_name = mem.sliceTo(@ptrCast([*:0]const u8, strtab.ptr + sym.n_strx), 0);
+ const sym_name = mem.sliceTo(@as([*:0]const u8, @ptrCast(strtab.ptr + sym.n_strx)), 0);
try self.symbols.putNoClobber(allocator, try allocator.dupe(u8, sym_name), false);
}
},
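
The Dylib.zig hunk shows the @alignCast half of the migration: the old form named the alignment explicitly, while the new form infers it from the result type flowing through @ptrCast. A sketch viewing raw bytes as a symbol table (the helper is illustrative; alignment is checked at runtime in safe builds):

const std = @import("std");
const mem = std.mem;
const macho = std.macho;

fn viewSymtab(data: []const u8, symoff: usize, nsyms: usize) []const macho.nlist_64 {
    // @alignCast takes only the pointer now; the destination alignment
    // (@alignOf(macho.nlist_64)) is inferred from @ptrCast's result type.
    return @as([*]const macho.nlist_64, @ptrCast(@alignCast(&data[symoff])))[0..nsyms];
}

test "reinterpret aligned bytes as nlist_64 entries" {
    const syms = [_]macho.nlist_64{.{
        .n_strx = 1,
        .n_type = 0,
        .n_sect = 0,
        .n_desc = 0,
        .n_value = 0x1000,
    }};
    const view = viewSymtab(mem.asBytes(&syms), 0, 1);
    try std.testing.expectEqual(@as(u64, 0x1000), view[0].n_value);
}
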
diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig
index 105a806075..29fe2988b6 100644
--- a/src/link/MachO/Object.zig
+++ b/src/link/MachO/Object.zig
@@ -164,7 +164,7 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
else => {},
} else return;
- self.in_symtab = @ptrCast([*]align(1) const macho.nlist_64, self.contents.ptr + symtab.symoff)[0..symtab.nsyms];
+ self.in_symtab = @as([*]align(1) const macho.nlist_64, @ptrCast(self.contents.ptr + symtab.symoff))[0..symtab.nsyms];
self.in_strtab = self.contents[symtab.stroff..][0..symtab.strsize];
self.symtab = try allocator.alloc(macho.nlist_64, self.in_symtab.?.len + nsects);
@@ -202,7 +202,7 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
defer sorted_all_syms.deinit();
for (self.in_symtab.?, 0..) |_, index| {
- sorted_all_syms.appendAssumeCapacity(.{ .index = @intCast(u32, index) });
+ sorted_all_syms.appendAssumeCapacity(.{ .index = @as(u32, @intCast(index)) });
}
// We sort by type: defined < undefined, and
@@ -225,18 +225,18 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
}
}
if (sym.sect() and section_index_lookup == null) {
- section_index_lookup = .{ .start = @intCast(u32, i), .len = 1 };
+ section_index_lookup = .{ .start = @as(u32, @intCast(i)), .len = 1 };
}
prev_sect_id = sym.n_sect;
self.symtab[i] = sym;
self.source_symtab_lookup[i] = sym_id.index;
- self.reverse_symtab_lookup[sym_id.index] = @intCast(u32, i);
- self.source_address_lookup[i] = if (sym.undf()) -1 else @intCast(i64, sym.n_value);
+ self.reverse_symtab_lookup[sym_id.index] = @as(u32, @intCast(i));
+ self.source_address_lookup[i] = if (sym.undf()) -1 else @as(i64, @intCast(sym.n_value));
- const sym_name_len = mem.sliceTo(@ptrCast([*:0]const u8, self.in_strtab.?.ptr + sym.n_strx), 0).len + 1;
- self.strtab_lookup[i] = @intCast(u32, sym_name_len);
+ const sym_name_len = mem.sliceTo(@as([*:0]const u8, @ptrCast(self.in_strtab.?.ptr + sym.n_strx)), 0).len + 1;
+ self.strtab_lookup[i] = @as(u32, @intCast(sym_name_len));
}
// If there were no undefined symbols, make sure we populate the
@@ -267,7 +267,7 @@ const SymbolAtIndex = struct {
fn getSymbolName(self: SymbolAtIndex, ctx: Context) []const u8 {
const off = self.getSymbol(ctx).n_strx;
- return mem.sliceTo(@ptrCast([*:0]const u8, ctx.in_strtab.?.ptr + off), 0);
+ return mem.sliceTo(@as([*:0]const u8, @ptrCast(ctx.in_strtab.?.ptr + off)), 0);
}
fn getSymbolSeniority(self: SymbolAtIndex, ctx: Context) u2 {
@@ -338,7 +338,7 @@ fn filterSymbolsBySection(symbols: []macho.nlist_64, n_sect: u8) struct {
.n_sect = n_sect,
});
- return .{ .index = @intCast(u32, index), .len = @intCast(u32, len) };
+ return .{ .index = @as(u32, @intCast(index)), .len = @as(u32, @intCast(len)) };
}
fn filterSymbolsByAddress(symbols: []macho.nlist_64, start_addr: u64, end_addr: u64) struct {
@@ -360,7 +360,7 @@ fn filterSymbolsByAddress(symbols: []macho.nlist_64, start_addr: u64, end_addr:
.addr = end_addr,
});
- return .{ .index = @intCast(u32, index), .len = @intCast(u32, len) };
+ return .{ .index = @as(u32, @intCast(index)), .len = @as(u32, @intCast(len)) };
}
const SortedSection = struct {
@@ -400,7 +400,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
};
if (sect.size == 0) continue;
- const sect_id = @intCast(u8, id);
+ const sect_id = @as(u8, @intCast(id));
const sym = self.getSectionAliasSymbolPtr(sect_id);
sym.* = .{
.n_strx = 0,
@@ -417,7 +417,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
const out_sect_id = (try zld.getOutputSection(sect)) orelse continue;
if (sect.size == 0) continue;
- const sect_id = @intCast(u8, id);
+ const sect_id = @as(u8, @intCast(id));
const sym_index = self.getSectionAliasSymbolIndex(sect_id);
const atom_index = try self.createAtomFromSubsection(
zld,
@@ -459,7 +459,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
defer gpa.free(sorted_sections);
for (sections, 0..) |sect, id| {
- sorted_sections[id] = .{ .header = sect, .id = @intCast(u8, id) };
+ sorted_sections[id] = .{ .header = sect, .id = @as(u8, @intCast(id)) };
}
mem.sort(SortedSection, sorted_sections, {}, sectionLessThanByAddress);
@@ -651,7 +651,7 @@ fn filterRelocs(
const start = @import("zld.zig").bsearch(macho.relocation_info, relocs, Predicate{ .addr = end_addr });
const len = @import("zld.zig").lsearch(macho.relocation_info, relocs[start..], LPredicate{ .addr = start_addr });
- return .{ .start = @intCast(u32, start), .len = @intCast(u32, len) };
+ return .{ .start = @as(u32, @intCast(start)), .len = @as(u32, @intCast(len)) };
}
/// Parse all relocs for the input section, and sort in descending order.
@@ -659,7 +659,7 @@ fn filterRelocs(
/// section in a sorted manner which is simply not true.
fn parseRelocs(self: *Object, gpa: Allocator, sect_id: u8) !void {
const section = self.getSourceSection(sect_id);
- const start = @intCast(u32, self.relocations.items.len);
+ const start = @as(u32, @intCast(self.relocations.items.len));
if (self.getSourceRelocs(section)) |relocs| {
try self.relocations.ensureUnusedCapacity(gpa, relocs.len);
self.relocations.appendUnalignedSliceAssumeCapacity(relocs);
@@ -677,8 +677,8 @@ fn cacheRelocs(self: *Object, zld: *Zld, atom_index: AtomIndex) !void {
// If there was no matching symbol present in the source symtab, this means
// we are dealing with either an entire section, or part of it, but also
// starting at the beginning.
- const nbase = @intCast(u32, self.in_symtab.?.len);
- const sect_id = @intCast(u8, atom.sym_index - nbase);
+ const nbase = @as(u32, @intCast(self.in_symtab.?.len));
+ const sect_id = @as(u8, @intCast(atom.sym_index - nbase));
break :blk sect_id;
};
const source_sect = self.getSourceSection(source_sect_id);
@@ -745,7 +745,7 @@ fn parseEhFrameSection(self: *Object, zld: *Zld, object_id: u32) !void {
.object_id = object_id,
.rel = rel,
.code = it.data[offset..],
- .base_offset = @intCast(i32, offset),
+ .base_offset = @as(i32, @intCast(offset)),
});
break :blk target;
},
@@ -798,7 +798,7 @@ fn parseUnwindInfo(self: *Object, zld: *Zld, object_id: u32) !void {
_ = try zld.initSection("__TEXT", "__unwind_info", .{});
}
- try self.unwind_records_lookup.ensureTotalCapacity(gpa, @intCast(u32, self.exec_atoms.items.len));
+ try self.unwind_records_lookup.ensureTotalCapacity(gpa, @as(u32, @intCast(self.exec_atoms.items.len)));
const unwind_records = self.getUnwindRecords();
@@ -834,14 +834,14 @@ fn parseUnwindInfo(self: *Object, zld: *Zld, object_id: u32) !void {
.object_id = object_id,
.rel = rel,
.code = mem.asBytes(&record),
- .base_offset = @intCast(i32, offset),
+ .base_offset = @as(i32, @intCast(offset)),
});
log.debug("unwind record {d} tracks {s}", .{ record_id, zld.getSymbolName(target) });
if (target.getFile() != object_id) {
self.unwind_relocs_lookup[record_id].dead = true;
} else {
const atom_index = self.getAtomIndexForSymbol(target.sym_index).?;
- self.unwind_records_lookup.putAssumeCapacityNoClobber(atom_index, @intCast(u32, record_id));
+ self.unwind_records_lookup.putAssumeCapacityNoClobber(atom_index, @as(u32, @intCast(record_id)));
}
}
}
@@ -869,7 +869,7 @@ pub fn getSourceSectionIndexByName(self: Object, segname: []const u8, sectname:
const sections = self.getSourceSections();
for (sections, 0..) |sect, i| {
if (mem.eql(u8, segname, sect.segName()) and mem.eql(u8, sectname, sect.sectName()))
- return @intCast(u8, i);
+ return @as(u8, @intCast(i));
} else return null;
}
@@ -898,7 +898,7 @@ pub fn parseDataInCode(self: *Object, gpa: Allocator) !void {
}
} else return;
const ndice = @divExact(cmd.datasize, @sizeOf(macho.data_in_code_entry));
- const dice = @ptrCast([*]align(1) const macho.data_in_code_entry, self.contents.ptr + cmd.dataoff)[0..ndice];
+ const dice = @as([*]align(1) const macho.data_in_code_entry, @ptrCast(self.contents.ptr + cmd.dataoff))[0..ndice];
try self.data_in_code.ensureTotalCapacityPrecise(gpa, dice.len);
self.data_in_code.appendUnalignedSliceAssumeCapacity(dice);
mem.sort(macho.data_in_code_entry, self.data_in_code.items, {}, diceLessThan);
@@ -945,12 +945,12 @@ pub fn parseDwarfInfo(self: Object) DwarfInfo {
}
pub fn getSectionContents(self: Object, sect: macho.section_64) []const u8 {
- const size = @intCast(usize, sect.size);
+ const size = @as(usize, @intCast(sect.size));
return self.contents[sect.offset..][0..size];
}
pub fn getSectionAliasSymbolIndex(self: Object, sect_id: u8) u32 {
- const start = @intCast(u32, self.in_symtab.?.len);
+ const start = @as(u32, @intCast(self.in_symtab.?.len));
return start + sect_id;
}
@@ -964,7 +964,7 @@ pub fn getSectionAliasSymbolPtr(self: *Object, sect_id: u8) *macho.nlist_64 {
fn getSourceRelocs(self: Object, sect: macho.section_64) ?[]align(1) const macho.relocation_info {
if (sect.nreloc == 0) return null;
- return @ptrCast([*]align(1) const macho.relocation_info, self.contents.ptr + sect.reloff)[0..sect.nreloc];
+ return @as([*]align(1) const macho.relocation_info, @ptrCast(self.contents.ptr + sect.reloff))[0..sect.nreloc];
}
pub fn getRelocs(self: Object, sect_id: u8) []const macho.relocation_info {
@@ -1005,25 +1005,25 @@ pub fn getSymbolByAddress(self: Object, addr: u64, sect_hint: ?u8) u32 {
const target_sym_index = @import("zld.zig").lsearch(
i64,
self.source_address_lookup[lookup.start..][0..lookup.len],
- Predicate{ .addr = @intCast(i64, addr) },
+ Predicate{ .addr = @as(i64, @intCast(addr)) },
);
if (target_sym_index > 0) {
- return @intCast(u32, lookup.start + target_sym_index - 1);
+ return @as(u32, @intCast(lookup.start + target_sym_index - 1));
}
}
return self.getSectionAliasSymbolIndex(sect_id);
}
const target_sym_index = @import("zld.zig").lsearch(i64, self.source_address_lookup, Predicate{
- .addr = @intCast(i64, addr),
+ .addr = @as(i64, @intCast(addr)),
});
assert(target_sym_index > 0);
- return @intCast(u32, target_sym_index - 1);
+ return @as(u32, @intCast(target_sym_index - 1));
}
pub fn getGlobal(self: Object, sym_index: u32) ?u32 {
if (self.globals_lookup[sym_index] == -1) return null;
- return @intCast(u32, self.globals_lookup[sym_index]);
+ return @as(u32, @intCast(self.globals_lookup[sym_index]));
}
pub fn getAtomIndexForSymbol(self: Object, sym_index: u32) ?AtomIndex {
@@ -1041,7 +1041,7 @@ pub fn getUnwindRecords(self: Object) []align(1) const macho.compact_unwind_entr
const sect = self.getSourceSection(sect_id);
const data = self.getSectionContents(sect);
const num_entries = @divExact(data.len, @sizeOf(macho.compact_unwind_entry));
- return @ptrCast([*]align(1) const macho.compact_unwind_entry, data)[0..num_entries];
+ return @as([*]align(1) const macho.compact_unwind_entry, @ptrCast(data))[0..num_entries];
}
pub fn hasEhFrameRecords(self: Object) bool {
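
Object.zig, by contrast, never needs @alignCast: object files are parsed in place from a byte buffer, so every reinterpreting cast targets a [*]align(1) pointer, which makes no alignment claim. A sketch of the unwind-record view:

const std = @import("std");
const macho = std.macho;

fn viewUnwindRecords(data: []const u8) []align(1) const macho.compact_unwind_entry {
    const n = @divExact(data.len, @sizeOf(macho.compact_unwind_entry));
    return @as([*]align(1) const macho.compact_unwind_entry, @ptrCast(data.ptr))[0..n];
}

test "unaligned view over raw section bytes" {
    const data = [_]u8{0} ** (2 * @sizeOf(macho.compact_unwind_entry));
    try std.testing.expectEqual(@as(usize, 2), viewUnwindRecords(&data).len);
}
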
diff --git a/src/link/MachO/Relocation.zig b/src/link/MachO/Relocation.zig
index 2685cc26e2..b7bbf59cfc 100644
--- a/src/link/MachO/Relocation.zig
+++ b/src/link/MachO/Relocation.zig
@@ -94,9 +94,9 @@ pub fn resolve(self: Relocation, macho_file: *MachO, atom_index: Atom.Index, cod
.tlv_initializer => blk: {
assert(self.addend == 0); // Addend here makes no sense.
const header = macho_file.sections.items(.header)[macho_file.thread_data_section_index.?];
- break :blk @intCast(i64, target_base_addr - header.addr);
+ break :blk @as(i64, @intCast(target_base_addr - header.addr));
},
- else => @intCast(i64, target_base_addr) + self.addend,
+ else => @as(i64, @intCast(target_base_addr)) + self.addend,
};
log.debug(" ({x}: [() => 0x{x} ({s})) ({s})", .{
@@ -119,7 +119,7 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: i64, code: []
.branch => {
const displacement = math.cast(
i28,
- @intCast(i64, target_addr) - @intCast(i64, source_addr),
+ @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr)),
) orelse unreachable; // TODO codegen should never allow for jump larger than i28 displacement
var inst = aarch64.Instruction{
.unconditional_branch_immediate = mem.bytesToValue(meta.TagPayload(
@@ -127,25 +127,25 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: i64, code: []
aarch64.Instruction.unconditional_branch_immediate,
), buffer[0..4]),
};
- inst.unconditional_branch_immediate.imm26 = @truncate(u26, @bitCast(u28, displacement >> 2));
+ inst.unconditional_branch_immediate.imm26 = @as(u26, @truncate(@as(u28, @bitCast(displacement >> 2))));
mem.writeIntLittle(u32, buffer[0..4], inst.toU32());
},
.page, .got_page => {
- const source_page = @intCast(i32, source_addr >> 12);
- const target_page = @intCast(i32, target_addr >> 12);
- const pages = @bitCast(u21, @intCast(i21, target_page - source_page));
+ const source_page = @as(i32, @intCast(source_addr >> 12));
+ const target_page = @as(i32, @intCast(target_addr >> 12));
+ const pages = @as(u21, @bitCast(@as(i21, @intCast(target_page - source_page))));
var inst = aarch64.Instruction{
.pc_relative_address = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.pc_relative_address,
), buffer[0..4]),
};
- inst.pc_relative_address.immhi = @truncate(u19, pages >> 2);
- inst.pc_relative_address.immlo = @truncate(u2, pages);
+ inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2));
+ inst.pc_relative_address.immlo = @as(u2, @truncate(pages));
mem.writeIntLittle(u32, buffer[0..4], inst.toU32());
},
.pageoff, .got_pageoff => {
- const narrowed = @truncate(u12, @intCast(u64, target_addr));
+ const narrowed = @as(u12, @truncate(@as(u64, @intCast(target_addr))));
if (isArithmeticOp(buffer[0..4])) {
var inst = aarch64.Instruction{
.add_subtract_immediate = mem.bytesToValue(meta.TagPayload(
@@ -180,8 +180,8 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: i64, code: []
}
},
.tlv_initializer, .unsigned => switch (self.length) {
- 2 => mem.writeIntLittle(u32, buffer[0..4], @truncate(u32, @bitCast(u64, target_addr))),
- 3 => mem.writeIntLittle(u64, buffer[0..8], @bitCast(u64, target_addr)),
+ 2 => mem.writeIntLittle(u32, buffer[0..4], @as(u32, @truncate(@as(u64, @bitCast(target_addr))))),
+ 3 => mem.writeIntLittle(u64, buffer[0..8], @as(u64, @bitCast(target_addr))),
else => unreachable,
},
.got, .signed, .tlv => unreachable, // Invalid target architecture.
@@ -191,16 +191,16 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: i64, code: []
fn resolveX8664(self: Relocation, source_addr: u64, target_addr: i64, code: []u8) void {
switch (self.type) {
.branch, .got, .tlv, .signed => {
- const displacement = @intCast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr) - 4);
- mem.writeIntLittle(u32, code[self.offset..][0..4], @bitCast(u32, displacement));
+ const displacement = @as(i32, @intCast(@as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr)) - 4));
+ mem.writeIntLittle(u32, code[self.offset..][0..4], @as(u32, @bitCast(displacement)));
},
.tlv_initializer, .unsigned => {
switch (self.length) {
2 => {
- mem.writeIntLittle(u32, code[self.offset..][0..4], @truncate(u32, @bitCast(u64, target_addr)));
+ mem.writeIntLittle(u32, code[self.offset..][0..4], @as(u32, @truncate(@as(u64, @bitCast(target_addr)))));
},
3 => {
- mem.writeIntLittle(u64, code[self.offset..][0..8], @bitCast(u64, target_addr));
+ mem.writeIntLittle(u64, code[self.offset..][0..8], @as(u64, @bitCast(target_addr)));
},
else => unreachable,
}
@@ -210,24 +210,24 @@ fn resolveX8664(self: Relocation, source_addr: u64, target_addr: i64, code: []u8
}
pub inline fn isArithmeticOp(inst: *const [4]u8) bool {
- const group_decode = @truncate(u5, inst[3]);
+ const group_decode = @as(u5, @truncate(inst[3]));
return ((group_decode >> 2) == 4);
}
pub fn calcPcRelativeDisplacementX86(source_addr: u64, target_addr: u64, correction: u3) error{Overflow}!i32 {
- const disp = @intCast(i64, target_addr) - @intCast(i64, source_addr + 4 + correction);
+ const disp = @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr + 4 + correction));
return math.cast(i32, disp) orelse error.Overflow;
}
pub fn calcPcRelativeDisplacementArm64(source_addr: u64, target_addr: u64) error{Overflow}!i28 {
- const disp = @intCast(i64, target_addr) - @intCast(i64, source_addr);
+ const disp = @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr));
return math.cast(i28, disp) orelse error.Overflow;
}
pub fn calcNumberOfPages(source_addr: u64, target_addr: u64) i21 {
- const source_page = @intCast(i32, source_addr >> 12);
- const target_page = @intCast(i32, target_addr >> 12);
- const pages = @intCast(i21, target_page - source_page);
+ const source_page = @as(i32, @intCast(source_addr >> 12));
+ const target_page = @as(i32, @intCast(target_addr >> 12));
+ const pages = @as(i21, @intCast(target_page - source_page));
return pages;
}
@@ -241,7 +241,7 @@ pub const PageOffsetInstKind = enum {
};
pub fn calcPageOffset(target_addr: u64, kind: PageOffsetInstKind) !u12 {
- const narrowed = @truncate(u12, target_addr);
+ const narrowed = @as(u12, @truncate(target_addr));
return switch (kind) {
.arithmetic, .load_store_8 => narrowed,
.load_store_16 => try math.divExact(u12, narrowed, 2),
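
The Relocation.zig arithmetic is the densest cast site: a PC-relative displacement is computed in i64, checked-narrowed to i32, then stored via its unsigned bit pattern. A worked sketch of the x86_64 branch case (addresses are illustrative):

const std = @import("std");
const mem = std.mem;

test "x86_64 branch displacement round-trips through its bit pattern" {
    const source_addr: u64 = 0x100003f80;
    const target_addr: i64 = 0x100003f00;
    const displacement = @as(i32, @intCast(target_addr - @as(i64, @intCast(source_addr)) - 4));
    try std.testing.expectEqual(@as(i32, -0x84), displacement);

    var code: [4]u8 = undefined;
    mem.writeIntLittle(u32, &code, @as(u32, @bitCast(displacement)));
    // Reading the u32 back and bit-casting recovers the signed value.
    try std.testing.expectEqual(displacement, @as(i32, @bitCast(mem.readIntLittle(u32, &code))));
}
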
diff --git a/src/link/MachO/Trie.zig b/src/link/MachO/Trie.zig
index 34200db7dc..cabe611b64 100644
--- a/src/link/MachO/Trie.zig
+++ b/src/link/MachO/Trie.zig
@@ -220,7 +220,7 @@ pub const Node = struct {
try writer.writeByte(0);
}
// Write number of edges (max legal number of edges is 256).
- try writer.writeByte(@intCast(u8, self.edges.items.len));
+ try writer.writeByte(@as(u8, @intCast(self.edges.items.len)));
for (self.edges.items) |edge| {
// Write edge label and offset to next node in trie.
diff --git a/src/link/MachO/UnwindInfo.zig b/src/link/MachO/UnwindInfo.zig
index 3c9a438f92..cfef053d1b 100644
--- a/src/link/MachO/UnwindInfo.zig
+++ b/src/link/MachO/UnwindInfo.zig
@@ -87,7 +87,7 @@ const Page = struct {
const record_id = page.page_encodings[index];
const record = info.records.items[record_id];
if (record.compactUnwindEncoding == enc) {
- return @intCast(u8, index);
+ return @as(u8, @intCast(index));
}
}
return null;
@@ -150,14 +150,14 @@ const Page = struct {
for (info.records.items[page.start..][0..page.count]) |record| {
try writer.writeStruct(macho.unwind_info_regular_second_level_entry{
- .functionOffset = @intCast(u32, record.rangeStart),
+ .functionOffset = @as(u32, @intCast(record.rangeStart)),
.encoding = record.compactUnwindEncoding,
});
}
},
.compressed => {
const entry_offset = @sizeOf(macho.unwind_info_compressed_second_level_page_header) +
- @intCast(u16, page.page_encodings_count) * @sizeOf(u32);
+ @as(u16, @intCast(page.page_encodings_count)) * @sizeOf(u32);
try writer.writeStruct(macho.unwind_info_compressed_second_level_page_header{
.entryPageOffset = entry_offset,
.entryCount = page.count,
@@ -183,8 +183,8 @@ const Page = struct {
break :blk ncommon + page.getPageEncoding(info, record.compactUnwindEncoding).?;
};
const compressed = macho.UnwindInfoCompressedEntry{
- .funcOffset = @intCast(u24, record.rangeStart - first_entry.rangeStart),
- .encodingIndex = @intCast(u8, enc_index),
+ .funcOffset = @as(u24, @intCast(record.rangeStart - first_entry.rangeStart)),
+ .encodingIndex = @as(u8, @intCast(enc_index)),
};
try writer.writeStruct(compressed);
}
@@ -214,15 +214,15 @@ pub fn scanRelocs(zld: *Zld) !void {
if (!UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch)) {
if (getPersonalityFunctionReloc(
zld,
- @intCast(u32, object_id),
+ @as(u32, @intCast(object_id)),
record_id,
)) |rel| {
// Personality function; add GOT pointer.
const target = Atom.parseRelocTarget(zld, .{
- .object_id = @intCast(u32, object_id),
+ .object_id = @as(u32, @intCast(object_id)),
.rel = rel,
.code = mem.asBytes(&record),
- .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)),
+ .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))),
});
try Atom.addGotEntry(zld, target);
}
@@ -258,18 +258,18 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
var record = unwind_records[record_id];
if (UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch)) {
- try info.collectPersonalityFromDwarf(zld, @intCast(u32, object_id), atom_index, &record);
+ try info.collectPersonalityFromDwarf(zld, @as(u32, @intCast(object_id)), atom_index, &record);
} else {
if (getPersonalityFunctionReloc(
zld,
- @intCast(u32, object_id),
+ @as(u32, @intCast(object_id)),
record_id,
)) |rel| {
const target = Atom.parseRelocTarget(zld, .{
- .object_id = @intCast(u32, object_id),
+ .object_id = @as(u32, @intCast(object_id)),
.rel = rel,
.code = mem.asBytes(&record),
- .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)),
+ .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))),
});
const personality_index = info.getPersonalityFunction(target) orelse inner: {
const personality_index = info.personalities_count;
@@ -282,14 +282,14 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
UnwindEncoding.setPersonalityIndex(&record.compactUnwindEncoding, personality_index + 1);
}
- if (getLsdaReloc(zld, @intCast(u32, object_id), record_id)) |rel| {
+ if (getLsdaReloc(zld, @as(u32, @intCast(object_id)), record_id)) |rel| {
const target = Atom.parseRelocTarget(zld, .{
- .object_id = @intCast(u32, object_id),
+ .object_id = @as(u32, @intCast(object_id)),
.rel = rel,
.code = mem.asBytes(&record),
- .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)),
+ .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))),
});
- record.lsda = @bitCast(u64, target);
+ record.lsda = @as(u64, @bitCast(target));
}
}
break :blk record;
@@ -302,7 +302,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
if (object.eh_frame_records_lookup.get(atom_index)) |fde_offset| {
if (object.eh_frame_relocs_lookup.get(fde_offset).?.dead) continue;
var record = nullRecord();
- try info.collectPersonalityFromDwarf(zld, @intCast(u32, object_id), atom_index, &record);
+ try info.collectPersonalityFromDwarf(zld, @as(u32, @intCast(object_id)), atom_index, &record);
switch (cpu_arch) {
.aarch64 => UnwindEncoding.setMode(&record.compactUnwindEncoding, macho.UNWIND_ARM64_MODE.DWARF),
.x86_64 => UnwindEncoding.setMode(&record.compactUnwindEncoding, macho.UNWIND_X86_64_MODE.DWARF),
@@ -320,7 +320,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
const sym = zld.getSymbol(sym_loc);
assert(sym.n_desc != N_DEAD);
record.rangeStart = sym.n_value;
- record.rangeLength = @intCast(u32, atom.size);
+ record.rangeLength = @as(u32, @intCast(atom.size));
records.appendAssumeCapacity(record);
atom_indexes.appendAssumeCapacity(atom_index);
@@ -329,7 +329,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
// Fold records
try info.records.ensureTotalCapacity(info.gpa, records.items.len);
- try info.records_lookup.ensureTotalCapacity(info.gpa, @intCast(u32, atom_indexes.items.len));
+ try info.records_lookup.ensureTotalCapacity(info.gpa, @as(u32, @intCast(atom_indexes.items.len)));
var maybe_prev: ?macho.compact_unwind_entry = null;
for (records.items, 0..) |record, i| {
@@ -341,15 +341,15 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
(prev.personalityFunction != record.personalityFunction) or
record.lsda > 0)
{
- const record_id = @intCast(RecordIndex, info.records.items.len);
+ const record_id = @as(RecordIndex, @intCast(info.records.items.len));
info.records.appendAssumeCapacity(record);
maybe_prev = record;
break :blk record_id;
} else {
- break :blk @intCast(RecordIndex, info.records.items.len - 1);
+ break :blk @as(RecordIndex, @intCast(info.records.items.len - 1));
}
} else {
- const record_id = @intCast(RecordIndex, info.records.items.len);
+ const record_id = @as(RecordIndex, @intCast(info.records.items.len));
info.records.appendAssumeCapacity(record);
maybe_prev = record;
break :blk record_id;
@@ -459,14 +459,14 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
}
}
- page.count = @intCast(u16, i - page.start);
+ page.count = @as(u16, @intCast(i - page.start));
if (i < info.records.items.len and page.count < max_regular_second_level_entries) {
page.kind = .regular;
- page.count = @intCast(u16, @min(
+ page.count = @as(u16, @intCast(@min(
max_regular_second_level_entries,
info.records.items.len - page.start,
- ));
+ )));
i = page.start + page.count;
} else {
page.kind = .compressed;
@@ -479,11 +479,11 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
}
// Save indices of records requiring LSDA relocation
- try info.lsdas_lookup.ensureTotalCapacity(info.gpa, @intCast(u32, info.records.items.len));
+ try info.lsdas_lookup.ensureTotalCapacity(info.gpa, @as(u32, @intCast(info.records.items.len)));
for (info.records.items, 0..) |rec, i| {
- info.lsdas_lookup.putAssumeCapacityNoClobber(@intCast(RecordIndex, i), @intCast(u32, info.lsdas.items.len));
+ info.lsdas_lookup.putAssumeCapacityNoClobber(@as(RecordIndex, @intCast(i)), @as(u32, @intCast(info.lsdas.items.len)));
if (rec.lsda == 0) continue;
- try info.lsdas.append(info.gpa, @intCast(RecordIndex, i));
+ try info.lsdas.append(info.gpa, @as(RecordIndex, @intCast(i)));
}
}
@@ -506,7 +506,7 @@ fn collectPersonalityFromDwarf(
if (cie.getPersonalityPointerReloc(
zld,
- @intCast(u32, object_id),
+ @as(u32, @intCast(object_id)),
cie_offset,
)) |target| {
const personality_index = info.getPersonalityFunction(target) orelse inner: {
@@ -532,8 +532,8 @@ fn calcRequiredSize(info: UnwindInfo) usize {
var total_size: usize = 0;
total_size += @sizeOf(macho.unwind_info_section_header);
total_size +=
- @intCast(usize, info.common_encodings_count) * @sizeOf(macho.compact_unwind_encoding_t);
- total_size += @intCast(usize, info.personalities_count) * @sizeOf(u32);
+ @as(usize, @intCast(info.common_encodings_count)) * @sizeOf(macho.compact_unwind_encoding_t);
+ total_size += @as(usize, @intCast(info.personalities_count)) * @sizeOf(u32);
total_size += (info.pages.items.len + 1) * @sizeOf(macho.unwind_info_section_header_index_entry);
total_size += info.lsdas.items.len * @sizeOf(macho.unwind_info_section_header_lsda_index_entry);
total_size += info.pages.items.len * second_level_page_bytes;
@@ -557,7 +557,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
const atom_index = zld.getGotAtomIndexForSymbol(target).?;
const atom = zld.getAtom(atom_index);
const sym = zld.getSymbol(atom.getSymbolWithLoc());
- personalities[i] = @intCast(u32, sym.n_value - seg.vmaddr);
+ personalities[i] = @as(u32, @intCast(sym.n_value - seg.vmaddr));
log.debug(" {d}: 0x{x} ({s})", .{ i, personalities[i], zld.getSymbolName(target) });
}
@@ -570,7 +570,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
}
if (rec.compactUnwindEncoding > 0 and !UnwindEncoding.isDwarf(rec.compactUnwindEncoding, cpu_arch)) {
- const lsda_target = @bitCast(SymbolWithLoc, rec.lsda);
+ const lsda_target = @as(SymbolWithLoc, @bitCast(rec.lsda));
if (lsda_target.getFile()) |_| {
const sym = zld.getSymbol(lsda_target);
rec.lsda = sym.n_value - seg.vmaddr;
@@ -601,7 +601,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
const personalities_offset: u32 = common_encodings_offset + common_encodings_count * @sizeOf(u32);
const personalities_count: u32 = info.personalities_count;
const indexes_offset: u32 = personalities_offset + personalities_count * @sizeOf(u32);
- const indexes_count: u32 = @intCast(u32, info.pages.items.len + 1);
+ const indexes_count: u32 = @as(u32, @intCast(info.pages.items.len + 1));
try writer.writeStruct(macho.unwind_info_section_header{
.commonEncodingsArraySectionOffset = common_encodings_offset,
@@ -615,34 +615,34 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
try writer.writeAll(mem.sliceAsBytes(info.common_encodings[0..info.common_encodings_count]));
try writer.writeAll(mem.sliceAsBytes(personalities[0..info.personalities_count]));
- const pages_base_offset = @intCast(u32, size - (info.pages.items.len * second_level_page_bytes));
- const lsda_base_offset = @intCast(u32, pages_base_offset -
- (info.lsdas.items.len * @sizeOf(macho.unwind_info_section_header_lsda_index_entry)));
+ const pages_base_offset = @as(u32, @intCast(size - (info.pages.items.len * second_level_page_bytes)));
+ const lsda_base_offset = @as(u32, @intCast(pages_base_offset -
+ (info.lsdas.items.len * @sizeOf(macho.unwind_info_section_header_lsda_index_entry))));
for (info.pages.items, 0..) |page, i| {
assert(page.count > 0);
const first_entry = info.records.items[page.start];
try writer.writeStruct(macho.unwind_info_section_header_index_entry{
- .functionOffset = @intCast(u32, first_entry.rangeStart),
- .secondLevelPagesSectionOffset = @intCast(u32, pages_base_offset + i * second_level_page_bytes),
+ .functionOffset = @as(u32, @intCast(first_entry.rangeStart)),
+ .secondLevelPagesSectionOffset = @as(u32, @intCast(pages_base_offset + i * second_level_page_bytes)),
.lsdaIndexArraySectionOffset = lsda_base_offset +
info.lsdas_lookup.get(page.start).? * @sizeOf(macho.unwind_info_section_header_lsda_index_entry),
});
}
const last_entry = info.records.items[info.records.items.len - 1];
- const sentinel_address = @intCast(u32, last_entry.rangeStart + last_entry.rangeLength);
+ const sentinel_address = @as(u32, @intCast(last_entry.rangeStart + last_entry.rangeLength));
try writer.writeStruct(macho.unwind_info_section_header_index_entry{
.functionOffset = sentinel_address,
.secondLevelPagesSectionOffset = 0,
.lsdaIndexArraySectionOffset = lsda_base_offset +
- @intCast(u32, info.lsdas.items.len) * @sizeOf(macho.unwind_info_section_header_lsda_index_entry),
+ @as(u32, @intCast(info.lsdas.items.len)) * @sizeOf(macho.unwind_info_section_header_lsda_index_entry),
});
for (info.lsdas.items) |record_id| {
const record = info.records.items[record_id];
try writer.writeStruct(macho.unwind_info_section_header_lsda_index_entry{
- .functionOffset = @intCast(u32, record.rangeStart),
- .lsdaOffset = @intCast(u32, record.lsda),
+ .functionOffset = @as(u32, @intCast(record.rangeStart)),
+ .lsdaOffset = @as(u32, @intCast(record.lsda)),
});
}
@@ -674,7 +674,7 @@ fn getRelocs(zld: *Zld, object_id: u32, record_id: usize) []const macho.relocati
}
fn isPersonalityFunction(record_id: usize, rel: macho.relocation_info) bool {
- const base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry));
+ const base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry)));
const rel_offset = rel.r_address - base_offset;
return rel_offset == 16;
}
@@ -703,7 +703,7 @@ fn getPersonalityFunction(info: UnwindInfo, global_index: SymbolWithLoc) ?u2 {
}
fn isLsda(record_id: usize, rel: macho.relocation_info) bool {
- const base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry));
+ const base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry)));
const rel_offset = rel.r_address - base_offset;
return rel_offset == 24;
}
@@ -754,45 +754,45 @@ fn getCommonEncoding(info: UnwindInfo, enc: macho.compact_unwind_encoding_t) ?u7
pub const UnwindEncoding = struct {
pub fn getMode(enc: macho.compact_unwind_encoding_t) u4 {
comptime assert(macho.UNWIND_ARM64_MODE_MASK == macho.UNWIND_X86_64_MODE_MASK);
- return @truncate(u4, (enc & macho.UNWIND_ARM64_MODE_MASK) >> 24);
+ return @as(u4, @truncate((enc & macho.UNWIND_ARM64_MODE_MASK) >> 24));
}
pub fn isDwarf(enc: macho.compact_unwind_encoding_t, cpu_arch: std.Target.Cpu.Arch) bool {
const mode = getMode(enc);
return switch (cpu_arch) {
- .aarch64 => @enumFromInt(macho.UNWIND_ARM64_MODE, mode) == .DWARF,
- .x86_64 => @enumFromInt(macho.UNWIND_X86_64_MODE, mode) == .DWARF,
+ .aarch64 => @as(macho.UNWIND_ARM64_MODE, @enumFromInt(mode)) == .DWARF,
+ .x86_64 => @as(macho.UNWIND_X86_64_MODE, @enumFromInt(mode)) == .DWARF,
else => unreachable,
};
}
pub fn setMode(enc: *macho.compact_unwind_encoding_t, mode: anytype) void {
- enc.* |= @intCast(u32, @intFromEnum(mode)) << 24;
+ enc.* |= @as(u32, @intCast(@intFromEnum(mode))) << 24;
}
pub fn hasLsda(enc: macho.compact_unwind_encoding_t) bool {
- const has_lsda = @truncate(u1, (enc & macho.UNWIND_HAS_LSDA) >> 31);
+ const has_lsda = @as(u1, @truncate((enc & macho.UNWIND_HAS_LSDA) >> 31));
return has_lsda == 1;
}
pub fn setHasLsda(enc: *macho.compact_unwind_encoding_t, has_lsda: bool) void {
- const mask = @intCast(u32, @intFromBool(has_lsda)) << 31;
+ const mask = @as(u32, @intCast(@intFromBool(has_lsda))) << 31;
enc.* |= mask;
}
pub fn getPersonalityIndex(enc: macho.compact_unwind_encoding_t) u2 {
- const index = @truncate(u2, (enc & macho.UNWIND_PERSONALITY_MASK) >> 28);
+ const index = @as(u2, @truncate((enc & macho.UNWIND_PERSONALITY_MASK) >> 28));
return index;
}
pub fn setPersonalityIndex(enc: *macho.compact_unwind_encoding_t, index: u2) void {
- const mask = @intCast(u32, index) << 28;
+ const mask = @as(u32, @intCast(index)) << 28;
enc.* |= mask;
}
pub fn getDwarfSectionOffset(enc: macho.compact_unwind_encoding_t, cpu_arch: std.Target.Cpu.Arch) u24 {
assert(isDwarf(enc, cpu_arch));
- const offset = @truncate(u24, enc);
+ const offset = @as(u24, @truncate(enc));
return offset;
}
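
The UnwindInfo.zig accessors above pack the mode, personality index, and LSDA flag into a single u32 encoding with shifted masks. A self-contained sketch of that bit-twiddling in the new cast style; the mask values are assumptions mirroring the macho constants rather than imports of them:

const std = @import("std");

const MODE_MASK: u32 = 0x0F00_0000; // assumed UNWIND_*_MODE_MASK layout
const PERSONALITY_MASK: u32 = 0x3000_0000; // assumed UNWIND_PERSONALITY_MASK layout

test "compact unwind encoding bit fields" {
    var enc: u32 = 0;
    // setMode and setPersonalityIndex in miniature.
    enc |= @as(u32, @intCast(@as(u4, 4))) << 24;
    enc |= @as(u32, @intCast(@as(u2, 2))) << 28;
    // getMode and getPersonalityIndex: mask, shift, then @truncate narrows.
    try std.testing.expectEqual(@as(u4, 4), @as(u4, @truncate((enc & MODE_MASK) >> 24)));
    try std.testing.expectEqual(@as(u2, 2), @as(u2, @truncate((enc & PERSONALITY_MASK) >> 28)));
}
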
diff --git a/src/link/MachO/ZldAtom.zig b/src/link/MachO/ZldAtom.zig
index 55a6325a5a..613f0fc86c 100644
--- a/src/link/MachO/ZldAtom.zig
+++ b/src/link/MachO/ZldAtom.zig
@@ -117,8 +117,8 @@ pub fn getSectionAlias(zld: *Zld, atom_index: AtomIndex) ?SymbolWithLoc {
assert(atom.getFile() != null);
const object = zld.objects.items[atom.getFile().?];
- const nbase = @intCast(u32, object.in_symtab.?.len);
- const ntotal = @intCast(u32, object.symtab.len);
+ const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+ const ntotal = @as(u32, @intCast(object.symtab.len));
var sym_index: u32 = nbase;
while (sym_index < ntotal) : (sym_index += 1) {
if (object.getAtomIndexForSymbol(sym_index)) |other_atom_index| {
@@ -144,8 +144,8 @@ pub fn calcInnerSymbolOffset(zld: *Zld, atom_index: AtomIndex, sym_index: u32) u
const base_addr = if (object.getSourceSymbol(atom.sym_index)) |sym|
sym.n_value
else blk: {
- const nbase = @intCast(u32, object.in_symtab.?.len);
- const sect_id = @intCast(u8, atom.sym_index - nbase);
+ const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+ const sect_id = @as(u8, @intCast(atom.sym_index - nbase));
const source_sect = object.getSourceSection(sect_id);
break :blk source_sect.addr;
};
@@ -177,15 +177,15 @@ pub fn getRelocContext(zld: *Zld, atom_index: AtomIndex) RelocContext {
if (object.getSourceSymbol(atom.sym_index)) |source_sym| {
const source_sect = object.getSourceSection(source_sym.n_sect - 1);
return .{
- .base_addr = @intCast(i64, source_sect.addr),
- .base_offset = @intCast(i32, source_sym.n_value - source_sect.addr),
+ .base_addr = @as(i64, @intCast(source_sect.addr)),
+ .base_offset = @as(i32, @intCast(source_sym.n_value - source_sect.addr)),
};
}
- const nbase = @intCast(u32, object.in_symtab.?.len);
- const sect_id = @intCast(u8, atom.sym_index - nbase);
+ const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+ const sect_id = @as(u8, @intCast(atom.sym_index - nbase));
const source_sect = object.getSourceSection(sect_id);
return .{
- .base_addr = @intCast(i64, source_sect.addr),
+ .base_addr = @as(i64, @intCast(source_sect.addr)),
.base_offset = 0,
};
}
@@ -204,8 +204,8 @@ pub fn parseRelocTarget(zld: *Zld, ctx: struct {
log.debug("parsing reloc target in object({d}) '{s}' ", .{ ctx.object_id, object.name });
const sym_index = if (ctx.rel.r_extern == 0) sym_index: {
- const sect_id = @intCast(u8, ctx.rel.r_symbolnum - 1);
- const rel_offset = @intCast(u32, ctx.rel.r_address - ctx.base_offset);
+ const sect_id = @as(u8, @intCast(ctx.rel.r_symbolnum - 1));
+ const rel_offset = @as(u32, @intCast(ctx.rel.r_address - ctx.base_offset));
const address_in_section = if (ctx.rel.r_pcrel == 0) blk: {
break :blk if (ctx.rel.r_length == 3)
@@ -214,7 +214,7 @@ pub fn parseRelocTarget(zld: *Zld, ctx: struct {
mem.readIntLittle(u32, ctx.code[rel_offset..][0..4]);
} else blk: {
assert(zld.options.target.cpu.arch == .x86_64);
- const correction: u3 = switch (@enumFromInt(macho.reloc_type_x86_64, ctx.rel.r_type)) {
+ const correction: u3 = switch (@as(macho.reloc_type_x86_64, @enumFromInt(ctx.rel.r_type))) {
.X86_64_RELOC_SIGNED => 0,
.X86_64_RELOC_SIGNED_1 => 1,
.X86_64_RELOC_SIGNED_2 => 2,
@@ -222,8 +222,8 @@ pub fn parseRelocTarget(zld: *Zld, ctx: struct {
else => unreachable,
};
const addend = mem.readIntLittle(i32, ctx.code[rel_offset..][0..4]);
- const target_address = @intCast(i64, ctx.base_addr) + ctx.rel.r_address + 4 + correction + addend;
- break :blk @intCast(u64, target_address);
+ const target_address = @as(i64, @intCast(ctx.base_addr)) + ctx.rel.r_address + 4 + correction + addend;
+ break :blk @as(u64, @intCast(target_address));
};
// Find containing atom
@@ -272,7 +272,7 @@ pub fn getRelocTargetAtomIndex(zld: *Zld, target: SymbolWithLoc, is_via_got: boo
fn scanAtomRelocsArm64(zld: *Zld, atom_index: AtomIndex, relocs: []align(1) const macho.relocation_info) !void {
for (relocs) |rel| {
- const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
switch (rel_type) {
.ARM64_RELOC_ADDEND, .ARM64_RELOC_SUBTRACTOR => continue,
@@ -318,7 +318,7 @@ fn scanAtomRelocsArm64(zld: *Zld, atom_index: AtomIndex, relocs: []align(1) cons
fn scanAtomRelocsX86(zld: *Zld, atom_index: AtomIndex, relocs: []align(1) const macho.relocation_info) !void {
for (relocs) |rel| {
- const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type));
switch (rel_type) {
.X86_64_RELOC_SUBTRACTOR => continue,
@@ -364,7 +364,7 @@ fn addTlvPtrEntry(zld: *Zld, target: SymbolWithLoc) !void {
const gpa = zld.gpa;
const atom_index = try zld.createTlvPtrAtom();
- const tlv_ptr_index = @intCast(u32, zld.tlv_ptr_entries.items.len);
+ const tlv_ptr_index = @as(u32, @intCast(zld.tlv_ptr_entries.items.len));
try zld.tlv_ptr_entries.append(gpa, .{
.target = target,
.atom_index = atom_index,
@@ -376,7 +376,7 @@ pub fn addGotEntry(zld: *Zld, target: SymbolWithLoc) !void {
if (zld.got_table.contains(target)) return;
const gpa = zld.gpa;
const atom_index = try zld.createGotAtom();
- const got_index = @intCast(u32, zld.got_entries.items.len);
+ const got_index = @as(u32, @intCast(zld.got_entries.items.len));
try zld.got_entries.append(gpa, .{
.target = target,
.atom_index = atom_index,
@@ -393,7 +393,7 @@ pub fn addStub(zld: *Zld, target: SymbolWithLoc) !void {
_ = try zld.createStubHelperAtom();
_ = try zld.createLazyPointerAtom();
const atom_index = try zld.createStubAtom();
- const stubs_index = @intCast(u32, zld.stubs.items.len);
+ const stubs_index = @as(u32, @intCast(zld.stubs.items.len));
try zld.stubs.append(gpa, .{
.target = target,
.atom_index = atom_index,
@@ -489,7 +489,7 @@ fn resolveRelocsArm64(
var subtractor: ?SymbolWithLoc = null;
for (atom_relocs) |rel| {
- const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
switch (rel_type) {
.ARM64_RELOC_ADDEND => {
@@ -529,7 +529,7 @@ fn resolveRelocsArm64(
.base_addr = context.base_addr,
.base_offset = context.base_offset,
});
- const rel_offset = @intCast(u32, rel.r_address - context.base_offset);
+ const rel_offset = @as(u32, @intCast(rel.r_address - context.base_offset));
log.debug(" RELA({s}) @ {x} => %{d} ('{s}') in object({?})", .{
@tagName(rel_type),
@@ -590,7 +590,7 @@ fn resolveRelocsArm64(
aarch64.Instruction.unconditional_branch_immediate,
), code),
};
- inst.unconditional_branch_immediate.imm26 = @truncate(u26, @bitCast(u28, displacement >> 2));
+ inst.unconditional_branch_immediate.imm26 = @as(u26, @truncate(@as(u28, @bitCast(displacement >> 2))));
mem.writeIntLittle(u32, code, inst.toU32());
},
@@ -598,11 +598,11 @@ fn resolveRelocsArm64(
.ARM64_RELOC_GOT_LOAD_PAGE21,
.ARM64_RELOC_TLVP_LOAD_PAGE21,
=> {
- const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + (addend orelse 0));
+ const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + (addend orelse 0)));
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
- const pages = @bitCast(u21, Relocation.calcNumberOfPages(source_addr, adjusted_target_addr));
+ const pages = @as(u21, @bitCast(Relocation.calcNumberOfPages(source_addr, adjusted_target_addr)));
const code = atom_code[rel_offset..][0..4];
var inst = aarch64.Instruction{
.pc_relative_address = mem.bytesToValue(meta.TagPayload(
@@ -610,14 +610,14 @@ fn resolveRelocsArm64(
aarch64.Instruction.pc_relative_address,
), code),
};
- inst.pc_relative_address.immhi = @truncate(u19, pages >> 2);
- inst.pc_relative_address.immlo = @truncate(u2, pages);
+ inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2));
+ inst.pc_relative_address.immlo = @as(u2, @truncate(pages));
mem.writeIntLittle(u32, code, inst.toU32());
addend = null;
},
.ARM64_RELOC_PAGEOFF12 => {
- const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + (addend orelse 0));
+ const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + (addend orelse 0)));
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
@@ -656,7 +656,7 @@ fn resolveRelocsArm64(
.ARM64_RELOC_GOT_LOAD_PAGEOFF12 => {
const code = atom_code[rel_offset..][0..4];
- const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + (addend orelse 0));
+ const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + (addend orelse 0)));
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
@@ -674,7 +674,7 @@ fn resolveRelocsArm64(
.ARM64_RELOC_TLVP_LOAD_PAGEOFF12 => {
const code = atom_code[rel_offset..][0..4];
- const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + (addend orelse 0));
+ const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + (addend orelse 0)));
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
@@ -725,7 +725,7 @@ fn resolveRelocsArm64(
.sh = 0,
.s = 0,
.op = 0,
- .sf = @truncate(u1, reg_info.size),
+ .sf = @as(u1, @truncate(reg_info.size)),
},
};
mem.writeIntLittle(u32, code, inst.toU32());
@@ -734,9 +734,9 @@ fn resolveRelocsArm64(
.ARM64_RELOC_POINTER_TO_GOT => {
log.debug(" | target_addr = 0x{x}", .{target_addr});
- const result = math.cast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr)) orelse
+ const result = math.cast(i32, @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr))) orelse
return error.Overflow;
- mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @bitCast(u32, result));
+ mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @as(u32, @bitCast(result)));
},
.ARM64_RELOC_UNSIGNED => {
@@ -747,7 +747,7 @@ fn resolveRelocsArm64(
if (rel.r_extern == 0) {
const base_addr = if (target.sym_index >= object.source_address_lookup.len)
- @intCast(i64, object.getSourceSection(@intCast(u8, rel.r_symbolnum - 1)).addr)
+ @as(i64, @intCast(object.getSourceSection(@as(u8, @intCast(rel.r_symbolnum - 1))).addr))
else
object.source_address_lookup[target.sym_index];
ptr_addend -= base_addr;
@@ -756,17 +756,17 @@ fn resolveRelocsArm64(
const result = blk: {
if (subtractor) |sub| {
const sym = zld.getSymbol(sub);
- break :blk @intCast(i64, target_addr) - @intCast(i64, sym.n_value) + ptr_addend;
+ break :blk @as(i64, @intCast(target_addr)) - @as(i64, @intCast(sym.n_value)) + ptr_addend;
} else {
- break :blk @intCast(i64, target_addr) + ptr_addend;
+ break :blk @as(i64, @intCast(target_addr)) + ptr_addend;
}
};
log.debug(" | target_addr = 0x{x}", .{result});
if (rel.r_length == 3) {
- mem.writeIntLittle(u64, atom_code[rel_offset..][0..8], @bitCast(u64, result));
+ mem.writeIntLittle(u64, atom_code[rel_offset..][0..8], @as(u64, @bitCast(result)));
} else {
- mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @truncate(u32, @bitCast(u64, result)));
+ mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @as(u32, @truncate(@as(u64, @bitCast(result)))));
}
subtractor = null;
@@ -791,7 +791,7 @@ fn resolveRelocsX86(
var subtractor: ?SymbolWithLoc = null;
for (atom_relocs) |rel| {
- const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type));
switch (rel_type) {
.X86_64_RELOC_SUBTRACTOR => {
@@ -823,7 +823,7 @@ fn resolveRelocsX86(
.base_addr = context.base_addr,
.base_offset = context.base_offset,
});
- const rel_offset = @intCast(u32, rel.r_address - context.base_offset);
+ const rel_offset = @as(u32, @intCast(rel.r_address - context.base_offset));
log.debug(" RELA({s}) @ {x} => %{d} ('{s}') in object({?})", .{
@tagName(rel_type),
@@ -851,7 +851,7 @@ fn resolveRelocsX86(
switch (rel_type) {
.X86_64_RELOC_BRANCH => {
const addend = mem.readIntLittle(i32, atom_code[rel_offset..][0..4]);
- const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend);
+ const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend));
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
mem.writeIntLittle(i32, atom_code[rel_offset..][0..4], disp);
@@ -861,7 +861,7 @@ fn resolveRelocsX86(
.X86_64_RELOC_GOT_LOAD,
=> {
const addend = mem.readIntLittle(i32, atom_code[rel_offset..][0..4]);
- const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend);
+ const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend));
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
mem.writeIntLittle(i32, atom_code[rel_offset..][0..4], disp);
@@ -869,7 +869,7 @@ fn resolveRelocsX86(
.X86_64_RELOC_TLV => {
const addend = mem.readIntLittle(i32, atom_code[rel_offset..][0..4]);
- const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend);
+ const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend));
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
@@ -897,14 +897,14 @@ fn resolveRelocsX86(
if (rel.r_extern == 0) {
const base_addr = if (target.sym_index >= object.source_address_lookup.len)
- @intCast(i64, object.getSourceSection(@intCast(u8, rel.r_symbolnum - 1)).addr)
+ @as(i64, @intCast(object.getSourceSection(@as(u8, @intCast(rel.r_symbolnum - 1))).addr))
else
object.source_address_lookup[target.sym_index];
- addend += @intCast(i32, @intCast(i64, context.base_addr) + rel.r_address + 4 -
- @intCast(i64, base_addr));
+ addend += @as(i32, @intCast(@as(i64, @intCast(context.base_addr)) + rel.r_address + 4 -
+ @as(i64, @intCast(base_addr))));
}
- const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend);
+ const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend));
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
@@ -920,7 +920,7 @@ fn resolveRelocsX86(
if (rel.r_extern == 0) {
const base_addr = if (target.sym_index >= object.source_address_lookup.len)
- @intCast(i64, object.getSourceSection(@intCast(u8, rel.r_symbolnum - 1)).addr)
+ @as(i64, @intCast(object.getSourceSection(@as(u8, @intCast(rel.r_symbolnum - 1))).addr))
else
object.source_address_lookup[target.sym_index];
addend -= base_addr;
@@ -929,17 +929,17 @@ fn resolveRelocsX86(
const result = blk: {
if (subtractor) |sub| {
const sym = zld.getSymbol(sub);
- break :blk @intCast(i64, target_addr) - @intCast(i64, sym.n_value) + addend;
+ break :blk @as(i64, @intCast(target_addr)) - @as(i64, @intCast(sym.n_value)) + addend;
} else {
- break :blk @intCast(i64, target_addr) + addend;
+ break :blk @as(i64, @intCast(target_addr)) + addend;
}
};
log.debug(" | target_addr = 0x{x}", .{result});
if (rel.r_length == 3) {
- mem.writeIntLittle(u64, atom_code[rel_offset..][0..8], @bitCast(u64, result));
+ mem.writeIntLittle(u64, atom_code[rel_offset..][0..8], @as(u64, @bitCast(result)));
} else {
- mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @truncate(u32, @bitCast(u64, result)));
+ mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @as(u32, @truncate(@as(u64, @bitCast(result)))));
}
subtractor = null;
@@ -958,19 +958,19 @@ pub fn getAtomCode(zld: *Zld, atom_index: AtomIndex) []const u8 {
// If there was no matching symbol present in the source symtab, this means
// we are dealing with either an entire section, or a part of it that
// starts at the beginning.
- const nbase = @intCast(u32, object.in_symtab.?.len);
- const sect_id = @intCast(u8, atom.sym_index - nbase);
+ const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+ const sect_id = @as(u8, @intCast(atom.sym_index - nbase));
const source_sect = object.getSourceSection(sect_id);
assert(!source_sect.isZerofill());
const code = object.getSectionContents(source_sect);
- const code_len = @intCast(usize, atom.size);
+ const code_len = @as(usize, @intCast(atom.size));
return code[0..code_len];
};
const source_sect = object.getSourceSection(source_sym.n_sect - 1);
assert(!source_sect.isZerofill());
const code = object.getSectionContents(source_sect);
- const offset = @intCast(usize, source_sym.n_value - source_sect.addr);
- const code_len = @intCast(usize, atom.size);
+ const offset = @as(usize, @intCast(source_sym.n_value - source_sect.addr));
+ const code_len = @as(usize, @intCast(atom.size));
return code[offset..][0..code_len];
}
@@ -986,8 +986,8 @@ pub fn getAtomRelocs(zld: *Zld, atom_index: AtomIndex) []const macho.relocation_
// If there was no matching symbol present in the source symtab, this means
// we are dealing with either an entire section, or a part of it that
// starts at the beginning.
- const nbase = @intCast(u32, object.in_symtab.?.len);
- const sect_id = @intCast(u8, atom.sym_index - nbase);
+ const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+ const sect_id = @as(u8, @intCast(atom.sym_index - nbase));
break :blk sect_id;
};
const source_sect = object.getSourceSection(source_sect_id);
@@ -998,14 +998,14 @@ pub fn getAtomRelocs(zld: *Zld, atom_index: AtomIndex) []const macho.relocation_
pub fn relocRequiresGot(zld: *Zld, rel: macho.relocation_info) bool {
switch (zld.options.target.cpu.arch) {
- .aarch64 => switch (@enumFromInt(macho.reloc_type_arm64, rel.r_type)) {
+ .aarch64 => switch (@as(macho.reloc_type_arm64, @enumFromInt(rel.r_type))) {
.ARM64_RELOC_GOT_LOAD_PAGE21,
.ARM64_RELOC_GOT_LOAD_PAGEOFF12,
.ARM64_RELOC_POINTER_TO_GOT,
=> return true,
else => return false,
},
- .x86_64 => switch (@enumFromInt(macho.reloc_type_x86_64, rel.r_type)) {
+ .x86_64 => switch (@as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type))) {
.X86_64_RELOC_GOT,
.X86_64_RELOC_GOT_LOAD,
=> return true,
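
ZldAtom.zig repeatedly turns a raw rel.r_type integer into an architecture-specific enum before switching on it. A toy version, with a hypothetical RelocType standing in for macho.reloc_type_arm64 / macho.reloc_type_x86_64:

const std = @import("std");

const RelocType = enum(u4) { branch, got_load, unsigned };

test "switch on a raw relocation type via @enumFromInt" {
    const raw: u4 = 1; // as read from a relocation record
    const rel_type = @as(RelocType, @enumFromInt(raw));
    try std.testing.expect(switch (rel_type) {
        .got_load => true,
        else => false,
    });
}
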
diff --git a/src/link/MachO/dead_strip.zig b/src/link/MachO/dead_strip.zig
index b2c569447d..890b40ed85 100644
--- a/src/link/MachO/dead_strip.zig
+++ b/src/link/MachO/dead_strip.zig
@@ -27,10 +27,10 @@ pub fn gcAtoms(zld: *Zld, resolver: *const SymbolResolver) !void {
defer arena.deinit();
var roots = AtomTable.init(arena.allocator());
- try roots.ensureUnusedCapacity(@intCast(u32, zld.globals.items.len));
+ try roots.ensureUnusedCapacity(@as(u32, @intCast(zld.globals.items.len)));
var alive = AtomTable.init(arena.allocator());
- try alive.ensureTotalCapacity(@intCast(u32, zld.atoms.items.len));
+ try alive.ensureTotalCapacity(@as(u32, @intCast(zld.atoms.items.len)));
try collectRoots(zld, &roots, resolver);
try mark(zld, roots, &alive);
@@ -99,8 +99,8 @@ fn collectRoots(zld: *Zld, roots: *AtomTable, resolver: *const SymbolResolver) !
const sect_id = if (object.getSourceSymbol(atom.sym_index)) |source_sym|
source_sym.n_sect - 1
else sect_id: {
- const nbase = @intCast(u32, object.in_symtab.?.len);
- const sect_id = @intCast(u8, atom.sym_index - nbase);
+ const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+ const sect_id = @as(u8, @intCast(atom.sym_index - nbase));
break :sect_id sect_id;
};
const source_sect = object.getSourceSection(sect_id);
@@ -148,7 +148,7 @@ fn markLive(zld: *Zld, atom_index: AtomIndex, alive: *AtomTable) void {
for (relocs) |rel| {
const target = switch (cpu_arch) {
- .aarch64 => switch (@enumFromInt(macho.reloc_type_arm64, rel.r_type)) {
+ .aarch64 => switch (@as(macho.reloc_type_arm64, @enumFromInt(rel.r_type))) {
.ARM64_RELOC_ADDEND => continue,
else => Atom.parseRelocTarget(zld, .{
.object_id = atom.getFile().?,
@@ -208,7 +208,7 @@ fn refersLive(zld: *Zld, atom_index: AtomIndex, alive: AtomTable) bool {
for (relocs) |rel| {
const target = switch (cpu_arch) {
- .aarch64 => switch (@enumFromInt(macho.reloc_type_arm64, rel.r_type)) {
+ .aarch64 => switch (@as(macho.reloc_type_arm64, @enumFromInt(rel.r_type))) {
.ARM64_RELOC_ADDEND => continue,
else => Atom.parseRelocTarget(zld, .{
.object_id = atom.getFile().?,
@@ -264,8 +264,8 @@ fn mark(zld: *Zld, roots: AtomTable, alive: *AtomTable) !void {
const sect_id = if (object.getSourceSymbol(atom.sym_index)) |source_sym|
source_sym.n_sect - 1
else blk: {
- const nbase = @intCast(u32, object.in_symtab.?.len);
- const sect_id = @intCast(u8, atom.sym_index - nbase);
+ const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+ const sect_id = @as(u8, @intCast(atom.sym_index - nbase));
break :blk sect_id;
};
const source_sect = object.getSourceSection(sect_id);
@@ -283,7 +283,7 @@ fn mark(zld: *Zld, roots: AtomTable, alive: *AtomTable) !void {
for (zld.objects.items, 0..) |_, object_id| {
// Traverse unwind and eh_frame records noting if the source symbol has been marked, and if so,
// marking all references as live.
- try markUnwindRecords(zld, @intCast(u32, object_id), alive);
+ try markUnwindRecords(zld, @as(u32, @intCast(object_id)), alive);
}
}
@@ -329,7 +329,7 @@ fn markUnwindRecords(zld: *Zld, object_id: u32, alive: *AtomTable) !void {
.object_id = object_id,
.rel = rel,
.code = mem.asBytes(&record),
- .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)),
+ .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))),
});
const target_sym = zld.getSymbol(target);
if (!target_sym.undf()) {
@@ -344,7 +344,7 @@ fn markUnwindRecords(zld: *Zld, object_id: u32, alive: *AtomTable) !void {
.object_id = object_id,
.rel = rel,
.code = mem.asBytes(&record),
- .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)),
+ .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))),
});
const target_object = zld.objects.items[target.getFile().?];
const target_atom_index = target_object.getAtomIndexForSymbol(target.sym_index).?;
@@ -377,7 +377,7 @@ fn markEhFrameRecord(zld: *Zld, object_id: u32, atom_index: AtomIndex, alive: *A
.object_id = object_id,
.rel = rel,
.code = fde.data,
- .base_offset = @intCast(i32, fde_offset) + 4,
+ .base_offset = @as(i32, @intCast(fde_offset)) + 4,
});
const target_sym = zld.getSymbol(target);
if (!target_sym.undf()) blk: {
diff --git a/src/link/MachO/dyld_info/Rebase.zig b/src/link/MachO/dyld_info/Rebase.zig
index 5b386a8136..0f3e96b02f 100644
--- a/src/link/MachO/dyld_info/Rebase.zig
+++ b/src/link/MachO/dyld_info/Rebase.zig
@@ -31,7 +31,7 @@ pub fn deinit(rebase: *Rebase, gpa: Allocator) void {
}
pub fn size(rebase: Rebase) u64 {
- return @intCast(u64, rebase.buffer.items.len);
+ return @as(u64, @intCast(rebase.buffer.items.len));
}
pub fn finalize(rebase: *Rebase, gpa: Allocator) !void {
@@ -145,12 +145,12 @@ fn finalizeSegment(entries: []const Entry, writer: anytype) !void {
fn setTypePointer(writer: anytype) !void {
log.debug(">>> set type: {d}", .{macho.REBASE_TYPE_POINTER});
- try writer.writeByte(macho.REBASE_OPCODE_SET_TYPE_IMM | @truncate(u4, macho.REBASE_TYPE_POINTER));
+ try writer.writeByte(macho.REBASE_OPCODE_SET_TYPE_IMM | @as(u4, @truncate(macho.REBASE_TYPE_POINTER)));
}
fn setSegmentOffset(segment_id: u8, offset: u64, writer: anytype) !void {
log.debug(">>> set segment: {d} and offset: {x}", .{ segment_id, offset });
- try writer.writeByte(macho.REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, segment_id));
+ try writer.writeByte(macho.REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @as(u4, @truncate(segment_id)));
try std.leb.writeULEB128(writer, offset);
}
@@ -163,7 +163,7 @@ fn rebaseAddAddr(addr: u64, writer: anytype) !void {
fn rebaseTimes(count: usize, writer: anytype) !void {
log.debug(">>> rebase with count: {d}", .{count});
if (count <= 0xf) {
- try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_IMM_TIMES | @truncate(u4, count));
+ try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_IMM_TIMES | @as(u4, @truncate(count)));
} else {
try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_ULEB_TIMES);
try std.leb.writeULEB128(writer, count);
@@ -182,7 +182,7 @@ fn addAddr(addr: u64, writer: anytype) !void {
if (std.mem.isAlignedGeneric(u64, addr, @sizeOf(u64))) {
const imm = @divExact(addr, @sizeOf(u64));
if (imm <= 0xf) {
- try writer.writeByte(macho.REBASE_OPCODE_ADD_ADDR_IMM_SCALED | @truncate(u4, imm));
+ try writer.writeByte(macho.REBASE_OPCODE_ADD_ADDR_IMM_SCALED | @as(u4, @truncate(imm)));
return;
}
}
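
Rebase.zig's emitters OR a 4-bit immediate into the low nibble of an opcode byte, which is why @truncate to u4 appears in every helper. A sketch with a hypothetical opcode constant in the high nibble:

const std = @import("std");

test "pack a 4-bit immediate into an opcode byte" {
    const SET_SEGMENT: u8 = 0x20; // hypothetical opcode value, high nibble only
    const segment_id: u8 = 0x12;
    // @truncate keeps only the low nibble of the immediate.
    const byte = SET_SEGMENT | @as(u4, @truncate(segment_id));
    try std.testing.expectEqual(@as(u8, 0x22), byte);
}
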
diff --git a/src/link/MachO/dyld_info/bind.zig b/src/link/MachO/dyld_info/bind.zig
index 14ce1587aa..f804c6466d 100644
--- a/src/link/MachO/dyld_info/bind.zig
+++ b/src/link/MachO/dyld_info/bind.zig
@@ -39,7 +39,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type {
}
pub fn size(self: Self) u64 {
- return @intCast(u64, self.buffer.items.len);
+ return @as(u64, @intCast(self.buffer.items.len));
}
pub fn finalize(self: *Self, gpa: Allocator, ctx: Ctx) !void {
@@ -95,7 +95,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type {
const sym = ctx.getSymbol(current.target);
const name = ctx.getSymbolName(current.target);
const flags: u8 = if (sym.weakRef()) macho.BIND_SYMBOL_FLAGS_WEAK_IMPORT else 0;
- const ordinal = @divTrunc(@bitCast(i16, sym.n_desc), macho.N_SYMBOL_RESOLVER);
+ const ordinal = @divTrunc(@as(i16, @bitCast(sym.n_desc)), macho.N_SYMBOL_RESOLVER);
try setSymbol(name, flags, writer);
try setTypePointer(writer);
@@ -112,7 +112,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type {
switch (state) {
.start => {
if (current.offset < offset) {
- try addAddr(@bitCast(u64, @intCast(i64, current.offset) - @intCast(i64, offset)), writer);
+ try addAddr(@as(u64, @bitCast(@as(i64, @intCast(current.offset)) - @as(i64, @intCast(offset)))), writer);
offset = offset - (offset - current.offset);
} else if (current.offset > offset) {
const delta = current.offset - offset;
@@ -130,7 +130,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type {
} else if (current.offset > offset) {
const delta = current.offset - offset;
state = .bind_times_skip;
- skip = @intCast(u64, delta);
+ skip = @as(u64, @intCast(delta));
offset += skip;
} else unreachable;
i -= 1;
@@ -194,7 +194,7 @@ pub fn LazyBind(comptime Ctx: type, comptime Target: type) type {
}
pub fn size(self: Self) u64 {
- return @intCast(u64, self.buffer.items.len);
+ return @as(u64, @intCast(self.buffer.items.len));
}
pub fn finalize(self: *Self, gpa: Allocator, ctx: Ctx) !void {
@@ -208,12 +208,12 @@ pub fn LazyBind(comptime Ctx: type, comptime Target: type) type {
var addend: i64 = 0;
for (self.entries.items) |entry| {
- self.offsets.appendAssumeCapacity(@intCast(u32, cwriter.bytes_written));
+ self.offsets.appendAssumeCapacity(@as(u32, @intCast(cwriter.bytes_written)));
const sym = ctx.getSymbol(entry.target);
const name = ctx.getSymbolName(entry.target);
const flags: u8 = if (sym.weakRef()) macho.BIND_SYMBOL_FLAGS_WEAK_IMPORT else 0;
- const ordinal = @divTrunc(@bitCast(i16, sym.n_desc), macho.N_SYMBOL_RESOLVER);
+ const ordinal = @divTrunc(@as(i16, @bitCast(sym.n_desc)), macho.N_SYMBOL_RESOLVER);
try setSegmentOffset(entry.segment_id, entry.offset, writer);
try setSymbol(name, flags, writer);
@@ -238,20 +238,20 @@ pub fn LazyBind(comptime Ctx: type, comptime Target: type) type {
fn setSegmentOffset(segment_id: u8, offset: u64, writer: anytype) !void {
log.debug(">>> set segment: {d} and offset: {x}", .{ segment_id, offset });
- try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, segment_id));
+ try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @as(u4, @truncate(segment_id)));
try std.leb.writeULEB128(writer, offset);
}
fn setSymbol(name: []const u8, flags: u8, writer: anytype) !void {
log.debug(">>> set symbol: {s} with flags: {x}", .{ name, flags });
- try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | @truncate(u4, flags));
+ try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | @as(u4, @truncate(flags)));
try writer.writeAll(name);
try writer.writeByte(0);
}
fn setTypePointer(writer: anytype) !void {
log.debug(">>> set type: {d}", .{macho.BIND_TYPE_POINTER});
- try writer.writeByte(macho.BIND_OPCODE_SET_TYPE_IMM | @truncate(u4, macho.BIND_TYPE_POINTER));
+ try writer.writeByte(macho.BIND_OPCODE_SET_TYPE_IMM | @as(u4, @truncate(macho.BIND_TYPE_POINTER)));
}
fn setDylibOrdinal(ordinal: i16, writer: anytype) !void {
@@ -264,13 +264,13 @@ fn setDylibOrdinal(ordinal: i16, writer: anytype) !void {
else => unreachable, // Invalid dylib special binding
}
log.debug(">>> set dylib special: {d}", .{ordinal});
- const cast = @bitCast(u16, ordinal);
- try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | @truncate(u4, cast));
+ const cast = @as(u16, @bitCast(ordinal));
+ try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | @as(u4, @truncate(cast)));
} else {
- const cast = @bitCast(u16, ordinal);
+ const cast = @as(u16, @bitCast(ordinal));
log.debug(">>> set dylib ordinal: {d}", .{ordinal});
if (cast <= 0xf) {
- try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | @truncate(u4, cast));
+ try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | @as(u4, @truncate(cast)));
} else {
try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
try std.leb.writeULEB128(writer, cast);
@@ -295,7 +295,7 @@ fn doBindAddAddr(addr: u64, writer: anytype) !void {
const imm = @divExact(addr, @sizeOf(u64));
if (imm <= 0xf) {
try writer.writeByte(
- macho.BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED | @truncate(u4, imm),
+ macho.BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED | @as(u4, @truncate(imm)),
);
return;
}
@@ -341,7 +341,7 @@ const TestContext = struct {
fn addSymbol(ctx: *TestContext, gpa: Allocator, name: []const u8, ordinal: i16, flags: u16) !void {
const n_strx = try ctx.addString(gpa, name);
- var n_desc = @bitCast(u16, ordinal * macho.N_SYMBOL_RESOLVER);
+ var n_desc = @as(u16, @bitCast(ordinal * macho.N_SYMBOL_RESOLVER));
n_desc |= flags;
try ctx.symbols.append(gpa, .{
.n_value = 0,
@@ -353,7 +353,7 @@ const TestContext = struct {
}
fn addString(ctx: *TestContext, gpa: Allocator, name: []const u8) !u32 {
- const n_strx = @intCast(u32, ctx.strtab.items.len);
+ const n_strx = @as(u32, @intCast(ctx.strtab.items.len));
try ctx.strtab.appendSlice(gpa, name);
try ctx.strtab.append(gpa, 0);
return n_strx;
@@ -366,7 +366,7 @@ const TestContext = struct {
fn getSymbolName(ctx: TestContext, target: Target) []const u8 {
const sym = ctx.getSymbol(target);
assert(sym.n_strx < ctx.strtab.items.len);
- return std.mem.sliceTo(@ptrCast([*:0]const u8, ctx.strtab.items.ptr + sym.n_strx), 0);
+ return std.mem.sliceTo(@as([*:0]const u8, @ptrCast(ctx.strtab.items.ptr + sym.n_strx)), 0);
}
};
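
bind.zig keeps dylib ordinals, which may be negative special values, in the unsigned n_desc field, hence the paired i16/u16 @bitCast calls above. A round-trip sketch:

const std = @import("std");

test "dylib ordinal round-trips through @bitCast" {
    const ordinal: i16 = -2; // a negative special ordinal
    const cast = @as(u16, @bitCast(ordinal));
    // The two's-complement bit pattern is preserved exactly.
    try std.testing.expectEqual(@as(u16, 0xfffe), cast);
    try std.testing.expectEqual(ordinal, @as(i16, @bitCast(cast)));
}
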
diff --git a/src/link/MachO/eh_frame.zig b/src/link/MachO/eh_frame.zig
index 1672e37229..eb4419cd7b 100644
--- a/src/link/MachO/eh_frame.zig
+++ b/src/link/MachO/eh_frame.zig
@@ -36,7 +36,7 @@ pub fn scanRelocs(zld: *Zld) !void {
try cies.putNoClobber(cie_offset, {});
it.seekTo(cie_offset);
const cie = (try it.next()).?;
- try cie.scanRelocs(zld, @intCast(u32, object_id), cie_offset);
+ try cie.scanRelocs(zld, @as(u32, @intCast(object_id)), cie_offset);
}
}
}
@@ -110,7 +110,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void {
var eh_frame_offset: u32 = 0;
for (zld.objects.items, 0..) |*object, object_id| {
- try eh_records.ensureUnusedCapacity(2 * @intCast(u32, object.exec_atoms.items.len));
+ try eh_records.ensureUnusedCapacity(2 * @as(u32, @intCast(object.exec_atoms.items.len)));
var cies = std.AutoHashMap(u32, u32).init(gpa);
defer cies.deinit();
@@ -139,7 +139,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void {
eh_it.seekTo(cie_offset);
const source_cie_record = (try eh_it.next()).?;
var cie_record = try source_cie_record.toOwned(gpa);
- try cie_record.relocate(zld, @intCast(u32, object_id), .{
+ try cie_record.relocate(zld, @as(u32, @intCast(object_id)), .{
.source_offset = cie_offset,
.out_offset = eh_frame_offset,
.sect_addr = sect.addr,
@@ -151,7 +151,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void {
var fde_record = try source_fde_record.toOwned(gpa);
fde_record.setCiePointer(eh_frame_offset + 4 - gop.value_ptr.*);
- try fde_record.relocate(zld, @intCast(u32, object_id), .{
+ try fde_record.relocate(zld, @as(u32, @intCast(object_id)), .{
.source_offset = fde_record_offset,
.out_offset = eh_frame_offset,
.sect_addr = sect.addr,
@@ -194,7 +194,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void {
UnwindInfo.UnwindEncoding.setDwarfSectionOffset(
&record.compactUnwindEncoding,
cpu_arch,
- @intCast(u24, eh_frame_offset),
+ @as(u24, @intCast(eh_frame_offset)),
);
const cie_record = eh_records.get(
@@ -268,7 +268,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
}) u64 {
assert(rec.tag == .fde);
const addend = mem.readIntLittle(i64, rec.data[4..][0..8]);
- return @intCast(u64, @intCast(i64, ctx.base_addr + ctx.base_offset + 8) + addend);
+ return @as(u64, @intCast(@as(i64, @intCast(ctx.base_addr + ctx.base_offset + 8)) + addend));
}
pub fn setTargetSymbolAddress(rec: *Record, value: u64, ctx: struct {
@@ -276,7 +276,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
base_offset: u64,
}) !void {
assert(rec.tag == .fde);
- const addend = @intCast(i64, value) - @intCast(i64, ctx.base_addr + ctx.base_offset + 8);
+ const addend = @as(i64, @intCast(value)) - @as(i64, @intCast(ctx.base_addr + ctx.base_offset + 8));
mem.writeIntLittle(i64, rec.data[4..][0..8], addend);
}
@@ -291,7 +291,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
for (relocs) |rel| {
switch (cpu_arch) {
.aarch64 => {
- const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
switch (rel_type) {
.ARM64_RELOC_SUBTRACTOR,
.ARM64_RELOC_UNSIGNED,
@@ -301,7 +301,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
}
},
.x86_64 => {
- const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type));
switch (rel_type) {
.X86_64_RELOC_GOT => {},
else => unreachable,
@@ -313,7 +313,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
.object_id = object_id,
.rel = rel,
.code = rec.data,
- .base_offset = @intCast(i32, source_offset) + 4,
+ .base_offset = @as(i32, @intCast(source_offset)) + 4,
});
return target;
}
@@ -335,40 +335,40 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
.object_id = object_id,
.rel = rel,
.code = rec.data,
- .base_offset = @intCast(i32, ctx.source_offset) + 4,
+ .base_offset = @as(i32, @intCast(ctx.source_offset)) + 4,
});
- const rel_offset = @intCast(u32, rel.r_address - @intCast(i32, ctx.source_offset) - 4);
+ const rel_offset = @as(u32, @intCast(rel.r_address - @as(i32, @intCast(ctx.source_offset)) - 4));
const source_addr = ctx.sect_addr + rel_offset + ctx.out_offset + 4;
switch (cpu_arch) {
.aarch64 => {
- const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
switch (rel_type) {
.ARM64_RELOC_SUBTRACTOR => {
// Address of the __eh_frame in the source object file
},
.ARM64_RELOC_POINTER_TO_GOT => {
const target_addr = try Atom.getRelocTargetAddress(zld, target, true, false);
- const result = math.cast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr)) orelse
+ const result = math.cast(i32, @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr))) orelse
return error.Overflow;
mem.writeIntLittle(i32, rec.data[rel_offset..][0..4], result);
},
.ARM64_RELOC_UNSIGNED => {
assert(rel.r_extern == 1);
const target_addr = try Atom.getRelocTargetAddress(zld, target, false, false);
- const result = @intCast(i64, target_addr) - @intCast(i64, source_addr);
- mem.writeIntLittle(i64, rec.data[rel_offset..][0..8], @intCast(i64, result));
+ const result = @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr));
+ mem.writeIntLittle(i64, rec.data[rel_offset..][0..8], @as(i64, @intCast(result)));
},
else => unreachable,
}
},
.x86_64 => {
- const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type));
switch (rel_type) {
.X86_64_RELOC_GOT => {
const target_addr = try Atom.getRelocTargetAddress(zld, target, true, false);
const addend = mem.readIntLittle(i32, rec.data[rel_offset..][0..4]);
- const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend);
+ const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend));
const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
mem.writeIntLittle(i32, rec.data[rel_offset..][0..4], disp);
},
@@ -392,7 +392,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
pub fn getAugmentationString(rec: Record) []const u8 {
assert(rec.tag == .cie);
- return mem.sliceTo(@ptrCast([*:0]const u8, rec.data.ptr + 5), 0);
+ return mem.sliceTo(@as([*:0]const u8, @ptrCast(rec.data.ptr + 5)), 0);
}
pub fn getPersonalityPointer(rec: Record, ctx: struct {
@@ -418,7 +418,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
'P' => {
const enc = try reader.readByte();
const offset = ctx.base_offset + 13 + aug_str.len + creader.bytes_read;
- const ptr = try getEncodedPointer(enc, @intCast(i64, ctx.base_addr + offset), reader);
+ const ptr = try getEncodedPointer(enc, @as(i64, @intCast(ctx.base_addr + offset)), reader);
return ptr;
},
'L' => {
@@ -441,7 +441,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
const reader = stream.reader();
_ = try reader.readByte();
const offset = ctx.base_offset + 25;
- const ptr = try getEncodedPointer(enc, @intCast(i64, ctx.base_addr + offset), reader);
+ const ptr = try getEncodedPointer(enc, @as(i64, @intCast(ctx.base_addr + offset)), reader);
return ptr;
}
@@ -454,7 +454,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
var stream = std.io.fixedBufferStream(rec.data[21..]);
const writer = stream.writer();
const offset = ctx.base_offset + 25;
- try setEncodedPointer(enc, @intCast(i64, ctx.base_addr + offset), value, writer);
+ try setEncodedPointer(enc, @as(i64, @intCast(ctx.base_addr + offset)), value, writer);
}
fn getLsdaEncoding(rec: Record) !?u8 {
@@ -494,11 +494,11 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
if (enc == EH_PE.omit) return null;
var ptr: i64 = switch (enc & 0x0F) {
- EH_PE.absptr => @bitCast(i64, try reader.readIntLittle(u64)),
- EH_PE.udata2 => @bitCast(i16, try reader.readIntLittle(u16)),
- EH_PE.udata4 => @bitCast(i32, try reader.readIntLittle(u32)),
- EH_PE.udata8 => @bitCast(i64, try reader.readIntLittle(u64)),
- EH_PE.uleb128 => @bitCast(i64, try leb.readULEB128(u64, reader)),
+ EH_PE.absptr => @as(i64, @bitCast(try reader.readIntLittle(u64))),
+ EH_PE.udata2 => @as(i16, @bitCast(try reader.readIntLittle(u16))),
+ EH_PE.udata4 => @as(i32, @bitCast(try reader.readIntLittle(u32))),
+ EH_PE.udata8 => @as(i64, @bitCast(try reader.readIntLittle(u64))),
+ EH_PE.uleb128 => @as(i64, @bitCast(try leb.readULEB128(u64, reader))),
EH_PE.sdata2 => try reader.readIntLittle(i16),
EH_PE.sdata4 => try reader.readIntLittle(i32),
EH_PE.sdata8 => try reader.readIntLittle(i64),
@@ -517,13 +517,13 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
else => return null,
}
- return @bitCast(u64, ptr);
+ return @as(u64, @bitCast(ptr));
}
fn setEncodedPointer(enc: u8, pcrel_offset: i64, value: u64, writer: anytype) !void {
if (enc == EH_PE.omit) return;
- var actual = @intCast(i64, value);
+ var actual = @as(i64, @intCast(value));
switch (enc & 0x70) {
EH_PE.absptr => {},
@@ -537,13 +537,13 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
}
switch (enc & 0x0F) {
- EH_PE.absptr => try writer.writeIntLittle(u64, @bitCast(u64, actual)),
- EH_PE.udata2 => try writer.writeIntLittle(u16, @bitCast(u16, @intCast(i16, actual))),
- EH_PE.udata4 => try writer.writeIntLittle(u32, @bitCast(u32, @intCast(i32, actual))),
- EH_PE.udata8 => try writer.writeIntLittle(u64, @bitCast(u64, actual)),
- EH_PE.uleb128 => try leb.writeULEB128(writer, @bitCast(u64, actual)),
- EH_PE.sdata2 => try writer.writeIntLittle(i16, @intCast(i16, actual)),
- EH_PE.sdata4 => try writer.writeIntLittle(i32, @intCast(i32, actual)),
+ EH_PE.absptr => try writer.writeIntLittle(u64, @as(u64, @bitCast(actual))),
+ EH_PE.udata2 => try writer.writeIntLittle(u16, @as(u16, @bitCast(@as(i16, @intCast(actual))))),
+ EH_PE.udata4 => try writer.writeIntLittle(u32, @as(u32, @bitCast(@as(i32, @intCast(actual))))),
+ EH_PE.udata8 => try writer.writeIntLittle(u64, @as(u64, @bitCast(actual))),
+ EH_PE.uleb128 => try leb.writeULEB128(writer, @as(u64, @bitCast(actual))),
+ EH_PE.sdata2 => try writer.writeIntLittle(i16, @as(i16, @intCast(actual))),
+ EH_PE.sdata4 => try writer.writeIntLittle(i32, @as(i32, @intCast(actual))),
EH_PE.sdata8 => try writer.writeIntLittle(i64, actual),
EH_PE.sleb128 => try leb.writeILEB128(writer, actual),
else => unreachable,
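
eh_frame.zig reads NUL-terminated strings, such as the CIE augmentation string, directly out of record bytes by casting a many-item pointer to its sentinel-terminated form. A standalone sketch over a throwaway buffer:

const std = @import("std");

test "NUL-terminated string via @ptrCast to a sentinel pointer" {
    const buf: []const u8 = "abc\x00def\x00";
    // The sentinel-terminated pointer type now comes from @as, not @ptrCast.
    const name = std.mem.sliceTo(@as([*:0]const u8, @ptrCast(buf.ptr + 4)), 0);
    try std.testing.expectEqualStrings("def", name);
}
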
diff --git a/src/link/MachO/load_commands.zig b/src/link/MachO/load_commands.zig
index eb582e2222..10f446f191 100644
--- a/src/link/MachO/load_commands.zig
+++ b/src/link/MachO/load_commands.zig
@@ -114,7 +114,7 @@ fn calcLCsSize(gpa: Allocator, options: *const link.Options, ctx: CalcLCsSizeCtx
}
}
- return @intCast(u32, sizeofcmds);
+ return @as(u32, @intCast(sizeofcmds));
}
pub fn calcMinHeaderPad(gpa: Allocator, options: *const link.Options, ctx: CalcLCsSizeCtx) !u64 {
@@ -140,7 +140,7 @@ pub fn calcNumOfLCs(lc_buffer: []const u8) u32 {
var pos: usize = 0;
while (true) {
if (pos >= lc_buffer.len) break;
- const cmd = @ptrCast(*align(1) const macho.load_command, lc_buffer.ptr + pos).*;
+ const cmd = @as(*align(1) const macho.load_command, @ptrCast(lc_buffer.ptr + pos)).*;
ncmds += 1;
pos += cmd.cmdsize;
}
@@ -149,11 +149,11 @@ pub fn calcNumOfLCs(lc_buffer: []const u8) u32 {
pub fn writeDylinkerLC(lc_writer: anytype) !void {
const name_len = mem.sliceTo(default_dyld_path, 0).len;
- const cmdsize = @intCast(u32, mem.alignForward(
+ const cmdsize = @as(u32, @intCast(mem.alignForward(
u64,
@sizeOf(macho.dylinker_command) + name_len,
@sizeOf(u64),
- ));
+ )));
try lc_writer.writeStruct(macho.dylinker_command{
.cmd = .LOAD_DYLINKER,
.cmdsize = cmdsize,
@@ -176,11 +176,11 @@ const WriteDylibLCCtx = struct {
fn writeDylibLC(ctx: WriteDylibLCCtx, lc_writer: anytype) !void {
const name_len = ctx.name.len + 1;
- const cmdsize = @intCast(u32, mem.alignForward(
+ const cmdsize = @as(u32, @intCast(mem.alignForward(
u64,
@sizeOf(macho.dylib_command) + name_len,
@sizeOf(u64),
- ));
+ )));
try lc_writer.writeStruct(macho.dylib_command{
.cmd = ctx.cmd,
.cmdsize = cmdsize,
@@ -217,8 +217,8 @@ pub fn writeDylibIdLC(gpa: Allocator, options: *const link.Options, lc_writer: a
try writeDylibLC(.{
.cmd = .ID_DYLIB,
.name = install_name,
- .current_version = @intCast(u32, curr.major << 16 | curr.minor << 8 | curr.patch),
- .compatibility_version = @intCast(u32, compat.major << 16 | compat.minor << 8 | compat.patch),
+ .current_version = @as(u32, @intCast(curr.major << 16 | curr.minor << 8 | curr.patch)),
+ .compatibility_version = @as(u32, @intCast(compat.major << 16 | compat.minor << 8 | compat.patch)),
}, lc_writer);
}
@@ -253,11 +253,11 @@ pub fn writeRpathLCs(gpa: Allocator, options: *const link.Options, lc_writer: an
while (try it.next()) |rpath| {
const rpath_len = rpath.len + 1;
- const cmdsize = @intCast(u32, mem.alignForward(
+ const cmdsize = @as(u32, @intCast(mem.alignForward(
u64,
@sizeOf(macho.rpath_command) + rpath_len,
@sizeOf(u64),
- ));
+ )));
try lc_writer.writeStruct(macho.rpath_command{
.cmdsize = cmdsize,
.path = @sizeOf(macho.rpath_command),
@@ -275,12 +275,12 @@ pub fn writeBuildVersionLC(options: *const link.Options, lc_writer: anytype) !vo
const cmdsize = @sizeOf(macho.build_version_command) + @sizeOf(macho.build_tool_version);
const platform_version = blk: {
const ver = options.target.os.version_range.semver.min;
- const platform_version = @intCast(u32, ver.major << 16 | ver.minor << 8);
+ const platform_version = @as(u32, @intCast(ver.major << 16 | ver.minor << 8));
break :blk platform_version;
};
const sdk_version = if (options.native_darwin_sdk) |sdk| blk: {
const ver = sdk.version;
- const sdk_version = @intCast(u32, ver.major << 16 | ver.minor << 8);
+ const sdk_version = @as(u32, @intCast(ver.major << 16 | ver.minor << 8));
break :blk sdk_version;
} else platform_version;
const is_simulator_abi = options.target.abi == .simulator;
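
The dylib and build-version hunks above all pack major/minor/patch as major << 16 | minor << 8 | patch. A hedged sketch with a hypothetical packVersion helper (not load_commands.zig's API) showing why the explicit @as becomes unnecessary once a result type is present:

const std = @import("std");

fn packVersion(major: u64, minor: u64, patch: u64) u32 {
    // The return type is the result type, so @intCast needs no
    // explicit destination type here.
    return @intCast(major << 16 | minor << 8 | patch);
}

test "packVersion" {
    try std.testing.expectEqual(@as(u32, 0x10203), packVersion(1, 2, 3));
}
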
diff --git a/src/link/MachO/thunks.zig b/src/link/MachO/thunks.zig
index f3289e544b..82d0451225 100644
--- a/src/link/MachO/thunks.zig
+++ b/src/link/MachO/thunks.zig
@@ -131,7 +131,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void {
log.debug("GROUP END at {d}", .{group_end});
// Insert thunk at group_end
- const thunk_index = @intCast(u32, zld.thunks.items.len);
+ const thunk_index = @as(u32, @intCast(zld.thunks.items.len));
try zld.thunks.append(gpa, .{ .start_index = undefined, .len = 0 });
// Scan relocs in the group and create trampolines for any unreachable callsite.
@@ -174,7 +174,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void {
}
}
- header.size = @intCast(u32, offset);
+ header.size = @as(u32, @intCast(offset));
}
fn allocateThunk(
@@ -223,7 +223,7 @@ fn scanRelocs(
const base_offset = if (object.getSourceSymbol(atom.sym_index)) |source_sym| blk: {
const source_sect = object.getSourceSection(source_sym.n_sect - 1);
- break :blk @intCast(i32, source_sym.n_value - source_sect.addr);
+ break :blk @as(i32, @intCast(source_sym.n_value - source_sect.addr));
} else 0;
const code = Atom.getAtomCode(zld, atom_index);
@@ -289,7 +289,7 @@ fn scanRelocs(
}
inline fn relocNeedsThunk(rel: macho.relocation_info) bool {
- const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
return rel_type == .ARM64_RELOC_BRANCH26;
}
@@ -315,7 +315,7 @@ fn isReachable(
if (!allocated.contains(target_atom_index)) return false;
- const source_addr = source_sym.n_value + @intCast(u32, rel.r_address - base_offset);
+ const source_addr = source_sym.n_value + @as(u32, @intCast(rel.r_address - base_offset));
const is_via_got = Atom.relocRequiresGot(zld, rel);
const target_addr = Atom.getRelocTargetAddress(zld, target, is_via_got, false) catch unreachable;
_ = Relocation.calcPcRelativeDisplacementArm64(source_addr, target_addr) catch
@@ -349,7 +349,7 @@ fn getThunkIndex(zld: *Zld, atom_index: AtomIndex) ?ThunkIndex {
const end_addr = start_addr + thunk.getSize();
if (start_addr <= sym.n_value and sym.n_value < end_addr) {
- return @intCast(u32, i);
+ return @as(u32, @intCast(i));
}
}
return null;
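
relocNeedsThunk above illustrates the @enumFromInt migration. A stand-alone sketch using a stand-in RelocType enum (the real code uses macho.reloc_type_arm64, whose ARM64_RELOC_BRANCH26 also has value 2):

const std = @import("std");

const RelocType = enum(u4) {
    ARM64_RELOC_UNSIGNED = 0,
    ARM64_RELOC_BRANCH26 = 2,
};

fn relocNeedsThunk(raw_type: u4) bool {
    // Old: @enumFromInt(RelocType, raw_type)
    // New: the enum type is inferred from the result type of @as.
    const rel_type = @as(RelocType, @enumFromInt(raw_type));
    return rel_type == .ARM64_RELOC_BRANCH26;
}

test "relocNeedsThunk" {
    try std.testing.expect(relocNeedsThunk(2));
    try std.testing.expect(!relocNeedsThunk(0));
}
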
diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig
index 13c1ea73fa..3e828984a9 100644
--- a/src/link/MachO/zld.zig
+++ b/src/link/MachO/zld.zig
@@ -103,7 +103,7 @@ pub const Zld = struct {
const cpu_arch = self.options.target.cpu.arch;
const mtime: u64 = mtime: {
const stat = file.stat() catch break :mtime 0;
- break :mtime @intCast(u64, @divFloor(stat.mtime, 1_000_000_000));
+ break :mtime @as(u64, @intCast(@divFloor(stat.mtime, 1_000_000_000)));
};
const file_stat = try file.stat();
const file_size = math.cast(usize, file_stat.size) orelse return error.Overflow;
@@ -220,7 +220,7 @@ pub const Zld = struct {
const contents = try file.readToEndAllocOptions(gpa, file_size, file_size, @alignOf(u64), null);
defer gpa.free(contents);
- const dylib_id = @intCast(u16, self.dylibs.items.len);
+ const dylib_id = @as(u16, @intCast(self.dylibs.items.len));
var dylib = Dylib{ .weak = opts.weak };
dylib.parseFromBinary(
@@ -535,7 +535,7 @@ pub const Zld = struct {
pub fn createEmptyAtom(self: *Zld, sym_index: u32, size: u64, alignment: u32) !AtomIndex {
const gpa = self.gpa;
- const index = @intCast(AtomIndex, self.atoms.items.len);
+ const index = @as(AtomIndex, @intCast(self.atoms.items.len));
const atom = try self.atoms.addOne(gpa);
atom.* = Atom.empty;
atom.sym_index = sym_index;
@@ -596,7 +596,7 @@ pub const Zld = struct {
const global_index = self.dyld_stub_binder_index orelse return;
const target = self.globals.items[global_index];
const atom_index = try self.createGotAtom();
- const got_index = @intCast(u32, self.got_entries.items.len);
+ const got_index = @as(u32, @intCast(self.got_entries.items.len));
try self.got_entries.append(gpa, .{
.target = target,
.atom_index = atom_index,
@@ -874,7 +874,7 @@ pub const Zld = struct {
}
for (self.objects.items, 0..) |_, object_id| {
- try self.resolveSymbolsInObject(@intCast(u32, object_id), resolver);
+ try self.resolveSymbolsInObject(@as(u32, @intCast(object_id)), resolver);
}
try self.resolveSymbolsInArchives(resolver);
@@ -1024,7 +1024,7 @@ pub const Zld = struct {
};
assert(offsets.items.len > 0);
- const object_id = @intCast(u16, self.objects.items.len);
+ const object_id = @as(u16, @intCast(self.objects.items.len));
const object = archive.parseObject(gpa, cpu_arch, offsets.items[0]) catch |e| switch (e) {
error.MismatchedCpuArchitecture => {
log.err("CPU architecture mismatch found in {s}", .{archive.name});
@@ -1055,14 +1055,14 @@ pub const Zld = struct {
for (self.dylibs.items, 0..) |dylib, id| {
if (!dylib.symbols.contains(sym_name)) continue;
- const dylib_id = @intCast(u16, id);
+ const dylib_id = @as(u16, @intCast(id));
if (!self.referenced_dylibs.contains(dylib_id)) {
try self.referenced_dylibs.putNoClobber(self.gpa, dylib_id, {});
}
const ordinal = self.referenced_dylibs.getIndex(dylib_id) orelse unreachable;
sym.n_type |= macho.N_EXT;
- sym.n_desc = @intCast(u16, ordinal + 1) * macho.N_SYMBOL_RESOLVER;
+ sym.n_desc = @as(u16, @intCast(ordinal + 1)) * macho.N_SYMBOL_RESOLVER;
if (dylib.weak) {
sym.n_desc |= macho.N_WEAK_REF;
@@ -1099,9 +1099,9 @@ pub const Zld = struct {
_ = resolver.unresolved.swapRemove(global_index);
continue;
} else if (allow_undef) {
- const n_desc = @bitCast(
+ const n_desc = @as(
u16,
- macho.BIND_SPECIAL_DYLIB_FLAT_LOOKUP * @intCast(i16, macho.N_SYMBOL_RESOLVER),
+ @bitCast(macho.BIND_SPECIAL_DYLIB_FLAT_LOOKUP * @as(i16, @intCast(macho.N_SYMBOL_RESOLVER))),
);
sym.n_type = macho.N_EXT;
sym.n_desc = n_desc;
@@ -1238,7 +1238,7 @@ pub const Zld = struct {
const segname = header.segName();
const segment_id = self.getSegmentByName(segname) orelse blk: {
log.debug("creating segment '{s}'", .{segname});
- const segment_id = @intCast(u8, self.segments.items.len);
+ const segment_id = @as(u8, @intCast(self.segments.items.len));
const protection = getSegmentMemoryProtection(segname);
try self.segments.append(self.gpa, .{
.cmdsize = @sizeOf(macho.segment_command_64),
@@ -1269,7 +1269,7 @@ pub const Zld = struct {
pub fn allocateSymbol(self: *Zld) !u32 {
try self.locals.ensureUnusedCapacity(self.gpa, 1);
log.debug(" (allocating symbol index {d})", .{self.locals.items.len});
- const index = @intCast(u32, self.locals.items.len);
+ const index = @as(u32, @intCast(self.locals.items.len));
_ = self.locals.addOneAssumeCapacity();
self.locals.items[index] = .{
.n_strx = 0,
@@ -1282,7 +1282,7 @@ pub const Zld = struct {
}
fn addGlobal(self: *Zld, sym_loc: SymbolWithLoc) !u32 {
- const global_index = @intCast(u32, self.globals.items.len);
+ const global_index = @as(u32, @intCast(self.globals.items.len));
try self.globals.append(self.gpa, sym_loc);
return global_index;
}
@@ -1489,7 +1489,7 @@ pub const Zld = struct {
if (mem.eql(u8, header.sectName(), "__stub_helper")) continue;
// Create jump/branch range extenders if needed.
- try thunks.createThunks(self, @intCast(u8, sect_id));
+ try thunks.createThunks(self, @as(u8, @intCast(sect_id)));
}
}
}
@@ -1502,7 +1502,7 @@ pub const Zld = struct {
.dylibs = self.dylibs.items,
.referenced_dylibs = self.referenced_dylibs.keys(),
}) else 0;
- try self.allocateSegment(@intCast(u8, segment_index), base_size);
+ try self.allocateSegment(@as(u8, @intCast(segment_index)), base_size);
}
}
@@ -1536,12 +1536,12 @@ pub const Zld = struct {
for (slice.items(.header)[indexes.start..indexes.end], 0..) |*header, sect_id| {
const alignment = try math.powi(u32, 2, header.@"align");
const start_aligned = mem.alignForward(u64, start, alignment);
- const n_sect = @intCast(u8, indexes.start + sect_id + 1);
+ const n_sect = @as(u8, @intCast(indexes.start + sect_id + 1));
header.offset = if (header.isZerofill())
0
else
- @intCast(u32, segment.fileoff + start_aligned);
+ @as(u32, @intCast(segment.fileoff + start_aligned));
header.addr = segment.vmaddr + start_aligned;
var atom_index = slice.items(.first_atom_index)[indexes.start + sect_id];
@@ -1617,7 +1617,7 @@ pub const Zld = struct {
) !u8 {
const gpa = self.gpa;
log.debug("creating section '{s},{s}'", .{ segname, sectname });
- const index = @intCast(u8, self.sections.slice().len);
+ const index = @as(u8, @intCast(self.sections.slice().len));
try self.sections.append(gpa, .{
.segment_index = undefined, // Segments will be created automatically later down the pipeline
.header = .{
@@ -1673,12 +1673,12 @@ pub const Zld = struct {
},
}
};
- return (@intCast(u8, segment_precedence) << 4) + section_precedence;
+ return (@as(u8, @intCast(segment_precedence)) << 4) + section_precedence;
}
fn writeSegmentHeaders(self: *Zld, writer: anytype) !void {
for (self.segments.items, 0..) |seg, i| {
- const indexes = self.getSectionIndexes(@intCast(u8, i));
+ const indexes = self.getSectionIndexes(@as(u8, @intCast(i)));
var out_seg = seg;
out_seg.cmdsize = @sizeOf(macho.segment_command_64);
out_seg.nsects = 0;
@@ -1790,7 +1790,7 @@ pub const Zld = struct {
}
const segment_index = slice.items(.segment_index)[sect_id];
- const segment = self.getSegment(@intCast(u8, sect_id));
+ const segment = self.getSegment(@as(u8, @intCast(sect_id)));
if (segment.maxprot & macho.PROT.WRITE == 0) continue;
log.debug("{s},{s}", .{ header.segName(), header.sectName() });
@@ -1820,12 +1820,12 @@ pub const Zld = struct {
for (relocs) |rel| {
switch (cpu_arch) {
.aarch64 => {
- const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
if (rel_type != .ARM64_RELOC_UNSIGNED) continue;
if (rel.r_length != 3) continue;
},
.x86_64 => {
- const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type));
if (rel_type != .X86_64_RELOC_UNSIGNED) continue;
if (rel.r_length != 3) continue;
},
@@ -1841,9 +1841,9 @@ pub const Zld = struct {
const target_sym = self.getSymbol(target);
if (target_sym.undf()) continue;
- const base_offset = @intCast(i32, sym.n_value - segment.vmaddr);
+ const base_offset = @as(i32, @intCast(sym.n_value - segment.vmaddr));
const rel_offset = rel.r_address - ctx.base_offset;
- const offset = @intCast(u64, base_offset + rel_offset);
+ const offset = @as(u64, @intCast(base_offset + rel_offset));
log.debug(" | rebase at {x}", .{offset});
try rebase.entries.append(self.gpa, .{
@@ -1882,7 +1882,7 @@ pub const Zld = struct {
const sym = entry.getAtomSymbol(self);
const base_offset = sym.n_value - seg.vmaddr;
- const dylib_ordinal = @divTrunc(@bitCast(i16, bind_sym.n_desc), macho.N_SYMBOL_RESOLVER);
+ const dylib_ordinal = @divTrunc(@as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER);
log.debug(" | bind at {x}, import('{s}') in dylib({d})", .{
base_offset,
bind_sym_name,
@@ -1929,7 +1929,7 @@ pub const Zld = struct {
}
const segment_index = slice.items(.segment_index)[sect_id];
- const segment = self.getSegment(@intCast(u8, sect_id));
+ const segment = self.getSegment(@as(u8, @intCast(sect_id)));
if (segment.maxprot & macho.PROT.WRITE == 0) continue;
const cpu_arch = self.options.target.cpu.arch;
@@ -1959,12 +1959,12 @@ pub const Zld = struct {
for (relocs) |rel| {
switch (cpu_arch) {
.aarch64 => {
- const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
if (rel_type != .ARM64_RELOC_UNSIGNED) continue;
if (rel.r_length != 3) continue;
},
.x86_64 => {
- const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type));
if (rel_type != .X86_64_RELOC_UNSIGNED) continue;
if (rel.r_length != 3) continue;
},
@@ -1983,11 +1983,11 @@ pub const Zld = struct {
if (!bind_sym.undf()) continue;
const base_offset = sym.n_value - segment.vmaddr;
- const rel_offset = @intCast(u32, rel.r_address - ctx.base_offset);
- const offset = @intCast(u64, base_offset + rel_offset);
+ const rel_offset = @as(u32, @intCast(rel.r_address - ctx.base_offset));
+ const offset = @as(u64, @intCast(base_offset + rel_offset));
const addend = mem.readIntLittle(i64, code[rel_offset..][0..8]);
- const dylib_ordinal = @divTrunc(@bitCast(i16, bind_sym.n_desc), macho.N_SYMBOL_RESOLVER);
+ const dylib_ordinal = @divTrunc(@as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER);
log.debug(" | bind at {x}, import('{s}') in dylib({d})", .{
base_offset,
bind_sym_name,
@@ -2039,7 +2039,7 @@ pub const Zld = struct {
const stub_entry = self.stubs.items[count];
const bind_sym = stub_entry.getTargetSymbol(self);
const bind_sym_name = stub_entry.getTargetSymbolName(self);
- const dylib_ordinal = @divTrunc(@bitCast(i16, bind_sym.n_desc), macho.N_SYMBOL_RESOLVER);
+ const dylib_ordinal = @divTrunc(@as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER);
log.debug(" | lazy bind at {x}, import('{s}') in dylib({d})", .{
base_offset,
bind_sym_name,
@@ -2165,14 +2165,14 @@ pub const Zld = struct {
try self.file.pwriteAll(buffer, rebase_off);
try self.populateLazyBindOffsetsInStubHelper(lazy_bind);
- self.dyld_info_cmd.rebase_off = @intCast(u32, rebase_off);
- self.dyld_info_cmd.rebase_size = @intCast(u32, rebase_size_aligned);
- self.dyld_info_cmd.bind_off = @intCast(u32, bind_off);
- self.dyld_info_cmd.bind_size = @intCast(u32, bind_size_aligned);
- self.dyld_info_cmd.lazy_bind_off = @intCast(u32, lazy_bind_off);
- self.dyld_info_cmd.lazy_bind_size = @intCast(u32, lazy_bind_size_aligned);
- self.dyld_info_cmd.export_off = @intCast(u32, export_off);
- self.dyld_info_cmd.export_size = @intCast(u32, export_size_aligned);
+ self.dyld_info_cmd.rebase_off = @as(u32, @intCast(rebase_off));
+ self.dyld_info_cmd.rebase_size = @as(u32, @intCast(rebase_size_aligned));
+ self.dyld_info_cmd.bind_off = @as(u32, @intCast(bind_off));
+ self.dyld_info_cmd.bind_size = @as(u32, @intCast(bind_size_aligned));
+ self.dyld_info_cmd.lazy_bind_off = @as(u32, @intCast(lazy_bind_off));
+ self.dyld_info_cmd.lazy_bind_size = @as(u32, @intCast(lazy_bind_size_aligned));
+ self.dyld_info_cmd.export_off = @as(u32, @intCast(export_off));
+ self.dyld_info_cmd.export_size = @as(u32, @intCast(export_size_aligned));
}
fn populateLazyBindOffsetsInStubHelper(self: *Zld, lazy_bind: LazyBind) !void {
@@ -2246,7 +2246,7 @@ pub const Zld = struct {
var last_off: u32 = 0;
for (addresses.items) |addr| {
- const offset = @intCast(u32, addr - text_seg.vmaddr);
+ const offset = @as(u32, @intCast(addr - text_seg.vmaddr));
const diff = offset - last_off;
if (diff == 0) continue;
@@ -2258,7 +2258,7 @@ pub const Zld = struct {
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
- const max_size = @intCast(usize, offsets.items.len * @sizeOf(u64));
+ const max_size = @as(usize, @intCast(offsets.items.len * @sizeOf(u64)));
try buffer.ensureTotalCapacity(max_size);
for (offsets.items) |offset| {
@@ -2281,8 +2281,8 @@ pub const Zld = struct {
try self.file.pwriteAll(buffer.items, offset);
- self.function_starts_cmd.dataoff = @intCast(u32, offset);
- self.function_starts_cmd.datasize = @intCast(u32, needed_size_aligned);
+ self.function_starts_cmd.dataoff = @as(u32, @intCast(offset));
+ self.function_starts_cmd.datasize = @as(u32, @intCast(needed_size_aligned));
}
fn filterDataInCode(
@@ -2324,8 +2324,8 @@ pub const Zld = struct {
const source_addr = if (object.getSourceSymbol(atom.sym_index)) |source_sym|
source_sym.n_value
else blk: {
- const nbase = @intCast(u32, object.in_symtab.?.len);
- const source_sect_id = @intCast(u8, atom.sym_index - nbase);
+ const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+ const source_sect_id = @as(u8, @intCast(atom.sym_index - nbase));
break :blk object.getSourceSection(source_sect_id).addr;
};
const filtered_dice = filterDataInCode(dice, source_addr, source_addr + atom.size);
@@ -2363,8 +2363,8 @@ pub const Zld = struct {
try self.file.pwriteAll(buffer, offset);
- self.data_in_code_cmd.dataoff = @intCast(u32, offset);
- self.data_in_code_cmd.datasize = @intCast(u32, needed_size_aligned);
+ self.data_in_code_cmd.dataoff = @as(u32, @intCast(offset));
+ self.data_in_code_cmd.datasize = @as(u32, @intCast(needed_size_aligned));
}
fn writeSymtabs(self: *Zld) !void {
@@ -2428,7 +2428,7 @@ pub const Zld = struct {
if (!sym.undf()) continue; // not an import, skip
if (sym.n_desc == N_DEAD) continue;
- const new_index = @intCast(u32, imports.items.len);
+ const new_index = @as(u32, @intCast(imports.items.len));
var out_sym = sym;
out_sym.n_strx = try self.strtab.insert(gpa, self.getSymbolName(global));
try imports.append(out_sym);
@@ -2443,9 +2443,9 @@ pub const Zld = struct {
}
}
- const nlocals = @intCast(u32, locals.items.len);
- const nexports = @intCast(u32, exports.items.len);
- const nimports = @intCast(u32, imports.items.len);
+ const nlocals = @as(u32, @intCast(locals.items.len));
+ const nexports = @as(u32, @intCast(exports.items.len));
+ const nimports = @as(u32, @intCast(imports.items.len));
const nsyms = nlocals + nexports + nimports;
const seg = self.getLinkeditSegmentPtr();
@@ -2465,7 +2465,7 @@ pub const Zld = struct {
log.debug("writing symtab from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
try self.file.pwriteAll(buffer.items, offset);
- self.symtab_cmd.symoff = @intCast(u32, offset);
+ self.symtab_cmd.symoff = @as(u32, @intCast(offset));
self.symtab_cmd.nsyms = nsyms;
return SymtabCtx{
@@ -2493,8 +2493,8 @@ pub const Zld = struct {
try self.file.pwriteAll(buffer, offset);
- self.symtab_cmd.stroff = @intCast(u32, offset);
- self.symtab_cmd.strsize = @intCast(u32, needed_size_aligned);
+ self.symtab_cmd.stroff = @as(u32, @intCast(offset));
+ self.symtab_cmd.strsize = @as(u32, @intCast(needed_size_aligned));
}
const SymtabCtx = struct {
@@ -2506,8 +2506,8 @@ pub const Zld = struct {
fn writeDysymtab(self: *Zld, ctx: SymtabCtx) !void {
const gpa = self.gpa;
- const nstubs = @intCast(u32, self.stubs.items.len);
- const ngot_entries = @intCast(u32, self.got_entries.items.len);
+ const nstubs = @as(u32, @intCast(self.stubs.items.len));
+ const ngot_entries = @as(u32, @intCast(self.got_entries.items.len));
const nindirectsyms = nstubs * 2 + ngot_entries;
const iextdefsym = ctx.nlocalsym;
const iundefsym = iextdefsym + ctx.nextdefsym;
@@ -2572,7 +2572,7 @@ pub const Zld = struct {
self.dysymtab_cmd.nextdefsym = ctx.nextdefsym;
self.dysymtab_cmd.iundefsym = iundefsym;
self.dysymtab_cmd.nundefsym = ctx.nundefsym;
- self.dysymtab_cmd.indirectsymoff = @intCast(u32, offset);
+ self.dysymtab_cmd.indirectsymoff = @as(u32, @intCast(offset));
self.dysymtab_cmd.nindirectsyms = nindirectsyms;
}
@@ -2599,8 +2599,8 @@ pub const Zld = struct {
// except for code signature data.
try self.file.pwriteAll(&[_]u8{0}, offset + needed_size - 1);
- self.codesig_cmd.dataoff = @intCast(u32, offset);
- self.codesig_cmd.datasize = @intCast(u32, needed_size);
+ self.codesig_cmd.dataoff = @as(u32, @intCast(offset));
+ self.codesig_cmd.datasize = @as(u32, @intCast(needed_size));
}
fn writeCodeSignature(self: *Zld, comp: *const Compilation, code_sig: *CodeSignature) !void {
@@ -2689,7 +2689,7 @@ pub const Zld = struct {
fn getSegmentByName(self: Zld, segname: []const u8) ?u8 {
for (self.segments.items, 0..) |seg, i| {
- if (mem.eql(u8, segname, seg.segName())) return @intCast(u8, i);
+ if (mem.eql(u8, segname, seg.segName())) return @as(u8, @intCast(i));
} else return null;
}
@@ -2714,15 +2714,15 @@ pub const Zld = struct {
// TODO investigate caching with a hashmap
for (self.sections.items(.header), 0..) |header, i| {
if (mem.eql(u8, header.segName(), segname) and mem.eql(u8, header.sectName(), sectname))
- return @intCast(u8, i);
+ return @as(u8, @intCast(i));
} else return null;
}
pub fn getSectionIndexes(self: Zld, segment_index: u8) struct { start: u8, end: u8 } {
var start: u8 = 0;
const nsects = for (self.segments.items, 0..) |seg, i| {
- if (i == segment_index) break @intCast(u8, seg.nsects);
- start += @intCast(u8, seg.nsects);
+ if (i == segment_index) break @as(u8, @intCast(seg.nsects));
+ start += @as(u8, @intCast(seg.nsects));
} else 0;
return .{ .start = start, .end = start + nsects };
}
@@ -2879,7 +2879,7 @@ pub const Zld = struct {
var name_lookup: ?DwarfInfo.SubprogramLookupByName = if (object.header.flags & macho.MH_SUBSECTIONS_VIA_SYMBOLS == 0) blk: {
var name_lookup = DwarfInfo.SubprogramLookupByName.init(gpa);
errdefer name_lookup.deinit();
- try name_lookup.ensureUnusedCapacity(@intCast(u32, object.atoms.items.len));
+ try name_lookup.ensureUnusedCapacity(@as(u32, @intCast(object.atoms.items.len)));
try debug_info.genSubprogramLookupByName(compile_unit, lookup, &name_lookup);
break :blk name_lookup;
} else null;
@@ -3069,7 +3069,7 @@ pub const Zld = struct {
@memset(&buf, '_');
scoped_log.debug(" %{d}: {s} @{x} in sect({d}), {s}", .{
sym_id,
- object.getSymbolName(@intCast(u32, sym_id)),
+ object.getSymbolName(@as(u32, @intCast(sym_id))),
sym.n_value,
sym.n_sect,
logSymAttributes(sym, &buf),
@@ -3252,7 +3252,7 @@ pub const Zld = struct {
}
};
-pub const N_DEAD: u16 = @bitCast(u16, @as(i16, -1));
+pub const N_DEAD: u16 = @as(u16, @bitCast(@as(i16, -1)));
const Section = struct {
header: macho.section_64,
@@ -3791,7 +3791,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
}
for (zld.objects.items, 0..) |*object, object_id| {
- try object.splitIntoAtoms(&zld, @intCast(u32, object_id));
+ try object.splitIntoAtoms(&zld, @as(u32, @intCast(object_id)));
}
if (gc_sections) {
@@ -3929,7 +3929,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
} else sym.n_value;
try lc_writer.writeStruct(macho.entry_point_command{
- .entryoff = @intCast(u32, addr - seg.vmaddr),
+ .entryoff = @as(u32, @intCast(addr - seg.vmaddr)),
.stacksize = options.stack_size_override orelse 0,
});
} else {
@@ -3943,7 +3943,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
});
try load_commands.writeBuildVersionLC(zld.options, lc_writer);
- const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + @intCast(u32, lc_buffer.items.len);
+ const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + @as(u32, @intCast(lc_buffer.items.len));
try lc_writer.writeStruct(zld.uuid_cmd);
try load_commands.writeLoadDylibLCs(zld.dylibs.items, zld.referenced_dylibs.keys(), lc_writer);
@@ -3954,7 +3954,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
const ncmds = load_commands.calcNumOfLCs(lc_buffer.items);
try zld.file.pwriteAll(lc_buffer.items, @sizeOf(macho.mach_header_64));
- try zld.writeHeader(ncmds, @intCast(u32, lc_buffer.items.len));
+ try zld.writeHeader(ncmds, @as(u32, @intCast(lc_buffer.items.len)));
try zld.writeUuid(comp, uuid_cmd_offset, requires_codesig);
if (codesig) |*csig| {
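
The zld hunks above repeatedly decode a dylib ordinal out of n_desc via a signed bitcast, and define N_DEAD the same way. A hedged round-trip sketch; encodeOrdinal and decodeOrdinal are illustrative helpers, not zld.zig functions:

const std = @import("std");

const N_SYMBOL_RESOLVER = 0x100; // same value as std.macho.N_SYMBOL_RESOLVER
const N_DEAD: u16 = @as(u16, @bitCast(@as(i16, -1)));

fn encodeOrdinal(ordinal: u16) u16 {
    // n_desc stores (ordinal + 1) * N_SYMBOL_RESOLVER.
    return @as(u16, @intCast(ordinal + 1)) * N_SYMBOL_RESOLVER;
}

fn decodeOrdinal(n_desc: u16) i16 {
    return @divTrunc(@as(i16, @bitCast(n_desc)), N_SYMBOL_RESOLVER);
}

test "ordinal round-trip" {
    try std.testing.expectEqual(@as(u16, 0xffff), N_DEAD);
    try std.testing.expectEqual(@as(i16, 3), decodeOrdinal(encodeOrdinal(2)));
}
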
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index b36e16452e..ad5292aa88 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -295,7 +295,7 @@ fn putFn(self: *Plan9, decl_index: Module.Decl.Index, out: FnDeclOutput) !void {
.sym_index = blk: {
try self.syms.append(gpa, undefined);
try self.syms.append(gpa, undefined);
- break :blk @intCast(u32, self.syms.items.len - 1);
+ break :blk @as(u32, @intCast(self.syms.items.len - 1));
},
};
try fn_map_res.value_ptr.functions.put(gpa, decl_index, out);
@@ -485,7 +485,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !vo
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .{ .none = {} }, .{
- .parent_atom_index = @intCast(Atom.Index, atom_idx),
+ .parent_atom_index = @as(Atom.Index, @intCast(atom_idx)),
});
const code = switch (res) {
.ok => code_buffer.items,
@@ -562,10 +562,10 @@ pub fn flush(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.Node) li
pub fn changeLine(l: *std.ArrayList(u8), delta_line: i32) !void {
if (delta_line > 0 and delta_line < 65) {
- const toappend = @intCast(u8, delta_line);
+ const toappend = @as(u8, @intCast(delta_line));
try l.append(toappend);
} else if (delta_line < 0 and delta_line > -65) {
- const toadd: u8 = @intCast(u8, -delta_line + 64);
+ const toadd: u8 = @as(u8, @intCast(-delta_line + 64));
try l.append(toadd);
} else if (delta_line != 0) {
try l.append(0);
@@ -675,7 +675,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
const out = entry.value_ptr.*;
{
// connect the previous decl to the next
- const delta_line = @intCast(i32, out.start_line) - @intCast(i32, linecount);
+ const delta_line = @as(i32, @intCast(out.start_line)) - @as(i32, @intCast(linecount));
try changeLine(&linecountinfo, delta_line);
// TODO change the pc too (maybe?)
@@ -692,7 +692,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
atom.offset = off;
log.debug("write text decl {*} ({}), lines {d} to {d}.;__GOT+0x{x} vaddr: 0x{x}", .{ decl, decl.name.fmt(&mod.intern_pool), out.start_line + 1, out.end_line, atom.got_index.? * 8, off });
if (!self.sixtyfour_bit) {
- mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
+ mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian());
} else {
mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
}
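
The GOT-write pattern in this and the following hunks narrows the offset for 32-bit targets and writes it whole for 64-bit ones. A hedged sketch of that pattern as a free function (writeGotEntry is an illustrative name, not Plan9.zig's API):

const std = @import("std");

fn writeGotEntry(got_table: []u8, index: usize, off: u64, sixtyfour_bit: bool, endian: std.builtin.Endian) void {
    if (!sixtyfour_bit) {
        // 32-bit target: 4-byte entries, offset narrowed with @intCast.
        std.mem.writeInt(u32, got_table[index * 4 ..][0..4], @as(u32, @intCast(off)), endian);
    } else {
        // 64-bit target: 8-byte entries, no narrowing needed.
        std.mem.writeInt(u64, got_table[index * 8 ..][0..8], off, endian);
    }
}

test "writeGotEntry" {
    var table = [_]u8{0} ** 8;
    writeGotEntry(&table, 1, 0x1234, false, .Little);
    try std.testing.expectEqual(@as(u32, 0x1234), std.mem.readInt(u32, table[4..8], .Little));
}
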
@@ -721,7 +721,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
text_i += code.len;
text_atom.offset = off;
if (!self.sixtyfour_bit) {
- mem.writeInt(u32, got_table[text_atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
+ mem.writeInt(u32, got_table[text_atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian());
} else {
mem.writeInt(u64, got_table[text_atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
}
@@ -749,7 +749,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
data_i += code.len;
atom.offset = off;
if (!self.sixtyfour_bit) {
- mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
+ mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian());
} else {
mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
}
@@ -772,7 +772,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
data_i += code.len;
atom.offset = off;
if (!self.sixtyfour_bit) {
- mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
+ mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian());
} else {
mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
}
@@ -792,7 +792,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
data_i += code.len;
data_atom.offset = off;
if (!self.sixtyfour_bit) {
- mem.writeInt(u32, got_table[data_atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
+ mem.writeInt(u32, got_table[data_atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian());
} else {
mem.writeInt(u64, got_table[data_atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
}
@@ -815,13 +815,13 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
// generate the header
self.hdr = .{
.magic = self.magic,
- .text = @intCast(u32, text_i),
- .data = @intCast(u32, data_i),
- .syms = @intCast(u32, syms.len),
+ .text = @as(u32, @intCast(text_i)),
+ .data = @as(u32, @intCast(data_i)),
+ .syms = @as(u32, @intCast(syms.len)),
.bss = 0,
.spsz = 0,
- .pcsz = @intCast(u32, linecountinfo.items.len),
- .entry = @intCast(u32, self.entry_val.?),
+ .pcsz = @as(u32, @intCast(linecountinfo.items.len)),
+ .entry = @as(u32, @intCast(self.entry_val.?)),
};
@memcpy(hdr_slice, self.hdr.toU8s()[0..hdr_size]);
// write the fat header for 64 bit entry points
@@ -847,13 +847,13 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
const code = source_atom.code.getCode(self);
if (reloc.pcrel) {
- const disp = @intCast(i32, target_offset) - @intCast(i32, source_atom.offset.?) - 4 - @intCast(i32, offset);
- mem.writeInt(i32, code[@intCast(usize, offset)..][0..4], @intCast(i32, disp), self.base.options.target.cpu.arch.endian());
+ const disp = @as(i32, @intCast(target_offset)) - @as(i32, @intCast(source_atom.offset.?)) - 4 - @as(i32, @intCast(offset));
+ mem.writeInt(i32, code[@as(usize, @intCast(offset))..][0..4], @as(i32, @intCast(disp)), self.base.options.target.cpu.arch.endian());
} else {
if (!self.sixtyfour_bit) {
- mem.writeInt(u32, code[@intCast(usize, offset)..][0..4], @intCast(u32, target_offset + addend), self.base.options.target.cpu.arch.endian());
+ mem.writeInt(u32, code[@as(usize, @intCast(offset))..][0..4], @as(u32, @intCast(target_offset + addend)), self.base.options.target.cpu.arch.endian());
} else {
- mem.writeInt(u64, code[@intCast(usize, offset)..][0..8], target_offset + addend, self.base.options.target.cpu.arch.endian());
+ mem.writeInt(u64, code[@as(usize, @intCast(offset))..][0..8], target_offset + addend, self.base.options.target.cpu.arch.endian());
}
}
log.debug("relocating the address of '{s}' + {d} into '{s}' + {d} (({s}[{d}] = 0x{x} + 0x{x})", .{ target_symbol.name, addend, source_atom_symbol.name, offset, source_atom_symbol.name, offset, target_offset, addend });
@@ -960,7 +960,7 @@ fn freeUnnamedConsts(self: *Plan9, decl_index: Module.Decl.Index) void {
fn createAtom(self: *Plan9) !Atom.Index {
const gpa = self.base.allocator;
- const index = @intCast(Atom.Index, self.atoms.items.len);
+ const index = @as(Atom.Index, @intCast(self.atoms.items.len));
const atom = try self.atoms.addOne(gpa);
atom.* = .{
.type = .t,
@@ -1060,7 +1060,7 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind
&required_alignment,
&code_buffer,
.none,
- .{ .parent_atom_index = @intCast(Atom.Index, atom_index) },
+ .{ .parent_atom_index = @as(Atom.Index, @intCast(atom_index)) },
);
const code = switch (res) {
.ok => code_buffer.items,
@@ -1188,7 +1188,7 @@ pub fn writeSym(self: *Plan9, w: anytype, sym: aout.Sym) !void {
// log.debug("write sym{{name: {s}, value: {x}}}", .{ sym.name, sym.value });
if (sym.type == .bad) return; // we don't want to write free'd symbols
if (!self.sixtyfour_bit) {
- try w.writeIntBig(u32, @intCast(u32, sym.value));
+ try w.writeIntBig(u32, @as(u32, @intCast(sym.value)));
} else {
try w.writeIntBig(u64, sym.value);
}
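
The pcrel fixup above is the densest instance of the rewrite in this file. A minimal sketch with an illustrative pcrelDisp helper (names assumed, not Plan9.zig's API):

const std = @import("std");

fn pcrelDisp(target_offset: u64, source_offset: u64, reloc_offset: u64) i32 {
    // Each operand is narrowed by @intCast, whose destination type
    // now comes from the result type supplied by @as.
    return @as(i32, @intCast(target_offset)) -
        @as(i32, @intCast(source_offset)) - 4 -
        @as(i32, @intCast(reloc_offset));
}

test "pcrelDisp" {
    // Branch at offset 8 of an atom placed at 0x100, targeting 0x200.
    try std.testing.expectEqual(@as(i32, 0xF4), pcrelDisp(0x200, 0x100, 8));
}
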
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 647450a603..97a05a6e4a 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -317,7 +317,7 @@ pub const StringTable = struct {
}
try table.string_data.ensureUnusedCapacity(allocator, string.len + 1);
- const offset = @intCast(u32, table.string_data.items.len);
+ const offset = @as(u32, @intCast(table.string_data.items.len));
log.debug("writing new string '{s}' at offset 0x{x}", .{ string, offset });
@@ -333,7 +333,7 @@ pub const StringTable = struct {
/// Asserts offset does not exceed bounds.
pub fn get(table: StringTable, off: u32) []const u8 {
assert(off < table.string_data.items.len);
- return mem.sliceTo(@ptrCast([*:0]const u8, table.string_data.items.ptr + off), 0);
+ return mem.sliceTo(@as([*:0]const u8, @ptrCast(table.string_data.items.ptr + off)), 0);
}
/// Returns the offset of a given string when it exists.
@@ -396,7 +396,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
// For object files we will import the stack pointer symbol
if (options.output_mode == .Obj) {
symbol.setUndefined(true);
- symbol.index = @intCast(u32, wasm_bin.imported_globals_count);
+ symbol.index = @as(u32, @intCast(wasm_bin.imported_globals_count));
wasm_bin.imported_globals_count += 1;
try wasm_bin.imports.putNoClobber(
allocator,
@@ -408,7 +408,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
},
);
} else {
- symbol.index = @intCast(u32, wasm_bin.imported_globals_count + wasm_bin.wasm_globals.items.len);
+ symbol.index = @as(u32, @intCast(wasm_bin.imported_globals_count + wasm_bin.wasm_globals.items.len));
symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
const global = try wasm_bin.wasm_globals.addOne(allocator);
global.* = .{
@@ -431,7 +431,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
};
if (options.output_mode == .Obj or options.import_table) {
symbol.setUndefined(true);
- symbol.index = @intCast(u32, wasm_bin.imported_tables_count);
+ symbol.index = @as(u32, @intCast(wasm_bin.imported_tables_count));
wasm_bin.imported_tables_count += 1;
try wasm_bin.imports.put(allocator, loc, .{
.module_name = try wasm_bin.string_table.put(allocator, wasm_bin.host_name),
@@ -439,7 +439,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
.kind = .{ .table = table },
});
} else {
- symbol.index = @intCast(u32, wasm_bin.imported_tables_count + wasm_bin.tables.items.len);
+ symbol.index = @as(u32, @intCast(wasm_bin.imported_tables_count + wasm_bin.tables.items.len));
try wasm_bin.tables.append(allocator, table);
if (options.export_table) {
symbol.setFlag(.WASM_SYM_EXPORTED);
@@ -519,7 +519,7 @@ fn createSyntheticSymbol(wasm: *Wasm, name: []const u8, tag: Symbol.Tag) !Symbol
}
fn createSyntheticSymbolOffset(wasm: *Wasm, name_offset: u32, tag: Symbol.Tag) !SymbolLoc {
- const sym_index = @intCast(u32, wasm.symbols.items.len);
+ const sym_index = @as(u32, @intCast(wasm.symbols.items.len));
const loc: SymbolLoc = .{ .index = sym_index, .file = null };
try wasm.symbols.append(wasm.base.allocator, .{
.name = name_offset,
@@ -588,7 +588,7 @@ pub fn getOrCreateAtomForDecl(wasm: *Wasm, decl_index: Module.Decl.Index) !Atom.
/// Creates a new empty `Atom` and returns its `Atom.Index`
fn createAtom(wasm: *Wasm) !Atom.Index {
- const index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+ const index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len));
const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
atom.* = Atom.empty;
atom.sym_index = try wasm.allocateSymbol();
@@ -669,7 +669,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void {
log.debug("Resolving symbols in object: '{s}'", .{object.name});
for (object.symtable, 0..) |symbol, i| {
- const sym_index = @intCast(u32, i);
+ const sym_index = @as(u32, @intCast(i));
const location: SymbolLoc = .{
.file = object_index,
.index = sym_index,
@@ -830,7 +830,7 @@ fn resolveSymbolsInArchives(wasm: *Wasm) !void {
// Symbol is found in unparsed object file within current archive.
 // Parse object and resolve symbols again before we check remaining
// undefined symbols.
- const object_file_index = @intCast(u16, wasm.objects.items.len);
+ const object_file_index = @as(u16, @intCast(wasm.objects.items.len));
var object = try archive.parseObject(wasm.base.allocator, offset.items[0]);
try wasm.objects.append(wasm.base.allocator, object);
try wasm.resolveSymbolsInObject(object_file_index);
@@ -1046,7 +1046,7 @@ fn setupTLSRelocationsFunction(wasm: *Wasm) !void {
try writer.writeByte(std.wasm.opcode(.i32_add));
try writer.writeByte(std.wasm.opcode(.global_set));
- try leb.writeULEB128(writer, wasm.imported_globals_count + @intCast(u32, wasm.wasm_globals.items.len + got_index));
+ try leb.writeULEB128(writer, wasm.imported_globals_count + @as(u32, @intCast(wasm.wasm_globals.items.len + got_index)));
}
try writer.writeByte(std.wasm.opcode(.end));
@@ -1091,7 +1091,7 @@ fn validateFeatures(
// linked object file so we can test them.
for (wasm.objects.items, 0..) |object, object_index| {
for (object.features) |feature| {
- const value = @intCast(u16, object_index) << 1 | @as(u1, 1);
+ const value = @as(u16, @intCast(object_index)) << 1 | @as(u1, 1);
switch (feature.prefix) {
.used => {
used[@intFromEnum(feature.tag)] = value;
@@ -1117,12 +1117,12 @@ fn validateFeatures(
// and insert it into the 'allowed' set. When features are not inferred,
// we validate that a used feature is allowed.
for (used, 0..) |used_set, used_index| {
- const is_enabled = @truncate(u1, used_set) != 0;
+ const is_enabled = @as(u1, @truncate(used_set)) != 0;
if (infer) {
allowed[used_index] = is_enabled;
emit_features_count.* += @intFromBool(is_enabled);
} else if (is_enabled and !allowed[used_index]) {
- log.err("feature '{}' not allowed, but used by linked object", .{@enumFromInt(types.Feature.Tag, used_index)});
+ log.err("feature '{}' not allowed, but used by linked object", .{@as(types.Feature.Tag, @enumFromInt(used_index))});
log.err(" defined in '{s}'", .{wasm.objects.items[used_set >> 1].name});
valid_feature_set = false;
}
@@ -1134,7 +1134,7 @@ fn validateFeatures(
if (wasm.base.options.shared_memory) {
const disallowed_feature = disallowed[@intFromEnum(types.Feature.Tag.shared_mem)];
- if (@truncate(u1, disallowed_feature) != 0) {
+ if (@as(u1, @truncate(disallowed_feature)) != 0) {
log.err(
"shared-memory is disallowed by '{s}' because it wasn't compiled with 'atomics' and 'bulk-memory' features enabled",
.{wasm.objects.items[disallowed_feature >> 1].name},
@@ -1163,7 +1163,7 @@ fn validateFeatures(
if (feature.prefix == .disallowed) continue; // already defined in 'disallowed' set.
// from here a feature is always used
const disallowed_feature = disallowed[@intFromEnum(feature.tag)];
- if (@truncate(u1, disallowed_feature) != 0) {
+ if (@as(u1, @truncate(disallowed_feature)) != 0) {
log.err("feature '{}' is disallowed, but used by linked object", .{feature.tag});
log.err(" disallowed by '{s}'", .{wasm.objects.items[disallowed_feature >> 1].name});
log.err(" used in '{s}'", .{object.name});
@@ -1175,9 +1175,9 @@ fn validateFeatures(
// validate the linked object file has each required feature
for (required, 0..) |required_feature, feature_index| {
- const is_required = @truncate(u1, required_feature) != 0;
+ const is_required = @as(u1, @truncate(required_feature)) != 0;
if (is_required and !object_used_features[feature_index]) {
- log.err("feature '{}' is required but not used in linked object", .{@enumFromInt(types.Feature.Tag, feature_index)});
+ log.err("feature '{}' is required but not used in linked object", .{@as(types.Feature.Tag, @enumFromInt(feature_index))});
log.err(" required by '{s}'", .{wasm.objects.items[required_feature >> 1].name});
log.err(" missing in '{s}'", .{object.name});
valid_feature_set = false;
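
validateFeatures packs each feature entry as object_index << 1 | 1 and tests the low bit with @truncate. A self-contained sketch of that bit packing (pack/isEnabled are illustrative names):

const std = @import("std");

fn pack(object_index: u16) u16 {
    // Low bit: "feature is enabled"; remaining bits: object index.
    return @as(u16, @intCast(object_index)) << 1 | @as(u1, 1);
}

fn isEnabled(set: u16) bool {
    // Old: @truncate(u1, set); new: result type comes from @as.
    return @as(u1, @truncate(set)) != 0;
}

test "feature entry packing" {
    const value = pack(5);
    try std.testing.expect(isEnabled(value));
    try std.testing.expectEqual(@as(u16, 5), value >> 1);
}
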
@@ -1333,7 +1333,7 @@ pub fn allocateSymbol(wasm: *Wasm) !u32 {
wasm.symbols.items[index] = symbol;
return index;
}
- const index = @intCast(u32, wasm.symbols.items.len);
+ const index = @as(u32, @intCast(wasm.symbols.items.len));
wasm.symbols.appendAssumeCapacity(symbol);
return index;
}
@@ -1485,7 +1485,7 @@ fn finishUpdateDecl(wasm: *Wasm, decl_index: Module.Decl.Index, code: []const u8
try atom.code.appendSlice(wasm.base.allocator, code);
try wasm.resolved_symbols.put(wasm.base.allocator, atom.symbolLoc(), {});
- atom.size = @intCast(u32, code.len);
+ atom.size = @as(u32, @intCast(code.len));
if (code.len == 0) return;
atom.alignment = decl.getAlignment(mod);
}
@@ -1589,7 +1589,7 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In
};
const atom = wasm.getAtomPtr(atom_index);
- atom.size = @intCast(u32, code.len);
+ atom.size = @as(u32, @intCast(code.len));
try atom.code.appendSlice(wasm.base.allocator, code);
return atom.sym_index;
}
@@ -1617,7 +1617,7 @@ pub fn getGlobalSymbol(wasm: *Wasm, name: []const u8, lib_name: ?[]const u8) !u3
symbol.setUndefined(true);
const sym_index = if (wasm.symbols_free_list.popOrNull()) |index| index else blk: {
- var index = @intCast(u32, wasm.symbols.items.len);
+ var index = @as(u32, @intCast(wasm.symbols.items.len));
try wasm.symbols.ensureUnusedCapacity(wasm.base.allocator, 1);
wasm.symbols.items.len += 1;
break :blk index;
@@ -1654,15 +1654,15 @@ pub fn getDeclVAddr(
try wasm.addTableFunction(target_symbol_index);
try atom.relocs.append(wasm.base.allocator, .{
.index = target_symbol_index,
- .offset = @intCast(u32, reloc_info.offset),
+ .offset = @as(u32, @intCast(reloc_info.offset)),
.relocation_type = if (is_wasm32) .R_WASM_TABLE_INDEX_I32 else .R_WASM_TABLE_INDEX_I64,
});
} else {
try atom.relocs.append(wasm.base.allocator, .{
.index = target_symbol_index,
- .offset = @intCast(u32, reloc_info.offset),
+ .offset = @as(u32, @intCast(reloc_info.offset)),
.relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_I32 else .R_WASM_MEMORY_ADDR_I64,
- .addend = @intCast(i32, reloc_info.addend),
+ .addend = @as(i32, @intCast(reloc_info.addend)),
});
}
// we do not know the final address at this point,
@@ -1840,7 +1840,7 @@ pub fn freeDecl(wasm: *Wasm, decl_index: Module.Decl.Index) void {
/// Appends a new entry to the indirect function table
pub fn addTableFunction(wasm: *Wasm, symbol_index: u32) !void {
- const index = @intCast(u32, wasm.function_table.count());
+ const index = @as(u32, @intCast(wasm.function_table.count()));
try wasm.function_table.put(wasm.base.allocator, .{ .file = null, .index = symbol_index }, index);
}
@@ -1971,7 +1971,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
const symbol = (SymbolLoc{ .file = null, .index = atom.sym_index }).getSymbol(wasm);
const final_index: u32 = switch (kind) {
.function => result: {
- const index = @intCast(u32, wasm.functions.count() + wasm.imported_functions_count);
+ const index = @as(u32, @intCast(wasm.functions.count() + wasm.imported_functions_count));
const type_index = wasm.atom_types.get(atom_index).?;
try wasm.functions.putNoClobber(
wasm.base.allocator,
@@ -1982,7 +1982,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
symbol.index = index;
if (wasm.code_section_index == null) {
- wasm.code_section_index = @intCast(u32, wasm.segments.items.len);
+ wasm.code_section_index = @as(u32, @intCast(wasm.segments.items.len));
try wasm.segments.append(wasm.base.allocator, .{
.alignment = atom.alignment,
.size = atom.size,
@@ -2020,12 +2020,12 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
const index = gop.value_ptr.*;
wasm.segments.items[index].size += atom.size;
- symbol.index = @intCast(u32, wasm.segment_info.getIndex(index).?);
+ symbol.index = @as(u32, @intCast(wasm.segment_info.getIndex(index).?));
// segment info already exists, so free its memory
wasm.base.allocator.free(segment_name);
break :result index;
} else {
- const index = @intCast(u32, wasm.segments.items.len);
+ const index = @as(u32, @intCast(wasm.segments.items.len));
var flags: u32 = 0;
if (wasm.base.options.shared_memory) {
flags |= @intFromEnum(Segment.Flag.WASM_DATA_SEGMENT_IS_PASSIVE);
@@ -2038,7 +2038,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
});
gop.value_ptr.* = index;
- const info_index = @intCast(u32, wasm.segment_info.count());
+ const info_index = @as(u32, @intCast(wasm.segment_info.count()));
try wasm.segment_info.put(wasm.base.allocator, index, segment_info);
symbol.index = info_index;
break :result index;
@@ -2074,13 +2074,13 @@ fn allocateDebugAtoms(wasm: *Wasm) !void {
const allocAtom = struct {
fn f(bin: *Wasm, maybe_index: *?u32, atom_index: Atom.Index) !void {
const index = maybe_index.* orelse idx: {
- const index = @intCast(u32, bin.segments.items.len);
+ const index = @as(u32, @intCast(bin.segments.items.len));
try bin.appendDummySegment();
maybe_index.* = index;
break :idx index;
};
const atom = bin.getAtomPtr(atom_index);
- atom.size = @intCast(u32, atom.code.items.len);
+ atom.size = @as(u32, @intCast(atom.code.items.len));
bin.symbols.items[atom.sym_index].index = index;
try bin.appendAtomAtIndex(index, atom_index);
}
@@ -2215,7 +2215,7 @@ fn setupInitFunctions(wasm: *Wasm) !void {
log.debug("appended init func '{s}'\n", .{object.string_table.get(symbol.name)});
wasm.init_funcs.appendAssumeCapacity(.{
.index = init_func.symbol_index,
- .file = @intCast(u16, file_index),
+ .file = @as(u16, @intCast(file_index)),
.priority = init_func.priority,
});
}
@@ -2248,7 +2248,7 @@ fn setupErrorsLen(wasm: *Wasm) !void {
atom.deinit(wasm);
break :blk index;
} else new_atom: {
- const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+ const atom_index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len));
try wasm.symbol_atom.put(wasm.base.allocator, loc, atom_index);
try wasm.managed_atoms.append(wasm.base.allocator, undefined);
break :new_atom atom_index;
@@ -2257,7 +2257,7 @@ fn setupErrorsLen(wasm: *Wasm) !void {
atom.* = Atom.empty;
atom.sym_index = loc.index;
atom.size = 2;
- try atom.code.writer(wasm.base.allocator).writeIntLittle(u16, @intCast(u16, errors_len));
+ try atom.code.writer(wasm.base.allocator).writeIntLittle(u16, @as(u16, @intCast(errors_len)));
try wasm.parseAtom(atom_index, .{ .data = .read_only });
}
@@ -2325,7 +2325,7 @@ fn createSyntheticFunction(
const symbol = loc.getSymbol(wasm);
const ty_index = try wasm.putOrGetFuncType(func_ty);
// create function with above type
- const func_index = wasm.imported_functions_count + @intCast(u32, wasm.functions.count());
+ const func_index = wasm.imported_functions_count + @as(u32, @intCast(wasm.functions.count()));
try wasm.functions.putNoClobber(
wasm.base.allocator,
.{ .file = null, .index = func_index },
@@ -2334,10 +2334,10 @@ fn createSyntheticFunction(
symbol.index = func_index;
// create the atom that will be output into the final binary
- const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+ const atom_index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len));
const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
atom.* = .{
- .size = @intCast(u32, function_body.items.len),
+ .size = @as(u32, @intCast(function_body.items.len)),
.offset = 0,
.sym_index = loc.index,
.file = null,
@@ -2369,10 +2369,10 @@ pub fn createFunction(
) !u32 {
const loc = try wasm.createSyntheticSymbol(symbol_name, .function);
- const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+ const atom_index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len));
const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
atom.* = .{
- .size = @intCast(u32, function_body.items.len),
+ .size = @as(u32, @intCast(function_body.items.len)),
.offset = 0,
.sym_index = loc.index,
.file = null,
@@ -2386,7 +2386,7 @@ pub fn createFunction(
symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN); // ensure function does not get exported
const section_index = wasm.code_section_index orelse idx: {
- const index = @intCast(u32, wasm.segments.items.len);
+ const index = @as(u32, @intCast(wasm.segments.items.len));
try wasm.appendDummySegment();
break :idx index;
};
@@ -2438,7 +2438,7 @@ fn initializeTLSFunction(wasm: *Wasm) !void {
try writer.writeByte(std.wasm.opcode(.misc_prefix));
try leb.writeULEB128(writer, std.wasm.miscOpcode(.memory_init));
// segment immediate
- try leb.writeULEB128(writer, @intCast(u32, data_index));
+ try leb.writeULEB128(writer, @as(u32, @intCast(data_index)));
// memory index immediate (always 0)
try leb.writeULEB128(writer, @as(u32, 0));
}
@@ -2567,16 +2567,16 @@ fn mergeSections(wasm: *Wasm) !void {
if (!gop.found_existing) {
gop.value_ptr.* = object.functions[index];
}
- symbol.index = @intCast(u32, gop.index) + wasm.imported_functions_count;
+ symbol.index = @as(u32, @intCast(gop.index)) + wasm.imported_functions_count;
},
.global => {
const original_global = object.globals[index];
- symbol.index = @intCast(u32, wasm.wasm_globals.items.len) + wasm.imported_globals_count;
+ symbol.index = @as(u32, @intCast(wasm.wasm_globals.items.len)) + wasm.imported_globals_count;
try wasm.wasm_globals.append(wasm.base.allocator, original_global);
},
.table => {
const original_table = object.tables[index];
- symbol.index = @intCast(u32, wasm.tables.items.len) + wasm.imported_tables_count;
+ symbol.index = @as(u32, @intCast(wasm.tables.items.len)) + wasm.imported_tables_count;
try wasm.tables.append(wasm.base.allocator, original_table);
},
else => unreachable,
@@ -2596,7 +2596,7 @@ fn mergeTypes(wasm: *Wasm) !void {
// type inserted. If we do this for the same function multiple times,
// it will be overwritten with the incorrect type.
var dirty = std.AutoHashMap(u32, void).init(wasm.base.allocator);
- try dirty.ensureUnusedCapacity(@intCast(u32, wasm.functions.count()));
+ try dirty.ensureUnusedCapacity(@as(u32, @intCast(wasm.functions.count())));
defer dirty.deinit();
for (wasm.resolved_symbols.keys()) |sym_loc| {
@@ -2660,10 +2660,10 @@ fn setupExports(wasm: *Wasm) !void {
break :blk try wasm.string_table.put(wasm.base.allocator, sym_name);
};
const exp: types.Export = if (symbol.tag == .data) exp: {
- const global_index = @intCast(u32, wasm.imported_globals_count + wasm.wasm_globals.items.len);
+ const global_index = @as(u32, @intCast(wasm.imported_globals_count + wasm.wasm_globals.items.len));
try wasm.wasm_globals.append(wasm.base.allocator, .{
.global_type = .{ .valtype = .i32, .mutable = false },
- .init = .{ .i32_const = @intCast(i32, symbol.virtual_address) },
+ .init = .{ .i32_const = @as(i32, @intCast(symbol.virtual_address)) },
});
break :exp .{
.name = export_name,
@@ -2734,10 +2734,10 @@ fn setupMemory(wasm: *Wasm) !void {
memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment);
memory_ptr += stack_size;
// We always put the stack pointer global at index 0
- wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr));
+ wasm.wasm_globals.items[0].init.i32_const = @as(i32, @bitCast(@as(u32, @intCast(memory_ptr))));
}
- var offset: u32 = @intCast(u32, memory_ptr);
+ var offset: u32 = @as(u32, @intCast(memory_ptr));
var data_seg_it = wasm.data_segments.iterator();
while (data_seg_it.next()) |entry| {
const segment = &wasm.segments.items[entry.value_ptr.*];
@@ -2747,26 +2747,26 @@ fn setupMemory(wasm: *Wasm) !void {
if (mem.eql(u8, entry.key_ptr.*, ".tdata")) {
if (wasm.findGlobalSymbol("__tls_size")) |loc| {
const sym = loc.getSymbol(wasm);
- sym.index = @intCast(u32, wasm.wasm_globals.items.len) + wasm.imported_globals_count;
+ sym.index = @as(u32, @intCast(wasm.wasm_globals.items.len)) + wasm.imported_globals_count;
try wasm.wasm_globals.append(wasm.base.allocator, .{
.global_type = .{ .valtype = .i32, .mutable = false },
- .init = .{ .i32_const = @intCast(i32, segment.size) },
+ .init = .{ .i32_const = @as(i32, @intCast(segment.size)) },
});
}
if (wasm.findGlobalSymbol("__tls_align")) |loc| {
const sym = loc.getSymbol(wasm);
- sym.index = @intCast(u32, wasm.wasm_globals.items.len) + wasm.imported_globals_count;
+ sym.index = @as(u32, @intCast(wasm.wasm_globals.items.len)) + wasm.imported_globals_count;
try wasm.wasm_globals.append(wasm.base.allocator, .{
.global_type = .{ .valtype = .i32, .mutable = false },
- .init = .{ .i32_const = @intCast(i32, segment.alignment) },
+ .init = .{ .i32_const = @as(i32, @intCast(segment.alignment)) },
});
}
if (wasm.findGlobalSymbol("__tls_base")) |loc| {
const sym = loc.getSymbol(wasm);
- sym.index = @intCast(u32, wasm.wasm_globals.items.len) + wasm.imported_globals_count;
+ sym.index = @as(u32, @intCast(wasm.wasm_globals.items.len)) + wasm.imported_globals_count;
try wasm.wasm_globals.append(wasm.base.allocator, .{
.global_type = .{ .valtype = .i32, .mutable = wasm.base.options.shared_memory },
- .init = .{ .i32_const = if (wasm.base.options.shared_memory) @as(u32, 0) else @intCast(i32, memory_ptr) },
+ .init = .{ .i32_const = if (wasm.base.options.shared_memory) @as(u32, 0) else @as(i32, @intCast(memory_ptr)) },
});
}
}
@@ -2782,21 +2782,21 @@ fn setupMemory(wasm: *Wasm) !void {
memory_ptr = mem.alignForward(u64, memory_ptr, 4);
const loc = try wasm.createSyntheticSymbol("__wasm_init_memory_flag", .data);
const sym = loc.getSymbol(wasm);
- sym.virtual_address = @intCast(u32, memory_ptr);
+ sym.virtual_address = @as(u32, @intCast(memory_ptr));
memory_ptr += 4;
}
if (!place_stack_first and !is_obj) {
memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment);
memory_ptr += stack_size;
- wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr));
+ wasm.wasm_globals.items[0].init.i32_const = @as(i32, @bitCast(@as(u32, @intCast(memory_ptr))));
}
// One of the linked object files has a reference to the __heap_base symbol.
// We must set its virtual address so it can be used in relocations.
if (wasm.findGlobalSymbol("__heap_base")) |loc| {
const symbol = loc.getSymbol(wasm);
- symbol.virtual_address = @intCast(u32, mem.alignForward(u64, memory_ptr, heap_alignment));
+ symbol.virtual_address = @as(u32, @intCast(mem.alignForward(u64, memory_ptr, heap_alignment)));
}
// Setup the max amount of pages
@@ -2821,12 +2821,12 @@ fn setupMemory(wasm: *Wasm) !void {
memory_ptr = mem.alignForward(u64, memory_ptr, std.wasm.page_size);
// In case we do not import memory, but define it ourselves,
// set the minimum amount of pages on the memory section.
- wasm.memories.limits.min = @intCast(u32, memory_ptr / page_size);
+ wasm.memories.limits.min = @as(u32, @intCast(memory_ptr / page_size));
log.debug("Total memory pages: {d}", .{wasm.memories.limits.min});
if (wasm.findGlobalSymbol("__heap_end")) |loc| {
const symbol = loc.getSymbol(wasm);
- symbol.virtual_address = @intCast(u32, memory_ptr);
+ symbol.virtual_address = @as(u32, @intCast(memory_ptr));
}
if (wasm.base.options.max_memory) |max_memory| {
@@ -2842,7 +2842,7 @@ fn setupMemory(wasm: *Wasm) !void {
log.err("Maximum memory exceeds maxmium amount {d}", .{max_memory_allowed});
return error.MemoryTooBig;
}
- wasm.memories.limits.max = @intCast(u32, max_memory / page_size);
+ wasm.memories.limits.max = @as(u32, @intCast(max_memory / page_size));
wasm.memories.limits.setFlag(.WASM_LIMITS_FLAG_HAS_MAX);
if (wasm.base.options.shared_memory) {
wasm.memories.limits.setFlag(.WASM_LIMITS_FLAG_IS_SHARED);
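
setupMemory finishes by rounding the bump pointer up to a whole number of wasm pages and narrowing the page count to u32. A hedged sketch under assumed addresses (not Wasm.zig state):

const std = @import("std");

test "page limit computation" {
    const page_size = std.wasm.page_size; // 64 KiB
    const memory_ptr: u64 = 3 * page_size + 1;
    const end = std.mem.alignForward(u64, memory_ptr, page_size);
    // The typed declaration provides @intCast's result type.
    const min: u32 = @intCast(end / page_size);
    try std.testing.expectEqual(@as(u32, 4), min);
}
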
@@ -2857,7 +2857,7 @@ fn setupMemory(wasm: *Wasm) !void {
pub fn getMatchingSegment(wasm: *Wasm, object_index: u16, relocatable_index: u32) !?u32 {
const object: Object = wasm.objects.items[object_index];
const relocatable_data = object.relocatable_data[relocatable_index];
- const index = @intCast(u32, wasm.segments.items.len);
+ const index = @as(u32, @intCast(wasm.segments.items.len));
switch (relocatable_data.type) {
.data => {
@@ -3023,10 +3023,10 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
const mod = wasm.base.options.module.?;
for (mod.global_error_set.keys()) |error_name_nts| {
const error_name = mod.intern_pool.stringToSlice(error_name_nts);
- const len = @intCast(u32, error_name.len + 1); // names are 0-terminated
+ const len = @as(u32, @intCast(error_name.len + 1)); // names are 0-terminated
const slice_ty = Type.slice_const_u8_sentinel_0;
- const offset = @intCast(u32, atom.code.items.len);
+ const offset = @as(u32, @intCast(atom.code.items.len));
// first we create the data for the slice of the name
try atom.code.appendNTimes(wasm.base.allocator, 0, 4); // ptr to name, will be relocated
try atom.code.writer(wasm.base.allocator).writeIntLittle(u32, len - 1);
@@ -3035,9 +3035,9 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
.index = names_atom.sym_index,
.relocation_type = .R_WASM_MEMORY_ADDR_I32,
.offset = offset,
- .addend = @intCast(i32, addend),
+ .addend = @as(i32, @intCast(addend)),
});
- atom.size += @intCast(u32, slice_ty.abiSize(mod));
+ atom.size += @as(u32, @intCast(slice_ty.abiSize(mod)));
addend += len;
// as we updated the error name table, we now store the actual name within the names atom
@@ -3063,7 +3063,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
/// This initializes the index, appends a new segment,
/// and finally, creates a managed `Atom`.
pub fn createDebugSectionForIndex(wasm: *Wasm, index: *?u32, name: []const u8) !Atom.Index {
- const new_index = @intCast(u32, wasm.segments.items.len);
+ const new_index = @as(u32, @intCast(wasm.segments.items.len));
index.* = new_index;
try wasm.appendDummySegment();
@@ -3294,7 +3294,7 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l
try wasm.parseInputFiles(positionals.items);
for (wasm.objects.items, 0..) |_, object_index| {
- try wasm.resolveSymbolsInObject(@intCast(u16, object_index));
+ try wasm.resolveSymbolsInObject(@as(u16, @intCast(object_index)));
}
var emit_features_count: u32 = 0;
@@ -3309,7 +3309,7 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l
try wasm.setupImports();
for (wasm.objects.items, 0..) |*object, object_index| {
- try object.parseIntoAtoms(gpa, @intCast(u16, object_index), wasm);
+ try object.parseIntoAtoms(gpa, @as(u16, @intCast(object_index)), wasm);
}
try wasm.allocateAtoms();
@@ -3382,7 +3382,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
try wasm.parseInputFiles(positionals.items);
for (wasm.objects.items, 0..) |_, object_index| {
- try wasm.resolveSymbolsInObject(@intCast(u16, object_index));
+ try wasm.resolveSymbolsInObject(@as(u16, @intCast(object_index)));
}
var emit_features_count: u32 = 0;
@@ -3446,7 +3446,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
}
for (wasm.objects.items, 0..) |*object, object_index| {
- try object.parseIntoAtoms(wasm.base.allocator, @intCast(u16, object_index), wasm);
+ try object.parseIntoAtoms(wasm.base.allocator, @as(u16, @intCast(object_index)), wasm);
}
try wasm.allocateAtoms();
@@ -3497,11 +3497,11 @@ fn writeToFile(
log.debug("Writing type section. Count: ({d})", .{wasm.func_types.items.len});
for (wasm.func_types.items) |func_type| {
try leb.writeULEB128(binary_writer, std.wasm.function_type);
- try leb.writeULEB128(binary_writer, @intCast(u32, func_type.params.len));
+ try leb.writeULEB128(binary_writer, @as(u32, @intCast(func_type.params.len)));
for (func_type.params) |param_ty| {
try leb.writeULEB128(binary_writer, std.wasm.valtype(param_ty));
}
- try leb.writeULEB128(binary_writer, @intCast(u32, func_type.returns.len));
+ try leb.writeULEB128(binary_writer, @as(u32, @intCast(func_type.returns.len)));
for (func_type.returns) |ret_ty| {
try leb.writeULEB128(binary_writer, std.wasm.valtype(ret_ty));
}
@@ -3511,8 +3511,8 @@ fn writeToFile(
binary_bytes.items,
header_offset,
.type,
- @intCast(u32, binary_bytes.items.len - header_offset - header_size),
- @intCast(u32, wasm.func_types.items.len),
+ @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)),
+ @as(u32, @intCast(wasm.func_types.items.len)),
);
section_count += 1;
}
@@ -3543,8 +3543,8 @@ fn writeToFile(
binary_bytes.items,
header_offset,
.import,
- @intCast(u32, binary_bytes.items.len - header_offset - header_size),
- @intCast(u32, wasm.imports.count() + @intFromBool(import_memory)),
+ @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)),
+ @as(u32, @intCast(wasm.imports.count() + @intFromBool(import_memory))),
);
section_count += 1;
}
@@ -3560,8 +3560,8 @@ fn writeToFile(
binary_bytes.items,
header_offset,
.function,
- @intCast(u32, binary_bytes.items.len - header_offset - header_size),
- @intCast(u32, wasm.functions.count()),
+ @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)),
+ @as(u32, @intCast(wasm.functions.count())),
);
section_count += 1;
}
@@ -3579,8 +3579,8 @@ fn writeToFile(
binary_bytes.items,
header_offset,
.table,
- @intCast(u32, binary_bytes.items.len - header_offset - header_size),
- @intCast(u32, wasm.tables.items.len),
+ @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)),
+ @as(u32, @intCast(wasm.tables.items.len)),
);
section_count += 1;
}
@@ -3594,7 +3594,7 @@ fn writeToFile(
binary_bytes.items,
header_offset,
.memory,
- @intCast(u32, binary_bytes.items.len - header_offset - header_size),
+ @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)),
@as(u32, 1), // wasm currently only supports 1 linear memory segment
);
section_count += 1;
@@ -3614,8 +3614,8 @@ fn writeToFile(
binary_bytes.items,
header_offset,
.global,
- @intCast(u32, binary_bytes.items.len - header_offset - header_size),
- @intCast(u32, wasm.wasm_globals.items.len),
+ @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)),
+ @as(u32, @intCast(wasm.wasm_globals.items.len)),
);
section_count += 1;
}
@@ -3626,14 +3626,14 @@ fn writeToFile(
for (wasm.exports.items) |exp| {
const name = wasm.string_table.get(exp.name);
- try leb.writeULEB128(binary_writer, @intCast(u32, name.len));
+ try leb.writeULEB128(binary_writer, @as(u32, @intCast(name.len)));
try binary_writer.writeAll(name);
try leb.writeULEB128(binary_writer, @intFromEnum(exp.kind));
try leb.writeULEB128(binary_writer, exp.index);
}
if (!import_memory) {
- try leb.writeULEB128(binary_writer, @intCast(u32, "memory".len));
+ try leb.writeULEB128(binary_writer, @as(u32, @intCast("memory".len)));
try binary_writer.writeAll("memory");
try binary_writer.writeByte(std.wasm.externalKind(.memory));
try leb.writeULEB128(binary_writer, @as(u32, 0));
@@ -3643,8 +3643,8 @@ fn writeToFile(
binary_bytes.items,
header_offset,
.@"export",
- @intCast(u32, binary_bytes.items.len - header_offset - header_size),
- @intCast(u32, wasm.exports.items.len) + @intFromBool(!import_memory),
+ @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)),
+ @as(u32, @intCast(wasm.exports.items.len)) + @intFromBool(!import_memory),
);
section_count += 1;
}
@@ -3665,7 +3665,7 @@ fn writeToFile(
if (flags == 0x02) {
try leb.writeULEB128(binary_writer, @as(u8, 0)); // represents funcref
}
- try leb.writeULEB128(binary_writer, @intCast(u32, wasm.function_table.count()));
+ try leb.writeULEB128(binary_writer, @as(u32, @intCast(wasm.function_table.count())));
var symbol_it = wasm.function_table.keyIterator();
while (symbol_it.next()) |symbol_loc_ptr| {
try leb.writeULEB128(binary_writer, symbol_loc_ptr.*.getSymbol(wasm).index);
@@ -3675,7 +3675,7 @@ fn writeToFile(
binary_bytes.items,
header_offset,
.element,
- @intCast(u32, binary_bytes.items.len - header_offset - header_size),
+ @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)),
@as(u32, 1),
);
section_count += 1;
@@ -3689,8 +3689,8 @@ fn writeToFile(
binary_bytes.items,
header_offset,
.data_count,
- @intCast(u32, binary_bytes.items.len - header_offset - header_size),
- @intCast(u32, data_segments_count),
+ @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)),
+ @as(u32, @intCast(data_segments_count)),
);
}
@@ -3731,13 +3731,13 @@ fn writeToFile(
try binary_writer.writeAll(sorted_atom.code.items);
}
- code_section_size = @intCast(u32, binary_bytes.items.len - header_offset - header_size);
+ code_section_size = @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size));
try writeVecSectionHeader(
binary_bytes.items,
header_offset,
.code,
code_section_size,
- @intCast(u32, wasm.functions.count()),
+ @as(u32, @intCast(wasm.functions.count())),
);
code_section_index = section_count;
section_count += 1;
@@ -3765,7 +3765,7 @@ fn writeToFile(
}
// when a segment is passive, it's initialized at runtime.
if (!segment.isPassive()) {
- try emitInit(binary_writer, .{ .i32_const = @bitCast(i32, segment.offset) });
+ try emitInit(binary_writer, .{ .i32_const = @as(i32, @bitCast(segment.offset)) });
}
// offset into data section
try leb.writeULEB128(binary_writer, segment.size);
@@ -3808,8 +3808,8 @@ fn writeToFile(
binary_bytes.items,
header_offset,
.data,
- @intCast(u32, binary_bytes.items.len - header_offset - header_size),
- @intCast(u32, segment_count),
+ @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)),
+ @as(u32, @intCast(segment_count)),
);
data_section_index = section_count;
section_count += 1;
@@ -3927,7 +3927,7 @@ fn emitDebugSection(binary_bytes: *std.ArrayList(u8), data: []const u8, name: []
if (data.len == 0) return;
const header_offset = try reserveCustomSectionHeader(binary_bytes);
const writer = binary_bytes.writer();
- try leb.writeULEB128(writer, @intCast(u32, name.len));
+ try leb.writeULEB128(writer, @as(u32, @intCast(name.len)));
try writer.writeAll(name);
const start = binary_bytes.items.len - header_offset;
@@ -3937,7 +3937,7 @@ fn emitDebugSection(binary_bytes: *std.ArrayList(u8), data: []const u8, name: []
try writeCustomSectionHeader(
binary_bytes.items,
header_offset,
- @intCast(u32, binary_bytes.items.len - header_offset - 6),
+ @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)),
);
}
@@ -3946,7 +3946,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void {
const writer = binary_bytes.writer();
const producers = "producers";
- try leb.writeULEB128(writer, @intCast(u32, producers.len));
+ try leb.writeULEB128(writer, @as(u32, @intCast(producers.len)));
try writer.writeAll(producers);
try leb.writeULEB128(writer, @as(u32, 2)); // 2 fields: Language + processed-by
@@ -3958,7 +3958,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void {
// language field
{
const language = "language";
- try leb.writeULEB128(writer, @intCast(u32, language.len));
+ try leb.writeULEB128(writer, @as(u32, @intCast(language.len)));
try writer.writeAll(language);
// field_value_count (TODO: Parse object files for producer sections to detect their language)
@@ -3969,7 +3969,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void {
try leb.writeULEB128(writer, @as(u32, 3)); // len of "Zig"
try writer.writeAll("Zig");
- try leb.writeULEB128(writer, @intCast(u32, version.len));
+ try leb.writeULEB128(writer, @as(u32, @intCast(version.len)));
try writer.writeAll(version);
}
}
@@ -3977,7 +3977,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void {
// processed-by field
{
const processed_by = "processed-by";
- try leb.writeULEB128(writer, @intCast(u32, processed_by.len));
+ try leb.writeULEB128(writer, @as(u32, @intCast(processed_by.len)));
try writer.writeAll(processed_by);
// field_value_count (TODO: Parse object files for producer sections to detect other used tools)
@@ -3988,7 +3988,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void {
try leb.writeULEB128(writer, @as(u32, 3)); // len of "Zig"
try writer.writeAll("Zig");
- try leb.writeULEB128(writer, @intCast(u32, version.len));
+ try leb.writeULEB128(writer, @as(u32, @intCast(version.len)));
try writer.writeAll(version);
}
}
@@ -3996,7 +3996,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void {
try writeCustomSectionHeader(
binary_bytes.items,
header_offset,
- @intCast(u32, binary_bytes.items.len - header_offset - 6),
+ @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)),
);
}
@@ -4005,17 +4005,17 @@ fn emitBuildIdSection(binary_bytes: *std.ArrayList(u8), build_id: []const u8) !v
const writer = binary_bytes.writer();
const hdr_build_id = "build_id";
- try leb.writeULEB128(writer, @intCast(u32, hdr_build_id.len));
+ try leb.writeULEB128(writer, @as(u32, @intCast(hdr_build_id.len)));
try writer.writeAll(hdr_build_id);
try leb.writeULEB128(writer, @as(u32, 1));
- try leb.writeULEB128(writer, @intCast(u32, build_id.len));
+ try leb.writeULEB128(writer, @as(u32, @intCast(build_id.len)));
try writer.writeAll(build_id);
try writeCustomSectionHeader(
binary_bytes.items,
header_offset,
- @intCast(u32, binary_bytes.items.len - header_offset - 6),
+ @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)),
);
}
@@ -4024,17 +4024,17 @@ fn emitFeaturesSection(binary_bytes: *std.ArrayList(u8), enabled_features: []con
const writer = binary_bytes.writer();
const target_features = "target_features";
- try leb.writeULEB128(writer, @intCast(u32, target_features.len));
+ try leb.writeULEB128(writer, @as(u32, @intCast(target_features.len)));
try writer.writeAll(target_features);
try leb.writeULEB128(writer, features_count);
for (enabled_features, 0..) |enabled, feature_index| {
if (enabled) {
- const feature: types.Feature = .{ .prefix = .used, .tag = @enumFromInt(types.Feature.Tag, feature_index) };
+ const feature: types.Feature = .{ .prefix = .used, .tag = @as(types.Feature.Tag, @enumFromInt(feature_index)) };
try leb.writeULEB128(writer, @intFromEnum(feature.prefix));
var buf: [100]u8 = undefined;
const string = try std.fmt.bufPrint(&buf, "{}", .{feature.tag});
- try leb.writeULEB128(writer, @intCast(u32, string.len));
+ try leb.writeULEB128(writer, @as(u32, @intCast(string.len)));
try writer.writeAll(string);
}
}
@@ -4042,7 +4042,7 @@ fn emitFeaturesSection(binary_bytes: *std.ArrayList(u8), enabled_features: []con
try writeCustomSectionHeader(
binary_bytes.items,
header_offset,
- @intCast(u32, binary_bytes.items.len - header_offset - 6),
+ @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)),
);
}
@@ -4092,7 +4092,7 @@ fn emitNameSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), arena: std.mem
const header_offset = try reserveCustomSectionHeader(binary_bytes);
const writer = binary_bytes.writer();
- try leb.writeULEB128(writer, @intCast(u32, "name".len));
+ try leb.writeULEB128(writer, @as(u32, @intCast("name".len)));
try writer.writeAll("name");
try wasm.emitNameSubsection(.function, funcs.values(), writer);
@@ -4102,7 +4102,7 @@ fn emitNameSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), arena: std.mem
try writeCustomSectionHeader(
binary_bytes.items,
header_offset,
- @intCast(u32, binary_bytes.items.len - header_offset - 6),
+ @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)),
);
}
@@ -4112,17 +4112,17 @@ fn emitNameSubsection(wasm: *Wasm, section_id: std.wasm.NameSubsection, names: a
defer section_list.deinit();
const sub_writer = section_list.writer();
- try leb.writeULEB128(sub_writer, @intCast(u32, names.len));
+ try leb.writeULEB128(sub_writer, @as(u32, @intCast(names.len)));
for (names) |name| {
log.debug("Emit symbol '{s}' type({s})", .{ name.name, @tagName(section_id) });
try leb.writeULEB128(sub_writer, name.index);
- try leb.writeULEB128(sub_writer, @intCast(u32, name.name.len));
+ try leb.writeULEB128(sub_writer, @as(u32, @intCast(name.name.len)));
try sub_writer.writeAll(name.name);
}
// From now, write to the actual writer
try leb.writeULEB128(writer, @intFromEnum(section_id));
- try leb.writeULEB128(writer, @intCast(u32, section_list.items.len));
+ try leb.writeULEB128(writer, @as(u32, @intCast(section_list.items.len)));
try writer.writeAll(section_list.items);
}
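
`emitNameSubsection` buffers each subsection in a scratch list first because the id/size prefix must carry the payload length before the payload itself. A hedged sketch of the same two-pass shape, with `writeSubsection` as a hypothetical helper:

    const std = @import("std");

    // Hypothetical helper: once the payload bytes exist, the prefix is trivial.
    fn writeSubsection(writer: anytype, id: u8, payload: []const u8) !void {
        try std.leb.writeULEB128(writer, id);
        try std.leb.writeULEB128(writer, @as(u32, @intCast(payload.len)));
        try writer.writeAll(payload);
    }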
@@ -4146,11 +4146,11 @@ fn emitInit(writer: anytype, init_expr: std.wasm.InitExpression) !void {
},
.f32_const => |val| {
try writer.writeByte(std.wasm.opcode(.f32_const));
- try writer.writeIntLittle(u32, @bitCast(u32, val));
+ try writer.writeIntLittle(u32, @as(u32, @bitCast(val)));
},
.f64_const => |val| {
try writer.writeByte(std.wasm.opcode(.f64_const));
- try writer.writeIntLittle(u64, @bitCast(u64, val));
+ try writer.writeIntLittle(u64, @as(u64, @bitCast(val)));
},
.global_get => |val| {
try writer.writeByte(std.wasm.opcode(.global_get));
@@ -4162,11 +4162,11 @@ fn emitInit(writer: anytype, init_expr: std.wasm.InitExpression) !void {
fn emitImport(wasm: *Wasm, writer: anytype, import: types.Import) !void {
const module_name = wasm.string_table.get(import.module_name);
- try leb.writeULEB128(writer, @intCast(u32, module_name.len));
+ try leb.writeULEB128(writer, @as(u32, @intCast(module_name.len)));
try writer.writeAll(module_name);
const name = wasm.string_table.get(import.name);
- try leb.writeULEB128(writer, @intCast(u32, name.len));
+ try leb.writeULEB128(writer, @as(u32, @intCast(name.len)));
try writer.writeAll(name);
try writer.writeByte(@intFromEnum(import.kind));
@@ -4594,7 +4594,7 @@ fn linkWithLLD(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
fn reserveVecSectionHeader(bytes: *std.ArrayList(u8)) !u32 {
// section id + fixed leb contents size + fixed leb vector length
const header_size = 1 + 5 + 5;
- const offset = @intCast(u32, bytes.items.len);
+ const offset = @as(u32, @intCast(bytes.items.len));
try bytes.appendSlice(&[_]u8{0} ** header_size);
return offset;
}
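
`reserveVecSectionHeader` leaves 1 + 5 + 5 zero bytes because the size and count fields are later backpatched as 5-byte fixed-width LEB128, an encoding that occupies the same width no matter how small the value is. A sketch of that property:

    const std = @import("std");

    test "fixed-width LEB128 always fills its reserved slot" {
        var buf: [5]u8 = undefined;
        std.leb.writeUnsignedFixed(5, &buf, @as(u32, 305)); // always 5 bytes
        var fbs = std.io.fixedBufferStream(&buf);
        try std.testing.expectEqual(@as(u32, 305), try std.leb.readULEB128(u32, fbs.reader()));
    }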
@@ -4602,7 +4602,7 @@ fn reserveVecSectionHeader(bytes: *std.ArrayList(u8)) !u32 {
fn reserveCustomSectionHeader(bytes: *std.ArrayList(u8)) !u32 {
// unlike a regular section, we don't emit the count
const header_size = 1 + 5;
- const offset = @intCast(u32, bytes.items.len);
+ const offset = @as(u32, @intCast(bytes.items.len));
try bytes.appendSlice(&[_]u8{0} ** header_size);
return offset;
}
@@ -4638,7 +4638,7 @@ fn emitLinkSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table:
try wasm.emitSymbolTable(binary_bytes, symbol_table);
try wasm.emitSegmentInfo(binary_bytes);
- const size = @intCast(u32, binary_bytes.items.len - offset - 6);
+ const size = @as(u32, @intCast(binary_bytes.items.len - offset - 6));
try writeCustomSectionHeader(binary_bytes.items, offset, size);
}
@@ -4661,7 +4661,7 @@ fn emitSymbolTable(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table:
const sym_name = if (wasm.export_names.get(sym_loc)) |exp_name| wasm.string_table.get(exp_name) else sym_loc.getName(wasm);
switch (symbol.tag) {
.data => {
- try leb.writeULEB128(writer, @intCast(u32, sym_name.len));
+ try leb.writeULEB128(writer, @as(u32, @intCast(sym_name.len)));
try writer.writeAll(sym_name);
if (symbol.isDefined()) {
@@ -4678,7 +4678,7 @@ fn emitSymbolTable(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table:
else => {
try leb.writeULEB128(writer, symbol.index);
if (symbol.isDefined()) {
- try leb.writeULEB128(writer, @intCast(u32, sym_name.len));
+ try leb.writeULEB128(writer, @as(u32, @intCast(sym_name.len)));
try writer.writeAll(sym_name);
}
},
@@ -4686,7 +4686,7 @@ fn emitSymbolTable(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table:
}
var buf: [10]u8 = undefined;
- leb.writeUnsignedFixed(5, buf[0..5], @intCast(u32, binary_bytes.items.len - table_offset + 5));
+ leb.writeUnsignedFixed(5, buf[0..5], @as(u32, @intCast(binary_bytes.items.len - table_offset + 5)));
leb.writeUnsignedFixed(5, buf[5..], symbol_count);
try binary_bytes.insertSlice(table_offset, &buf);
}
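
Here no header space was reserved up front, so the two fixed-width fields are built in a scratch buffer and spliced in afterwards; the `+ 5` in the size accounts for the count field, which itself lands inside the measured region. A minimal sketch with a hypothetical `spliceHeader`:

    const std = @import("std");

    // Hypothetical helper mirroring the splice-in scheme above.
    fn spliceHeader(list: *std.ArrayList(u8), offset: usize, size: u32, count: u32) !void {
        var buf: [10]u8 = undefined;
        std.leb.writeUnsignedFixed(5, buf[0..5], size);
        std.leb.writeUnsignedFixed(5, buf[5..], count);
        try list.insertSlice(offset, &buf); // shifts the payload right by 10 bytes
    }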
@@ -4696,28 +4696,28 @@ fn emitSegmentInfo(wasm: *Wasm, binary_bytes: *std.ArrayList(u8)) !void {
try leb.writeULEB128(writer, @intFromEnum(types.SubsectionType.WASM_SEGMENT_INFO));
const segment_offset = binary_bytes.items.len;
- try leb.writeULEB128(writer, @intCast(u32, wasm.segment_info.count()));
+ try leb.writeULEB128(writer, @as(u32, @intCast(wasm.segment_info.count())));
for (wasm.segment_info.values()) |segment_info| {
log.debug("Emit segment: {s} align({d}) flags({b})", .{
segment_info.name,
@ctz(segment_info.alignment),
segment_info.flags,
});
- try leb.writeULEB128(writer, @intCast(u32, segment_info.name.len));
+ try leb.writeULEB128(writer, @as(u32, @intCast(segment_info.name.len)));
try writer.writeAll(segment_info.name);
try leb.writeULEB128(writer, @ctz(segment_info.alignment));
try leb.writeULEB128(writer, segment_info.flags);
}
var buf: [5]u8 = undefined;
- leb.writeUnsignedFixed(5, &buf, @intCast(u32, binary_bytes.items.len - segment_offset));
+ leb.writeUnsignedFixed(5, &buf, @as(u32, @intCast(binary_bytes.items.len - segment_offset)));
try binary_bytes.insertSlice(segment_offset, &buf);
}
pub fn getULEB128Size(uint_value: anytype) u32 {
const T = @TypeOf(uint_value);
const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
- var value = @intCast(U, uint_value);
+ var value = @as(U, @intCast(uint_value));
var size: u32 = 0;
while (value != 0) : (size += 1) {
@@ -4739,7 +4739,7 @@ fn emitCodeRelocations(
// write custom section information
const name = "reloc.CODE";
- try leb.writeULEB128(writer, @intCast(u32, name.len));
+ try leb.writeULEB128(writer, @as(u32, @intCast(name.len)));
try writer.writeAll(name);
try leb.writeULEB128(writer, section_index);
const reloc_start = binary_bytes.items.len;
@@ -4769,7 +4769,7 @@ fn emitCodeRelocations(
var buf: [5]u8 = undefined;
leb.writeUnsignedFixed(5, &buf, count);
try binary_bytes.insertSlice(reloc_start, &buf);
- const size = @intCast(u32, binary_bytes.items.len - header_offset - 6);
+ const size = @as(u32, @intCast(binary_bytes.items.len - header_offset - 6));
try writeCustomSectionHeader(binary_bytes.items, header_offset, size);
}
@@ -4785,7 +4785,7 @@ fn emitDataRelocations(
// write custom section information
const name = "reloc.DATA";
- try leb.writeULEB128(writer, @intCast(u32, name.len));
+ try leb.writeULEB128(writer, @as(u32, @intCast(name.len)));
try writer.writeAll(name);
try leb.writeULEB128(writer, section_index);
const reloc_start = binary_bytes.items.len;
@@ -4821,7 +4821,7 @@ fn emitDataRelocations(
var buf: [5]u8 = undefined;
leb.writeUnsignedFixed(5, &buf, count);
try binary_bytes.insertSlice(reloc_start, &buf);
- const size = @intCast(u32, binary_bytes.items.len - header_offset - 6);
+ const size = @as(u32, @intCast(binary_bytes.items.len - header_offset - 6));
try writeCustomSectionHeader(binary_bytes.items, header_offset, size);
}
@@ -4852,7 +4852,7 @@ pub fn putOrGetFuncType(wasm: *Wasm, func_type: std.wasm.Type) !u32 {
}
// functype does not exist.
- const index = @intCast(u32, wasm.func_types.items.len);
+ const index = @as(u32, @intCast(wasm.func_types.items.len));
const params = try wasm.base.allocator.dupe(std.wasm.Valtype, func_type.params);
errdefer wasm.base.allocator.free(params);
const returns = try wasm.base.allocator.dupe(std.wasm.Valtype, func_type.returns);
diff --git a/src/link/Wasm/Atom.zig b/src/link/Wasm/Atom.zig
index f8092c6db1..64e9ebaaa1 100644
--- a/src/link/Wasm/Atom.zig
+++ b/src/link/Wasm/Atom.zig
@@ -114,7 +114,7 @@ pub fn resolveRelocs(atom: *Atom, wasm_bin: *const Wasm) void {
.R_WASM_GLOBAL_INDEX_I32,
.R_WASM_MEMORY_ADDR_I32,
.R_WASM_SECTION_OFFSET_I32,
- => std.mem.writeIntLittle(u32, atom.code.items[reloc.offset..][0..4], @intCast(u32, value)),
+ => std.mem.writeIntLittle(u32, atom.code.items[reloc.offset..][0..4], @as(u32, @intCast(value))),
.R_WASM_TABLE_INDEX_I64,
.R_WASM_MEMORY_ADDR_I64,
=> std.mem.writeIntLittle(u64, atom.code.items[reloc.offset..][0..8], value),
@@ -127,7 +127,7 @@ pub fn resolveRelocs(atom: *Atom, wasm_bin: *const Wasm) void {
.R_WASM_TABLE_NUMBER_LEB,
.R_WASM_TYPE_INDEX_LEB,
.R_WASM_MEMORY_ADDR_TLS_SLEB,
- => leb.writeUnsignedFixed(5, atom.code.items[reloc.offset..][0..5], @intCast(u32, value)),
+ => leb.writeUnsignedFixed(5, atom.code.items[reloc.offset..][0..5], @as(u32, @intCast(value))),
.R_WASM_MEMORY_ADDR_LEB64,
.R_WASM_MEMORY_ADDR_SLEB64,
.R_WASM_TABLE_INDEX_SLEB64,
@@ -173,24 +173,24 @@ fn relocationValue(atom: Atom, relocation: types.Relocation, wasm_bin: *const Wa
if (symbol.isUndefined()) {
return 0;
}
- const va = @intCast(i64, symbol.virtual_address);
- return @intCast(u32, va + relocation.addend);
+ const va = @as(i64, @intCast(symbol.virtual_address));
+ return @as(u32, @intCast(va + relocation.addend));
},
.R_WASM_EVENT_INDEX_LEB => return symbol.index,
.R_WASM_SECTION_OFFSET_I32 => {
const target_atom_index = wasm_bin.symbol_atom.get(target_loc).?;
const target_atom = wasm_bin.getAtom(target_atom_index);
- const rel_value = @intCast(i32, target_atom.offset) + relocation.addend;
- return @intCast(u32, rel_value);
+ const rel_value = @as(i32, @intCast(target_atom.offset)) + relocation.addend;
+ return @as(u32, @intCast(rel_value));
},
.R_WASM_FUNCTION_OFFSET_I32 => {
const target_atom_index = wasm_bin.symbol_atom.get(target_loc) orelse {
- return @bitCast(u32, @as(i32, -1));
+ return @as(u32, @bitCast(@as(i32, -1)));
};
const target_atom = wasm_bin.getAtom(target_atom_index);
const offset: u32 = 11 + Wasm.getULEB128Size(target_atom.size); // Header (11 bytes fixed-size) + body size (leb-encoded)
- const rel_value = @intCast(i32, target_atom.offset + offset) + relocation.addend;
- return @intCast(u32, rel_value);
+ const rel_value = @as(i32, @intCast(target_atom.offset + offset)) + relocation.addend;
+ return @as(u32, @intCast(rel_value));
},
.R_WASM_MEMORY_ADDR_TLS_SLEB,
.R_WASM_MEMORY_ADDR_TLS_SLEB64,
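
For the memory-address relocations above, the `u32` virtual address is widened to `i64` before the signed addend is applied, so an intermediate negative or out-of-range value cannot wrap silently; the final `@intCast` back to `u32` then asserts the result is representable. A sketch with assumed values:

    const std = @import("std");

    test "widen before applying a signed addend" {
        const virtual_address: u32 = 8; // assumed symbol address
        const addend: i32 = -4; // assumed relocation addend
        const va = @as(i64, virtual_address); // u32 always fits in i64
        const relocated: u32 = @intCast(va + addend);
        try std.testing.expectEqual(@as(u32, 4), relocated);
    }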
diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig
index db96381938..8e4df417ae 100644
--- a/src/link/Wasm/Object.zig
+++ b/src/link/Wasm/Object.zig
@@ -93,7 +93,7 @@ const RelocatableData = struct {
const data_alignment = object.segment_info[relocatable_data.index].alignment;
if (data_alignment == 0) return 1;
// Decode from power of 2 to natural alignment
- return @as(u32, 1) << @intCast(u5, data_alignment);
+ return @as(u32, 1) << @as(u5, @intCast(data_alignment));
}
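
The shift above is the usual exponent-to-bytes decode for Wasm segment alignment: the stored value is log2 of the natural alignment, and an exponent of 0 yields an alignment of 1. For instance:

    const std = @import("std");

    test "decode power-of-two alignment" {
        const data_alignment: u32 = 4; // assumed encoded exponent
        const natural = @as(u32, 1) << @as(u5, @intCast(data_alignment));
        try std.testing.expectEqual(@as(u32, 16), natural); // 2^4 bytes
    }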
/// Returns the symbol kind that corresponds to the relocatable section
@@ -130,7 +130,7 @@ pub fn create(gpa: Allocator, file: std.fs.File, name: []const u8, maybe_max_siz
const size = maybe_max_size orelse size: {
errdefer gpa.free(object.name);
const stat = try file.stat();
- break :size @intCast(usize, stat.size);
+ break :size @as(usize, @intCast(stat.size));
};
const file_contents = try gpa.alloc(u8, size);
@@ -365,7 +365,7 @@ fn Parser(comptime ReaderType: type) type {
const len = try readLeb(u32, parser.reader.reader());
var limited_reader = std.io.limitedReader(parser.reader.reader(), len);
const reader = limited_reader.reader();
- switch (@enumFromInt(std.wasm.Section, byte)) {
+ switch (@as(std.wasm.Section, @enumFromInt(byte))) {
.custom => {
const name_len = try readLeb(u32, reader);
const name = try gpa.alloc(u8, name_len);
@@ -375,13 +375,13 @@ fn Parser(comptime ReaderType: type) type {
if (std.mem.eql(u8, name, "linking")) {
is_object_file.* = true;
parser.object.relocatable_data = relocatable_data.items; // at this point no new relocatable sections will appear so we're free to store them.
- try parser.parseMetadata(gpa, @intCast(usize, reader.context.bytes_left));
+ try parser.parseMetadata(gpa, @as(usize, @intCast(reader.context.bytes_left)));
} else if (std.mem.startsWith(u8, name, "reloc")) {
try parser.parseRelocations(gpa);
} else if (std.mem.eql(u8, name, "target_features")) {
try parser.parseFeatures(gpa);
} else if (std.mem.startsWith(u8, name, ".debug")) {
- const debug_size = @intCast(u32, reader.context.bytes_left);
+ const debug_size = @as(u32, @intCast(reader.context.bytes_left));
const debug_content = try gpa.alloc(u8, debug_size);
errdefer gpa.free(debug_content);
try reader.readNoEof(debug_content);
@@ -514,7 +514,7 @@ fn Parser(comptime ReaderType: type) type {
const count = try readLeb(u32, reader);
while (index < count) : (index += 1) {
const code_len = try readLeb(u32, reader);
- const offset = @intCast(u32, start - reader.context.bytes_left);
+ const offset = @as(u32, @intCast(start - reader.context.bytes_left));
const data = try gpa.alloc(u8, code_len);
errdefer gpa.free(data);
try reader.readNoEof(data);
@@ -538,7 +538,7 @@ fn Parser(comptime ReaderType: type) type {
_ = flags; // TODO: Do we need to check flags to detect passive/active memory?
_ = data_offset;
const data_len = try readLeb(u32, reader);
- const offset = @intCast(u32, start - reader.context.bytes_left);
+ const offset = @as(u32, @intCast(start - reader.context.bytes_left));
const data = try gpa.alloc(u8, data_len);
errdefer gpa.free(data);
try reader.readNoEof(data);
@@ -645,7 +645,7 @@ fn Parser(comptime ReaderType: type) type {
/// such as access to the `import` section to find the name of a symbol.
fn parseSubsection(parser: *ObjectParser, gpa: Allocator, reader: anytype) !void {
const sub_type = try leb.readULEB128(u8, reader);
- log.debug("Found subsection: {s}", .{@tagName(@enumFromInt(types.SubsectionType, sub_type))});
+ log.debug("Found subsection: {s}", .{@tagName(@as(types.SubsectionType, @enumFromInt(sub_type)))});
const payload_len = try leb.readULEB128(u32, reader);
if (payload_len == 0) return;
@@ -655,7 +655,7 @@ fn Parser(comptime ReaderType: type) type {
// every subsection contains a 'count' field
const count = try leb.readULEB128(u32, limited_reader);
- switch (@enumFromInt(types.SubsectionType, sub_type)) {
+ switch (@as(types.SubsectionType, @enumFromInt(sub_type))) {
.WASM_SEGMENT_INFO => {
const segments = try gpa.alloc(types.Segment, count);
errdefer gpa.free(segments);
@@ -714,7 +714,7 @@ fn Parser(comptime ReaderType: type) type {
errdefer gpa.free(symbols);
for (symbols) |*symbol| {
symbol.* = .{
- .kind = @enumFromInt(types.ComdatSym.Type, try leb.readULEB128(u8, reader)),
+ .kind = @as(types.ComdatSym.Type, @enumFromInt(try leb.readULEB128(u8, reader))),
.index = try leb.readULEB128(u32, reader),
};
}
@@ -758,7 +758,7 @@ fn Parser(comptime ReaderType: type) type {
/// requires access to `Object` to find the name of a symbol when it's
/// an import and flag `WASM_SYM_EXPLICIT_NAME` is not set.
fn parseSymbol(parser: *ObjectParser, gpa: Allocator, reader: anytype) !Symbol {
- const tag = @enumFromInt(Symbol.Tag, try leb.readULEB128(u8, reader));
+ const tag = @as(Symbol.Tag, @enumFromInt(try leb.readULEB128(u8, reader)));
const flags = try leb.readULEB128(u32, reader);
var symbol: Symbol = .{
.flags = flags,
@@ -846,7 +846,7 @@ fn readLeb(comptime T: type, reader: anytype) !T {
/// Asserts `T` is an enum
fn readEnum(comptime T: type, reader: anytype) !T {
switch (@typeInfo(T)) {
- .Enum => |enum_type| return @enumFromInt(T, try readLeb(enum_type.tag_type, reader)),
+ .Enum => |enum_type| return @as(T, @enumFromInt(try readLeb(enum_type.tag_type, reader))),
else => @compileError("T must be an enum. Instead was given type " ++ @typeName(T)),
}
}
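
`readEnum` relies on the same result-type inference: `@enumFromInt` no longer names the enum, taking it from the return type instead. A small sketch with an illustrative enum (real code should validate the tag first, since an out-of-range value is safety-checked undefined behavior):

    const std = @import("std");

    test "enum from an integer tag" {
        const Section = enum(u8) { custom = 0, type = 1, import = 2 }; // illustrative
        const byte: u8 = 1;
        const section: Section = @enumFromInt(byte); // result type inferred
        try std.testing.expectEqual(Section.type, section);
    }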
@@ -867,7 +867,7 @@ fn readLimits(reader: anytype) !std.wasm.Limits {
fn readInit(reader: anytype) !std.wasm.InitExpression {
const opcode = try reader.readByte();
- const init_expr: std.wasm.InitExpression = switch (@enumFromInt(std.wasm.Opcode, opcode)) {
+ const init_expr: std.wasm.InitExpression = switch (@as(std.wasm.Opcode, @enumFromInt(opcode))) {
.i32_const => .{ .i32_const = try readLeb(i32, reader) },
.global_get => .{ .global_get = try readLeb(u32, reader) },
else => @panic("TODO: initexpression for other opcodes"),
@@ -899,7 +899,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
switch (symbol.tag) {
.function, .data, .section => if (!symbol.isUndefined()) {
const gop = try symbol_for_segment.getOrPut(.{ .kind = symbol.tag, .index = symbol.index });
- const sym_idx = @intCast(u32, symbol_index);
+ const sym_idx = @as(u32, @intCast(symbol_index));
if (!gop.found_existing) {
gop.value_ptr.* = std.ArrayList(u32).init(gpa);
}
@@ -910,11 +910,11 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
}
for (object.relocatable_data, 0..) |relocatable_data, index| {
- const final_index = (try wasm_bin.getMatchingSegment(object_index, @intCast(u32, index))) orelse {
+ const final_index = (try wasm_bin.getMatchingSegment(object_index, @as(u32, @intCast(index)))) orelse {
continue; // found unknown section, so skip parsing into atom as we do not know how to handle it.
};
- const atom_index = @intCast(Atom.Index, wasm_bin.managed_atoms.items.len);
+ const atom_index = @as(Atom.Index, @intCast(wasm_bin.managed_atoms.items.len));
const atom = try wasm_bin.managed_atoms.addOne(gpa);
atom.* = Atom.empty;
atom.file = object_index;
diff --git a/src/link/Wasm/types.zig b/src/link/Wasm/types.zig
index 9bf54f25c3..cce5cdef49 100644
--- a/src/link/Wasm/types.zig
+++ b/src/link/Wasm/types.zig
@@ -205,7 +205,7 @@ pub const Feature = struct {
/// From a given cpu feature, returns its linker feature
pub fn fromCpuFeature(feature: std.Target.wasm.Feature) Tag {
- return @enumFromInt(Tag, @intFromEnum(feature));
+ return @as(Tag, @enumFromInt(@intFromEnum(feature)));
}
pub fn format(tag: Tag, comptime fmt: []const u8, opt: std.fmt.FormatOptions, writer: anytype) !void {
diff --git a/src/link/strtab.zig b/src/link/strtab.zig
index abb58defef..0d71c9bf83 100644
--- a/src/link/strtab.zig
+++ b/src/link/strtab.zig
@@ -45,7 +45,7 @@ pub fn StringTable(comptime log_scope: @Type(.EnumLiteral)) type {
const off = entry.key_ptr.*;
const save = entry.value_ptr.*;
if (!save) continue;
- const new_off = @intCast(u32, buffer.items.len);
+ const new_off = @as(u32, @intCast(buffer.items.len));
buffer.appendSliceAssumeCapacity(self.getAssumeExists(off));
idx_map.putAssumeCapacityNoClobber(off, new_off);
}
@@ -73,7 +73,7 @@ pub fn StringTable(comptime log_scope: @Type(.EnumLiteral)) type {
}
try self.buffer.ensureUnusedCapacity(gpa, string.len + 1);
- const new_off = @intCast(u32, self.buffer.items.len);
+ const new_off = @as(u32, @intCast(self.buffer.items.len));
log.debug("writing new string '{s}' at offset 0x{x}", .{ string, new_off });
@@ -103,7 +103,7 @@ pub fn StringTable(comptime log_scope: @Type(.EnumLiteral)) type {
pub fn get(self: Self, off: u32) ?[]const u8 {
log.debug("getting string at 0x{x}", .{off});
if (off >= self.buffer.items.len) return null;
- return mem.sliceTo(@ptrCast([*:0]const u8, self.buffer.items.ptr + off), 0);
+ return mem.sliceTo(@as([*:0]const u8, @ptrCast(self.buffer.items.ptr + off)), 0);
}
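
The string table stores all names in one flat, 0-terminated byte buffer, so `get` only has to cast the offset pointer to a sentinel-terminated pointer and let `mem.sliceTo` find the terminator. A self-contained sketch of the same lookup:

    const std = @import("std");

    test "read a 0-terminated string out of a flat buffer" {
        const buffer: []const u8 = "foo\x00bar"; // the literal carries a trailing 0
        const off: usize = 4; // assumed offset of "bar"
        const ptr: [*:0]const u8 = @ptrCast(buffer[off..].ptr);
        try std.testing.expectEqualStrings("bar", std.mem.sliceTo(ptr, 0));
    }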
pub fn getAssumeExists(self: Self, off: u32) []const u8 {
diff --git a/src/link/table_section.zig b/src/link/table_section.zig
index 891f3b1a50..2c70b03f42 100644
--- a/src/link/table_section.zig
+++ b/src/link/table_section.zig
@@ -18,7 +18,7 @@ pub fn TableSection(comptime Entry: type) type {
break :blk index;
} else {
log.debug(" (allocating entry at index {d})", .{self.entries.items.len});
- const index = @intCast(u32, self.entries.items.len);
+ const index = @as(u32, @intCast(self.entries.items.len));
_ = self.entries.addOneAssumeCapacity();
break :blk index;
}
diff --git a/src/link/tapi/Tokenizer.zig b/src/link/tapi/Tokenizer.zig
index df46bb7d83..eb1ffc0e81 100644
--- a/src/link/tapi/Tokenizer.zig
+++ b/src/link/tapi/Tokenizer.zig
@@ -67,11 +67,11 @@ pub const TokenIterator = struct {
}
pub fn seekBy(self: *TokenIterator, offset: isize) void {
- const new_pos = @bitCast(isize, self.pos) + offset;
+ const new_pos = @as(isize, @bitCast(self.pos)) + offset;
if (new_pos < 0) {
self.pos = 0;
} else {
- self.pos = @intCast(usize, new_pos);
+ self.pos = @as(usize, @intCast(new_pos));
}
}
};
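
`seekBy` clamps instead of wrapping: the unsigned position is reinterpreted as `isize` so a negative offset can be applied, and anything below zero snaps back to the start. A small sketch of the same arithmetic outside the struct:

    const std = @import("std");

    test "seekBy-style clamping at the start" {
        var pos: usize = 2; // assumed iterator position
        const offset: isize = -5;
        const new_pos = @as(isize, @bitCast(pos)) + offset;
        pos = if (new_pos < 0) 0 else @as(usize, @intCast(new_pos));
        try std.testing.expectEqual(@as(usize, 0), pos);
    }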