Diffstat (limited to 'src/link/MachO')
-rw-r--r--  src/link/MachO/Archive.zig             2
-rw-r--r--  src/link/MachO/CodeSignature.zig      18
-rw-r--r--  src/link/MachO/DebugSymbols.zig       42
-rw-r--r--  src/link/MachO/DwarfInfo.zig           8
-rw-r--r--  src/link/MachO/Dylib.zig              12
-rw-r--r--  src/link/MachO/Object.zig             64
-rw-r--r--  src/link/MachO/Relocation.zig         46
-rw-r--r--  src/link/MachO/Trie.zig                2
-rw-r--r--  src/link/MachO/UnwindInfo.zig        108
-rw-r--r--  src/link/MachO/ZldAtom.zig           120
-rw-r--r--  src/link/MachO/dead_strip.zig         24
-rw-r--r--  src/link/MachO/dyld_info/Rebase.zig   10
-rw-r--r--  src/link/MachO/dyld_info/bind.zig     36
-rw-r--r--  src/link/MachO/eh_frame.zig           72
-rw-r--r--  src/link/MachO/load_commands.zig      24
-rw-r--r--  src/link/MachO/thunks.zig             12
-rw-r--r--  src/link/MachO/zld.zig               144
17 files changed, 372 insertions(+), 372 deletions(-)
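
The changes in this commit are mechanical: Zig 0.11 removed the destination-type parameter from the cast builtins (@intCast, @truncate, @bitCast, @ptrCast, @alignCast, @enumFromInt), which now infer their result type from context, usually supplied via @as. A minimal before/after sketch (illustrative, not taken from this commit):

    const std = @import("std");

    test "cast builtins now infer their result type from @as" {
        const items = [_]u8{ 1, 2, 3 };
        // Old (pre-0.11): const len = @intCast(u32, items.len);
        const len = @as(u32, @intCast(items.len));
        try std.testing.expectEqual(@as(u32, 3), len);
    }

Every hunk below is an instance of this rewrite; only the Dylib.zig hunk changes anything beyond argument order (see the note after it).
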
diff --git a/src/link/MachO/Archive.zig b/src/link/MachO/Archive.zig
index d222394ad5..5276bf041e 100644
--- a/src/link/MachO/Archive.zig
+++ b/src/link/MachO/Archive.zig
@@ -169,7 +169,7 @@ fn parseTableOfContents(self: *Archive, allocator: Allocator, reader: anytype) !
};
const object_offset = try symtab_reader.readIntLittle(u32);
- const sym_name = mem.sliceTo(@ptrCast([*:0]const u8, strtab.ptr + n_strx), 0);
+ const sym_name = mem.sliceTo(@as([*:0]const u8, @ptrCast(strtab.ptr + n_strx)), 0);
const owned_name = try allocator.dupe(u8, sym_name);
const res = try self.toc.getOrPut(allocator, owned_name);
defer if (res.found_existing) allocator.free(owned_name);
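
The Archive.zig change (and the matching hunks in DwarfInfo.zig, Dylib.zig, and Object.zig below) rewrites one recurring idiom: reinterpreting an offset into a string table as a NUL-terminated pointer for mem.sliceTo. A self-contained sketch of that idiom in the new syntax (the offset is hypothetical):

    const std = @import("std");

    test "slice a NUL-terminated name out of a string table" {
        const strtab: []const u8 = "foo\x00bar\x00";
        const n_strx: usize = 4; // hypothetical offset of "bar"
        const name = std.mem.sliceTo(@as([*:0]const u8, @ptrCast(strtab.ptr + n_strx)), 0);
        try std.testing.expectEqualStrings("bar", name);
    }
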
diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig
index fcb4c16063..f527ca3581 100644
--- a/src/link/MachO/CodeSignature.zig
+++ b/src/link/MachO/CodeSignature.zig
@@ -72,7 +72,7 @@ const CodeDirectory = struct {
.hashSize = hash_size,
.hashType = macho.CS_HASHTYPE_SHA256,
.platform = 0,
- .pageSize = @truncate(u8, std.math.log2(page_size)),
+ .pageSize = @as(u8, @truncate(std.math.log2(page_size))),
.spare2 = 0,
.scatterOffset = 0,
.teamOffset = 0,
@@ -110,7 +110,7 @@ const CodeDirectory = struct {
fn size(self: CodeDirectory) u32 {
const code_slots = self.inner.nCodeSlots * hash_size;
const special_slots = self.inner.nSpecialSlots * hash_size;
- return @sizeOf(macho.CodeDirectory) + @intCast(u32, self.ident.len + 1 + special_slots + code_slots);
+ return @sizeOf(macho.CodeDirectory) + @as(u32, @intCast(self.ident.len + 1 + special_slots + code_slots));
}
fn write(self: CodeDirectory, writer: anytype) !void {
@@ -139,9 +139,9 @@ const CodeDirectory = struct {
try writer.writeAll(self.ident);
try writer.writeByte(0);
- var i: isize = @intCast(isize, self.inner.nSpecialSlots);
+ var i: isize = @as(isize, @intCast(self.inner.nSpecialSlots));
while (i > 0) : (i -= 1) {
- try writer.writeAll(&self.special_slots[@intCast(usize, i - 1)]);
+ try writer.writeAll(&self.special_slots[@as(usize, @intCast(i - 1))]);
}
for (self.code_slots.items) |slot| {
@@ -186,7 +186,7 @@ const Entitlements = struct {
}
fn size(self: Entitlements) u32 {
- return @intCast(u32, self.inner.len) + 2 * @sizeOf(u32);
+ return @as(u32, @intCast(self.inner.len)) + 2 * @sizeOf(u32);
}
fn write(self: Entitlements, writer: anytype) !void {
@@ -281,7 +281,7 @@ pub fn writeAdhocSignature(
self.code_directory.inner.execSegFlags = if (opts.output_mode == .Exe) macho.CS_EXECSEG_MAIN_BINARY else 0;
self.code_directory.inner.codeLimit = opts.file_size;
- const total_pages = @intCast(u32, mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size);
+ const total_pages = @as(u32, @intCast(mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size));
try self.code_directory.code_slots.ensureTotalCapacityPrecise(gpa, total_pages);
self.code_directory.code_slots.items.len = total_pages;
@@ -331,7 +331,7 @@ pub fn writeAdhocSignature(
}
self.code_directory.inner.hashOffset =
- @sizeOf(macho.CodeDirectory) + @intCast(u32, self.code_directory.ident.len + 1 + self.code_directory.inner.nSpecialSlots * hash_size);
+ @sizeOf(macho.CodeDirectory) + @as(u32, @intCast(self.code_directory.ident.len + 1 + self.code_directory.inner.nSpecialSlots * hash_size));
self.code_directory.inner.length = self.code_directory.size();
header.length += self.code_directory.size();
@@ -339,7 +339,7 @@ pub fn writeAdhocSignature(
try writer.writeIntBig(u32, header.length);
try writer.writeIntBig(u32, header.count);
- var offset: u32 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) * @intCast(u32, blobs.items.len);
+ var offset: u32 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) * @as(u32, @intCast(blobs.items.len));
for (blobs.items) |blob| {
try writer.writeIntBig(u32, blob.slotType());
try writer.writeIntBig(u32, offset);
@@ -383,7 +383,7 @@ pub fn estimateSize(self: CodeSignature, file_size: u64) u32 {
ssize += @sizeOf(macho.BlobIndex) + sig.size();
}
ssize += n_special_slots * hash_size;
- return @intCast(u32, mem.alignForward(u64, ssize, @sizeOf(u64)));
+ return @as(u32, @intCast(mem.alignForward(u64, ssize, @sizeOf(u64))));
}
pub fn clear(self: *CodeSignature, allocator: Allocator) void {
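
CodeSignature.zig only shuffles @intCast and @truncate call sites. The semantics are unchanged: @truncate still discards high bits, while @intCast still asserts the value fits; both now take their width from the surrounding @as. A small sketch with assumed values:

    const std = @import("std");

    test "@truncate keeps only the low bits" {
        const file_size: u64 = 0x1_0000_0042;
        // Old: @truncate(u32, file_size)
        const low = @as(u32, @truncate(file_size));
        try std.testing.expectEqual(@as(u32, 0x42), low);
    }
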
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index fdb8c9c816..ade26de920 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -64,9 +64,9 @@ pub const Reloc = struct {
/// has been called to get a viable debug symbols output.
pub fn populateMissingMetadata(self: *DebugSymbols) !void {
if (self.dwarf_segment_cmd_index == null) {
- self.dwarf_segment_cmd_index = @intCast(u8, self.segments.items.len);
+ self.dwarf_segment_cmd_index = @as(u8, @intCast(self.segments.items.len));
- const off = @intCast(u64, self.page_size);
+ const off = @as(u64, @intCast(self.page_size));
const ideal_size: u16 = 200 + 128 + 160 + 250;
const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), self.page_size);
@@ -86,7 +86,7 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void {
try self.dwarf.strtab.buffer.append(self.allocator, 0);
self.debug_str_section_index = try self.allocateSection(
"__debug_str",
- @intCast(u32, self.dwarf.strtab.buffer.items.len),
+ @as(u32, @intCast(self.dwarf.strtab.buffer.items.len)),
0,
);
self.debug_string_table_dirty = true;
@@ -113,7 +113,7 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void {
}
if (self.linkedit_segment_cmd_index == null) {
- self.linkedit_segment_cmd_index = @intCast(u8, self.segments.items.len);
+ self.linkedit_segment_cmd_index = @as(u8, @intCast(self.segments.items.len));
try self.segments.append(self.allocator, .{
.segname = makeStaticString("__LINKEDIT"),
.maxprot = macho.PROT.READ,
@@ -128,7 +128,7 @@ fn allocateSection(self: *DebugSymbols, sectname: []const u8, size: u64, alignme
var sect = macho.section_64{
.sectname = makeStaticString(sectname),
.segname = segment.segname,
- .size = @intCast(u32, size),
+ .size = @as(u32, @intCast(size)),
.@"align" = alignment,
};
const alignment_pow_2 = try math.powi(u32, 2, alignment);
@@ -141,9 +141,9 @@ fn allocateSection(self: *DebugSymbols, sectname: []const u8, size: u64, alignme
off + size,
});
- sect.offset = @intCast(u32, off);
+ sect.offset = @as(u32, @intCast(off));
- const index = @intCast(u8, self.sections.items.len);
+ const index = @as(u8, @intCast(self.sections.items.len));
try self.sections.append(self.allocator, sect);
segment.cmdsize += @sizeOf(macho.section_64);
segment.nsects += 1;
@@ -176,7 +176,7 @@ pub fn growSection(self: *DebugSymbols, sect_index: u8, needed_size: u32, requir
if (amt != existing_size) return error.InputOutput;
}
- sect.offset = @intCast(u32, new_offset);
+ sect.offset = @as(u32, @intCast(new_offset));
}
sect.size = needed_size;
@@ -286,7 +286,7 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {
{
const sect_index = self.debug_str_section_index.?;
if (self.debug_string_table_dirty or self.dwarf.strtab.buffer.items.len != self.getSection(sect_index).size) {
- const needed_size = @intCast(u32, self.dwarf.strtab.buffer.items.len);
+ const needed_size = @as(u32, @intCast(self.dwarf.strtab.buffer.items.len));
try self.growSection(sect_index, needed_size, false);
try self.file.pwriteAll(self.dwarf.strtab.buffer.items, self.getSection(sect_index).offset);
self.debug_string_table_dirty = false;
@@ -307,7 +307,7 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {
const ncmds = load_commands.calcNumOfLCs(lc_buffer.items);
try self.file.pwriteAll(lc_buffer.items, @sizeOf(macho.mach_header_64));
- try self.writeHeader(macho_file, ncmds, @intCast(u32, lc_buffer.items.len));
+ try self.writeHeader(macho_file, ncmds, @as(u32, @intCast(lc_buffer.items.len)));
assert(!self.debug_abbrev_section_dirty);
assert(!self.debug_aranges_section_dirty);
@@ -378,7 +378,7 @@ fn writeSegmentHeaders(self: *DebugSymbols, macho_file: *MachO, writer: anytype)
// Write segment/section headers from the binary file first.
const end = macho_file.linkedit_segment_cmd_index.?;
for (macho_file.segments.items[0..end], 0..) |seg, i| {
- const indexes = macho_file.getSectionIndexes(@intCast(u8, i));
+ const indexes = macho_file.getSectionIndexes(@as(u8, @intCast(i)));
var out_seg = seg;
out_seg.fileoff = 0;
out_seg.filesize = 0;
@@ -407,7 +407,7 @@ fn writeSegmentHeaders(self: *DebugSymbols, macho_file: *MachO, writer: anytype)
}
// Next, commit DSYM's __LINKEDIT and __DWARF segments headers.
for (self.segments.items, 0..) |seg, i| {
- const indexes = self.getSectionIndexes(@intCast(u8, i));
+ const indexes = self.getSectionIndexes(@as(u8, @intCast(i)));
try writer.writeStruct(seg);
for (self.sections.items[indexes.start..indexes.end]) |header| {
try writer.writeStruct(header);
@@ -473,7 +473,7 @@ fn writeSymtab(self: *DebugSymbols, macho_file: *MachO) !void {
for (macho_file.locals.items, 0..) |sym, sym_id| {
if (sym.n_strx == 0) continue; // no name, skip
- const sym_loc = MachO.SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = null };
+ const sym_loc = MachO.SymbolWithLoc{ .sym_index = @as(u32, @intCast(sym_id)), .file = null };
if (macho_file.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip
if (macho_file.getGlobal(macho_file.getSymbolName(sym_loc)) != null) continue; // global symbol is either an export or import, skip
var out_sym = sym;
@@ -501,10 +501,10 @@ fn writeSymtab(self: *DebugSymbols, macho_file: *MachO) !void {
const needed_size = nsyms * @sizeOf(macho.nlist_64);
seg.filesize = offset + needed_size - seg.fileoff;
- self.symtab_cmd.symoff = @intCast(u32, offset);
- self.symtab_cmd.nsyms = @intCast(u32, nsyms);
+ self.symtab_cmd.symoff = @as(u32, @intCast(offset));
+ self.symtab_cmd.nsyms = @as(u32, @intCast(nsyms));
- const locals_off = @intCast(u32, offset);
+ const locals_off = @as(u32, @intCast(offset));
const locals_size = nlocals * @sizeOf(macho.nlist_64);
const exports_off = locals_off + locals_size;
const exports_size = nexports * @sizeOf(macho.nlist_64);
@@ -521,13 +521,13 @@ fn writeStrtab(self: *DebugSymbols) !void {
defer tracy.end();
const seg = &self.segments.items[self.linkedit_segment_cmd_index.?];
- const symtab_size = @intCast(u32, self.symtab_cmd.nsyms * @sizeOf(macho.nlist_64));
+ const symtab_size = @as(u32, @intCast(self.symtab_cmd.nsyms * @sizeOf(macho.nlist_64)));
const offset = mem.alignForward(u64, self.symtab_cmd.symoff + symtab_size, @alignOf(u64));
const needed_size = mem.alignForward(u64, self.strtab.buffer.items.len, @alignOf(u64));
seg.filesize = offset + needed_size - seg.fileoff;
- self.symtab_cmd.stroff = @intCast(u32, offset);
- self.symtab_cmd.strsize = @intCast(u32, needed_size);
+ self.symtab_cmd.stroff = @as(u32, @intCast(offset));
+ self.symtab_cmd.strsize = @as(u32, @intCast(needed_size));
log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
@@ -542,8 +542,8 @@ fn writeStrtab(self: *DebugSymbols) !void {
pub fn getSectionIndexes(self: *DebugSymbols, segment_index: u8) struct { start: u8, end: u8 } {
var start: u8 = 0;
const nsects = for (self.segments.items, 0..) |seg, i| {
- if (i == segment_index) break @intCast(u8, seg.nsects);
- start += @intCast(u8, seg.nsects);
+ if (i == segment_index) break @as(u8, @intCast(seg.nsects));
+ start += @as(u8, @intCast(seg.nsects));
} else 0;
return .{ .start = start, .end = start + nsects };
}
diff --git a/src/link/MachO/DwarfInfo.zig b/src/link/MachO/DwarfInfo.zig
index 3218435734..07d98e8e94 100644
--- a/src/link/MachO/DwarfInfo.zig
+++ b/src/link/MachO/DwarfInfo.zig
@@ -70,7 +70,7 @@ pub fn genSubprogramLookupByName(
low_pc = addr;
}
if (try attr.getConstant(self)) |constant| {
- low_pc = @intCast(u64, constant);
+ low_pc = @as(u64, @intCast(constant));
}
},
dwarf.AT.high_pc => {
@@ -78,7 +78,7 @@ pub fn genSubprogramLookupByName(
high_pc = addr;
}
if (try attr.getConstant(self)) |constant| {
- high_pc = @intCast(u64, constant);
+ high_pc = @as(u64, @intCast(constant));
}
},
else => {},
@@ -261,7 +261,7 @@ pub const Attribute = struct {
switch (self.form) {
dwarf.FORM.string => {
- return mem.sliceTo(@ptrCast([*:0]const u8, debug_info.ptr), 0);
+ return mem.sliceTo(@as([*:0]const u8, @ptrCast(debug_info.ptr)), 0);
},
dwarf.FORM.strp => {
const off = if (cuh.is_64bit)
@@ -499,5 +499,5 @@ fn findAbbrevEntrySize(self: DwarfInfo, da_off: usize, da_len: usize, di_off: us
fn getString(self: DwarfInfo, off: u64) []const u8 {
assert(off < self.debug_str.len);
- return mem.sliceTo(@ptrCast([*:0]const u8, self.debug_str.ptr + @intCast(usize, off)), 0);
+ return mem.sliceTo(@as([*:0]const u8, @ptrCast(self.debug_str.ptr + @as(usize, @intCast(off)))), 0);
}
diff --git a/src/link/MachO/Dylib.zig b/src/link/MachO/Dylib.zig
index 971706dae6..ee8f34f756 100644
--- a/src/link/MachO/Dylib.zig
+++ b/src/link/MachO/Dylib.zig
@@ -75,7 +75,7 @@ pub const Id = struct {
.int => |int| {
var out: u32 = 0;
const major = math.cast(u16, int) orelse return error.Overflow;
- out += @intCast(u32, major) << 16;
+ out += @as(u32, @intCast(major)) << 16;
return out;
},
.float => |float| {
@@ -106,9 +106,9 @@ pub const Id = struct {
out += try fmt.parseInt(u8, values[2], 10);
}
if (count > 1) {
- out += @intCast(u32, try fmt.parseInt(u8, values[1], 10)) << 8;
+ out += @as(u32, @intCast(try fmt.parseInt(u8, values[1], 10))) << 8;
}
- out += @intCast(u32, try fmt.parseInt(u16, values[0], 10)) << 16;
+ out += @as(u32, @intCast(try fmt.parseInt(u16, values[0], 10))) << 16;
return out;
}
@@ -164,11 +164,11 @@ pub fn parseFromBinary(
switch (cmd.cmd()) {
.SYMTAB => {
const symtab_cmd = cmd.cast(macho.symtab_command).?;
- const symtab = @ptrCast(
+ const symtab = @as(
[*]const macho.nlist_64,
// Alignment is guaranteed as a dylib is a final linked image and has to have sections
// properly aligned in order to be correctly loaded by the loader.
- @alignCast(@alignOf(macho.nlist_64), &data[symtab_cmd.symoff]),
+ @ptrCast(@alignCast(&data[symtab_cmd.symoff])),
)[0..symtab_cmd.nsyms];
const strtab = data[symtab_cmd.stroff..][0..symtab_cmd.strsize];
@@ -176,7 +176,7 @@ pub fn parseFromBinary(
const add_to_symtab = sym.ext() and (sym.sect() or sym.indr());
if (!add_to_symtab) continue;
- const sym_name = mem.sliceTo(@ptrCast([*:0]const u8, strtab.ptr + sym.n_strx), 0);
+ const sym_name = mem.sliceTo(@as([*:0]const u8, @ptrCast(strtab.ptr + sym.n_strx)), 0);
try self.symbols.putNoClobber(allocator, try allocator.dupe(u8, sym_name), false);
}
},
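
This Dylib.zig hunk is the one place the rewrite is more than argument shuffling: @alignCast no longer takes an explicit alignment and instead infers it from the result type, so the old @ptrCast/@alignCast pair swaps roles inside @as. A hedged sketch of reinterpreting raw bytes as a struct array the same way (Entry is a stand-in for macho.nlist_64):

    const std = @import("std");

    const Entry = extern struct { value: u32 };

    test "reinterpret aligned raw bytes as an array of structs" {
        const data align(@alignOf(Entry)) = [_]u8{ 1, 1, 1, 1, 2, 2, 2, 2 };
        // Old: @ptrCast([*]const Entry, @alignCast(@alignOf(Entry), &data[0]))
        const entries = @as([*]const Entry, @ptrCast(@alignCast(&data[0])))[0..2];
        try std.testing.expectEqual(@as(u32, 0x02020202), entries[1].value);
    }
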
diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig
index 105a806075..29fe2988b6 100644
--- a/src/link/MachO/Object.zig
+++ b/src/link/MachO/Object.zig
@@ -164,7 +164,7 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
else => {},
} else return;
- self.in_symtab = @ptrCast([*]align(1) const macho.nlist_64, self.contents.ptr + symtab.symoff)[0..symtab.nsyms];
+ self.in_symtab = @as([*]align(1) const macho.nlist_64, @ptrCast(self.contents.ptr + symtab.symoff))[0..symtab.nsyms];
self.in_strtab = self.contents[symtab.stroff..][0..symtab.strsize];
self.symtab = try allocator.alloc(macho.nlist_64, self.in_symtab.?.len + nsects);
@@ -202,7 +202,7 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
defer sorted_all_syms.deinit();
for (self.in_symtab.?, 0..) |_, index| {
- sorted_all_syms.appendAssumeCapacity(.{ .index = @intCast(u32, index) });
+ sorted_all_syms.appendAssumeCapacity(.{ .index = @as(u32, @intCast(index)) });
}
// We sort by type: defined < undefined, and
@@ -225,18 +225,18 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
}
}
if (sym.sect() and section_index_lookup == null) {
- section_index_lookup = .{ .start = @intCast(u32, i), .len = 1 };
+ section_index_lookup = .{ .start = @as(u32, @intCast(i)), .len = 1 };
}
prev_sect_id = sym.n_sect;
self.symtab[i] = sym;
self.source_symtab_lookup[i] = sym_id.index;
- self.reverse_symtab_lookup[sym_id.index] = @intCast(u32, i);
- self.source_address_lookup[i] = if (sym.undf()) -1 else @intCast(i64, sym.n_value);
+ self.reverse_symtab_lookup[sym_id.index] = @as(u32, @intCast(i));
+ self.source_address_lookup[i] = if (sym.undf()) -1 else @as(i64, @intCast(sym.n_value));
- const sym_name_len = mem.sliceTo(@ptrCast([*:0]const u8, self.in_strtab.?.ptr + sym.n_strx), 0).len + 1;
- self.strtab_lookup[i] = @intCast(u32, sym_name_len);
+ const sym_name_len = mem.sliceTo(@as([*:0]const u8, @ptrCast(self.in_strtab.?.ptr + sym.n_strx)), 0).len + 1;
+ self.strtab_lookup[i] = @as(u32, @intCast(sym_name_len));
}
// If there were no undefined symbols, make sure we populate the
@@ -267,7 +267,7 @@ const SymbolAtIndex = struct {
fn getSymbolName(self: SymbolAtIndex, ctx: Context) []const u8 {
const off = self.getSymbol(ctx).n_strx;
- return mem.sliceTo(@ptrCast([*:0]const u8, ctx.in_strtab.?.ptr + off), 0);
+ return mem.sliceTo(@as([*:0]const u8, @ptrCast(ctx.in_strtab.?.ptr + off)), 0);
}
fn getSymbolSeniority(self: SymbolAtIndex, ctx: Context) u2 {
@@ -338,7 +338,7 @@ fn filterSymbolsBySection(symbols: []macho.nlist_64, n_sect: u8) struct {
.n_sect = n_sect,
});
- return .{ .index = @intCast(u32, index), .len = @intCast(u32, len) };
+ return .{ .index = @as(u32, @intCast(index)), .len = @as(u32, @intCast(len)) };
}
fn filterSymbolsByAddress(symbols: []macho.nlist_64, start_addr: u64, end_addr: u64) struct {
@@ -360,7 +360,7 @@ fn filterSymbolsByAddress(symbols: []macho.nlist_64, start_addr: u64, end_addr:
.addr = end_addr,
});
- return .{ .index = @intCast(u32, index), .len = @intCast(u32, len) };
+ return .{ .index = @as(u32, @intCast(index)), .len = @as(u32, @intCast(len)) };
}
const SortedSection = struct {
@@ -400,7 +400,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
};
if (sect.size == 0) continue;
- const sect_id = @intCast(u8, id);
+ const sect_id = @as(u8, @intCast(id));
const sym = self.getSectionAliasSymbolPtr(sect_id);
sym.* = .{
.n_strx = 0,
@@ -417,7 +417,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
const out_sect_id = (try zld.getOutputSection(sect)) orelse continue;
if (sect.size == 0) continue;
- const sect_id = @intCast(u8, id);
+ const sect_id = @as(u8, @intCast(id));
const sym_index = self.getSectionAliasSymbolIndex(sect_id);
const atom_index = try self.createAtomFromSubsection(
zld,
@@ -459,7 +459,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
defer gpa.free(sorted_sections);
for (sections, 0..) |sect, id| {
- sorted_sections[id] = .{ .header = sect, .id = @intCast(u8, id) };
+ sorted_sections[id] = .{ .header = sect, .id = @as(u8, @intCast(id)) };
}
mem.sort(SortedSection, sorted_sections, {}, sectionLessThanByAddress);
@@ -651,7 +651,7 @@ fn filterRelocs(
const start = @import("zld.zig").bsearch(macho.relocation_info, relocs, Predicate{ .addr = end_addr });
const len = @import("zld.zig").lsearch(macho.relocation_info, relocs[start..], LPredicate{ .addr = start_addr });
- return .{ .start = @intCast(u32, start), .len = @intCast(u32, len) };
+ return .{ .start = @as(u32, @intCast(start)), .len = @as(u32, @intCast(len)) };
}
/// Parse all relocs for the input section, and sort in descending order.
@@ -659,7 +659,7 @@ fn filterRelocs(
/// section in a sorted manner which is simply not true.
fn parseRelocs(self: *Object, gpa: Allocator, sect_id: u8) !void {
const section = self.getSourceSection(sect_id);
- const start = @intCast(u32, self.relocations.items.len);
+ const start = @as(u32, @intCast(self.relocations.items.len));
if (self.getSourceRelocs(section)) |relocs| {
try self.relocations.ensureUnusedCapacity(gpa, relocs.len);
self.relocations.appendUnalignedSliceAssumeCapacity(relocs);
@@ -677,8 +677,8 @@ fn cacheRelocs(self: *Object, zld: *Zld, atom_index: AtomIndex) !void {
// If there was no matching symbol present in the source symtab, this means
// we are dealing with either an entire section, or part of it, but also
// starting at the beginning.
- const nbase = @intCast(u32, self.in_symtab.?.len);
- const sect_id = @intCast(u8, atom.sym_index - nbase);
+ const nbase = @as(u32, @intCast(self.in_symtab.?.len));
+ const sect_id = @as(u8, @intCast(atom.sym_index - nbase));
break :blk sect_id;
};
const source_sect = self.getSourceSection(source_sect_id);
@@ -745,7 +745,7 @@ fn parseEhFrameSection(self: *Object, zld: *Zld, object_id: u32) !void {
.object_id = object_id,
.rel = rel,
.code = it.data[offset..],
- .base_offset = @intCast(i32, offset),
+ .base_offset = @as(i32, @intCast(offset)),
});
break :blk target;
},
@@ -798,7 +798,7 @@ fn parseUnwindInfo(self: *Object, zld: *Zld, object_id: u32) !void {
_ = try zld.initSection("__TEXT", "__unwind_info", .{});
}
- try self.unwind_records_lookup.ensureTotalCapacity(gpa, @intCast(u32, self.exec_atoms.items.len));
+ try self.unwind_records_lookup.ensureTotalCapacity(gpa, @as(u32, @intCast(self.exec_atoms.items.len)));
const unwind_records = self.getUnwindRecords();
@@ -834,14 +834,14 @@ fn parseUnwindInfo(self: *Object, zld: *Zld, object_id: u32) !void {
.object_id = object_id,
.rel = rel,
.code = mem.asBytes(&record),
- .base_offset = @intCast(i32, offset),
+ .base_offset = @as(i32, @intCast(offset)),
});
log.debug("unwind record {d} tracks {s}", .{ record_id, zld.getSymbolName(target) });
if (target.getFile() != object_id) {
self.unwind_relocs_lookup[record_id].dead = true;
} else {
const atom_index = self.getAtomIndexForSymbol(target.sym_index).?;
- self.unwind_records_lookup.putAssumeCapacityNoClobber(atom_index, @intCast(u32, record_id));
+ self.unwind_records_lookup.putAssumeCapacityNoClobber(atom_index, @as(u32, @intCast(record_id)));
}
}
}
@@ -869,7 +869,7 @@ pub fn getSourceSectionIndexByName(self: Object, segname: []const u8, sectname:
const sections = self.getSourceSections();
for (sections, 0..) |sect, i| {
if (mem.eql(u8, segname, sect.segName()) and mem.eql(u8, sectname, sect.sectName()))
- return @intCast(u8, i);
+ return @as(u8, @intCast(i));
} else return null;
}
@@ -898,7 +898,7 @@ pub fn parseDataInCode(self: *Object, gpa: Allocator) !void {
}
} else return;
const ndice = @divExact(cmd.datasize, @sizeOf(macho.data_in_code_entry));
- const dice = @ptrCast([*]align(1) const macho.data_in_code_entry, self.contents.ptr + cmd.dataoff)[0..ndice];
+ const dice = @as([*]align(1) const macho.data_in_code_entry, @ptrCast(self.contents.ptr + cmd.dataoff))[0..ndice];
try self.data_in_code.ensureTotalCapacityPrecise(gpa, dice.len);
self.data_in_code.appendUnalignedSliceAssumeCapacity(dice);
mem.sort(macho.data_in_code_entry, self.data_in_code.items, {}, diceLessThan);
@@ -945,12 +945,12 @@ pub fn parseDwarfInfo(self: Object) DwarfInfo {
}
pub fn getSectionContents(self: Object, sect: macho.section_64) []const u8 {
- const size = @intCast(usize, sect.size);
+ const size = @as(usize, @intCast(sect.size));
return self.contents[sect.offset..][0..size];
}
pub fn getSectionAliasSymbolIndex(self: Object, sect_id: u8) u32 {
- const start = @intCast(u32, self.in_symtab.?.len);
+ const start = @as(u32, @intCast(self.in_symtab.?.len));
return start + sect_id;
}
@@ -964,7 +964,7 @@ pub fn getSectionAliasSymbolPtr(self: *Object, sect_id: u8) *macho.nlist_64 {
fn getSourceRelocs(self: Object, sect: macho.section_64) ?[]align(1) const macho.relocation_info {
if (sect.nreloc == 0) return null;
- return @ptrCast([*]align(1) const macho.relocation_info, self.contents.ptr + sect.reloff)[0..sect.nreloc];
+ return @as([*]align(1) const macho.relocation_info, @ptrCast(self.contents.ptr + sect.reloff))[0..sect.nreloc];
}
pub fn getRelocs(self: Object, sect_id: u8) []const macho.relocation_info {
@@ -1005,25 +1005,25 @@ pub fn getSymbolByAddress(self: Object, addr: u64, sect_hint: ?u8) u32 {
const target_sym_index = @import("zld.zig").lsearch(
i64,
self.source_address_lookup[lookup.start..][0..lookup.len],
- Predicate{ .addr = @intCast(i64, addr) },
+ Predicate{ .addr = @as(i64, @intCast(addr)) },
);
if (target_sym_index > 0) {
- return @intCast(u32, lookup.start + target_sym_index - 1);
+ return @as(u32, @intCast(lookup.start + target_sym_index - 1));
}
}
return self.getSectionAliasSymbolIndex(sect_id);
}
const target_sym_index = @import("zld.zig").lsearch(i64, self.source_address_lookup, Predicate{
- .addr = @intCast(i64, addr),
+ .addr = @as(i64, @intCast(addr)),
});
assert(target_sym_index > 0);
- return @intCast(u32, target_sym_index - 1);
+ return @as(u32, @intCast(target_sym_index - 1));
}
pub fn getGlobal(self: Object, sym_index: u32) ?u32 {
if (self.globals_lookup[sym_index] == -1) return null;
- return @intCast(u32, self.globals_lookup[sym_index]);
+ return @as(u32, @intCast(self.globals_lookup[sym_index]));
}
pub fn getAtomIndexForSymbol(self: Object, sym_index: u32) ?AtomIndex {
@@ -1041,7 +1041,7 @@ pub fn getUnwindRecords(self: Object) []align(1) const macho.compact_unwind_entr
const sect = self.getSourceSection(sect_id);
const data = self.getSectionContents(sect);
const num_entries = @divExact(data.len, @sizeOf(macho.compact_unwind_entry));
- return @ptrCast([*]align(1) const macho.compact_unwind_entry, data)[0..num_entries];
+ return @as([*]align(1) const macho.compact_unwind_entry, @ptrCast(data))[0..num_entries];
}
pub fn hasEhFrameRecords(self: Object) bool {
diff --git a/src/link/MachO/Relocation.zig b/src/link/MachO/Relocation.zig
index 2685cc26e2..b7bbf59cfc 100644
--- a/src/link/MachO/Relocation.zig
+++ b/src/link/MachO/Relocation.zig
@@ -94,9 +94,9 @@ pub fn resolve(self: Relocation, macho_file: *MachO, atom_index: Atom.Index, cod
.tlv_initializer => blk: {
assert(self.addend == 0); // Addend here makes no sense.
const header = macho_file.sections.items(.header)[macho_file.thread_data_section_index.?];
- break :blk @intCast(i64, target_base_addr - header.addr);
+ break :blk @as(i64, @intCast(target_base_addr - header.addr));
},
- else => @intCast(i64, target_base_addr) + self.addend,
+ else => @as(i64, @intCast(target_base_addr)) + self.addend,
};
log.debug(" ({x}: [() => 0x{x} ({s})) ({s})", .{
@@ -119,7 +119,7 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: i64, code: []
.branch => {
const displacement = math.cast(
i28,
- @intCast(i64, target_addr) - @intCast(i64, source_addr),
+ @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr)),
) orelse unreachable; // TODO codegen should never allow for jump larger than i28 displacement
var inst = aarch64.Instruction{
.unconditional_branch_immediate = mem.bytesToValue(meta.TagPayload(
@@ -127,25 +127,25 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: i64, code: []
aarch64.Instruction.unconditional_branch_immediate,
), buffer[0..4]),
};
- inst.unconditional_branch_immediate.imm26 = @truncate(u26, @bitCast(u28, displacement >> 2));
+ inst.unconditional_branch_immediate.imm26 = @as(u26, @truncate(@as(u28, @bitCast(displacement >> 2))));
mem.writeIntLittle(u32, buffer[0..4], inst.toU32());
},
.page, .got_page => {
- const source_page = @intCast(i32, source_addr >> 12);
- const target_page = @intCast(i32, target_addr >> 12);
- const pages = @bitCast(u21, @intCast(i21, target_page - source_page));
+ const source_page = @as(i32, @intCast(source_addr >> 12));
+ const target_page = @as(i32, @intCast(target_addr >> 12));
+ const pages = @as(u21, @bitCast(@as(i21, @intCast(target_page - source_page))));
var inst = aarch64.Instruction{
.pc_relative_address = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.pc_relative_address,
), buffer[0..4]),
};
- inst.pc_relative_address.immhi = @truncate(u19, pages >> 2);
- inst.pc_relative_address.immlo = @truncate(u2, pages);
+ inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2));
+ inst.pc_relative_address.immlo = @as(u2, @truncate(pages));
mem.writeIntLittle(u32, buffer[0..4], inst.toU32());
},
.pageoff, .got_pageoff => {
- const narrowed = @truncate(u12, @intCast(u64, target_addr));
+ const narrowed = @as(u12, @truncate(@as(u64, @intCast(target_addr))));
if (isArithmeticOp(buffer[0..4])) {
var inst = aarch64.Instruction{
.add_subtract_immediate = mem.bytesToValue(meta.TagPayload(
@@ -180,8 +180,8 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: i64, code: []
}
},
.tlv_initializer, .unsigned => switch (self.length) {
- 2 => mem.writeIntLittle(u32, buffer[0..4], @truncate(u32, @bitCast(u64, target_addr))),
- 3 => mem.writeIntLittle(u64, buffer[0..8], @bitCast(u64, target_addr)),
+ 2 => mem.writeIntLittle(u32, buffer[0..4], @as(u32, @truncate(@as(u64, @bitCast(target_addr))))),
+ 3 => mem.writeIntLittle(u64, buffer[0..8], @as(u64, @bitCast(target_addr))),
else => unreachable,
},
.got, .signed, .tlv => unreachable, // Invalid target architecture.
@@ -191,16 +191,16 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: i64, code: []
fn resolveX8664(self: Relocation, source_addr: u64, target_addr: i64, code: []u8) void {
switch (self.type) {
.branch, .got, .tlv, .signed => {
- const displacement = @intCast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr) - 4);
- mem.writeIntLittle(u32, code[self.offset..][0..4], @bitCast(u32, displacement));
+ const displacement = @as(i32, @intCast(@as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr)) - 4));
+ mem.writeIntLittle(u32, code[self.offset..][0..4], @as(u32, @bitCast(displacement)));
},
.tlv_initializer, .unsigned => {
switch (self.length) {
2 => {
- mem.writeIntLittle(u32, code[self.offset..][0..4], @truncate(u32, @bitCast(u64, target_addr)));
+ mem.writeIntLittle(u32, code[self.offset..][0..4], @as(u32, @truncate(@as(u64, @bitCast(target_addr)))));
},
3 => {
- mem.writeIntLittle(u64, code[self.offset..][0..8], @bitCast(u64, target_addr));
+ mem.writeIntLittle(u64, code[self.offset..][0..8], @as(u64, @bitCast(target_addr)));
},
else => unreachable,
}
@@ -210,24 +210,24 @@ fn resolveX8664(self: Relocation, source_addr: u64, target_addr: i64, code: []u8
}
pub inline fn isArithmeticOp(inst: *const [4]u8) bool {
- const group_decode = @truncate(u5, inst[3]);
+ const group_decode = @as(u5, @truncate(inst[3]));
return ((group_decode >> 2) == 4);
}
pub fn calcPcRelativeDisplacementX86(source_addr: u64, target_addr: u64, correction: u3) error{Overflow}!i32 {
- const disp = @intCast(i64, target_addr) - @intCast(i64, source_addr + 4 + correction);
+ const disp = @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr + 4 + correction));
return math.cast(i32, disp) orelse error.Overflow;
}
pub fn calcPcRelativeDisplacementArm64(source_addr: u64, target_addr: u64) error{Overflow}!i28 {
- const disp = @intCast(i64, target_addr) - @intCast(i64, source_addr);
+ const disp = @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr));
return math.cast(i28, disp) orelse error.Overflow;
}
pub fn calcNumberOfPages(source_addr: u64, target_addr: u64) i21 {
- const source_page = @intCast(i32, source_addr >> 12);
- const target_page = @intCast(i32, target_addr >> 12);
- const pages = @intCast(i21, target_page - source_page);
+ const source_page = @as(i32, @intCast(source_addr >> 12));
+ const target_page = @as(i32, @intCast(target_addr >> 12));
+ const pages = @as(i21, @intCast(target_page - source_page));
return pages;
}
@@ -241,7 +241,7 @@ pub const PageOffsetInstKind = enum {
};
pub fn calcPageOffset(target_addr: u64, kind: PageOffsetInstKind) !u12 {
- const narrowed = @truncate(u12, target_addr);
+ const narrowed = @as(u12, @truncate(target_addr));
return switch (kind) {
.arithmetic, .load_store_8 => narrowed,
.load_store_16 => try math.divExact(u12, narrowed, 2),
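
The Relocation.zig arithmetic keeps its shape throughout: compute a signed displacement in i64, narrow it with @intCast, then @bitCast to the unsigned type mem.writeIntLittle expects. A self-contained sketch with hypothetical addresses:

    const std = @import("std");

    test "write a signed x86_64 displacement as little-endian bytes" {
        const source_addr: u64 = 0x1000;
        const target_addr: u64 = 0xf00;
        const disp = @as(i32, @intCast(@as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr)) - 4));
        var buf: [4]u8 = undefined;
        std.mem.writeIntLittle(u32, &buf, @as(u32, @bitCast(disp)));
        try std.testing.expectEqual(@as(i32, -260), std.mem.readIntLittle(i32, &buf));
    }
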
diff --git a/src/link/MachO/Trie.zig b/src/link/MachO/Trie.zig
index 34200db7dc..cabe611b64 100644
--- a/src/link/MachO/Trie.zig
+++ b/src/link/MachO/Trie.zig
@@ -220,7 +220,7 @@ pub const Node = struct {
try writer.writeByte(0);
}
// Write number of edges (max legal number of edges is 256).
- try writer.writeByte(@intCast(u8, self.edges.items.len));
+ try writer.writeByte(@as(u8, @intCast(self.edges.items.len)));
for (self.edges.items) |edge| {
// Write edge label and offset to next node in trie.
diff --git a/src/link/MachO/UnwindInfo.zig b/src/link/MachO/UnwindInfo.zig
index 3c9a438f92..cfef053d1b 100644
--- a/src/link/MachO/UnwindInfo.zig
+++ b/src/link/MachO/UnwindInfo.zig
@@ -87,7 +87,7 @@ const Page = struct {
const record_id = page.page_encodings[index];
const record = info.records.items[record_id];
if (record.compactUnwindEncoding == enc) {
- return @intCast(u8, index);
+ return @as(u8, @intCast(index));
}
}
return null;
@@ -150,14 +150,14 @@ const Page = struct {
for (info.records.items[page.start..][0..page.count]) |record| {
try writer.writeStruct(macho.unwind_info_regular_second_level_entry{
- .functionOffset = @intCast(u32, record.rangeStart),
+ .functionOffset = @as(u32, @intCast(record.rangeStart)),
.encoding = record.compactUnwindEncoding,
});
}
},
.compressed => {
const entry_offset = @sizeOf(macho.unwind_info_compressed_second_level_page_header) +
- @intCast(u16, page.page_encodings_count) * @sizeOf(u32);
+ @as(u16, @intCast(page.page_encodings_count)) * @sizeOf(u32);
try writer.writeStruct(macho.unwind_info_compressed_second_level_page_header{
.entryPageOffset = entry_offset,
.entryCount = page.count,
@@ -183,8 +183,8 @@ const Page = struct {
break :blk ncommon + page.getPageEncoding(info, record.compactUnwindEncoding).?;
};
const compressed = macho.UnwindInfoCompressedEntry{
- .funcOffset = @intCast(u24, record.rangeStart - first_entry.rangeStart),
- .encodingIndex = @intCast(u8, enc_index),
+ .funcOffset = @as(u24, @intCast(record.rangeStart - first_entry.rangeStart)),
+ .encodingIndex = @as(u8, @intCast(enc_index)),
};
try writer.writeStruct(compressed);
}
@@ -214,15 +214,15 @@ pub fn scanRelocs(zld: *Zld) !void {
if (!UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch)) {
if (getPersonalityFunctionReloc(
zld,
- @intCast(u32, object_id),
+ @as(u32, @intCast(object_id)),
record_id,
)) |rel| {
// Personality function; add GOT pointer.
const target = Atom.parseRelocTarget(zld, .{
- .object_id = @intCast(u32, object_id),
+ .object_id = @as(u32, @intCast(object_id)),
.rel = rel,
.code = mem.asBytes(&record),
- .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)),
+ .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))),
});
try Atom.addGotEntry(zld, target);
}
@@ -258,18 +258,18 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
var record = unwind_records[record_id];
if (UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch)) {
- try info.collectPersonalityFromDwarf(zld, @intCast(u32, object_id), atom_index, &record);
+ try info.collectPersonalityFromDwarf(zld, @as(u32, @intCast(object_id)), atom_index, &record);
} else {
if (getPersonalityFunctionReloc(
zld,
- @intCast(u32, object_id),
+ @as(u32, @intCast(object_id)),
record_id,
)) |rel| {
const target = Atom.parseRelocTarget(zld, .{
- .object_id = @intCast(u32, object_id),
+ .object_id = @as(u32, @intCast(object_id)),
.rel = rel,
.code = mem.asBytes(&record),
- .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)),
+ .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))),
});
const personality_index = info.getPersonalityFunction(target) orelse inner: {
const personality_index = info.personalities_count;
@@ -282,14 +282,14 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
UnwindEncoding.setPersonalityIndex(&record.compactUnwindEncoding, personality_index + 1);
}
- if (getLsdaReloc(zld, @intCast(u32, object_id), record_id)) |rel| {
+ if (getLsdaReloc(zld, @as(u32, @intCast(object_id)), record_id)) |rel| {
const target = Atom.parseRelocTarget(zld, .{
- .object_id = @intCast(u32, object_id),
+ .object_id = @as(u32, @intCast(object_id)),
.rel = rel,
.code = mem.asBytes(&record),
- .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)),
+ .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))),
});
- record.lsda = @bitCast(u64, target);
+ record.lsda = @as(u64, @bitCast(target));
}
}
break :blk record;
@@ -302,7 +302,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
if (object.eh_frame_records_lookup.get(atom_index)) |fde_offset| {
if (object.eh_frame_relocs_lookup.get(fde_offset).?.dead) continue;
var record = nullRecord();
- try info.collectPersonalityFromDwarf(zld, @intCast(u32, object_id), atom_index, &record);
+ try info.collectPersonalityFromDwarf(zld, @as(u32, @intCast(object_id)), atom_index, &record);
switch (cpu_arch) {
.aarch64 => UnwindEncoding.setMode(&record.compactUnwindEncoding, macho.UNWIND_ARM64_MODE.DWARF),
.x86_64 => UnwindEncoding.setMode(&record.compactUnwindEncoding, macho.UNWIND_X86_64_MODE.DWARF),
@@ -320,7 +320,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
const sym = zld.getSymbol(sym_loc);
assert(sym.n_desc != N_DEAD);
record.rangeStart = sym.n_value;
- record.rangeLength = @intCast(u32, atom.size);
+ record.rangeLength = @as(u32, @intCast(atom.size));
records.appendAssumeCapacity(record);
atom_indexes.appendAssumeCapacity(atom_index);
@@ -329,7 +329,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
// Fold records
try info.records.ensureTotalCapacity(info.gpa, records.items.len);
- try info.records_lookup.ensureTotalCapacity(info.gpa, @intCast(u32, atom_indexes.items.len));
+ try info.records_lookup.ensureTotalCapacity(info.gpa, @as(u32, @intCast(atom_indexes.items.len)));
var maybe_prev: ?macho.compact_unwind_entry = null;
for (records.items, 0..) |record, i| {
@@ -341,15 +341,15 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
(prev.personalityFunction != record.personalityFunction) or
record.lsda > 0)
{
- const record_id = @intCast(RecordIndex, info.records.items.len);
+ const record_id = @as(RecordIndex, @intCast(info.records.items.len));
info.records.appendAssumeCapacity(record);
maybe_prev = record;
break :blk record_id;
} else {
- break :blk @intCast(RecordIndex, info.records.items.len - 1);
+ break :blk @as(RecordIndex, @intCast(info.records.items.len - 1));
}
} else {
- const record_id = @intCast(RecordIndex, info.records.items.len);
+ const record_id = @as(RecordIndex, @intCast(info.records.items.len));
info.records.appendAssumeCapacity(record);
maybe_prev = record;
break :blk record_id;
@@ -459,14 +459,14 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
}
}
- page.count = @intCast(u16, i - page.start);
+ page.count = @as(u16, @intCast(i - page.start));
if (i < info.records.items.len and page.count < max_regular_second_level_entries) {
page.kind = .regular;
- page.count = @intCast(u16, @min(
+ page.count = @as(u16, @intCast(@min(
max_regular_second_level_entries,
info.records.items.len - page.start,
- ));
+ )));
i = page.start + page.count;
} else {
page.kind = .compressed;
@@ -479,11 +479,11 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
}
// Save indices of records requiring LSDA relocation
- try info.lsdas_lookup.ensureTotalCapacity(info.gpa, @intCast(u32, info.records.items.len));
+ try info.lsdas_lookup.ensureTotalCapacity(info.gpa, @as(u32, @intCast(info.records.items.len)));
for (info.records.items, 0..) |rec, i| {
- info.lsdas_lookup.putAssumeCapacityNoClobber(@intCast(RecordIndex, i), @intCast(u32, info.lsdas.items.len));
+ info.lsdas_lookup.putAssumeCapacityNoClobber(@as(RecordIndex, @intCast(i)), @as(u32, @intCast(info.lsdas.items.len)));
if (rec.lsda == 0) continue;
- try info.lsdas.append(info.gpa, @intCast(RecordIndex, i));
+ try info.lsdas.append(info.gpa, @as(RecordIndex, @intCast(i)));
}
}
@@ -506,7 +506,7 @@ fn collectPersonalityFromDwarf(
if (cie.getPersonalityPointerReloc(
zld,
- @intCast(u32, object_id),
+ @as(u32, @intCast(object_id)),
cie_offset,
)) |target| {
const personality_index = info.getPersonalityFunction(target) orelse inner: {
@@ -532,8 +532,8 @@ fn calcRequiredSize(info: UnwindInfo) usize {
var total_size: usize = 0;
total_size += @sizeOf(macho.unwind_info_section_header);
total_size +=
- @intCast(usize, info.common_encodings_count) * @sizeOf(macho.compact_unwind_encoding_t);
- total_size += @intCast(usize, info.personalities_count) * @sizeOf(u32);
+ @as(usize, @intCast(info.common_encodings_count)) * @sizeOf(macho.compact_unwind_encoding_t);
+ total_size += @as(usize, @intCast(info.personalities_count)) * @sizeOf(u32);
total_size += (info.pages.items.len + 1) * @sizeOf(macho.unwind_info_section_header_index_entry);
total_size += info.lsdas.items.len * @sizeOf(macho.unwind_info_section_header_lsda_index_entry);
total_size += info.pages.items.len * second_level_page_bytes;
@@ -557,7 +557,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
const atom_index = zld.getGotAtomIndexForSymbol(target).?;
const atom = zld.getAtom(atom_index);
const sym = zld.getSymbol(atom.getSymbolWithLoc());
- personalities[i] = @intCast(u32, sym.n_value - seg.vmaddr);
+ personalities[i] = @as(u32, @intCast(sym.n_value - seg.vmaddr));
log.debug(" {d}: 0x{x} ({s})", .{ i, personalities[i], zld.getSymbolName(target) });
}
@@ -570,7 +570,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
}
if (rec.compactUnwindEncoding > 0 and !UnwindEncoding.isDwarf(rec.compactUnwindEncoding, cpu_arch)) {
- const lsda_target = @bitCast(SymbolWithLoc, rec.lsda);
+ const lsda_target = @as(SymbolWithLoc, @bitCast(rec.lsda));
if (lsda_target.getFile()) |_| {
const sym = zld.getSymbol(lsda_target);
rec.lsda = sym.n_value - seg.vmaddr;
@@ -601,7 +601,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
const personalities_offset: u32 = common_encodings_offset + common_encodings_count * @sizeOf(u32);
const personalities_count: u32 = info.personalities_count;
const indexes_offset: u32 = personalities_offset + personalities_count * @sizeOf(u32);
- const indexes_count: u32 = @intCast(u32, info.pages.items.len + 1);
+ const indexes_count: u32 = @as(u32, @intCast(info.pages.items.len + 1));
try writer.writeStruct(macho.unwind_info_section_header{
.commonEncodingsArraySectionOffset = common_encodings_offset,
@@ -615,34 +615,34 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
try writer.writeAll(mem.sliceAsBytes(info.common_encodings[0..info.common_encodings_count]));
try writer.writeAll(mem.sliceAsBytes(personalities[0..info.personalities_count]));
- const pages_base_offset = @intCast(u32, size - (info.pages.items.len * second_level_page_bytes));
- const lsda_base_offset = @intCast(u32, pages_base_offset -
- (info.lsdas.items.len * @sizeOf(macho.unwind_info_section_header_lsda_index_entry)));
+ const pages_base_offset = @as(u32, @intCast(size - (info.pages.items.len * second_level_page_bytes)));
+ const lsda_base_offset = @as(u32, @intCast(pages_base_offset -
+ (info.lsdas.items.len * @sizeOf(macho.unwind_info_section_header_lsda_index_entry))));
for (info.pages.items, 0..) |page, i| {
assert(page.count > 0);
const first_entry = info.records.items[page.start];
try writer.writeStruct(macho.unwind_info_section_header_index_entry{
- .functionOffset = @intCast(u32, first_entry.rangeStart),
- .secondLevelPagesSectionOffset = @intCast(u32, pages_base_offset + i * second_level_page_bytes),
+ .functionOffset = @as(u32, @intCast(first_entry.rangeStart)),
+ .secondLevelPagesSectionOffset = @as(u32, @intCast(pages_base_offset + i * second_level_page_bytes)),
.lsdaIndexArraySectionOffset = lsda_base_offset +
info.lsdas_lookup.get(page.start).? * @sizeOf(macho.unwind_info_section_header_lsda_index_entry),
});
}
const last_entry = info.records.items[info.records.items.len - 1];
- const sentinel_address = @intCast(u32, last_entry.rangeStart + last_entry.rangeLength);
+ const sentinel_address = @as(u32, @intCast(last_entry.rangeStart + last_entry.rangeLength));
try writer.writeStruct(macho.unwind_info_section_header_index_entry{
.functionOffset = sentinel_address,
.secondLevelPagesSectionOffset = 0,
.lsdaIndexArraySectionOffset = lsda_base_offset +
- @intCast(u32, info.lsdas.items.len) * @sizeOf(macho.unwind_info_section_header_lsda_index_entry),
+ @as(u32, @intCast(info.lsdas.items.len)) * @sizeOf(macho.unwind_info_section_header_lsda_index_entry),
});
for (info.lsdas.items) |record_id| {
const record = info.records.items[record_id];
try writer.writeStruct(macho.unwind_info_section_header_lsda_index_entry{
- .functionOffset = @intCast(u32, record.rangeStart),
- .lsdaOffset = @intCast(u32, record.lsda),
+ .functionOffset = @as(u32, @intCast(record.rangeStart)),
+ .lsdaOffset = @as(u32, @intCast(record.lsda)),
});
}
@@ -674,7 +674,7 @@ fn getRelocs(zld: *Zld, object_id: u32, record_id: usize) []const macho.relocati
}
fn isPersonalityFunction(record_id: usize, rel: macho.relocation_info) bool {
- const base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry));
+ const base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry)));
const rel_offset = rel.r_address - base_offset;
return rel_offset == 16;
}
@@ -703,7 +703,7 @@ fn getPersonalityFunction(info: UnwindInfo, global_index: SymbolWithLoc) ?u2 {
}
fn isLsda(record_id: usize, rel: macho.relocation_info) bool {
- const base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry));
+ const base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry)));
const rel_offset = rel.r_address - base_offset;
return rel_offset == 24;
}
@@ -754,45 +754,45 @@ fn getCommonEncoding(info: UnwindInfo, enc: macho.compact_unwind_encoding_t) ?u7
pub const UnwindEncoding = struct {
pub fn getMode(enc: macho.compact_unwind_encoding_t) u4 {
comptime assert(macho.UNWIND_ARM64_MODE_MASK == macho.UNWIND_X86_64_MODE_MASK);
- return @truncate(u4, (enc & macho.UNWIND_ARM64_MODE_MASK) >> 24);
+ return @as(u4, @truncate((enc & macho.UNWIND_ARM64_MODE_MASK) >> 24));
}
pub fn isDwarf(enc: macho.compact_unwind_encoding_t, cpu_arch: std.Target.Cpu.Arch) bool {
const mode = getMode(enc);
return switch (cpu_arch) {
- .aarch64 => @enumFromInt(macho.UNWIND_ARM64_MODE, mode) == .DWARF,
- .x86_64 => @enumFromInt(macho.UNWIND_X86_64_MODE, mode) == .DWARF,
+ .aarch64 => @as(macho.UNWIND_ARM64_MODE, @enumFromInt(mode)) == .DWARF,
+ .x86_64 => @as(macho.UNWIND_X86_64_MODE, @enumFromInt(mode)) == .DWARF,
else => unreachable,
};
}
pub fn setMode(enc: *macho.compact_unwind_encoding_t, mode: anytype) void {
- enc.* |= @intCast(u32, @intFromEnum(mode)) << 24;
+ enc.* |= @as(u32, @intCast(@intFromEnum(mode))) << 24;
}
pub fn hasLsda(enc: macho.compact_unwind_encoding_t) bool {
- const has_lsda = @truncate(u1, (enc & macho.UNWIND_HAS_LSDA) >> 31);
+ const has_lsda = @as(u1, @truncate((enc & macho.UNWIND_HAS_LSDA) >> 31));
return has_lsda == 1;
}
pub fn setHasLsda(enc: *macho.compact_unwind_encoding_t, has_lsda: bool) void {
- const mask = @intCast(u32, @intFromBool(has_lsda)) << 31;
+ const mask = @as(u32, @intCast(@intFromBool(has_lsda))) << 31;
enc.* |= mask;
}
pub fn getPersonalityIndex(enc: macho.compact_unwind_encoding_t) u2 {
- const index = @truncate(u2, (enc & macho.UNWIND_PERSONALITY_MASK) >> 28);
+ const index = @as(u2, @truncate((enc & macho.UNWIND_PERSONALITY_MASK) >> 28));
return index;
}
pub fn setPersonalityIndex(enc: *macho.compact_unwind_encoding_t, index: u2) void {
- const mask = @intCast(u32, index) << 28;
+ const mask = @as(u32, @intCast(index)) << 28;
enc.* |= mask;
}
pub fn getDwarfSectionOffset(enc: macho.compact_unwind_encoding_t, cpu_arch: std.Target.Cpu.Arch) u24 {
assert(isDwarf(enc, cpu_arch));
- const offset = @truncate(u24, enc);
+ const offset = @as(u24, @truncate(enc));
return offset;
}
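
UnwindInfo.zig and ZldAtom.zig also pick up the conversion builtins: @enumFromInt likewise dropped its type parameter, taking the enum type from @as (or a typed switch) instead. A minimal sketch with a stand-in enum, not the real macho reloc types:

    const std = @import("std");

    const RelocType = enum(u4) { unsigned = 0, branch = 2 };

    test "@enumFromInt infers the enum type from @as" {
        const r_type: u4 = 2;
        // Old: @enumFromInt(RelocType, r_type)
        const rel = @as(RelocType, @enumFromInt(r_type));
        try std.testing.expect(rel == .branch);
    }
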
diff --git a/src/link/MachO/ZldAtom.zig b/src/link/MachO/ZldAtom.zig
index 55a6325a5a..613f0fc86c 100644
--- a/src/link/MachO/ZldAtom.zig
+++ b/src/link/MachO/ZldAtom.zig
@@ -117,8 +117,8 @@ pub fn getSectionAlias(zld: *Zld, atom_index: AtomIndex) ?SymbolWithLoc {
assert(atom.getFile() != null);
const object = zld.objects.items[atom.getFile().?];
- const nbase = @intCast(u32, object.in_symtab.?.len);
- const ntotal = @intCast(u32, object.symtab.len);
+ const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+ const ntotal = @as(u32, @intCast(object.symtab.len));
var sym_index: u32 = nbase;
while (sym_index < ntotal) : (sym_index += 1) {
if (object.getAtomIndexForSymbol(sym_index)) |other_atom_index| {
@@ -144,8 +144,8 @@ pub fn calcInnerSymbolOffset(zld: *Zld, atom_index: AtomIndex, sym_index: u32) u
const base_addr = if (object.getSourceSymbol(atom.sym_index)) |sym|
sym.n_value
else blk: {
- const nbase = @intCast(u32, object.in_symtab.?.len);
- const sect_id = @intCast(u8, atom.sym_index - nbase);
+ const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+ const sect_id = @as(u8, @intCast(atom.sym_index - nbase));
const source_sect = object.getSourceSection(sect_id);
break :blk source_sect.addr;
};
@@ -177,15 +177,15 @@ pub fn getRelocContext(zld: *Zld, atom_index: AtomIndex) RelocContext {
if (object.getSourceSymbol(atom.sym_index)) |source_sym| {
const source_sect = object.getSourceSection(source_sym.n_sect - 1);
return .{
- .base_addr = @intCast(i64, source_sect.addr),
- .base_offset = @intCast(i32, source_sym.n_value - source_sect.addr),
+ .base_addr = @as(i64, @intCast(source_sect.addr)),
+ .base_offset = @as(i32, @intCast(source_sym.n_value - source_sect.addr)),
};
}
- const nbase = @intCast(u32, object.in_symtab.?.len);
- const sect_id = @intCast(u8, atom.sym_index - nbase);
+ const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+ const sect_id = @as(u8, @intCast(atom.sym_index - nbase));
const source_sect = object.getSourceSection(sect_id);
return .{
- .base_addr = @intCast(i64, source_sect.addr),
+ .base_addr = @as(i64, @intCast(source_sect.addr)),
.base_offset = 0,
};
}
@@ -204,8 +204,8 @@ pub fn parseRelocTarget(zld: *Zld, ctx: struct {
log.debug("parsing reloc target in object({d}) '{s}' ", .{ ctx.object_id, object.name });
const sym_index = if (ctx.rel.r_extern == 0) sym_index: {
- const sect_id = @intCast(u8, ctx.rel.r_symbolnum - 1);
- const rel_offset = @intCast(u32, ctx.rel.r_address - ctx.base_offset);
+ const sect_id = @as(u8, @intCast(ctx.rel.r_symbolnum - 1));
+ const rel_offset = @as(u32, @intCast(ctx.rel.r_address - ctx.base_offset));
const address_in_section = if (ctx.rel.r_pcrel == 0) blk: {
break :blk if (ctx.rel.r_length == 3)
@@ -214,7 +214,7 @@ pub fn parseRelocTarget(zld: *Zld, ctx: struct {
mem.readIntLittle(u32, ctx.code[rel_offset..][0..4]);
} else blk: {
assert(zld.options.target.cpu.arch == .x86_64);
- const correction: u3 = switch (@enumFromInt(macho.reloc_type_x86_64, ctx.rel.r_type)) {
+ const correction: u3 = switch (@as(macho.reloc_type_x86_64, @enumFromInt(ctx.rel.r_type))) {
.X86_64_RELOC_SIGNED => 0,
.X86_64_RELOC_SIGNED_1 => 1,
.X86_64_RELOC_SIGNED_2 => 2,
@@ -222,8 +222,8 @@ pub fn parseRelocTarget(zld: *Zld, ctx: struct {
else => unreachable,
};
const addend = mem.readIntLittle(i32, ctx.code[rel_offset..][0..4]);
- const target_address = @intCast(i64, ctx.base_addr) + ctx.rel.r_address + 4 + correction + addend;
- break :blk @intCast(u64, target_address);
+ const target_address = @as(i64, @intCast(ctx.base_addr)) + ctx.rel.r_address + 4 + correction + addend;
+ break :blk @as(u64, @intCast(target_address));
};
// Find containing atom
@@ -272,7 +272,7 @@ pub fn getRelocTargetAtomIndex(zld: *Zld, target: SymbolWithLoc, is_via_got: boo
fn scanAtomRelocsArm64(zld: *Zld, atom_index: AtomIndex, relocs: []align(1) const macho.relocation_info) !void {
for (relocs) |rel| {
- const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
switch (rel_type) {
.ARM64_RELOC_ADDEND, .ARM64_RELOC_SUBTRACTOR => continue,
@@ -318,7 +318,7 @@ fn scanAtomRelocsArm64(zld: *Zld, atom_index: AtomIndex, relocs: []align(1) cons
fn scanAtomRelocsX86(zld: *Zld, atom_index: AtomIndex, relocs: []align(1) const macho.relocation_info) !void {
for (relocs) |rel| {
- const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type));
switch (rel_type) {
.X86_64_RELOC_SUBTRACTOR => continue,
@@ -364,7 +364,7 @@ fn addTlvPtrEntry(zld: *Zld, target: SymbolWithLoc) !void {
const gpa = zld.gpa;
const atom_index = try zld.createTlvPtrAtom();
- const tlv_ptr_index = @intCast(u32, zld.tlv_ptr_entries.items.len);
+ const tlv_ptr_index = @as(u32, @intCast(zld.tlv_ptr_entries.items.len));
try zld.tlv_ptr_entries.append(gpa, .{
.target = target,
.atom_index = atom_index,
@@ -376,7 +376,7 @@ pub fn addGotEntry(zld: *Zld, target: SymbolWithLoc) !void {
if (zld.got_table.contains(target)) return;
const gpa = zld.gpa;
const atom_index = try zld.createGotAtom();
- const got_index = @intCast(u32, zld.got_entries.items.len);
+ const got_index = @as(u32, @intCast(zld.got_entries.items.len));
try zld.got_entries.append(gpa, .{
.target = target,
.atom_index = atom_index,
@@ -393,7 +393,7 @@ pub fn addStub(zld: *Zld, target: SymbolWithLoc) !void {
_ = try zld.createStubHelperAtom();
_ = try zld.createLazyPointerAtom();
const atom_index = try zld.createStubAtom();
- const stubs_index = @intCast(u32, zld.stubs.items.len);
+ const stubs_index = @as(u32, @intCast(zld.stubs.items.len));
try zld.stubs.append(gpa, .{
.target = target,
.atom_index = atom_index,
@@ -489,7 +489,7 @@ fn resolveRelocsArm64(
var subtractor: ?SymbolWithLoc = null;
for (atom_relocs) |rel| {
- const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
switch (rel_type) {
.ARM64_RELOC_ADDEND => {
@@ -529,7 +529,7 @@ fn resolveRelocsArm64(
.base_addr = context.base_addr,
.base_offset = context.base_offset,
});
- const rel_offset = @intCast(u32, rel.r_address - context.base_offset);
+ const rel_offset = @as(u32, @intCast(rel.r_address - context.base_offset));
log.debug(" RELA({s}) @ {x} => %{d} ('{s}') in object({?})", .{
@tagName(rel_type),
@@ -590,7 +590,7 @@ fn resolveRelocsArm64(
aarch64.Instruction.unconditional_branch_immediate,
), code),
};
- inst.unconditional_branch_immediate.imm26 = @truncate(u26, @bitCast(u28, displacement >> 2));
+ inst.unconditional_branch_immediate.imm26 = @as(u26, @truncate(@as(u28, @bitCast(displacement >> 2))));
mem.writeIntLittle(u32, code, inst.toU32());
},
@@ -598,11 +598,11 @@ fn resolveRelocsArm64(
.ARM64_RELOC_GOT_LOAD_PAGE21,
.ARM64_RELOC_TLVP_LOAD_PAGE21,
=> {
- const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + (addend orelse 0));
+ const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + (addend orelse 0)));
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
- const pages = @bitCast(u21, Relocation.calcNumberOfPages(source_addr, adjusted_target_addr));
+ const pages = @as(u21, @bitCast(Relocation.calcNumberOfPages(source_addr, adjusted_target_addr)));
const code = atom_code[rel_offset..][0..4];
var inst = aarch64.Instruction{
.pc_relative_address = mem.bytesToValue(meta.TagPayload(
@@ -610,14 +610,14 @@ fn resolveRelocsArm64(
aarch64.Instruction.pc_relative_address,
), code),
};
- inst.pc_relative_address.immhi = @truncate(u19, pages >> 2);
- inst.pc_relative_address.immlo = @truncate(u2, pages);
+ inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2));
+ inst.pc_relative_address.immlo = @as(u2, @truncate(pages));
mem.writeIntLittle(u32, code, inst.toU32());
addend = null;
},
.ARM64_RELOC_PAGEOFF12 => {
- const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + (addend orelse 0));
+ const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + (addend orelse 0)));
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
@@ -656,7 +656,7 @@ fn resolveRelocsArm64(
.ARM64_RELOC_GOT_LOAD_PAGEOFF12 => {
const code = atom_code[rel_offset..][0..4];
- const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + (addend orelse 0));
+ const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + (addend orelse 0)));
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
@@ -674,7 +674,7 @@ fn resolveRelocsArm64(
.ARM64_RELOC_TLVP_LOAD_PAGEOFF12 => {
const code = atom_code[rel_offset..][0..4];
- const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + (addend orelse 0));
+ const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + (addend orelse 0)));
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
@@ -725,7 +725,7 @@ fn resolveRelocsArm64(
.sh = 0,
.s = 0,
.op = 0,
- .sf = @truncate(u1, reg_info.size),
+ .sf = @as(u1, @truncate(reg_info.size)),
},
};
mem.writeIntLittle(u32, code, inst.toU32());
@@ -734,9 +734,9 @@ fn resolveRelocsArm64(
.ARM64_RELOC_POINTER_TO_GOT => {
log.debug(" | target_addr = 0x{x}", .{target_addr});
- const result = math.cast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr)) orelse
+ const result = math.cast(i32, @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr))) orelse
return error.Overflow;
- mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @bitCast(u32, result));
+ mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @as(u32, @bitCast(result)));
},
.ARM64_RELOC_UNSIGNED => {
@@ -747,7 +747,7 @@ fn resolveRelocsArm64(
if (rel.r_extern == 0) {
const base_addr = if (target.sym_index >= object.source_address_lookup.len)
- @intCast(i64, object.getSourceSection(@intCast(u8, rel.r_symbolnum - 1)).addr)
+ @as(i64, @intCast(object.getSourceSection(@as(u8, @intCast(rel.r_symbolnum - 1))).addr))
else
object.source_address_lookup[target.sym_index];
ptr_addend -= base_addr;
@@ -756,17 +756,17 @@ fn resolveRelocsArm64(
const result = blk: {
if (subtractor) |sub| {
const sym = zld.getSymbol(sub);
- break :blk @intCast(i64, target_addr) - @intCast(i64, sym.n_value) + ptr_addend;
+ break :blk @as(i64, @intCast(target_addr)) - @as(i64, @intCast(sym.n_value)) + ptr_addend;
} else {
- break :blk @intCast(i64, target_addr) + ptr_addend;
+ break :blk @as(i64, @intCast(target_addr)) + ptr_addend;
}
};
log.debug(" | target_addr = 0x{x}", .{result});
if (rel.r_length == 3) {
- mem.writeIntLittle(u64, atom_code[rel_offset..][0..8], @bitCast(u64, result));
+ mem.writeIntLittle(u64, atom_code[rel_offset..][0..8], @as(u64, @bitCast(result)));
} else {
- mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @truncate(u32, @bitCast(u64, result)));
+ mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @as(u32, @truncate(@as(u64, @bitCast(result)))));
}
subtractor = null;
@@ -791,7 +791,7 @@ fn resolveRelocsX86(
var subtractor: ?SymbolWithLoc = null;
for (atom_relocs) |rel| {
- const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type));
switch (rel_type) {
.X86_64_RELOC_SUBTRACTOR => {
@@ -823,7 +823,7 @@ fn resolveRelocsX86(
.base_addr = context.base_addr,
.base_offset = context.base_offset,
});
- const rel_offset = @intCast(u32, rel.r_address - context.base_offset);
+ const rel_offset = @as(u32, @intCast(rel.r_address - context.base_offset));
log.debug(" RELA({s}) @ {x} => %{d} ('{s}') in object({?})", .{
@tagName(rel_type),
@@ -851,7 +851,7 @@ fn resolveRelocsX86(
switch (rel_type) {
.X86_64_RELOC_BRANCH => {
const addend = mem.readIntLittle(i32, atom_code[rel_offset..][0..4]);
- const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend);
+ const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend));
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
mem.writeIntLittle(i32, atom_code[rel_offset..][0..4], disp);
@@ -861,7 +861,7 @@ fn resolveRelocsX86(
.X86_64_RELOC_GOT_LOAD,
=> {
const addend = mem.readIntLittle(i32, atom_code[rel_offset..][0..4]);
- const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend);
+ const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend));
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
mem.writeIntLittle(i32, atom_code[rel_offset..][0..4], disp);
@@ -869,7 +869,7 @@ fn resolveRelocsX86(
.X86_64_RELOC_TLV => {
const addend = mem.readIntLittle(i32, atom_code[rel_offset..][0..4]);
- const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend);
+ const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend));
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
@@ -897,14 +897,14 @@ fn resolveRelocsX86(
if (rel.r_extern == 0) {
const base_addr = if (target.sym_index >= object.source_address_lookup.len)
- @intCast(i64, object.getSourceSection(@intCast(u8, rel.r_symbolnum - 1)).addr)
+ @as(i64, @intCast(object.getSourceSection(@as(u8, @intCast(rel.r_symbolnum - 1))).addr))
else
object.source_address_lookup[target.sym_index];
- addend += @intCast(i32, @intCast(i64, context.base_addr) + rel.r_address + 4 -
- @intCast(i64, base_addr));
+ addend += @as(i32, @intCast(@as(i64, @intCast(context.base_addr)) + rel.r_address + 4 -
+ @as(i64, @intCast(base_addr))));
}
- const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend);
+ const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend));
log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr});
@@ -920,7 +920,7 @@ fn resolveRelocsX86(
if (rel.r_extern == 0) {
const base_addr = if (target.sym_index >= object.source_address_lookup.len)
- @intCast(i64, object.getSourceSection(@intCast(u8, rel.r_symbolnum - 1)).addr)
+ @as(i64, @intCast(object.getSourceSection(@as(u8, @intCast(rel.r_symbolnum - 1))).addr))
else
object.source_address_lookup[target.sym_index];
addend -= base_addr;
@@ -929,17 +929,17 @@ fn resolveRelocsX86(
const result = blk: {
if (subtractor) |sub| {
const sym = zld.getSymbol(sub);
- break :blk @intCast(i64, target_addr) - @intCast(i64, sym.n_value) + addend;
+ break :blk @as(i64, @intCast(target_addr)) - @as(i64, @intCast(sym.n_value)) + addend;
} else {
- break :blk @intCast(i64, target_addr) + addend;
+ break :blk @as(i64, @intCast(target_addr)) + addend;
}
};
log.debug(" | target_addr = 0x{x}", .{result});
if (rel.r_length == 3) {
- mem.writeIntLittle(u64, atom_code[rel_offset..][0..8], @bitCast(u64, result));
+ mem.writeIntLittle(u64, atom_code[rel_offset..][0..8], @as(u64, @bitCast(result)));
} else {
- mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @truncate(u32, @bitCast(u64, result)));
+ mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @as(u32, @truncate(@as(u64, @bitCast(result)))));
}
subtractor = null;
@@ -958,19 +958,19 @@ pub fn getAtomCode(zld: *Zld, atom_index: AtomIndex) []const u8 {
// If there was no matching symbol present in the source symtab, this means
// we are dealing with either an entire section, or part of it, but also
// starting at the beginning.
- const nbase = @intCast(u32, object.in_symtab.?.len);
- const sect_id = @intCast(u8, atom.sym_index - nbase);
+ const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+ const sect_id = @as(u8, @intCast(atom.sym_index - nbase));
const source_sect = object.getSourceSection(sect_id);
assert(!source_sect.isZerofill());
const code = object.getSectionContents(source_sect);
- const code_len = @intCast(usize, atom.size);
+ const code_len = @as(usize, @intCast(atom.size));
return code[0..code_len];
};
const source_sect = object.getSourceSection(source_sym.n_sect - 1);
assert(!source_sect.isZerofill());
const code = object.getSectionContents(source_sect);
- const offset = @intCast(usize, source_sym.n_value - source_sect.addr);
- const code_len = @intCast(usize, atom.size);
+ const offset = @as(usize, @intCast(source_sym.n_value - source_sect.addr));
+ const code_len = @as(usize, @intCast(atom.size));
return code[offset..][0..code_len];
}
@@ -986,8 +986,8 @@ pub fn getAtomRelocs(zld: *Zld, atom_index: AtomIndex) []const macho.relocation_
// If there was no matching symbol present in the source symtab, this means
// we are dealing with either an entire section, or part of it, but also
// starting at the beginning.
- const nbase = @intCast(u32, object.in_symtab.?.len);
- const sect_id = @intCast(u8, atom.sym_index - nbase);
+ const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+ const sect_id = @as(u8, @intCast(atom.sym_index - nbase));
break :blk sect_id;
};
const source_sect = object.getSourceSection(source_sect_id);
@@ -998,14 +998,14 @@ pub fn getAtomRelocs(zld: *Zld, atom_index: AtomIndex) []const macho.relocation_
pub fn relocRequiresGot(zld: *Zld, rel: macho.relocation_info) bool {
switch (zld.options.target.cpu.arch) {
- .aarch64 => switch (@enumFromInt(macho.reloc_type_arm64, rel.r_type)) {
+ .aarch64 => switch (@as(macho.reloc_type_arm64, @enumFromInt(rel.r_type))) {
.ARM64_RELOC_GOT_LOAD_PAGE21,
.ARM64_RELOC_GOT_LOAD_PAGEOFF12,
.ARM64_RELOC_POINTER_TO_GOT,
=> return true,
else => return false,
},
- .x86_64 => switch (@enumFromInt(macho.reloc_type_x86_64, rel.r_type)) {
+ .x86_64 => switch (@as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type))) {
.X86_64_RELOC_GOT,
.X86_64_RELOC_GOT_LOAD,
=> return true,
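
Annotation: the ARM64 hunks above all share one shape: compute the page delta for the ADRP, then the low 12 bits for the trailing ADD/LDR. A minimal sketch of that arithmetic using the single-operand cast builtins this patch migrates to; the helper names echo Relocation.zig but the signatures here are illustrative, not copied from it.

const std = @import("std");

// Page delta for ADRP: both addresses are reduced to 4 KiB page numbers first.
fn calcNumberOfPages(source_addr: u64, target_addr: u64) i21 {
    const source_page = @as(i32, @intCast(source_addr >> 12));
    const target_page = @as(i32, @intCast(target_addr >> 12));
    return @as(i21, @intCast(target_page - source_page));
}

// Low 12 bits of the target become the ADD/LDR page offset.
fn pageOffset(target_addr: u64) u12 {
    return @as(u12, @truncate(target_addr));
}

test "adrp/add page math" {
    const pages = calcNumberOfPages(0x100_4000, 0x100_9123);
    try std.testing.expectEqual(@as(i21, 5), pages);
    // The resolver then splits `pages` into immhi (19 bits) and immlo (2 bits),
    // exactly as the ARM64_RELOC_PAGE21 hunk above does.
    try std.testing.expectEqual(@as(u19, 1), @as(u19, @truncate(@as(u21, @bitCast(pages)) >> 2)));
    try std.testing.expectEqual(@as(u2, 1), @as(u2, @truncate(@as(u21, @bitCast(pages)))));
    try std.testing.expectEqual(@as(u12, 0x123), pageOffset(0x100_9123));
}
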
diff --git a/src/link/MachO/dead_strip.zig b/src/link/MachO/dead_strip.zig
index b2c569447d..890b40ed85 100644
--- a/src/link/MachO/dead_strip.zig
+++ b/src/link/MachO/dead_strip.zig
@@ -27,10 +27,10 @@ pub fn gcAtoms(zld: *Zld, resolver: *const SymbolResolver) !void {
defer arena.deinit();
var roots = AtomTable.init(arena.allocator());
- try roots.ensureUnusedCapacity(@intCast(u32, zld.globals.items.len));
+ try roots.ensureUnusedCapacity(@as(u32, @intCast(zld.globals.items.len)));
var alive = AtomTable.init(arena.allocator());
- try alive.ensureTotalCapacity(@intCast(u32, zld.atoms.items.len));
+ try alive.ensureTotalCapacity(@as(u32, @intCast(zld.atoms.items.len)));
try collectRoots(zld, &roots, resolver);
try mark(zld, roots, &alive);
@@ -99,8 +99,8 @@ fn collectRoots(zld: *Zld, roots: *AtomTable, resolver: *const SymbolResolver) !
const sect_id = if (object.getSourceSymbol(atom.sym_index)) |source_sym|
source_sym.n_sect - 1
else sect_id: {
- const nbase = @intCast(u32, object.in_symtab.?.len);
- const sect_id = @intCast(u8, atom.sym_index - nbase);
+ const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+ const sect_id = @as(u8, @intCast(atom.sym_index - nbase));
break :sect_id sect_id;
};
const source_sect = object.getSourceSection(sect_id);
@@ -148,7 +148,7 @@ fn markLive(zld: *Zld, atom_index: AtomIndex, alive: *AtomTable) void {
for (relocs) |rel| {
const target = switch (cpu_arch) {
- .aarch64 => switch (@enumFromInt(macho.reloc_type_arm64, rel.r_type)) {
+ .aarch64 => switch (@as(macho.reloc_type_arm64, @enumFromInt(rel.r_type))) {
.ARM64_RELOC_ADDEND => continue,
else => Atom.parseRelocTarget(zld, .{
.object_id = atom.getFile().?,
@@ -208,7 +208,7 @@ fn refersLive(zld: *Zld, atom_index: AtomIndex, alive: AtomTable) bool {
for (relocs) |rel| {
const target = switch (cpu_arch) {
- .aarch64 => switch (@enumFromInt(macho.reloc_type_arm64, rel.r_type)) {
+ .aarch64 => switch (@as(macho.reloc_type_arm64, @enumFromInt(rel.r_type))) {
.ARM64_RELOC_ADDEND => continue,
else => Atom.parseRelocTarget(zld, .{
.object_id = atom.getFile().?,
@@ -264,8 +264,8 @@ fn mark(zld: *Zld, roots: AtomTable, alive: *AtomTable) !void {
const sect_id = if (object.getSourceSymbol(atom.sym_index)) |source_sym|
source_sym.n_sect - 1
else blk: {
- const nbase = @intCast(u32, object.in_symtab.?.len);
- const sect_id = @intCast(u8, atom.sym_index - nbase);
+ const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+ const sect_id = @as(u8, @intCast(atom.sym_index - nbase));
break :blk sect_id;
};
const source_sect = object.getSourceSection(sect_id);
@@ -283,7 +283,7 @@ fn mark(zld: *Zld, roots: AtomTable, alive: *AtomTable) !void {
for (zld.objects.items, 0..) |_, object_id| {
// Traverse unwind and eh_frame records noting if the source symbol has been marked, and if so,
// marking all references as live.
- try markUnwindRecords(zld, @intCast(u32, object_id), alive);
+ try markUnwindRecords(zld, @as(u32, @intCast(object_id)), alive);
}
}
@@ -329,7 +329,7 @@ fn markUnwindRecords(zld: *Zld, object_id: u32, alive: *AtomTable) !void {
.object_id = object_id,
.rel = rel,
.code = mem.asBytes(&record),
- .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)),
+ .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))),
});
const target_sym = zld.getSymbol(target);
if (!target_sym.undf()) {
@@ -344,7 +344,7 @@ fn markUnwindRecords(zld: *Zld, object_id: u32, alive: *AtomTable) !void {
.object_id = object_id,
.rel = rel,
.code = mem.asBytes(&record),
- .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)),
+ .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))),
});
const target_object = zld.objects.items[target.getFile().?];
const target_atom_index = target_object.getAtomIndexForSymbol(target.sym_index).?;
@@ -377,7 +377,7 @@ fn markEhFrameRecord(zld: *Zld, object_id: u32, atom_index: AtomIndex, alive: *A
.object_id = object_id,
.rel = rel,
.code = fde.data,
- .base_offset = @intCast(i32, fde_offset) + 4,
+ .base_offset = @as(i32, @intCast(fde_offset)) + 4,
});
const target_sym = zld.getSymbol(target);
if (!target_sym.undf()) blk: {
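
Annotation: the mark phase driven by collectRoots/markLive above is a plain graph traversal. A self-contained sketch under stand-in types (u32 indices and an adjacency list instead of AtomIndex and parsed relocations); the real code recurses through Atom.parseRelocTarget rather than keeping an explicit stack.

const std = @import("std");

fn mark(gpa: std.mem.Allocator, edges: []const []const u32, roots: []const u32) !std.DynamicBitSet {
    var alive = try std.DynamicBitSet.initEmpty(gpa, edges.len);
    var stack = std.ArrayList(u32).init(gpa);
    defer stack.deinit();
    try stack.appendSlice(roots);
    while (stack.popOrNull()) |atom| {
        if (alive.isSet(atom)) continue; // already marked live
        alive.set(atom);
        // Everything a live atom references is live too.
        for (edges[atom]) |target| try stack.append(target);
    }
    return alive; // anything left unset is garbage-collected
}

test "unreferenced atoms stay dead" {
    const edges = [_][]const u32{ &.{1}, &.{}, &.{} };
    var alive = try mark(std.testing.allocator, &edges, &.{0});
    defer alive.deinit();
    try std.testing.expect(alive.isSet(1));
    try std.testing.expect(!alive.isSet(2));
}
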
diff --git a/src/link/MachO/dyld_info/Rebase.zig b/src/link/MachO/dyld_info/Rebase.zig
index 5b386a8136..0f3e96b02f 100644
--- a/src/link/MachO/dyld_info/Rebase.zig
+++ b/src/link/MachO/dyld_info/Rebase.zig
@@ -31,7 +31,7 @@ pub fn deinit(rebase: *Rebase, gpa: Allocator) void {
}
pub fn size(rebase: Rebase) u64 {
- return @intCast(u64, rebase.buffer.items.len);
+ return @as(u64, @intCast(rebase.buffer.items.len));
}
pub fn finalize(rebase: *Rebase, gpa: Allocator) !void {
@@ -145,12 +145,12 @@ fn finalizeSegment(entries: []const Entry, writer: anytype) !void {
fn setTypePointer(writer: anytype) !void {
log.debug(">>> set type: {d}", .{macho.REBASE_TYPE_POINTER});
- try writer.writeByte(macho.REBASE_OPCODE_SET_TYPE_IMM | @truncate(u4, macho.REBASE_TYPE_POINTER));
+ try writer.writeByte(macho.REBASE_OPCODE_SET_TYPE_IMM | @as(u4, @truncate(macho.REBASE_TYPE_POINTER)));
}
fn setSegmentOffset(segment_id: u8, offset: u64, writer: anytype) !void {
log.debug(">>> set segment: {d} and offset: {x}", .{ segment_id, offset });
- try writer.writeByte(macho.REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, segment_id));
+ try writer.writeByte(macho.REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @as(u4, @truncate(segment_id)));
try std.leb.writeULEB128(writer, offset);
}
@@ -163,7 +163,7 @@ fn rebaseAddAddr(addr: u64, writer: anytype) !void {
fn rebaseTimes(count: usize, writer: anytype) !void {
log.debug(">>> rebase with count: {d}", .{count});
if (count <= 0xf) {
- try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_IMM_TIMES | @truncate(u4, count));
+ try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_IMM_TIMES | @as(u4, @truncate(count)));
} else {
try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_ULEB_TIMES);
try std.leb.writeULEB128(writer, count);
@@ -182,7 +182,7 @@ fn addAddr(addr: u64, writer: anytype) !void {
if (std.mem.isAlignedGeneric(u64, addr, @sizeOf(u64))) {
const imm = @divExact(addr, @sizeOf(u64));
if (imm <= 0xf) {
- try writer.writeByte(macho.REBASE_OPCODE_ADD_ADDR_IMM_SCALED | @truncate(u4, imm));
+ try writer.writeByte(macho.REBASE_OPCODE_ADD_ADDR_IMM_SCALED | @as(u4, @truncate(imm)));
return;
}
}
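
Annotation: the opcodes assembled above come straight from std.macho. A sketch of the smallest useful rebase stream, mirroring setTypePointer/setSegmentOffset/rebaseTimes; emitRebase is an illustrative name, and a real stream out of finalizeSegment also interleaves ADD_ADDR opcodes between runs.

const std = @import("std");
const macho = std.macho;

fn emitRebase(writer: anytype, segment_id: u4, offset: u64, count: usize) !void {
    try writer.writeByte(macho.REBASE_OPCODE_SET_TYPE_IMM | @as(u4, @truncate(macho.REBASE_TYPE_POINTER)));
    try writer.writeByte(macho.REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | segment_id);
    try std.leb.writeULEB128(writer, offset);
    if (count <= 0xf) {
        // Small runs fit in the opcode's immediate nibble.
        try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_IMM_TIMES | @as(u4, @truncate(count)));
    } else {
        try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_ULEB_TIMES);
        try std.leb.writeULEB128(writer, count);
    }
    try writer.writeByte(macho.REBASE_OPCODE_DONE);
}

test "rebase stream" {
    var buf = std.ArrayList(u8).init(std.testing.allocator);
    defer buf.deinit();
    try emitRebase(buf.writer(), 2, 0x8, 3);
    try std.testing.expectEqual(@as(usize, 5), buf.items.len);
}
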
diff --git a/src/link/MachO/dyld_info/bind.zig b/src/link/MachO/dyld_info/bind.zig
index 14ce1587aa..f804c6466d 100644
--- a/src/link/MachO/dyld_info/bind.zig
+++ b/src/link/MachO/dyld_info/bind.zig
@@ -39,7 +39,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type {
}
pub fn size(self: Self) u64 {
- return @intCast(u64, self.buffer.items.len);
+ return @as(u64, @intCast(self.buffer.items.len));
}
pub fn finalize(self: *Self, gpa: Allocator, ctx: Ctx) !void {
@@ -95,7 +95,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type {
const sym = ctx.getSymbol(current.target);
const name = ctx.getSymbolName(current.target);
const flags: u8 = if (sym.weakRef()) macho.BIND_SYMBOL_FLAGS_WEAK_IMPORT else 0;
- const ordinal = @divTrunc(@bitCast(i16, sym.n_desc), macho.N_SYMBOL_RESOLVER);
+ const ordinal = @divTrunc(@as(i16, @bitCast(sym.n_desc)), macho.N_SYMBOL_RESOLVER);
try setSymbol(name, flags, writer);
try setTypePointer(writer);
@@ -112,7 +112,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type {
switch (state) {
.start => {
if (current.offset < offset) {
- try addAddr(@bitCast(u64, @intCast(i64, current.offset) - @intCast(i64, offset)), writer);
+ try addAddr(@as(u64, @bitCast(@as(i64, @intCast(current.offset)) - @as(i64, @intCast(offset)))), writer);
offset = offset - (offset - current.offset);
} else if (current.offset > offset) {
const delta = current.offset - offset;
@@ -130,7 +130,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type {
} else if (current.offset > offset) {
const delta = current.offset - offset;
state = .bind_times_skip;
- skip = @intCast(u64, delta);
+ skip = @as(u64, @intCast(delta));
offset += skip;
} else unreachable;
i -= 1;
@@ -194,7 +194,7 @@ pub fn LazyBind(comptime Ctx: type, comptime Target: type) type {
}
pub fn size(self: Self) u64 {
- return @intCast(u64, self.buffer.items.len);
+ return @as(u64, @intCast(self.buffer.items.len));
}
pub fn finalize(self: *Self, gpa: Allocator, ctx: Ctx) !void {
@@ -208,12 +208,12 @@ pub fn LazyBind(comptime Ctx: type, comptime Target: type) type {
var addend: i64 = 0;
for (self.entries.items) |entry| {
- self.offsets.appendAssumeCapacity(@intCast(u32, cwriter.bytes_written));
+ self.offsets.appendAssumeCapacity(@as(u32, @intCast(cwriter.bytes_written)));
const sym = ctx.getSymbol(entry.target);
const name = ctx.getSymbolName(entry.target);
const flags: u8 = if (sym.weakRef()) macho.BIND_SYMBOL_FLAGS_WEAK_IMPORT else 0;
- const ordinal = @divTrunc(@bitCast(i16, sym.n_desc), macho.N_SYMBOL_RESOLVER);
+ const ordinal = @divTrunc(@as(i16, @bitCast(sym.n_desc)), macho.N_SYMBOL_RESOLVER);
try setSegmentOffset(entry.segment_id, entry.offset, writer);
try setSymbol(name, flags, writer);
@@ -238,20 +238,20 @@ pub fn LazyBind(comptime Ctx: type, comptime Target: type) type {
fn setSegmentOffset(segment_id: u8, offset: u64, writer: anytype) !void {
log.debug(">>> set segment: {d} and offset: {x}", .{ segment_id, offset });
- try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, segment_id));
+ try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @as(u4, @truncate(segment_id)));
try std.leb.writeULEB128(writer, offset);
}
fn setSymbol(name: []const u8, flags: u8, writer: anytype) !void {
log.debug(">>> set symbol: {s} with flags: {x}", .{ name, flags });
- try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | @truncate(u4, flags));
+ try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | @as(u4, @truncate(flags)));
try writer.writeAll(name);
try writer.writeByte(0);
}
fn setTypePointer(writer: anytype) !void {
log.debug(">>> set type: {d}", .{macho.BIND_TYPE_POINTER});
- try writer.writeByte(macho.BIND_OPCODE_SET_TYPE_IMM | @truncate(u4, macho.BIND_TYPE_POINTER));
+ try writer.writeByte(macho.BIND_OPCODE_SET_TYPE_IMM | @as(u4, @truncate(macho.BIND_TYPE_POINTER)));
}
fn setDylibOrdinal(ordinal: i16, writer: anytype) !void {
@@ -264,13 +264,13 @@ fn setDylibOrdinal(ordinal: i16, writer: anytype) !void {
else => unreachable, // Invalid dylib special binding
}
log.debug(">>> set dylib special: {d}", .{ordinal});
- const cast = @bitCast(u16, ordinal);
- try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | @truncate(u4, cast));
+ const cast = @as(u16, @bitCast(ordinal));
+ try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | @as(u4, @truncate(cast)));
} else {
- const cast = @bitCast(u16, ordinal);
+ const cast = @as(u16, @bitCast(ordinal));
log.debug(">>> set dylib ordinal: {d}", .{ordinal});
if (cast <= 0xf) {
- try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | @truncate(u4, cast));
+ try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | @as(u4, @truncate(cast)));
} else {
try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
try std.leb.writeULEB128(writer, cast);
@@ -295,7 +295,7 @@ fn doBindAddAddr(addr: u64, writer: anytype) !void {
const imm = @divExact(addr, @sizeOf(u64));
if (imm <= 0xf) {
try writer.writeByte(
- macho.BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED | @truncate(u4, imm),
+ macho.BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED | @as(u4, @truncate(imm)),
);
return;
}
@@ -341,7 +341,7 @@ const TestContext = struct {
fn addSymbol(ctx: *TestContext, gpa: Allocator, name: []const u8, ordinal: i16, flags: u16) !void {
const n_strx = try ctx.addString(gpa, name);
- var n_desc = @bitCast(u16, ordinal * macho.N_SYMBOL_RESOLVER);
+ var n_desc = @as(u16, @bitCast(ordinal * macho.N_SYMBOL_RESOLVER));
n_desc |= flags;
try ctx.symbols.append(gpa, .{
.n_value = 0,
@@ -353,7 +353,7 @@ const TestContext = struct {
}
fn addString(ctx: *TestContext, gpa: Allocator, name: []const u8) !u32 {
- const n_strx = @intCast(u32, ctx.strtab.items.len);
+ const n_strx = @as(u32, @intCast(ctx.strtab.items.len));
try ctx.strtab.appendSlice(gpa, name);
try ctx.strtab.append(gpa, 0);
return n_strx;
@@ -366,7 +366,7 @@ const TestContext = struct {
fn getSymbolName(ctx: TestContext, target: Target) []const u8 {
const sym = ctx.getSymbol(target);
assert(sym.n_strx < ctx.strtab.items.len);
- return std.mem.sliceTo(@ptrCast([*:0]const u8, ctx.strtab.items.ptr + sym.n_strx), 0);
+ return std.mem.sliceTo(@as([*:0]const u8, @ptrCast(ctx.strtab.items.ptr + sym.n_strx)), 0);
}
};
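
Annotation: several hunks above divide n_desc by N_SYMBOL_RESOLVER to recover a dylib ordinal; the inverse packing appears in the test context here and in resolveSymbolsInDylibs. A round-trip sketch (packOrdinal/unpackOrdinal are illustrative names):

const std = @import("std");
const macho = std.macho;

// n_desc holds ordinal * 0x100; negative ordinals are dyld's special binds
// (for example -2 is BIND_SPECIAL_DYLIB_FLAT_LOOKUP), hence the i16 bit-casts.
fn packOrdinal(ordinal: i16) u16 {
    return @as(u16, @bitCast(ordinal * @as(i16, @intCast(macho.N_SYMBOL_RESOLVER))));
}

fn unpackOrdinal(n_desc: u16) i16 {
    return @divTrunc(@as(i16, @bitCast(n_desc)), macho.N_SYMBOL_RESOLVER);
}

test "ordinal round trip" {
    try std.testing.expectEqual(@as(i16, 3), unpackOrdinal(packOrdinal(3)));
    try std.testing.expectEqual(@as(i16, -2), unpackOrdinal(packOrdinal(-2)));
}
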
diff --git a/src/link/MachO/eh_frame.zig b/src/link/MachO/eh_frame.zig
index 1672e37229..eb4419cd7b 100644
--- a/src/link/MachO/eh_frame.zig
+++ b/src/link/MachO/eh_frame.zig
@@ -36,7 +36,7 @@ pub fn scanRelocs(zld: *Zld) !void {
try cies.putNoClobber(cie_offset, {});
it.seekTo(cie_offset);
const cie = (try it.next()).?;
- try cie.scanRelocs(zld, @intCast(u32, object_id), cie_offset);
+ try cie.scanRelocs(zld, @as(u32, @intCast(object_id)), cie_offset);
}
}
}
@@ -110,7 +110,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void {
var eh_frame_offset: u32 = 0;
for (zld.objects.items, 0..) |*object, object_id| {
- try eh_records.ensureUnusedCapacity(2 * @intCast(u32, object.exec_atoms.items.len));
+ try eh_records.ensureUnusedCapacity(2 * @as(u32, @intCast(object.exec_atoms.items.len)));
var cies = std.AutoHashMap(u32, u32).init(gpa);
defer cies.deinit();
@@ -139,7 +139,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void {
eh_it.seekTo(cie_offset);
const source_cie_record = (try eh_it.next()).?;
var cie_record = try source_cie_record.toOwned(gpa);
- try cie_record.relocate(zld, @intCast(u32, object_id), .{
+ try cie_record.relocate(zld, @as(u32, @intCast(object_id)), .{
.source_offset = cie_offset,
.out_offset = eh_frame_offset,
.sect_addr = sect.addr,
@@ -151,7 +151,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void {
var fde_record = try source_fde_record.toOwned(gpa);
fde_record.setCiePointer(eh_frame_offset + 4 - gop.value_ptr.*);
- try fde_record.relocate(zld, @intCast(u32, object_id), .{
+ try fde_record.relocate(zld, @as(u32, @intCast(object_id)), .{
.source_offset = fde_record_offset,
.out_offset = eh_frame_offset,
.sect_addr = sect.addr,
@@ -194,7 +194,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void {
UnwindInfo.UnwindEncoding.setDwarfSectionOffset(
&record.compactUnwindEncoding,
cpu_arch,
- @intCast(u24, eh_frame_offset),
+ @as(u24, @intCast(eh_frame_offset)),
);
const cie_record = eh_records.get(
@@ -268,7 +268,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
}) u64 {
assert(rec.tag == .fde);
const addend = mem.readIntLittle(i64, rec.data[4..][0..8]);
- return @intCast(u64, @intCast(i64, ctx.base_addr + ctx.base_offset + 8) + addend);
+ return @as(u64, @intCast(@as(i64, @intCast(ctx.base_addr + ctx.base_offset + 8)) + addend));
}
pub fn setTargetSymbolAddress(rec: *Record, value: u64, ctx: struct {
@@ -276,7 +276,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
base_offset: u64,
}) !void {
assert(rec.tag == .fde);
- const addend = @intCast(i64, value) - @intCast(i64, ctx.base_addr + ctx.base_offset + 8);
+ const addend = @as(i64, @intCast(value)) - @as(i64, @intCast(ctx.base_addr + ctx.base_offset + 8));
mem.writeIntLittle(i64, rec.data[4..][0..8], addend);
}
@@ -291,7 +291,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
for (relocs) |rel| {
switch (cpu_arch) {
.aarch64 => {
- const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
switch (rel_type) {
.ARM64_RELOC_SUBTRACTOR,
.ARM64_RELOC_UNSIGNED,
@@ -301,7 +301,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
}
},
.x86_64 => {
- const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type));
switch (rel_type) {
.X86_64_RELOC_GOT => {},
else => unreachable,
@@ -313,7 +313,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
.object_id = object_id,
.rel = rel,
.code = rec.data,
- .base_offset = @intCast(i32, source_offset) + 4,
+ .base_offset = @as(i32, @intCast(source_offset)) + 4,
});
return target;
}
@@ -335,40 +335,40 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
.object_id = object_id,
.rel = rel,
.code = rec.data,
- .base_offset = @intCast(i32, ctx.source_offset) + 4,
+ .base_offset = @as(i32, @intCast(ctx.source_offset)) + 4,
});
- const rel_offset = @intCast(u32, rel.r_address - @intCast(i32, ctx.source_offset) - 4);
+ const rel_offset = @as(u32, @intCast(rel.r_address - @as(i32, @intCast(ctx.source_offset)) - 4));
const source_addr = ctx.sect_addr + rel_offset + ctx.out_offset + 4;
switch (cpu_arch) {
.aarch64 => {
- const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
switch (rel_type) {
.ARM64_RELOC_SUBTRACTOR => {
// Address of the __eh_frame in the source object file
},
.ARM64_RELOC_POINTER_TO_GOT => {
const target_addr = try Atom.getRelocTargetAddress(zld, target, true, false);
- const result = math.cast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr)) orelse
+ const result = math.cast(i32, @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr))) orelse
return error.Overflow;
mem.writeIntLittle(i32, rec.data[rel_offset..][0..4], result);
},
.ARM64_RELOC_UNSIGNED => {
assert(rel.r_extern == 1);
const target_addr = try Atom.getRelocTargetAddress(zld, target, false, false);
- const result = @intCast(i64, target_addr) - @intCast(i64, source_addr);
- mem.writeIntLittle(i64, rec.data[rel_offset..][0..8], @intCast(i64, result));
+ const result = @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr));
+ mem.writeIntLittle(i64, rec.data[rel_offset..][0..8], @as(i64, @intCast(result)));
},
else => unreachable,
}
},
.x86_64 => {
- const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type));
switch (rel_type) {
.X86_64_RELOC_GOT => {
const target_addr = try Atom.getRelocTargetAddress(zld, target, true, false);
const addend = mem.readIntLittle(i32, rec.data[rel_offset..][0..4]);
- const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend);
+ const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend));
const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
mem.writeIntLittle(i32, rec.data[rel_offset..][0..4], disp);
},
@@ -392,7 +392,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
pub fn getAugmentationString(rec: Record) []const u8 {
assert(rec.tag == .cie);
- return mem.sliceTo(@ptrCast([*:0]const u8, rec.data.ptr + 5), 0);
+ return mem.sliceTo(@as([*:0]const u8, @ptrCast(rec.data.ptr + 5)), 0);
}
pub fn getPersonalityPointer(rec: Record, ctx: struct {
@@ -418,7 +418,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
'P' => {
const enc = try reader.readByte();
const offset = ctx.base_offset + 13 + aug_str.len + creader.bytes_read;
- const ptr = try getEncodedPointer(enc, @intCast(i64, ctx.base_addr + offset), reader);
+ const ptr = try getEncodedPointer(enc, @as(i64, @intCast(ctx.base_addr + offset)), reader);
return ptr;
},
'L' => {
@@ -441,7 +441,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
const reader = stream.reader();
_ = try reader.readByte();
const offset = ctx.base_offset + 25;
- const ptr = try getEncodedPointer(enc, @intCast(i64, ctx.base_addr + offset), reader);
+ const ptr = try getEncodedPointer(enc, @as(i64, @intCast(ctx.base_addr + offset)), reader);
return ptr;
}
@@ -454,7 +454,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
var stream = std.io.fixedBufferStream(rec.data[21..]);
const writer = stream.writer();
const offset = ctx.base_offset + 25;
- try setEncodedPointer(enc, @intCast(i64, ctx.base_addr + offset), value, writer);
+ try setEncodedPointer(enc, @as(i64, @intCast(ctx.base_addr + offset)), value, writer);
}
fn getLsdaEncoding(rec: Record) !?u8 {
@@ -494,11 +494,11 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
if (enc == EH_PE.omit) return null;
var ptr: i64 = switch (enc & 0x0F) {
- EH_PE.absptr => @bitCast(i64, try reader.readIntLittle(u64)),
- EH_PE.udata2 => @bitCast(i16, try reader.readIntLittle(u16)),
- EH_PE.udata4 => @bitCast(i32, try reader.readIntLittle(u32)),
- EH_PE.udata8 => @bitCast(i64, try reader.readIntLittle(u64)),
- EH_PE.uleb128 => @bitCast(i64, try leb.readULEB128(u64, reader)),
+ EH_PE.absptr => @as(i64, @bitCast(try reader.readIntLittle(u64))),
+ EH_PE.udata2 => @as(i16, @bitCast(try reader.readIntLittle(u16))),
+ EH_PE.udata4 => @as(i32, @bitCast(try reader.readIntLittle(u32))),
+ EH_PE.udata8 => @as(i64, @bitCast(try reader.readIntLittle(u64))),
+ EH_PE.uleb128 => @as(i64, @bitCast(try leb.readULEB128(u64, reader))),
EH_PE.sdata2 => try reader.readIntLittle(i16),
EH_PE.sdata4 => try reader.readIntLittle(i32),
EH_PE.sdata8 => try reader.readIntLittle(i64),
@@ -517,13 +517,13 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
else => return null,
}
- return @bitCast(u64, ptr);
+ return @as(u64, @bitCast(ptr));
}
fn setEncodedPointer(enc: u8, pcrel_offset: i64, value: u64, writer: anytype) !void {
if (enc == EH_PE.omit) return;
- var actual = @intCast(i64, value);
+ var actual = @as(i64, @intCast(value));
switch (enc & 0x70) {
EH_PE.absptr => {},
@@ -537,13 +537,13 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
}
switch (enc & 0x0F) {
- EH_PE.absptr => try writer.writeIntLittle(u64, @bitCast(u64, actual)),
- EH_PE.udata2 => try writer.writeIntLittle(u16, @bitCast(u16, @intCast(i16, actual))),
- EH_PE.udata4 => try writer.writeIntLittle(u32, @bitCast(u32, @intCast(i32, actual))),
- EH_PE.udata8 => try writer.writeIntLittle(u64, @bitCast(u64, actual)),
- EH_PE.uleb128 => try leb.writeULEB128(writer, @bitCast(u64, actual)),
- EH_PE.sdata2 => try writer.writeIntLittle(i16, @intCast(i16, actual)),
- EH_PE.sdata4 => try writer.writeIntLittle(i32, @intCast(i32, actual)),
+ EH_PE.absptr => try writer.writeIntLittle(u64, @as(u64, @bitCast(actual))),
+ EH_PE.udata2 => try writer.writeIntLittle(u16, @as(u16, @bitCast(@as(i16, @intCast(actual))))),
+ EH_PE.udata4 => try writer.writeIntLittle(u32, @as(u32, @bitCast(@as(i32, @intCast(actual))))),
+ EH_PE.udata8 => try writer.writeIntLittle(u64, @as(u64, @bitCast(actual))),
+ EH_PE.uleb128 => try leb.writeULEB128(writer, @as(u64, @bitCast(actual))),
+ EH_PE.sdata2 => try writer.writeIntLittle(i16, @as(i16, @intCast(actual))),
+ EH_PE.sdata4 => try writer.writeIntLittle(i32, @as(i32, @intCast(actual))),
EH_PE.sdata8 => try writer.writeIntLittle(i64, actual),
EH_PE.sleb128 => try leb.writeILEB128(writer, actual),
else => unreachable,
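
Annotation: getEncodedPointer/setEncodedPointer above implement the DWARF EH_PE scheme: the low nibble selects the value format, bits 0x70 select the base (pcrel adds the read address). A decoding sketch covering just two formats; the EH_PE constants are the standard ones, and decodePointer is an illustrative name.

const std = @import("std");

// Standard DWARF EH_PE encodings; only the ones used below are declared.
const EH_PE = struct {
    const absptr: u8 = 0x00;
    const sdata4: u8 = 0x0B;
    const pcrel: u8 = 0x10;
    const omit: u8 = 0xFF;
};

fn decodePointer(enc: u8, pc: i64, reader: anytype) !?u64 {
    if (enc == EH_PE.omit) return null;
    var ptr: i64 = switch (enc & 0x0F) {
        EH_PE.absptr => @as(i64, @bitCast(try reader.readIntLittle(u64))),
        EH_PE.sdata4 => try reader.readIntLittle(i32),
        else => return null, // remaining formats elided in this sketch
    };
    if (enc & 0x70 == EH_PE.pcrel) ptr += pc; // pc-relative base
    return @as(u64, @bitCast(ptr));
}

test "pcrel sdata4" {
    var fbs = std.io.fixedBufferStream(&[_]u8{ 0xFC, 0xFF, 0xFF, 0xFF }); // -4, little-endian
    const got = try decodePointer(EH_PE.sdata4 | EH_PE.pcrel, 0x1000, fbs.reader());
    try std.testing.expectEqual(@as(?u64, 0xFFC), got);
}
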
diff --git a/src/link/MachO/load_commands.zig b/src/link/MachO/load_commands.zig
index eb582e2222..10f446f191 100644
--- a/src/link/MachO/load_commands.zig
+++ b/src/link/MachO/load_commands.zig
@@ -114,7 +114,7 @@ fn calcLCsSize(gpa: Allocator, options: *const link.Options, ctx: CalcLCsSizeCtx
}
}
- return @intCast(u32, sizeofcmds);
+ return @as(u32, @intCast(sizeofcmds));
}
pub fn calcMinHeaderPad(gpa: Allocator, options: *const link.Options, ctx: CalcLCsSizeCtx) !u64 {
@@ -140,7 +140,7 @@ pub fn calcNumOfLCs(lc_buffer: []const u8) u32 {
var pos: usize = 0;
while (true) {
if (pos >= lc_buffer.len) break;
- const cmd = @ptrCast(*align(1) const macho.load_command, lc_buffer.ptr + pos).*;
+ const cmd = @as(*align(1) const macho.load_command, @ptrCast(lc_buffer.ptr + pos)).*;
ncmds += 1;
pos += cmd.cmdsize;
}
@@ -149,11 +149,11 @@ pub fn calcNumOfLCs(lc_buffer: []const u8) u32 {
pub fn writeDylinkerLC(lc_writer: anytype) !void {
const name_len = mem.sliceTo(default_dyld_path, 0).len;
- const cmdsize = @intCast(u32, mem.alignForward(
+ const cmdsize = @as(u32, @intCast(mem.alignForward(
u64,
@sizeOf(macho.dylinker_command) + name_len,
@sizeOf(u64),
- ));
+ )));
try lc_writer.writeStruct(macho.dylinker_command{
.cmd = .LOAD_DYLINKER,
.cmdsize = cmdsize,
@@ -176,11 +176,11 @@ const WriteDylibLCCtx = struct {
fn writeDylibLC(ctx: WriteDylibLCCtx, lc_writer: anytype) !void {
const name_len = ctx.name.len + 1;
- const cmdsize = @intCast(u32, mem.alignForward(
+ const cmdsize = @as(u32, @intCast(mem.alignForward(
u64,
@sizeOf(macho.dylib_command) + name_len,
@sizeOf(u64),
- ));
+ )));
try lc_writer.writeStruct(macho.dylib_command{
.cmd = ctx.cmd,
.cmdsize = cmdsize,
@@ -217,8 +217,8 @@ pub fn writeDylibIdLC(gpa: Allocator, options: *const link.Options, lc_writer: a
try writeDylibLC(.{
.cmd = .ID_DYLIB,
.name = install_name,
- .current_version = @intCast(u32, curr.major << 16 | curr.minor << 8 | curr.patch),
- .compatibility_version = @intCast(u32, compat.major << 16 | compat.minor << 8 | compat.patch),
+ .current_version = @as(u32, @intCast(curr.major << 16 | curr.minor << 8 | curr.patch)),
+ .compatibility_version = @as(u32, @intCast(compat.major << 16 | compat.minor << 8 | compat.patch)),
}, lc_writer);
}
@@ -253,11 +253,11 @@ pub fn writeRpathLCs(gpa: Allocator, options: *const link.Options, lc_writer: an
while (try it.next()) |rpath| {
const rpath_len = rpath.len + 1;
- const cmdsize = @intCast(u32, mem.alignForward(
+ const cmdsize = @as(u32, @intCast(mem.alignForward(
u64,
@sizeOf(macho.rpath_command) + rpath_len,
@sizeOf(u64),
- ));
+ )));
try lc_writer.writeStruct(macho.rpath_command{
.cmdsize = cmdsize,
.path = @sizeOf(macho.rpath_command),
@@ -275,12 +275,12 @@ pub fn writeBuildVersionLC(options: *const link.Options, lc_writer: anytype) !vo
const cmdsize = @sizeOf(macho.build_version_command) + @sizeOf(macho.build_tool_version);
const platform_version = blk: {
const ver = options.target.os.version_range.semver.min;
- const platform_version = @intCast(u32, ver.major << 16 | ver.minor << 8);
+ const platform_version = @as(u32, @intCast(ver.major << 16 | ver.minor << 8));
break :blk platform_version;
};
const sdk_version = if (options.native_darwin_sdk) |sdk| blk: {
const ver = sdk.version;
- const sdk_version = @intCast(u32, ver.major << 16 | ver.minor << 8);
+ const sdk_version = @as(u32, @intCast(ver.major << 16 | ver.minor << 8));
break :blk sdk_version;
} else platform_version;
const is_simulator_abi = options.target.abi == .simulator;
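
Annotation: writeDylibIdLC and writeBuildVersionLC above pack x.y.z versions into Mach-O's u32 layout: major in the high 16 bits, then a minor byte and a patch byte. A one-line sketch with a worked value (packVersion is an illustrative helper):

const std = @import("std");

fn packVersion(major: u16, minor: u8, patch: u8) u32 {
    return @as(u32, major) << 16 | @as(u32, minor) << 8 | patch;
}

test "version packing" {
    // 13.4.1 -> 0x000D0401
    try std.testing.expectEqual(@as(u32, 0x000D0401), packVersion(13, 4, 1));
}
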
diff --git a/src/link/MachO/thunks.zig b/src/link/MachO/thunks.zig
index f3289e544b..82d0451225 100644
--- a/src/link/MachO/thunks.zig
+++ b/src/link/MachO/thunks.zig
@@ -131,7 +131,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void {
log.debug("GROUP END at {d}", .{group_end});
// Insert thunk at group_end
- const thunk_index = @intCast(u32, zld.thunks.items.len);
+ const thunk_index = @as(u32, @intCast(zld.thunks.items.len));
try zld.thunks.append(gpa, .{ .start_index = undefined, .len = 0 });
// Scan relocs in the group and create trampolines for any unreachable callsite.
@@ -174,7 +174,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void {
}
}
- header.size = @intCast(u32, offset);
+ header.size = @as(u32, @intCast(offset));
}
fn allocateThunk(
@@ -223,7 +223,7 @@ fn scanRelocs(
const base_offset = if (object.getSourceSymbol(atom.sym_index)) |source_sym| blk: {
const source_sect = object.getSourceSection(source_sym.n_sect - 1);
- break :blk @intCast(i32, source_sym.n_value - source_sect.addr);
+ break :blk @as(i32, @intCast(source_sym.n_value - source_sect.addr));
} else 0;
const code = Atom.getAtomCode(zld, atom_index);
@@ -289,7 +289,7 @@ fn scanRelocs(
}
inline fn relocNeedsThunk(rel: macho.relocation_info) bool {
- const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
return rel_type == .ARM64_RELOC_BRANCH26;
}
@@ -315,7 +315,7 @@ fn isReachable(
if (!allocated.contains(target_atom_index)) return false;
- const source_addr = source_sym.n_value + @intCast(u32, rel.r_address - base_offset);
+ const source_addr = source_sym.n_value + @as(u32, @intCast(rel.r_address - base_offset));
const is_via_got = Atom.relocRequiresGot(zld, rel);
const target_addr = Atom.getRelocTargetAddress(zld, target, is_via_got, false) catch unreachable;
_ = Relocation.calcPcRelativeDisplacementArm64(source_addr, target_addr) catch
@@ -349,7 +349,7 @@ fn getThunkIndex(zld: *Zld, atom_index: AtomIndex) ?ThunkIndex {
const end_addr = start_addr + thunk.getSize();
if (start_addr <= sym.n_value and sym.n_value < end_addr) {
- return @intCast(u32, i);
+ return @as(u32, @intCast(i));
}
}
return null;
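
Annotation: isReachable above falls back to a thunk when the BL displacement overflows, and the check it delegates to is just a ranged cast. A sketch of that bound (the real helper lives in Relocation.zig; this signature is assumed, not copied): a 26-bit word immediate gives +/-128 MiB of byte displacement, i.e. an i28.

const std = @import("std");

fn calcPcRelativeDisplacementArm64(source_addr: u64, target_addr: u64) error{Overflow}!i28 {
    const disp = @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr));
    return std.math.cast(i28, disp) orelse error.Overflow;
}

test "bl reach" {
    // Just inside the +/-128 MiB window: no thunk needed.
    _ = try calcPcRelativeDisplacementArm64(0x1000, 0x1000 + 0x7FF_FFFC);
    // Just past it: createThunks must insert a range extender.
    try std.testing.expectError(error.Overflow, calcPcRelativeDisplacementArm64(0x1000, 0x1000 + 0x800_0000));
}
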
diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig
index 13c1ea73fa..3e828984a9 100644
--- a/src/link/MachO/zld.zig
+++ b/src/link/MachO/zld.zig
@@ -103,7 +103,7 @@ pub const Zld = struct {
const cpu_arch = self.options.target.cpu.arch;
const mtime: u64 = mtime: {
const stat = file.stat() catch break :mtime 0;
- break :mtime @intCast(u64, @divFloor(stat.mtime, 1_000_000_000));
+ break :mtime @as(u64, @intCast(@divFloor(stat.mtime, 1_000_000_000)));
};
const file_stat = try file.stat();
const file_size = math.cast(usize, file_stat.size) orelse return error.Overflow;
@@ -220,7 +220,7 @@ pub const Zld = struct {
const contents = try file.readToEndAllocOptions(gpa, file_size, file_size, @alignOf(u64), null);
defer gpa.free(contents);
- const dylib_id = @intCast(u16, self.dylibs.items.len);
+ const dylib_id = @as(u16, @intCast(self.dylibs.items.len));
var dylib = Dylib{ .weak = opts.weak };
dylib.parseFromBinary(
@@ -535,7 +535,7 @@ pub const Zld = struct {
pub fn createEmptyAtom(self: *Zld, sym_index: u32, size: u64, alignment: u32) !AtomIndex {
const gpa = self.gpa;
- const index = @intCast(AtomIndex, self.atoms.items.len);
+ const index = @as(AtomIndex, @intCast(self.atoms.items.len));
const atom = try self.atoms.addOne(gpa);
atom.* = Atom.empty;
atom.sym_index = sym_index;
@@ -596,7 +596,7 @@ pub const Zld = struct {
const global_index = self.dyld_stub_binder_index orelse return;
const target = self.globals.items[global_index];
const atom_index = try self.createGotAtom();
- const got_index = @intCast(u32, self.got_entries.items.len);
+ const got_index = @as(u32, @intCast(self.got_entries.items.len));
try self.got_entries.append(gpa, .{
.target = target,
.atom_index = atom_index,
@@ -874,7 +874,7 @@ pub const Zld = struct {
}
for (self.objects.items, 0..) |_, object_id| {
- try self.resolveSymbolsInObject(@intCast(u32, object_id), resolver);
+ try self.resolveSymbolsInObject(@as(u32, @intCast(object_id)), resolver);
}
try self.resolveSymbolsInArchives(resolver);
@@ -1024,7 +1024,7 @@ pub const Zld = struct {
};
assert(offsets.items.len > 0);
- const object_id = @intCast(u16, self.objects.items.len);
+ const object_id = @as(u16, @intCast(self.objects.items.len));
const object = archive.parseObject(gpa, cpu_arch, offsets.items[0]) catch |e| switch (e) {
error.MismatchedCpuArchitecture => {
log.err("CPU architecture mismatch found in {s}", .{archive.name});
@@ -1055,14 +1055,14 @@ pub const Zld = struct {
for (self.dylibs.items, 0..) |dylib, id| {
if (!dylib.symbols.contains(sym_name)) continue;
- const dylib_id = @intCast(u16, id);
+ const dylib_id = @as(u16, @intCast(id));
if (!self.referenced_dylibs.contains(dylib_id)) {
try self.referenced_dylibs.putNoClobber(self.gpa, dylib_id, {});
}
const ordinal = self.referenced_dylibs.getIndex(dylib_id) orelse unreachable;
sym.n_type |= macho.N_EXT;
- sym.n_desc = @intCast(u16, ordinal + 1) * macho.N_SYMBOL_RESOLVER;
+ sym.n_desc = @as(u16, @intCast(ordinal + 1)) * macho.N_SYMBOL_RESOLVER;
if (dylib.weak) {
sym.n_desc |= macho.N_WEAK_REF;
@@ -1099,9 +1099,9 @@ pub const Zld = struct {
_ = resolver.unresolved.swapRemove(global_index);
continue;
} else if (allow_undef) {
- const n_desc = @bitCast(
+ const n_desc = @as(
u16,
- macho.BIND_SPECIAL_DYLIB_FLAT_LOOKUP * @intCast(i16, macho.N_SYMBOL_RESOLVER),
+ @bitCast(macho.BIND_SPECIAL_DYLIB_FLAT_LOOKUP * @as(i16, @intCast(macho.N_SYMBOL_RESOLVER))),
);
sym.n_type = macho.N_EXT;
sym.n_desc = n_desc;
@@ -1238,7 +1238,7 @@ pub const Zld = struct {
const segname = header.segName();
const segment_id = self.getSegmentByName(segname) orelse blk: {
log.debug("creating segment '{s}'", .{segname});
- const segment_id = @intCast(u8, self.segments.items.len);
+ const segment_id = @as(u8, @intCast(self.segments.items.len));
const protection = getSegmentMemoryProtection(segname);
try self.segments.append(self.gpa, .{
.cmdsize = @sizeOf(macho.segment_command_64),
@@ -1269,7 +1269,7 @@ pub const Zld = struct {
pub fn allocateSymbol(self: *Zld) !u32 {
try self.locals.ensureUnusedCapacity(self.gpa, 1);
log.debug(" (allocating symbol index {d})", .{self.locals.items.len});
- const index = @intCast(u32, self.locals.items.len);
+ const index = @as(u32, @intCast(self.locals.items.len));
_ = self.locals.addOneAssumeCapacity();
self.locals.items[index] = .{
.n_strx = 0,
@@ -1282,7 +1282,7 @@ pub const Zld = struct {
}
fn addGlobal(self: *Zld, sym_loc: SymbolWithLoc) !u32 {
- const global_index = @intCast(u32, self.globals.items.len);
+ const global_index = @as(u32, @intCast(self.globals.items.len));
try self.globals.append(self.gpa, sym_loc);
return global_index;
}
@@ -1489,7 +1489,7 @@ pub const Zld = struct {
if (mem.eql(u8, header.sectName(), "__stub_helper")) continue;
// Create jump/branch range extenders if needed.
- try thunks.createThunks(self, @intCast(u8, sect_id));
+ try thunks.createThunks(self, @as(u8, @intCast(sect_id)));
}
}
}
@@ -1502,7 +1502,7 @@ pub const Zld = struct {
.dylibs = self.dylibs.items,
.referenced_dylibs = self.referenced_dylibs.keys(),
}) else 0;
- try self.allocateSegment(@intCast(u8, segment_index), base_size);
+ try self.allocateSegment(@as(u8, @intCast(segment_index)), base_size);
}
}
@@ -1536,12 +1536,12 @@ pub const Zld = struct {
for (slice.items(.header)[indexes.start..indexes.end], 0..) |*header, sect_id| {
const alignment = try math.powi(u32, 2, header.@"align");
const start_aligned = mem.alignForward(u64, start, alignment);
- const n_sect = @intCast(u8, indexes.start + sect_id + 1);
+ const n_sect = @as(u8, @intCast(indexes.start + sect_id + 1));
header.offset = if (header.isZerofill())
0
else
- @intCast(u32, segment.fileoff + start_aligned);
+ @as(u32, @intCast(segment.fileoff + start_aligned));
header.addr = segment.vmaddr + start_aligned;
var atom_index = slice.items(.first_atom_index)[indexes.start + sect_id];
@@ -1617,7 +1617,7 @@ pub const Zld = struct {
) !u8 {
const gpa = self.gpa;
log.debug("creating section '{s},{s}'", .{ segname, sectname });
- const index = @intCast(u8, self.sections.slice().len);
+ const index = @as(u8, @intCast(self.sections.slice().len));
try self.sections.append(gpa, .{
.segment_index = undefined, // Segments will be created automatically later down the pipeline
.header = .{
@@ -1673,12 +1673,12 @@ pub const Zld = struct {
},
}
};
- return (@intCast(u8, segment_precedence) << 4) + section_precedence;
+ return (@as(u8, @intCast(segment_precedence)) << 4) + section_precedence;
}
fn writeSegmentHeaders(self: *Zld, writer: anytype) !void {
for (self.segments.items, 0..) |seg, i| {
- const indexes = self.getSectionIndexes(@intCast(u8, i));
+ const indexes = self.getSectionIndexes(@as(u8, @intCast(i)));
var out_seg = seg;
out_seg.cmdsize = @sizeOf(macho.segment_command_64);
out_seg.nsects = 0;
@@ -1790,7 +1790,7 @@ pub const Zld = struct {
}
const segment_index = slice.items(.segment_index)[sect_id];
- const segment = self.getSegment(@intCast(u8, sect_id));
+ const segment = self.getSegment(@as(u8, @intCast(sect_id)));
if (segment.maxprot & macho.PROT.WRITE == 0) continue;
log.debug("{s},{s}", .{ header.segName(), header.sectName() });
@@ -1820,12 +1820,12 @@ pub const Zld = struct {
for (relocs) |rel| {
switch (cpu_arch) {
.aarch64 => {
- const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
if (rel_type != .ARM64_RELOC_UNSIGNED) continue;
if (rel.r_length != 3) continue;
},
.x86_64 => {
- const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type));
if (rel_type != .X86_64_RELOC_UNSIGNED) continue;
if (rel.r_length != 3) continue;
},
@@ -1841,9 +1841,9 @@ pub const Zld = struct {
const target_sym = self.getSymbol(target);
if (target_sym.undf()) continue;
- const base_offset = @intCast(i32, sym.n_value - segment.vmaddr);
+ const base_offset = @as(i32, @intCast(sym.n_value - segment.vmaddr));
const rel_offset = rel.r_address - ctx.base_offset;
- const offset = @intCast(u64, base_offset + rel_offset);
+ const offset = @as(u64, @intCast(base_offset + rel_offset));
log.debug(" | rebase at {x}", .{offset});
try rebase.entries.append(self.gpa, .{
@@ -1882,7 +1882,7 @@ pub const Zld = struct {
const sym = entry.getAtomSymbol(self);
const base_offset = sym.n_value - seg.vmaddr;
- const dylib_ordinal = @divTrunc(@bitCast(i16, bind_sym.n_desc), macho.N_SYMBOL_RESOLVER);
+ const dylib_ordinal = @divTrunc(@as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER);
log.debug(" | bind at {x}, import('{s}') in dylib({d})", .{
base_offset,
bind_sym_name,
@@ -1929,7 +1929,7 @@ pub const Zld = struct {
}
const segment_index = slice.items(.segment_index)[sect_id];
- const segment = self.getSegment(@intCast(u8, sect_id));
+ const segment = self.getSegment(@as(u8, @intCast(sect_id)));
if (segment.maxprot & macho.PROT.WRITE == 0) continue;
const cpu_arch = self.options.target.cpu.arch;
@@ -1959,12 +1959,12 @@ pub const Zld = struct {
for (relocs) |rel| {
switch (cpu_arch) {
.aarch64 => {
- const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
if (rel_type != .ARM64_RELOC_UNSIGNED) continue;
if (rel.r_length != 3) continue;
},
.x86_64 => {
- const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type);
+ const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type));
if (rel_type != .X86_64_RELOC_UNSIGNED) continue;
if (rel.r_length != 3) continue;
},
@@ -1983,11 +1983,11 @@ pub const Zld = struct {
if (!bind_sym.undf()) continue;
const base_offset = sym.n_value - segment.vmaddr;
- const rel_offset = @intCast(u32, rel.r_address - ctx.base_offset);
- const offset = @intCast(u64, base_offset + rel_offset);
+ const rel_offset = @as(u32, @intCast(rel.r_address - ctx.base_offset));
+ const offset = @as(u64, @intCast(base_offset + rel_offset));
const addend = mem.readIntLittle(i64, code[rel_offset..][0..8]);
- const dylib_ordinal = @divTrunc(@bitCast(i16, bind_sym.n_desc), macho.N_SYMBOL_RESOLVER);
+ const dylib_ordinal = @divTrunc(@as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER);
log.debug(" | bind at {x}, import('{s}') in dylib({d})", .{
base_offset,
bind_sym_name,
@@ -2039,7 +2039,7 @@ pub const Zld = struct {
const stub_entry = self.stubs.items[count];
const bind_sym = stub_entry.getTargetSymbol(self);
const bind_sym_name = stub_entry.getTargetSymbolName(self);
- const dylib_ordinal = @divTrunc(@bitCast(i16, bind_sym.n_desc), macho.N_SYMBOL_RESOLVER);
+ const dylib_ordinal = @divTrunc(@as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER);
log.debug(" | lazy bind at {x}, import('{s}') in dylib({d})", .{
base_offset,
bind_sym_name,
@@ -2165,14 +2165,14 @@ pub const Zld = struct {
try self.file.pwriteAll(buffer, rebase_off);
try self.populateLazyBindOffsetsInStubHelper(lazy_bind);
- self.dyld_info_cmd.rebase_off = @intCast(u32, rebase_off);
- self.dyld_info_cmd.rebase_size = @intCast(u32, rebase_size_aligned);
- self.dyld_info_cmd.bind_off = @intCast(u32, bind_off);
- self.dyld_info_cmd.bind_size = @intCast(u32, bind_size_aligned);
- self.dyld_info_cmd.lazy_bind_off = @intCast(u32, lazy_bind_off);
- self.dyld_info_cmd.lazy_bind_size = @intCast(u32, lazy_bind_size_aligned);
- self.dyld_info_cmd.export_off = @intCast(u32, export_off);
- self.dyld_info_cmd.export_size = @intCast(u32, export_size_aligned);
+ self.dyld_info_cmd.rebase_off = @as(u32, @intCast(rebase_off));
+ self.dyld_info_cmd.rebase_size = @as(u32, @intCast(rebase_size_aligned));
+ self.dyld_info_cmd.bind_off = @as(u32, @intCast(bind_off));
+ self.dyld_info_cmd.bind_size = @as(u32, @intCast(bind_size_aligned));
+ self.dyld_info_cmd.lazy_bind_off = @as(u32, @intCast(lazy_bind_off));
+ self.dyld_info_cmd.lazy_bind_size = @as(u32, @intCast(lazy_bind_size_aligned));
+ self.dyld_info_cmd.export_off = @as(u32, @intCast(export_off));
+ self.dyld_info_cmd.export_size = @as(u32, @intCast(export_size_aligned));
}
fn populateLazyBindOffsetsInStubHelper(self: *Zld, lazy_bind: LazyBind) !void {
@@ -2246,7 +2246,7 @@ pub const Zld = struct {
var last_off: u32 = 0;
for (addresses.items) |addr| {
- const offset = @intCast(u32, addr - text_seg.vmaddr);
+ const offset = @as(u32, @intCast(addr - text_seg.vmaddr));
const diff = offset - last_off;
if (diff == 0) continue;
@@ -2258,7 +2258,7 @@ pub const Zld = struct {
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
- const max_size = @intCast(usize, offsets.items.len * @sizeOf(u64));
+ const max_size = @as(usize, @intCast(offsets.items.len * @sizeOf(u64)));
try buffer.ensureTotalCapacity(max_size);
for (offsets.items) |offset| {
@@ -2281,8 +2281,8 @@ pub const Zld = struct {
try self.file.pwriteAll(buffer.items, offset);
- self.function_starts_cmd.dataoff = @intCast(u32, offset);
- self.function_starts_cmd.datasize = @intCast(u32, needed_size_aligned);
+ self.function_starts_cmd.dataoff = @as(u32, @intCast(offset));
+ self.function_starts_cmd.datasize = @as(u32, @intCast(needed_size_aligned));
}
fn filterDataInCode(
@@ -2324,8 +2324,8 @@ pub const Zld = struct {
const source_addr = if (object.getSourceSymbol(atom.sym_index)) |source_sym|
source_sym.n_value
else blk: {
- const nbase = @intCast(u32, object.in_symtab.?.len);
- const source_sect_id = @intCast(u8, atom.sym_index - nbase);
+ const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+ const source_sect_id = @as(u8, @intCast(atom.sym_index - nbase));
break :blk object.getSourceSection(source_sect_id).addr;
};
const filtered_dice = filterDataInCode(dice, source_addr, source_addr + atom.size);
@@ -2363,8 +2363,8 @@ pub const Zld = struct {
try self.file.pwriteAll(buffer, offset);
- self.data_in_code_cmd.dataoff = @intCast(u32, offset);
- self.data_in_code_cmd.datasize = @intCast(u32, needed_size_aligned);
+ self.data_in_code_cmd.dataoff = @as(u32, @intCast(offset));
+ self.data_in_code_cmd.datasize = @as(u32, @intCast(needed_size_aligned));
}
fn writeSymtabs(self: *Zld) !void {
@@ -2428,7 +2428,7 @@ pub const Zld = struct {
if (!sym.undf()) continue; // not an import, skip
if (sym.n_desc == N_DEAD) continue;
- const new_index = @intCast(u32, imports.items.len);
+ const new_index = @as(u32, @intCast(imports.items.len));
var out_sym = sym;
out_sym.n_strx = try self.strtab.insert(gpa, self.getSymbolName(global));
try imports.append(out_sym);
@@ -2443,9 +2443,9 @@ pub const Zld = struct {
}
}
- const nlocals = @intCast(u32, locals.items.len);
- const nexports = @intCast(u32, exports.items.len);
- const nimports = @intCast(u32, imports.items.len);
+ const nlocals = @as(u32, @intCast(locals.items.len));
+ const nexports = @as(u32, @intCast(exports.items.len));
+ const nimports = @as(u32, @intCast(imports.items.len));
const nsyms = nlocals + nexports + nimports;
const seg = self.getLinkeditSegmentPtr();
@@ -2465,7 +2465,7 @@ pub const Zld = struct {
log.debug("writing symtab from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
try self.file.pwriteAll(buffer.items, offset);
- self.symtab_cmd.symoff = @intCast(u32, offset);
+ self.symtab_cmd.symoff = @as(u32, @intCast(offset));
self.symtab_cmd.nsyms = nsyms;
return SymtabCtx{
@@ -2493,8 +2493,8 @@ pub const Zld = struct {
try self.file.pwriteAll(buffer, offset);
- self.symtab_cmd.stroff = @intCast(u32, offset);
- self.symtab_cmd.strsize = @intCast(u32, needed_size_aligned);
+ self.symtab_cmd.stroff = @as(u32, @intCast(offset));
+ self.symtab_cmd.strsize = @as(u32, @intCast(needed_size_aligned));
}
const SymtabCtx = struct {
@@ -2506,8 +2506,8 @@ pub const Zld = struct {
fn writeDysymtab(self: *Zld, ctx: SymtabCtx) !void {
const gpa = self.gpa;
- const nstubs = @intCast(u32, self.stubs.items.len);
- const ngot_entries = @intCast(u32, self.got_entries.items.len);
+ const nstubs = @as(u32, @intCast(self.stubs.items.len));
+ const ngot_entries = @as(u32, @intCast(self.got_entries.items.len));
const nindirectsyms = nstubs * 2 + ngot_entries;
const iextdefsym = ctx.nlocalsym;
const iundefsym = iextdefsym + ctx.nextdefsym;
@@ -2572,7 +2572,7 @@ pub const Zld = struct {
self.dysymtab_cmd.nextdefsym = ctx.nextdefsym;
self.dysymtab_cmd.iundefsym = iundefsym;
self.dysymtab_cmd.nundefsym = ctx.nundefsym;
- self.dysymtab_cmd.indirectsymoff = @intCast(u32, offset);
+ self.dysymtab_cmd.indirectsymoff = @as(u32, @intCast(offset));
self.dysymtab_cmd.nindirectsyms = nindirectsyms;
}
@@ -2599,8 +2599,8 @@ pub const Zld = struct {
// except for code signature data.
try self.file.pwriteAll(&[_]u8{0}, offset + needed_size - 1);
- self.codesig_cmd.dataoff = @intCast(u32, offset);
- self.codesig_cmd.datasize = @intCast(u32, needed_size);
+ self.codesig_cmd.dataoff = @as(u32, @intCast(offset));
+ self.codesig_cmd.datasize = @as(u32, @intCast(needed_size));
}
fn writeCodeSignature(self: *Zld, comp: *const Compilation, code_sig: *CodeSignature) !void {
@@ -2689,7 +2689,7 @@ pub const Zld = struct {
fn getSegmentByName(self: Zld, segname: []const u8) ?u8 {
for (self.segments.items, 0..) |seg, i| {
- if (mem.eql(u8, segname, seg.segName())) return @intCast(u8, i);
+ if (mem.eql(u8, segname, seg.segName())) return @as(u8, @intCast(i));
} else return null;
}
@@ -2714,15 +2714,15 @@ pub const Zld = struct {
// TODO investigate caching with a hashmap
for (self.sections.items(.header), 0..) |header, i| {
if (mem.eql(u8, header.segName(), segname) and mem.eql(u8, header.sectName(), sectname))
- return @intCast(u8, i);
+ return @as(u8, @intCast(i));
} else return null;
}
pub fn getSectionIndexes(self: Zld, segment_index: u8) struct { start: u8, end: u8 } {
var start: u8 = 0;
const nsects = for (self.segments.items, 0..) |seg, i| {
- if (i == segment_index) break @intCast(u8, seg.nsects);
- start += @intCast(u8, seg.nsects);
+ if (i == segment_index) break @as(u8, @intCast(seg.nsects));
+ start += @as(u8, @intCast(seg.nsects));
} else 0;
return .{ .start = start, .end = start + nsects };
}
@@ -2879,7 +2879,7 @@ pub const Zld = struct {
var name_lookup: ?DwarfInfo.SubprogramLookupByName = if (object.header.flags & macho.MH_SUBSECTIONS_VIA_SYMBOLS == 0) blk: {
var name_lookup = DwarfInfo.SubprogramLookupByName.init(gpa);
errdefer name_lookup.deinit();
- try name_lookup.ensureUnusedCapacity(@intCast(u32, object.atoms.items.len));
+ try name_lookup.ensureUnusedCapacity(@as(u32, @intCast(object.atoms.items.len)));
try debug_info.genSubprogramLookupByName(compile_unit, lookup, &name_lookup);
break :blk name_lookup;
} else null;
@@ -3069,7 +3069,7 @@ pub const Zld = struct {
@memset(&buf, '_');
scoped_log.debug(" %{d}: {s} @{x} in sect({d}), {s}", .{
sym_id,
- object.getSymbolName(@intCast(u32, sym_id)),
+ object.getSymbolName(@as(u32, @intCast(sym_id))),
sym.n_value,
sym.n_sect,
logSymAttributes(sym, &buf),
@@ -3252,7 +3252,7 @@ pub const Zld = struct {
}
};
-pub const N_DEAD: u16 = @bitCast(u16, @as(i16, -1));
+pub const N_DEAD: u16 = @as(u16, @bitCast(@as(i16, -1)));
const Section = struct {
header: macho.section_64,
@@ -3791,7 +3791,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
}
for (zld.objects.items, 0..) |*object, object_id| {
- try object.splitIntoAtoms(&zld, @intCast(u32, object_id));
+ try object.splitIntoAtoms(&zld, @as(u32, @intCast(object_id)));
}
if (gc_sections) {
@@ -3929,7 +3929,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
} else sym.n_value;
try lc_writer.writeStruct(macho.entry_point_command{
- .entryoff = @intCast(u32, addr - seg.vmaddr),
+ .entryoff = @as(u32, @intCast(addr - seg.vmaddr)),
.stacksize = options.stack_size_override orelse 0,
});
} else {
@@ -3943,7 +3943,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
});
try load_commands.writeBuildVersionLC(zld.options, lc_writer);
- const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + @intCast(u32, lc_buffer.items.len);
+ const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + @as(u32, @intCast(lc_buffer.items.len));
try lc_writer.writeStruct(zld.uuid_cmd);
try load_commands.writeLoadDylibLCs(zld.dylibs.items, zld.referenced_dylibs.keys(), lc_writer);
@@ -3954,7 +3954,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
const ncmds = load_commands.calcNumOfLCs(lc_buffer.items);
try zld.file.pwriteAll(lc_buffer.items, @sizeOf(macho.mach_header_64));
- try zld.writeHeader(ncmds, @intCast(u32, lc_buffer.items.len));
+ try zld.writeHeader(ncmds, @as(u32, @intCast(lc_buffer.items.len)));
try zld.writeUuid(comp, uuid_cmd_offset, requires_codesig);
if (codesig) |*csig| {
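
Annotation: every hunk in this diff is the same mechanical rewrite. Zig 0.11 turned the cast builtins (@intCast, @truncate, @bitCast, @ptrCast, @enumFromInt, and friends) into single-operand forms whose result type is inferred, so the old @intCast(T, x) becomes @as(T, @intCast(x)) wherever no result type is otherwise available. A tiny sketch of both spellings:

const std = @import("std");

test "single-operand cast builtins" {
    const len: usize = 42;
    const a: u32 = @intCast(len); // result type taken from the declaration
    const b = @as(u8, @truncate(len)); // spelled out where nothing infers it
    try std.testing.expectEqual(@as(u32, 42), a);
    try std.testing.expectEqual(@as(u8, 42), b);
}
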