author     Andrew Kelley <andrew@ziglang.org>       2023-06-24 16:58:19 -0700
committer  GitHub <noreply@github.com>              2023-06-24 16:58:19 -0700
commit     146b79af153bbd5dafda0ba12a040385c7fc58f8 (patch)
tree       67e3db8b444d65c667e314770fc983a7fc8ba293 /src/arch
parent     13853bef0df3c90633021850cc6d6abaeea03282 (diff)
parent     21ac0beb436f49fe49c6982a872f2dc48e4bea5e (diff)
Merge pull request #16163 from mlugg/feat/builtins-infer-dest-ty
Infer destination type of cast builtins using result type
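
The change mechanically rewrites the old two-argument cast builtins (@intCast, @truncate, @bitCast, @ptrCast, @enumFromInt, ...) into their new single-argument forms, wrapping them in @as wherever an explicit result type is still required. A minimal sketch of the pattern, assuming a post-change compiler (illustrative only, not taken from the diff below):

    const std = @import("std");

    // Before this merge, cast builtins took the destination type as a first
    // argument:
    //     const n = @intCast(u32, some_u64);
    // After it, a cast builtin infers its destination type from the result
    // type of the expression it appears in; an explicit destination is
    // spelled by wrapping the cast in @as.
    fn demo(some_u64: u64) u32 {
        const n = @as(u32, @intCast(some_u64)); // result type supplied via @as
        return @intCast(some_u64 - n); // result type inferred from the return type
    }

    test "demo" {
        try std.testing.expectEqual(@as(u32, 0), demo(42));
    }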
Diffstat (limited to 'src/arch')
-rw-r--r--  src/arch/aarch64/CodeGen.zig  | 176
-rw-r--r--  src/arch/aarch64/Emit.zig     |  44
-rw-r--r--  src/arch/aarch64/Mir.zig      |   2
-rw-r--r--  src/arch/aarch64/bits.zig     | 218
-rw-r--r--  src/arch/arm/CodeGen.zig      | 192
-rw-r--r--  src/arch/arm/Emit.zig         |  38
-rw-r--r--  src/arch/arm/Mir.zig          |   2
-rw-r--r--  src/arch/arm/abi.zig          |   2
-rw-r--r--  src/arch/arm/bits.zig         |  64
-rw-r--r--  src/arch/riscv64/CodeGen.zig  |  38
-rw-r--r--  src/arch/riscv64/Emit.zig     |  10
-rw-r--r--  src/arch/riscv64/Mir.zig      |   2
-rw-r--r--  src/arch/riscv64/bits.zig     |  46
-rw-r--r--  src/arch/sparc64/CodeGen.zig  |  86
-rw-r--r--  src/arch/sparc64/Emit.zig     |  26
-rw-r--r--  src/arch/sparc64/Mir.zig      |   2
-rw-r--r--  src/arch/sparc64/bits.zig     |  80
-rw-r--r--  src/arch/wasm/CodeGen.zig     | 328
-rw-r--r--  src/arch/wasm/Emit.zig        |  22
-rw-r--r--  src/arch/wasm/Mir.zig         |  16
-rw-r--r--  src/arch/x86_64/CodeGen.zig   | 458
-rw-r--r--  src/arch/x86_64/Emit.zig      |  28
-rw-r--r--  src/arch/x86_64/Encoding.zig  |   4
-rw-r--r--  src/arch/x86_64/Lower.zig     |  10
-rw-r--r--  src/arch/x86_64/Mir.zig       |  34
-rw-r--r--  src/arch/x86_64/abi.zig       |   4
-rw-r--r--  src/arch/x86_64/bits.zig      |  32
-rw-r--r--  src/arch/x86_64/encoder.zig   |  14
28 files changed, 989 insertions, 989 deletions
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 5080a0451a..1d09fcd1cd 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -187,8 +187,8 @@ const DbgInfoReloc = struct {
.stack_argument_offset,
=> |offset| blk: {
const adjusted_offset = switch (reloc.mcv) {
- .stack_offset => -@intCast(i32, offset),
- .stack_argument_offset => @intCast(i32, function.saved_regs_stack_space + offset),
+ .stack_offset => -@as(i32, @intCast(offset)),
+ .stack_argument_offset => @as(i32, @intCast(function.saved_regs_stack_space + offset)),
else => unreachable,
};
break :blk .{ .stack = .{
@@ -224,8 +224,8 @@ const DbgInfoReloc = struct {
const adjusted_offset = switch (reloc.mcv) {
.ptr_stack_offset,
.stack_offset,
- => -@intCast(i32, offset),
- .stack_argument_offset => @intCast(i32, function.saved_regs_stack_space + offset),
+ => -@as(i32, @intCast(offset)),
+ .stack_argument_offset => @as(i32, @intCast(function.saved_regs_stack_space + offset)),
else => unreachable,
};
break :blk .{
@@ -440,7 +440,7 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
try self.mir_instructions.ensureUnusedCapacity(gpa, 1);
- const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len);
+ const result_index = @as(Air.Inst.Index, @intCast(self.mir_instructions.len));
self.mir_instructions.appendAssumeCapacity(inst);
return result_index;
}
@@ -460,11 +460,11 @@ pub fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 {
pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
const fields = std.meta.fields(@TypeOf(extra));
- const result = @intCast(u32, self.mir_extra.items.len);
+ const result = @as(u32, @intCast(self.mir_extra.items.len));
inline for (fields) |field| {
self.mir_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
- i32 => @bitCast(u32, @field(extra, field.name)),
+ i32 => @as(u32, @bitCast(@field(extra, field.name))),
else => @compileError("bad field type"),
});
}
@@ -524,7 +524,7 @@ fn gen(self: *Self) !void {
const ty = self.typeOfIndex(inst);
- const abi_size = @intCast(u32, ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
const abi_align = ty.abiAlignment(mod);
const stack_offset = try self.allocMem(abi_size, abi_align, inst);
try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
@@ -547,7 +547,7 @@ fn gen(self: *Self) !void {
self.saved_regs_stack_space = 16;
inline for (callee_preserved_regs) |reg| {
if (self.register_manager.isRegAllocated(reg)) {
- saved_regs |= @as(u32, 1) << @intCast(u5, reg.id());
+ saved_regs |= @as(u32, 1) << @as(u5, @intCast(reg.id()));
self.saved_regs_stack_space += 8;
}
}
@@ -597,14 +597,14 @@ fn gen(self: *Self) !void {
for (self.exitlude_jump_relocs.items) |jmp_reloc| {
self.mir_instructions.set(jmp_reloc, .{
.tag = .b,
- .data = .{ .inst = @intCast(u32, self.mir_instructions.len) },
+ .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len)) },
});
}
// add sp, sp, #stack_size
_ = try self.addInst(.{
.tag = .add_immediate,
- .data = .{ .rr_imm12_sh = .{ .rd = .sp, .rn = .sp, .imm12 = @intCast(u12, stack_size) } },
+ .data = .{ .rr_imm12_sh = .{ .rd = .sp, .rn = .sp, .imm12 = @as(u12, @intCast(stack_size)) } },
});
// <load other registers>
@@ -948,15 +948,15 @@ fn finishAirBookkeeping(self: *Self) void {
fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void {
var tomb_bits = self.liveness.getTombBits(inst);
for (operands) |op| {
- const dies = @truncate(u1, tomb_bits) != 0;
+ const dies = @as(u1, @truncate(tomb_bits)) != 0;
tomb_bits >>= 1;
if (!dies) continue;
const op_int = @intFromEnum(op);
if (op_int < Air.ref_start_index) continue;
- const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
+ const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index));
self.processDeath(op_index);
}
- const is_used = @truncate(u1, tomb_bits) == 0;
+ const is_used = @as(u1, @truncate(tomb_bits)) == 0;
if (is_used) {
log.debug("%{d} => {}", .{ inst, result });
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@@ -1232,7 +1232,7 @@ fn truncRegister(
.rd = dest_reg,
.rn = operand_reg,
.lsb = 0,
- .width = @intCast(u6, int_bits),
+ .width = @as(u6, @intCast(int_bits)),
} },
});
},
@@ -1877,7 +1877,7 @@ fn binOpImmediate(
=> .{ .rr_imm12_sh = .{
.rd = dest_reg,
.rn = lhs_reg,
- .imm12 = @intCast(u12, rhs_immediate),
+ .imm12 = @as(u12, @intCast(rhs_immediate)),
} },
.lsl_immediate,
.asr_immediate,
@@ -1885,7 +1885,7 @@ fn binOpImmediate(
=> .{ .rr_shift = .{
.rd = dest_reg,
.rn = lhs_reg,
- .shift = @intCast(u6, rhs_immediate),
+ .shift = @as(u6, @intCast(rhs_immediate)),
} },
else => unreachable,
};
@@ -2526,9 +2526,9 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.typeOf(extra.rhs);
const tuple_ty = self.typeOfIndex(inst);
- const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+ const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod)));
const tuple_align = tuple_ty.abiAlignment(mod);
- const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
+ const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod)));
switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
@@ -2654,9 +2654,9 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.typeOf(extra.rhs);
const tuple_ty = self.typeOfIndex(inst);
- const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+ const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod)));
const tuple_align = tuple_ty.abiAlignment(mod);
- const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
+ const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod)));
switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
@@ -2777,7 +2777,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
} },
});
- const shift: u6 = @intCast(u6, @as(u7, 64) - @intCast(u7, int_info.bits));
+ const shift: u6 = @as(u6, @intCast(@as(u7, 64) - @as(u7, @intCast(int_info.bits))));
if (shift > 0) {
// lsl dest_high, dest, #shift
_ = try self.addInst(.{
@@ -2837,7 +2837,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
.data = .{ .rr_shift = .{
.rd = dest_high_reg,
.rn = dest_reg,
- .shift = @intCast(u6, int_info.bits),
+ .shift = @as(u6, @intCast(int_info.bits)),
} },
});
@@ -2878,9 +2878,9 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.typeOf(extra.rhs);
const tuple_ty = self.typeOfIndex(inst);
- const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+ const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod)));
const tuple_align = tuple_ty.abiAlignment(mod);
- const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
+ const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod)));
switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
@@ -2917,7 +2917,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
.data = .{ .rr_shift = .{
.rd = dest_reg,
.rn = lhs_reg,
- .shift = @intCast(u6, imm),
+ .shift = @as(u6, @intCast(imm)),
} },
});
@@ -2932,7 +2932,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
.data = .{ .rr_shift = .{
.rd = reconstructed_reg,
.rn = dest_reg,
- .shift = @intCast(u6, imm),
+ .shift = @as(u6, @intCast(imm)),
} },
});
} else {
@@ -3072,7 +3072,7 @@ fn errUnionErr(
return try error_union_bind.resolveToMcv(self);
}
- const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, mod));
+ const err_offset = @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod)));
switch (try error_union_bind.resolveToMcv(self)) {
.register => {
var operand_reg: Register = undefined;
@@ -3094,7 +3094,7 @@ fn errUnionErr(
);
const err_bit_offset = err_offset * 8;
- const err_bit_size = @intCast(u32, err_ty.abiSize(mod)) * 8;
+ const err_bit_size = @as(u32, @intCast(err_ty.abiSize(mod))) * 8;
_ = try self.addInst(.{
.tag = .ubfx, // errors are unsigned integers
@@ -3103,8 +3103,8 @@ fn errUnionErr(
// Set both registers to the X variant to get the full width
.rd = dest_reg.toX(),
.rn = operand_reg.toX(),
- .lsb = @intCast(u6, err_bit_offset),
- .width = @intCast(u7, err_bit_size),
+ .lsb = @as(u6, @intCast(err_bit_offset)),
+ .width = @as(u7, @intCast(err_bit_size)),
},
},
});
@@ -3152,7 +3152,7 @@ fn errUnionPayload(
return MCValue.none;
}
- const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod));
+ const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod)));
switch (try error_union_bind.resolveToMcv(self)) {
.register => {
var operand_reg: Register = undefined;
@@ -3174,7 +3174,7 @@ fn errUnionPayload(
);
const payload_bit_offset = payload_offset * 8;
- const payload_bit_size = @intCast(u32, payload_ty.abiSize(mod)) * 8;
+ const payload_bit_size = @as(u32, @intCast(payload_ty.abiSize(mod))) * 8;
_ = try self.addInst(.{
.tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx,
@@ -3183,8 +3183,8 @@ fn errUnionPayload(
// Set both registers to the X variant to get the full width
.rd = dest_reg.toX(),
.rn = operand_reg.toX(),
- .lsb = @intCast(u5, payload_bit_offset),
- .width = @intCast(u6, payload_bit_size),
+ .lsb = @as(u5, @intCast(payload_bit_offset)),
+ .width = @as(u6, @intCast(payload_bit_size)),
},
},
});
@@ -3283,9 +3283,9 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
break :result MCValue{ .register = reg };
}
- const optional_abi_size = @intCast(u32, optional_ty.abiSize(mod));
+ const optional_abi_size = @as(u32, @intCast(optional_ty.abiSize(mod)));
const optional_abi_align = optional_ty.abiAlignment(mod);
- const offset = @intCast(u32, payload_ty.abiSize(mod));
+ const offset = @as(u32, @intCast(payload_ty.abiSize(mod)));
const stack_offset = try self.allocMem(optional_abi_size, optional_abi_align, inst);
try self.genSetStack(payload_ty, stack_offset, operand);
@@ -3308,13 +3308,13 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
const operand = try self.resolveInst(ty_op.operand);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
- const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(error_union_ty.abiSize(mod)));
const abi_align = error_union_ty.abiAlignment(mod);
const stack_offset = try self.allocMem(abi_size, abi_align, inst);
const payload_off = errUnionPayloadOffset(payload_ty, mod);
const err_off = errUnionErrorOffset(payload_ty, mod);
- try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand);
- try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 });
+ try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), operand);
+ try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), .{ .immediate = 0 });
break :result MCValue{ .stack_offset = stack_offset };
};
@@ -3332,13 +3332,13 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const operand = try self.resolveInst(ty_op.operand);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
- const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(error_union_ty.abiSize(mod)));
const abi_align = error_union_ty.abiAlignment(mod);
const stack_offset = try self.allocMem(abi_size, abi_align, inst);
const payload_off = errUnionPayloadOffset(payload_ty, mod);
const err_off = errUnionErrorOffset(payload_ty, mod);
- try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand);
- try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef);
+ try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), operand);
+ try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), .undef);
break :result MCValue{ .stack_offset = stack_offset };
};
@@ -3454,7 +3454,7 @@ fn ptrElemVal(
) !MCValue {
const mod = self.bin_file.options.module.?;
const elem_ty = ptr_ty.childType(mod);
- const elem_size = @intCast(u32, elem_ty.abiSize(mod));
+ const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
// TODO optimize for elem_sizes of 1, 2, 4, 8
switch (elem_size) {
@@ -3716,7 +3716,7 @@ fn genInlineMemcpy(
_ = try self.addInst(.{
.tag = .b_cond,
.data = .{ .inst_cond = .{
- .inst = @intCast(u32, self.mir_instructions.len + 5),
+ .inst = @as(u32, @intCast(self.mir_instructions.len + 5)),
.cond = .ge,
} },
});
@@ -3754,7 +3754,7 @@ fn genInlineMemcpy(
// b loop
_ = try self.addInst(.{
.tag = .b,
- .data = .{ .inst = @intCast(u32, self.mir_instructions.len - 5) },
+ .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len - 5)) },
});
// end:
@@ -3824,7 +3824,7 @@ fn genInlineMemsetCode(
_ = try self.addInst(.{
.tag = .b_cond,
.data = .{ .inst_cond = .{
- .inst = @intCast(u32, self.mir_instructions.len + 4),
+ .inst = @as(u32, @intCast(self.mir_instructions.len + 4)),
.cond = .ge,
} },
});
@@ -3852,7 +3852,7 @@ fn genInlineMemsetCode(
// b loop
_ = try self.addInst(.{
.tag = .b,
- .data = .{ .inst = @intCast(u32, self.mir_instructions.len - 4) },
+ .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len - 4)) },
});
// end:
@@ -4002,7 +4002,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
} },
});
},
- .memory => |addr| try self.genSetReg(Type.usize, src_reg, .{ .immediate = @intCast(u32, addr) }),
+ .memory => |addr| try self.genSetReg(Type.usize, src_reg, .{ .immediate = @as(u32, @intCast(addr)) }),
.linker_load => |load_struct| {
const tag: Mir.Inst.Tag = switch (load_struct.type) {
.got => .load_memory_ptr_got,
@@ -4092,7 +4092,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
const mcv = try self.resolveInst(operand);
const ptr_ty = self.typeOf(operand);
const struct_ty = ptr_ty.childType(mod);
- const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
+ const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod)));
switch (mcv) {
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
@@ -4117,7 +4117,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const mcv = try self.resolveInst(operand);
const struct_ty = self.typeOf(operand);
const struct_field_ty = struct_ty.structFieldType(index, mod);
- const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
+ const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod)));
switch (mcv) {
.dead, .unreach => unreachable,
@@ -4169,7 +4169,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const field_ptr = try self.resolveInst(extra.field_ptr);
const struct_ty = self.air.getRefType(ty_pl.ty).childType(mod);
- const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod));
+ const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(extra.field_index, mod)));
switch (field_ptr) {
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off + struct_field_offset };
@@ -4243,7 +4243,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const callee = pl_op.operand;
const extra = self.air.extraData(Air.Call, pl_op.payload);
- const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
+ const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]));
const ty = self.typeOf(callee);
const mod = self.bin_file.options.module.?;
@@ -4269,8 +4269,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (info.return_value == .stack_offset) {
log.debug("airCall: return by reference", .{});
const ret_ty = fn_ty.fnReturnType(mod);
- const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod));
- const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod));
+ const ret_abi_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
+ const ret_abi_align = @as(u32, @intCast(ret_ty.abiAlignment(mod)));
const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
const ret_ptr_reg = self.registerAlias(.x0, Type.usize);
@@ -4314,7 +4314,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const atom = elf_file.getAtom(atom_index);
_ = try atom.getOrCreateOffsetTableEntry(elf_file);
- const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
+ const got_addr = @as(u32, @intCast(atom.getOffsetTableAddress(elf_file)));
try self.genSetReg(Type.usize, .x30, .{ .memory = got_addr });
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
@@ -4473,7 +4473,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
// location.
const op_inst = Air.refToIndex(un_op).?;
if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) {
- const abi_size = @intCast(u32, ret_ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
const abi_align = ret_ty.abiAlignment(mod);
const offset = try self.allocMem(abi_size, abi_align, null);
@@ -4554,7 +4554,7 @@ fn cmp(
.tag = .cmp_immediate,
.data = .{ .r_imm12_sh = .{
.rn = lhs_reg,
- .imm12 = @intCast(u12, rhs_immediate.?),
+ .imm12 = @as(u12, @intCast(rhs_immediate.?)),
} },
});
} else {
@@ -4696,7 +4696,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
if (self.liveness.operandDies(inst, 0)) {
const op_int = @intFromEnum(pl_op.operand);
if (op_int >= Air.ref_start_index) {
- const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
+ const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index));
self.processDeath(op_index);
}
}
@@ -4833,7 +4833,7 @@ fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
break :blk .{ .ty = operand_ty, .bind = operand_bind };
- const offset = @intCast(u32, payload_ty.abiSize(mod));
+ const offset = @as(u32, @intCast(payload_ty.abiSize(mod)));
const operand_mcv = try operand_bind.resolveToMcv(self);
const new_mcv: MCValue = switch (operand_mcv) {
.register => |source_reg| new: {
@@ -4841,7 +4841,7 @@ fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
const raw_reg = try self.register_manager.allocReg(null, gp);
const dest_reg = raw_reg.toX();
- const shift = @intCast(u6, offset * 8);
+ const shift = @as(u6, @intCast(offset * 8));
if (shift == 0) {
try self.genSetReg(payload_ty, dest_reg, operand_mcv);
} else {
@@ -5026,7 +5026,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[loop.end..][0..loop.data.body_len];
- const start_index = @intCast(u32, self.mir_instructions.len);
+ const start_index = @as(u32, @intCast(self.mir_instructions.len));
try self.genBody(body);
try self.jump(start_index);
@@ -5091,7 +5091,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
var case_i: u32 = 0;
while (case_i < switch_br.data.cases_len) : (case_i += 1) {
const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
- const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
+ const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len]));
assert(items.len > 0);
const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
extra_index = case.end + items.len + case_body.len;
@@ -5209,9 +5209,9 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
fn performReloc(self: *Self, inst: Mir.Inst.Index) !void {
const tag = self.mir_instructions.items(.tag)[inst];
switch (tag) {
- .cbz => self.mir_instructions.items(.data)[inst].r_inst.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len),
- .b_cond => self.mir_instructions.items(.data)[inst].inst_cond.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len),
- .b => self.mir_instructions.items(.data)[inst].inst = @intCast(Mir.Inst.Index, self.mir_instructions.len),
+ .cbz => self.mir_instructions.items(.data)[inst].r_inst.inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)),
+ .b_cond => self.mir_instructions.items(.data)[inst].inst_cond.inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)),
+ .b => self.mir_instructions.items(.data)[inst].inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)),
else => unreachable,
}
}
@@ -5262,12 +5262,12 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void {
fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
- const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
- const clobbers_len = @truncate(u31, extra.data.flags);
+ const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
+ const clobbers_len = @as(u31, @truncate(extra.data.flags));
var extra_i: usize = extra.end;
- const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
+ const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]));
extra_i += outputs.len;
- const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
+ const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]));
extra_i += inputs.len;
const dead = !is_volatile and self.liveness.isUnused(inst);
@@ -5401,7 +5401,7 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
const mod = self.bin_file.options.module.?;
- const abi_size = @intCast(u32, ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
switch (mcv) {
.dead => unreachable,
.unreach, .none => return, // Nothing to do.
@@ -5460,7 +5460,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });
const overflow_bit_ty = ty.structFieldType(1, mod);
- const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
+ const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, mod)));
const raw_cond_reg = try self.register_manager.allocReg(null, gp);
const cond_reg = self.registerAlias(raw_cond_reg, overflow_bit_ty);
@@ -5589,7 +5589,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.tag = .ldr_ptr_stack,
.data = .{ .load_store_stack = .{
.rt = reg,
- .offset = @intCast(u32, off),
+ .offset = @as(u32, @intCast(off)),
} },
});
},
@@ -5605,13 +5605,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.immediate => |x| {
_ = try self.addInst(.{
.tag = .movz,
- .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x) } },
+ .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x)) } },
});
if (x & 0x0000_0000_ffff_0000 != 0) {
_ = try self.addInst(.{
.tag = .movk,
- .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x >> 16), .hw = 1 } },
+ .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x >> 16)), .hw = 1 } },
});
}
@@ -5619,13 +5619,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
if (x & 0x0000_ffff_0000_0000 != 0) {
_ = try self.addInst(.{
.tag = .movk,
- .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x >> 32), .hw = 2 } },
+ .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x >> 32)), .hw = 2 } },
});
}
if (x & 0xffff_0000_0000_0000 != 0) {
_ = try self.addInst(.{
.tag = .movk,
- .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x >> 48), .hw = 3 } },
+ .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x >> 48)), .hw = 3 } },
});
}
}
@@ -5696,7 +5696,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.tag = tag,
.data = .{ .load_store_stack = .{
.rt = reg,
- .offset = @intCast(u32, off),
+ .offset = @as(u32, @intCast(off)),
} },
});
},
@@ -5720,7 +5720,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.tag = tag,
.data = .{ .load_store_stack = .{
.rt = reg,
- .offset = @intCast(u32, off),
+ .offset = @as(u32, @intCast(off)),
} },
});
},
@@ -5733,7 +5733,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
const mod = self.bin_file.options.module.?;
- const abi_size = @intCast(u32, ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
switch (mcv) {
.dead => unreachable,
.none, .unreach => return,
@@ -5840,7 +5840,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
} },
});
},
- .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @intCast(u32, addr) }),
+ .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @as(u32, @intCast(addr)) }),
.linker_load => |load_struct| {
const tag: Mir.Inst.Tag = switch (load_struct.type) {
.got => .load_memory_ptr_got,
@@ -5937,7 +5937,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
const ptr_ty = self.typeOf(ty_op.operand);
const ptr = try self.resolveInst(ty_op.operand);
const array_ty = ptr_ty.childType(mod);
- const array_len = @intCast(u32, array_ty.arrayLen(mod));
+ const array_len = @as(u32, @intCast(array_ty.arrayLen(mod)));
const ptr_bits = self.target.ptrBitWidth();
const ptr_bytes = @divExact(ptr_bits, 8);
@@ -6058,7 +6058,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const vector_ty = self.typeOfIndex(inst);
const len = vector_ty.vectorLen(mod);
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
- const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
+ const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len]));
const result: MCValue = res: {
if (self.liveness.isUnused(inst)) break :res MCValue.dead;
return self.fail("TODO implement airAggregateInit for {}", .{self.target.cpu.arch});
@@ -6105,7 +6105,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = result: {
const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
const error_union_ty = self.typeOf(pl_op.operand);
- const error_union_size = @intCast(u32, error_union_ty.abiSize(mod));
+ const error_union_size = @as(u32, @intCast(error_union_ty.abiSize(mod)));
const error_union_align = error_union_ty.abiAlignment(mod);
// The error union will die in the body. However, we need the
@@ -6247,7 +6247,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
} else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
result.return_value = .{ .none = {} };
} else {
- const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
+ const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
if (ret_ty_size == 0) {
assert(ret_ty.isError(mod));
result.return_value = .{ .immediate = 0 };
@@ -6259,7 +6259,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
}
for (fn_info.param_types, 0..) |ty, i| {
- const param_size = @intCast(u32, ty.toType().abiSize(mod));
+ const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
if (param_size == 0) {
result.args[i] = .{ .none = {} };
continue;
@@ -6305,7 +6305,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
} else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
result.return_value = .{ .none = {} };
} else {
- const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
+ const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
if (ret_ty_size == 0) {
assert(ret_ty.isError(mod));
result.return_value = .{ .immediate = 0 };
@@ -6325,7 +6325,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
for (fn_info.param_types, 0..) |ty, i| {
if (ty.toType().abiSize(mod) > 0) {
- const param_size = @intCast(u32, ty.toType().abiSize(mod));
+ const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
const param_alignment = ty.toType().abiAlignment(mod);
stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment);
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index 238a63c921..8cf2386138 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -81,7 +81,7 @@ pub fn emitMir(
// Emit machine code
for (mir_tags, 0..) |tag, index| {
- const inst = @intCast(u32, index);
+ const inst = @as(u32, @intCast(index));
switch (tag) {
.add_immediate => try emit.mirAddSubtractImmediate(inst),
.adds_immediate => try emit.mirAddSubtractImmediate(inst),
@@ -324,7 +324,7 @@ fn lowerBranches(emit: *Emit) !void {
// TODO optimization opportunity: do this in codegen while
// generating MIR
for (mir_tags, 0..) |tag, index| {
- const inst = @intCast(u32, index);
+ const inst = @as(u32, @intCast(index));
if (isBranch(tag)) {
const target_inst = emit.branchTarget(inst);
@@ -369,7 +369,7 @@ fn lowerBranches(emit: *Emit) !void {
var current_code_offset: usize = 0;
for (mir_tags, 0..) |tag, index| {
- const inst = @intCast(u32, index);
+ const inst = @as(u32, @intCast(index));
// If this instruction contained in the code offset
// mapping (when it is a target of a branch or if it is a
@@ -384,7 +384,7 @@ fn lowerBranches(emit: *Emit) !void {
const target_inst = emit.branchTarget(inst);
if (target_inst < inst) {
const target_offset = emit.code_offset_mapping.get(target_inst).?;
- const offset = @intCast(i64, target_offset) - @intCast(i64, current_code_offset);
+ const offset = @as(i64, @intCast(target_offset)) - @as(i64, @intCast(current_code_offset));
const branch_type = emit.branch_types.getPtr(inst).?;
const optimal_branch_type = try emit.optimalBranchType(tag, offset);
if (branch_type.* != optimal_branch_type) {
@@ -403,7 +403,7 @@ fn lowerBranches(emit: *Emit) !void {
for (origin_list.items) |forward_branch_inst| {
const branch_tag = emit.mir.instructions.items(.tag)[forward_branch_inst];
const forward_branch_inst_offset = emit.code_offset_mapping.get(forward_branch_inst).?;
- const offset = @intCast(i64, current_code_offset) - @intCast(i64, forward_branch_inst_offset);
+ const offset = @as(i64, @intCast(current_code_offset)) - @as(i64, @intCast(forward_branch_inst_offset));
const branch_type = emit.branch_types.getPtr(forward_branch_inst).?;
const optimal_branch_type = try emit.optimalBranchType(branch_tag, offset);
if (branch_type.* != optimal_branch_type) {
@@ -434,7 +434,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
}
fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
- const delta_line = @intCast(i32, line) - @intCast(i32, self.prev_di_line);
+ const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(self.prev_di_line));
const delta_pc: usize = self.code.items.len - self.prev_di_pc;
switch (self.debug_output) {
.dwarf => |dw| {
@@ -451,13 +451,13 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
// increasing the line number
try @import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line);
// increasing the pc
- const d_pc_p9 = @intCast(i64, delta_pc) - quant;
+ const d_pc_p9 = @as(i64, @intCast(delta_pc)) - quant;
if (d_pc_p9 > 0) {
// minus one because if its the last one, we want to leave space to change the line which is one quanta
- try dbg_out.dbg_line.append(@intCast(u8, @divExact(d_pc_p9, quant) + 128) - quant);
+ try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, quant) + 128)) - quant);
if (dbg_out.pcop_change_index.*) |pci|
dbg_out.dbg_line.items[pci] += 1;
- dbg_out.pcop_change_index.* = @intCast(u32, dbg_out.dbg_line.items.len - 1);
+ dbg_out.pcop_change_index.* = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1));
} else if (d_pc_p9 == 0) {
// we don't need to do anything, because adding the quant does it for us
} else unreachable;
@@ -548,13 +548,13 @@ fn mirConditionalBranchImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const inst_cond = emit.mir.instructions.items(.data)[inst].inst_cond;
- const offset = @intCast(i64, emit.code_offset_mapping.get(inst_cond.inst).?) - @intCast(i64, emit.code.items.len);
+ const offset = @as(i64, @intCast(emit.code_offset_mapping.get(inst_cond.inst).?)) - @as(i64, @intCast(emit.code.items.len));
const branch_type = emit.branch_types.get(inst).?;
log.debug("mirConditionalBranchImmediate: {} offset={}", .{ inst, offset });
switch (branch_type) {
.b_cond => switch (tag) {
- .b_cond => try emit.writeInstruction(Instruction.bCond(inst_cond.cond, @intCast(i21, offset))),
+ .b_cond => try emit.writeInstruction(Instruction.bCond(inst_cond.cond, @as(i21, @intCast(offset)))),
else => unreachable,
},
else => unreachable,
@@ -572,14 +572,14 @@ fn mirBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
emit.mir.instructions.items(.tag)[target_inst],
});
- const offset = @intCast(i64, emit.code_offset_mapping.get(target_inst).?) - @intCast(i64, emit.code.items.len);
+ const offset = @as(i64, @intCast(emit.code_offset_mapping.get(target_inst).?)) - @as(i64, @intCast(emit.code.items.len));
const branch_type = emit.branch_types.get(inst).?;
log.debug("mirBranch: {} offset={}", .{ inst, offset });
switch (branch_type) {
.unconditional_branch_immediate => switch (tag) {
- .b => try emit.writeInstruction(Instruction.b(@intCast(i28, offset))),
- .bl => try emit.writeInstruction(Instruction.bl(@intCast(i28, offset))),
+ .b => try emit.writeInstruction(Instruction.b(@as(i28, @intCast(offset)))),
+ .bl => try emit.writeInstruction(Instruction.bl(@as(i28, @intCast(offset)))),
else => unreachable,
},
else => unreachable,
@@ -590,13 +590,13 @@ fn mirCompareAndBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const r_inst = emit.mir.instructions.items(.data)[inst].r_inst;
- const offset = @intCast(i64, emit.code_offset_mapping.get(r_inst.inst).?) - @intCast(i64, emit.code.items.len);
+ const offset = @as(i64, @intCast(emit.code_offset_mapping.get(r_inst.inst).?)) - @as(i64, @intCast(emit.code.items.len));
const branch_type = emit.branch_types.get(inst).?;
log.debug("mirCompareAndBranch: {} offset={}", .{ inst, offset });
switch (branch_type) {
.cbz => switch (tag) {
- .cbz => try emit.writeInstruction(Instruction.cbz(r_inst.rt, @intCast(i21, offset))),
+ .cbz => try emit.writeInstruction(Instruction.cbz(r_inst.rt, @as(i21, @intCast(offset)))),
else => unreachable,
},
else => unreachable,
@@ -662,7 +662,7 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
const relocation = emit.mir.instructions.items(.data)[inst].relocation;
const offset = blk: {
- const offset = @intCast(u32, emit.code.items.len);
+ const offset = @as(u32, @intCast(emit.code.items.len));
// bl
try emit.writeInstruction(Instruction.bl(0));
break :blk offset;
@@ -837,11 +837,11 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const payload = emit.mir.instructions.items(.data)[inst].payload;
const data = emit.mir.extraData(Mir.LoadMemoryPie, payload).data;
- const reg = @enumFromInt(Register, data.register);
+ const reg = @as(Register, @enumFromInt(data.register));
// PC-relative displacement to the entry in memory.
// adrp
- const offset = @intCast(u32, emit.code.items.len);
+ const offset = @as(u32, @intCast(emit.code.items.len));
try emit.writeInstruction(Instruction.adrp(reg.toX(), 0));
switch (tag) {
@@ -1220,7 +1220,7 @@ fn mirNop(emit: *Emit) !void {
}
fn regListIsSet(reg_list: u32, reg: Register) bool {
- return reg_list & @as(u32, 1) << @intCast(u5, reg.id()) != 0;
+ return reg_list & @as(u32, 1) << @as(u5, @intCast(reg.id())) != 0;
}
fn mirPushPopRegs(emit: *Emit, inst: Mir.Inst.Index) !void {
@@ -1245,7 +1245,7 @@ fn mirPushPopRegs(emit: *Emit, inst: Mir.Inst.Index) !void {
var count: u6 = 0;
var other_reg: ?Register = null;
while (i > 0) : (i -= 1) {
- const reg = @enumFromInt(Register, i - 1);
+ const reg = @as(Register, @enumFromInt(i - 1));
if (regListIsSet(reg_list, reg)) {
if (count == 0 and odd_number_of_regs) {
try emit.writeInstruction(Instruction.ldr(
@@ -1274,7 +1274,7 @@ fn mirPushPopRegs(emit: *Emit, inst: Mir.Inst.Index) !void {
var count: u6 = 0;
var other_reg: ?Register = null;
while (i < 32) : (i += 1) {
- const reg = @enumFromInt(Register, i);
+ const reg = @as(Register, @enumFromInt(i));
if (regListIsSet(reg_list, reg)) {
if (count == number_of_regs - 1 and odd_number_of_regs) {
try emit.writeInstruction(Instruction.str(
diff --git a/src/arch/aarch64/Mir.zig b/src/arch/aarch64/Mir.zig
index cc478c874a..6c0a1ec5b4 100644
--- a/src/arch/aarch64/Mir.zig
+++ b/src/arch/aarch64/Mir.zig
@@ -507,7 +507,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end
inline for (fields) |field| {
@field(result, field.name) = switch (field.type) {
u32 => mir.extra[i],
- i32 => @bitCast(i32, mir.extra[i]),
+ i32 => @as(i32, @bitCast(mir.extra[i])),
else => @compileError("bad field type"),
};
i += 1;
diff --git a/src/arch/aarch64/bits.zig b/src/arch/aarch64/bits.zig
index 3446d69950..6e4508fb0e 100644
--- a/src/arch/aarch64/bits.zig
+++ b/src/arch/aarch64/bits.zig
@@ -80,34 +80,34 @@ pub const Register = enum(u8) {
pub fn id(self: Register) u6 {
return switch (@intFromEnum(self)) {
- @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.x0)),
- @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.w0)),
+ @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.x0))),
+ @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.w0))),
@intFromEnum(Register.sp) => 32,
@intFromEnum(Register.wsp) => 32,
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.q0) + 33),
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.d0) + 33),
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.s0) + 33),
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.h0) + 33),
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.b0) + 33),
+ @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.q0) + 33)),
+ @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.d0) + 33)),
+ @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.s0) + 33)),
+ @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.h0) + 33)),
+ @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.b0) + 33)),
else => unreachable,
};
}
pub fn enc(self: Register) u5 {
return switch (@intFromEnum(self)) {
- @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.x0)),
- @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.w0)),
+ @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.x0))),
+ @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.w0))),
@intFromEnum(Register.sp) => 31,
@intFromEnum(Register.wsp) => 31,
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.q0)),
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.d0)),
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.s0)),
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.h0)),
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.b0)),
+ @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.q0))),
+ @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.d0))),
+ @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.s0))),
+ @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.h0))),
+ @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.b0))),
else => unreachable,
};
}
@@ -133,13 +133,13 @@ pub const Register = enum(u8) {
/// Convert from a general-purpose register to its 64 bit alias.
pub fn toX(self: Register) Register {
return switch (@intFromEnum(self)) {
- @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @enumFromInt(
+ @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.x0) + @intFromEnum(Register.x0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.x0) + @intFromEnum(Register.x0)),
),
- @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @enumFromInt(
+ @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.w0) + @intFromEnum(Register.x0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.w0) + @intFromEnum(Register.x0)),
),
else => unreachable,
};
@@ -148,13 +148,13 @@ pub const Register = enum(u8) {
/// Convert from a general-purpose register to its 32 bit alias.
pub fn toW(self: Register) Register {
return switch (@intFromEnum(self)) {
- @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @enumFromInt(
+ @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.x0) + @intFromEnum(Register.w0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.x0) + @intFromEnum(Register.w0)),
),
- @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @enumFromInt(
+ @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.w0) + @intFromEnum(Register.w0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.w0) + @intFromEnum(Register.w0)),
),
else => unreachable,
};
@@ -163,25 +163,25 @@ pub const Register = enum(u8) {
/// Convert from a floating-point register to its 128 bit alias.
pub fn toQ(self: Register) Register {
return switch (@intFromEnum(self)) {
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @enumFromInt(
+ @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.q0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.q0)),
),
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @enumFromInt(
+ @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.q0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.q0)),
),
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @enumFromInt(
+ @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.q0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.q0)),
),
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @enumFromInt(
+ @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.q0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.q0)),
),
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @enumFromInt(
+ @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.q0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.q0)),
),
else => unreachable,
};
@@ -190,25 +190,25 @@ pub const Register = enum(u8) {
/// Convert from a floating-point register to its 64 bit alias.
pub fn toD(self: Register) Register {
return switch (@intFromEnum(self)) {
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @enumFromInt(
+ @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.d0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.d0)),
),
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @enumFromInt(
+ @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.d0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.d0)),
),
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @enumFromInt(
+ @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.d0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.d0)),
),
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @enumFromInt(
+ @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.d0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.d0)),
),
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @enumFromInt(
+ @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.d0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.d0)),
),
else => unreachable,
};
@@ -217,25 +217,25 @@ pub const Register = enum(u8) {
/// Convert from a floating-point register to its 32 bit alias.
pub fn toS(self: Register) Register {
return switch (@intFromEnum(self)) {
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @enumFromInt(
+ @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.s0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.s0)),
),
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @enumFromInt(
+ @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.s0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.s0)),
),
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @enumFromInt(
+ @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.s0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.s0)),
),
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @enumFromInt(
+ @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.s0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.s0)),
),
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @enumFromInt(
+ @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.s0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.s0)),
),
else => unreachable,
};
@@ -244,25 +244,25 @@ pub const Register = enum(u8) {
/// Convert from a floating-point register to its 16 bit alias.
pub fn toH(self: Register) Register {
return switch (@intFromEnum(self)) {
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @enumFromInt(
+ @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.h0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.h0)),
),
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @enumFromInt(
+ @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.h0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.h0)),
),
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @enumFromInt(
+ @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.h0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.h0)),
),
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @enumFromInt(
+ @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.h0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.h0)),
),
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @enumFromInt(
+ @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.h0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.h0)),
),
else => unreachable,
};
@@ -271,25 +271,25 @@ pub const Register = enum(u8) {
/// Convert from a floating-point register to its 8 bit alias.
pub fn toB(self: Register) Register {
return switch (@intFromEnum(self)) {
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @enumFromInt(
+ @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.b0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.b0)),
),
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @enumFromInt(
+ @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.b0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.b0)),
),
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @enumFromInt(
+ @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.b0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.b0)),
),
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @enumFromInt(
+ @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.b0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.b0)),
),
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @enumFromInt(
+ @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(
Register,
- @intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.b0),
+ @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.b0)),
),
else => unreachable,
};
@@ -612,27 +612,27 @@ pub const Instruction = union(enum) {
pub fn toU32(self: Instruction) u32 {
return switch (self) {
- .move_wide_immediate => |v| @bitCast(u32, v),
- .pc_relative_address => |v| @bitCast(u32, v),
- .load_store_register => |v| @bitCast(u32, v),
- .load_store_register_pair => |v| @bitCast(u32, v),
- .load_literal => |v| @bitCast(u32, v),
- .exception_generation => |v| @bitCast(u32, v),
- .unconditional_branch_register => |v| @bitCast(u32, v),
- .unconditional_branch_immediate => |v| @bitCast(u32, v),
- .no_operation => |v| @bitCast(u32, v),
- .logical_shifted_register => |v| @bitCast(u32, v),
- .add_subtract_immediate => |v| @bitCast(u32, v),
- .logical_immediate => |v| @bitCast(u32, v),
- .bitfield => |v| @bitCast(u32, v),
- .add_subtract_shifted_register => |v| @bitCast(u32, v),
- .add_subtract_extended_register => |v| @bitCast(u32, v),
+ .move_wide_immediate => |v| @as(u32, @bitCast(v)),
+ .pc_relative_address => |v| @as(u32, @bitCast(v)),
+ .load_store_register => |v| @as(u32, @bitCast(v)),
+ .load_store_register_pair => |v| @as(u32, @bitCast(v)),
+ .load_literal => |v| @as(u32, @bitCast(v)),
+ .exception_generation => |v| @as(u32, @bitCast(v)),
+ .unconditional_branch_register => |v| @as(u32, @bitCast(v)),
+ .unconditional_branch_immediate => |v| @as(u32, @bitCast(v)),
+ .no_operation => |v| @as(u32, @bitCast(v)),
+ .logical_shifted_register => |v| @as(u32, @bitCast(v)),
+ .add_subtract_immediate => |v| @as(u32, @bitCast(v)),
+ .logical_immediate => |v| @as(u32, @bitCast(v)),
+ .bitfield => |v| @as(u32, @bitCast(v)),
+ .add_subtract_shifted_register => |v| @as(u32, @bitCast(v)),
+ .add_subtract_extended_register => |v| @as(u32, @bitCast(v)),
// TODO once packed structs work, this can be refactored
.conditional_branch => |v| @as(u32, v.cond) | (@as(u32, v.o0) << 4) | (@as(u32, v.imm19) << 5) | (@as(u32, v.o1) << 24) | (@as(u32, v.fixed) << 25),
.compare_and_branch => |v| @as(u32, v.rt) | (@as(u32, v.imm19) << 5) | (@as(u32, v.op) << 24) | (@as(u32, v.fixed) << 25) | (@as(u32, v.sf) << 31),
.conditional_select => |v| @as(u32, v.rd) | @as(u32, v.rn) << 5 | @as(u32, v.op2) << 10 | @as(u32, v.cond) << 12 | @as(u32, v.rm) << 16 | @as(u32, v.fixed) << 21 | @as(u32, v.s) << 29 | @as(u32, v.op) << 30 | @as(u32, v.sf) << 31,
- .data_processing_3_source => |v| @bitCast(u32, v),
- .data_processing_2_source => |v| @bitCast(u32, v),
+ .data_processing_3_source => |v| @as(u32, @bitCast(v)),
+ .data_processing_2_source => |v| @as(u32, @bitCast(v)),
};
}
@@ -650,7 +650,7 @@ pub const Instruction = union(enum) {
.move_wide_immediate = .{
.rd = rd.enc(),
.imm16 = imm16,
- .hw = @intCast(u2, shift / 16),
+ .hw = @as(u2, @intCast(shift / 16)),
.opc = opc,
.sf = switch (rd.size()) {
32 => 0,
@@ -663,12 +663,12 @@ pub const Instruction = union(enum) {
fn pcRelativeAddress(rd: Register, imm21: i21, op: u1) Instruction {
assert(rd.size() == 64);
- const imm21_u = @bitCast(u21, imm21);
+ const imm21_u = @as(u21, @bitCast(imm21));
return Instruction{
.pc_relative_address = .{
.rd = rd.enc(),
- .immlo = @truncate(u2, imm21_u),
- .immhi = @truncate(u19, imm21_u >> 2),
+ .immlo = @as(u2, @truncate(imm21_u)),
+ .immhi = @as(u19, @truncate(imm21_u >> 2)),
.op = op,
},
};
@@ -704,15 +704,15 @@ pub const Instruction = union(enum) {
pub fn toU12(self: LoadStoreOffset) u12 {
return switch (self) {
.immediate => |imm_type| switch (imm_type) {
- .post_index => |v| (@intCast(u12, @bitCast(u9, v)) << 2) + 1,
- .pre_index => |v| (@intCast(u12, @bitCast(u9, v)) << 2) + 3,
+ .post_index => |v| (@as(u12, @intCast(@as(u9, @bitCast(v)))) << 2) + 1,
+ .pre_index => |v| (@as(u12, @intCast(@as(u9, @bitCast(v)))) << 2) + 3,
.unsigned => |v| v,
},
.register => |r| switch (r.shift) {
- .uxtw => |v| (@intCast(u12, r.rm) << 6) + (@intCast(u12, v) << 2) + 16 + 2050,
- .lsl => |v| (@intCast(u12, r.rm) << 6) + (@intCast(u12, v) << 2) + 24 + 2050,
- .sxtw => |v| (@intCast(u12, r.rm) << 6) + (@intCast(u12, v) << 2) + 48 + 2050,
- .sxtx => |v| (@intCast(u12, r.rm) << 6) + (@intCast(u12, v) << 2) + 56 + 2050,
+ .uxtw => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 16 + 2050,
+ .lsl => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 24 + 2050,
+ .sxtw => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 48 + 2050,
+ .sxtx => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 56 + 2050,
},
};
}
@@ -894,7 +894,7 @@ pub const Instruction = union(enum) {
switch (rt1.size()) {
32 => {
assert(-256 <= offset and offset <= 252);
- const imm7 = @truncate(u7, @bitCast(u9, offset >> 2));
+ const imm7 = @as(u7, @truncate(@as(u9, @bitCast(offset >> 2))));
return Instruction{
.load_store_register_pair = .{
.rt1 = rt1.enc(),
@@ -909,7 +909,7 @@ pub const Instruction = union(enum) {
},
64 => {
assert(-512 <= offset and offset <= 504);
- const imm7 = @truncate(u7, @bitCast(u9, offset >> 3));
+ const imm7 = @as(u7, @truncate(@as(u9, @bitCast(offset >> 3))));
return Instruction{
.load_store_register_pair = .{
.rt1 = rt1.enc(),
@@ -982,7 +982,7 @@ pub const Instruction = union(enum) {
) Instruction {
return Instruction{
.unconditional_branch_immediate = .{
- .imm26 = @bitCast(u26, @intCast(i26, offset >> 2)),
+ .imm26 = @as(u26, @bitCast(@as(i26, @intCast(offset >> 2)))),
.op = op,
},
};
@@ -1188,7 +1188,7 @@ pub const Instruction = union(enum) {
.conditional_branch = .{
.cond = @intFromEnum(cond),
.o0 = o0,
- .imm19 = @bitCast(u19, @intCast(i19, offset >> 2)),
+ .imm19 = @as(u19, @bitCast(@as(i19, @intCast(offset >> 2)))),
.o1 = o1,
},
};
@@ -1204,7 +1204,7 @@ pub const Instruction = union(enum) {
return Instruction{
.compare_and_branch = .{
.rt = rt.enc(),
- .imm19 = @bitCast(u19, @intCast(i19, offset >> 2)),
+ .imm19 = @as(u19, @bitCast(@as(i19, @intCast(offset >> 2)))),
.op = op,
.sf = switch (rt.size()) {
32 => 0b0,
@@ -1609,12 +1609,12 @@ pub const Instruction = union(enum) {
}
pub fn asrImmediate(rd: Register, rn: Register, shift: u6) Instruction {
- const imms = @intCast(u6, rd.size() - 1);
+ const imms = @as(u6, @intCast(rd.size() - 1));
return sbfm(rd, rn, shift, imms);
}
pub fn sbfx(rd: Register, rn: Register, lsb: u6, width: u7) Instruction {
- return sbfm(rd, rn, lsb, @intCast(u6, lsb + width - 1));
+ return sbfm(rd, rn, lsb, @as(u6, @intCast(lsb + width - 1)));
}
pub fn sxtb(rd: Register, rn: Register) Instruction {
@@ -1631,17 +1631,17 @@ pub const Instruction = union(enum) {
}
pub fn lslImmediate(rd: Register, rn: Register, shift: u6) Instruction {
- const size = @intCast(u6, rd.size() - 1);
+ const size = @as(u6, @intCast(rd.size() - 1));
return ubfm(rd, rn, size - shift + 1, size - shift);
}
pub fn lsrImmediate(rd: Register, rn: Register, shift: u6) Instruction {
- const imms = @intCast(u6, rd.size() - 1);
+ const imms = @as(u6, @intCast(rd.size() - 1));
return ubfm(rd, rn, shift, imms);
}
pub fn ubfx(rd: Register, rn: Register, lsb: u6, width: u7) Instruction {
- return ubfm(rd, rn, lsb, @intCast(u6, lsb + width - 1));
+ return ubfm(rd, rn, lsb, @as(u6, @intCast(lsb + width - 1)));
}
pub fn uxtb(rd: Register, rn: Register) Instruction {
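The hunks above apply the same mechanical rewrite everywhere: the destination type moves out of the cast builtin and into a wrapping @as, so the builtin itself is left to infer its result type. A minimal standalone sketch of the before/after (not taken from this commit):

    const std = @import("std");

    test "cast builtins infer their destination type" {
        const wide: u32 = 300;

        // Old style: @truncate(u8, wide) took the destination type as
        // its first argument.
        // New style: the builtin takes only the operand; the result type
        // comes from context, here supplied explicitly via @as.
        const narrow = @as(u8, @truncate(wide));
        try std.testing.expectEqual(@as(u8, 44), narrow);

        // Where the context already provides a result type, the @as
        // wrapper can be dropped entirely.
        const direct: u8 = @truncate(wide);
        try std.testing.expectEqual(narrow, direct);
    }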
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 7ece4ba2e3..885a07ec6e 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -266,8 +266,8 @@ const DbgInfoReloc = struct {
.stack_argument_offset,
=> blk: {
const adjusted_stack_offset = switch (reloc.mcv) {
- .stack_offset => |offset| -@intCast(i32, offset),
- .stack_argument_offset => |offset| @intCast(i32, function.saved_regs_stack_space + offset),
+ .stack_offset => |offset| -@as(i32, @intCast(offset)),
+ .stack_argument_offset => |offset| @as(i32, @intCast(function.saved_regs_stack_space + offset)),
else => unreachable,
};
break :blk .{ .stack = .{
@@ -303,8 +303,8 @@ const DbgInfoReloc = struct {
const adjusted_offset = switch (reloc.mcv) {
.ptr_stack_offset,
.stack_offset,
- => -@intCast(i32, offset),
- .stack_argument_offset => @intCast(i32, function.saved_regs_stack_space + offset),
+ => -@as(i32, @intCast(offset)),
+ .stack_argument_offset => @as(i32, @intCast(function.saved_regs_stack_space + offset)),
else => unreachable,
};
break :blk .{ .stack = .{
@@ -446,7 +446,7 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
try self.mir_instructions.ensureUnusedCapacity(gpa, 1);
- const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len);
+ const result_index = @as(Air.Inst.Index, @intCast(self.mir_instructions.len));
self.mir_instructions.appendAssumeCapacity(inst);
return result_index;
}
@@ -466,11 +466,11 @@ pub fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 {
pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
const fields = std.meta.fields(@TypeOf(extra));
- const result = @intCast(u32, self.mir_extra.items.len);
+ const result = @as(u32, @intCast(self.mir_extra.items.len));
inline for (fields) |field| {
self.mir_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
- i32 => @bitCast(u32, @field(extra, field.name)),
+ i32 => @as(u32, @bitCast(@field(extra, field.name))),
else => @compileError("bad field type"),
});
}
@@ -522,7 +522,7 @@ fn gen(self: *Self) !void {
const ty = self.typeOfIndex(inst);
- const abi_size = @intCast(u32, ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
const abi_align = ty.abiAlignment(mod);
const stack_offset = try self.allocMem(abi_size, abi_align, inst);
try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
@@ -588,7 +588,7 @@ fn gen(self: *Self) !void {
for (self.exitlude_jump_relocs.items) |jmp_reloc| {
self.mir_instructions.set(jmp_reloc, .{
.tag = .b,
- .data = .{ .inst = @intCast(u32, self.mir_instructions.len) },
+ .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len)) },
});
}
@@ -934,15 +934,15 @@ fn finishAirBookkeeping(self: *Self) void {
fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void {
var tomb_bits = self.liveness.getTombBits(inst);
for (operands) |op| {
- const dies = @truncate(u1, tomb_bits) != 0;
+ const dies = @as(u1, @truncate(tomb_bits)) != 0;
tomb_bits >>= 1;
if (!dies) continue;
const op_int = @intFromEnum(op);
if (op_int < Air.ref_start_index) continue;
- const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
+ const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index));
self.processDeath(op_index);
}
- const is_used = @truncate(u1, tomb_bits) == 0;
+ const is_used = @as(u1, @truncate(tomb_bits)) == 0;
if (is_used) {
log.debug("%{d} => {}", .{ inst, result });
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@@ -1201,7 +1201,7 @@ fn truncRegister(
.rd = dest_reg,
.rn = operand_reg,
.lsb = 0,
- .width = @intCast(u6, int_bits),
+ .width = @as(u6, @intCast(int_bits)),
} },
});
}
@@ -1591,9 +1591,9 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.typeOf(extra.rhs);
const tuple_ty = self.typeOfIndex(inst);
- const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+ const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod)));
const tuple_align = tuple_ty.abiAlignment(mod);
- const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
+ const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod)));
switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
@@ -1704,9 +1704,9 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.typeOf(extra.rhs);
const tuple_ty = self.typeOfIndex(inst);
- const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+ const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod)));
const tuple_align = tuple_ty.abiAlignment(mod);
- const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
+ const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod)));
switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
@@ -1866,9 +1866,9 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.typeOf(extra.rhs);
const tuple_ty = self.typeOfIndex(inst);
- const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+ const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod)));
const tuple_align = tuple_ty.abiAlignment(mod);
- const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
+ const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod)));
switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
@@ -1915,7 +1915,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
.data = .{ .rr_shift = .{
.rd = dest_reg,
.rm = lhs_reg,
- .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs_mcv.immediate)),
+ .shift_amount = Instruction.ShiftAmount.imm(@as(u5, @intCast(rhs_mcv.immediate))),
} },
});
@@ -1927,7 +1927,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
.data = .{ .rr_shift = .{
.rd = reconstructed_reg,
.rm = dest_reg,
- .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs_mcv.immediate)),
+ .shift_amount = Instruction.ShiftAmount.imm(@as(u5, @intCast(rhs_mcv.immediate))),
} },
});
} else {
@@ -2020,7 +2020,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const optional_ty = self.typeOfIndex(inst);
- const abi_size = @intCast(u32, optional_ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(optional_ty.abiSize(mod)));
// Optional with a zero-bit payload type is just a boolean true
if (abi_size == 1) {
@@ -2049,7 +2049,7 @@ fn errUnionErr(
return try error_union_bind.resolveToMcv(self);
}
- const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, mod));
+ const err_offset = @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod)));
switch (try error_union_bind.resolveToMcv(self)) {
.register => {
var operand_reg: Register = undefined;
@@ -2071,15 +2071,15 @@ fn errUnionErr(
);
const err_bit_offset = err_offset * 8;
- const err_bit_size = @intCast(u32, err_ty.abiSize(mod)) * 8;
+ const err_bit_size = @as(u32, @intCast(err_ty.abiSize(mod))) * 8;
_ = try self.addInst(.{
.tag = .ubfx, // errors are unsigned integers
.data = .{ .rr_lsb_width = .{
.rd = dest_reg,
.rn = operand_reg,
- .lsb = @intCast(u5, err_bit_offset),
- .width = @intCast(u6, err_bit_size),
+ .lsb = @as(u5, @intCast(err_bit_offset)),
+ .width = @as(u6, @intCast(err_bit_size)),
} },
});
@@ -2126,7 +2126,7 @@ fn errUnionPayload(
return MCValue.none;
}
- const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod));
+ const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod)));
switch (try error_union_bind.resolveToMcv(self)) {
.register => {
var operand_reg: Register = undefined;
@@ -2148,15 +2148,15 @@ fn errUnionPayload(
);
const payload_bit_offset = payload_offset * 8;
- const payload_bit_size = @intCast(u32, payload_ty.abiSize(mod)) * 8;
+ const payload_bit_size = @as(u32, @intCast(payload_ty.abiSize(mod))) * 8;
_ = try self.addInst(.{
.tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx,
.data = .{ .rr_lsb_width = .{
.rd = dest_reg,
.rn = operand_reg,
- .lsb = @intCast(u5, payload_bit_offset),
- .width = @intCast(u6, payload_bit_size),
+ .lsb = @as(u5, @intCast(payload_bit_offset)),
+ .width = @as(u6, @intCast(payload_bit_size)),
} },
});
@@ -2235,13 +2235,13 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
const operand = try self.resolveInst(ty_op.operand);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
- const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(error_union_ty.abiSize(mod)));
const abi_align = error_union_ty.abiAlignment(mod);
- const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst));
+ const stack_offset = @as(u32, @intCast(try self.allocMem(abi_size, abi_align, inst)));
const payload_off = errUnionPayloadOffset(payload_ty, mod);
const err_off = errUnionErrorOffset(payload_ty, mod);
- try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand);
- try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 });
+ try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), operand);
+ try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), .{ .immediate = 0 });
break :result MCValue{ .stack_offset = stack_offset };
};
@@ -2259,13 +2259,13 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const operand = try self.resolveInst(ty_op.operand);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
- const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(error_union_ty.abiSize(mod)));
const abi_align = error_union_ty.abiAlignment(mod);
- const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst));
+ const stack_offset = @as(u32, @intCast(try self.allocMem(abi_size, abi_align, inst)));
const payload_off = errUnionPayloadOffset(payload_ty, mod);
const err_off = errUnionErrorOffset(payload_ty, mod);
- try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand);
- try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef);
+ try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), operand);
+ try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), .undef);
break :result MCValue{ .stack_offset = stack_offset };
};
@@ -2369,7 +2369,7 @@ fn ptrElemVal(
) !MCValue {
const mod = self.bin_file.options.module.?;
const elem_ty = ptr_ty.childType(mod);
- const elem_size = @intCast(u32, elem_ty.abiSize(mod));
+ const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
switch (elem_size) {
1, 4 => {
@@ -2480,7 +2480,7 @@ fn arrayElemVal(
=> {
const ptr_to_mcv = switch (mcv) {
.stack_offset => |off| MCValue{ .ptr_stack_offset = off },
- .memory => |addr| MCValue{ .immediate = @intCast(u32, addr) },
+ .memory => |addr| MCValue{ .immediate = @as(u32, @intCast(addr)) },
.stack_argument_offset => |off| blk: {
const reg = try self.register_manager.allocReg(null, gp);
@@ -2654,7 +2654,7 @@ fn reuseOperand(
fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
const mod = self.bin_file.options.module.?;
const elem_ty = ptr_ty.childType(mod);
- const elem_size = @intCast(u32, elem_ty.abiSize(mod));
+ const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
switch (ptr) {
.none => unreachable,
@@ -2759,7 +2759,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
const mod = self.bin_file.options.module.?;
- const elem_size = @intCast(u32, value_ty.abiSize(mod));
+ const elem_size = @as(u32, @intCast(value_ty.abiSize(mod)));
switch (ptr) {
.none => unreachable,
@@ -2814,7 +2814,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
// sub src_reg, fp, #off
try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = off });
},
- .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @intCast(u32, addr) }),
+ .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @as(u32, @intCast(addr)) }),
.stack_argument_offset => |off| {
_ = try self.addInst(.{
.tag = .ldr_ptr_stack_argument,
@@ -2882,7 +2882,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
const mcv = try self.resolveInst(operand);
const ptr_ty = self.typeOf(operand);
const struct_ty = ptr_ty.childType(mod);
- const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
+ const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod)));
switch (mcv) {
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
@@ -2906,7 +2906,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const mcv = try self.resolveInst(operand);
const struct_ty = self.typeOf(operand);
- const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
+ const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod)));
const struct_field_ty = struct_ty.structFieldType(index, mod);
switch (mcv) {
@@ -2970,15 +2970,15 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
);
const field_bit_offset = struct_field_offset * 8;
- const field_bit_size = @intCast(u32, struct_field_ty.abiSize(mod)) * 8;
+ const field_bit_size = @as(u32, @intCast(struct_field_ty.abiSize(mod))) * 8;
_ = try self.addInst(.{
.tag = if (struct_field_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx,
.data = .{ .rr_lsb_width = .{
.rd = dest_reg,
.rn = operand_reg,
- .lsb = @intCast(u5, field_bit_offset),
- .width = @intCast(u6, field_bit_size),
+ .lsb = @as(u5, @intCast(field_bit_offset)),
+ .width = @as(u6, @intCast(field_bit_size)),
} },
});
@@ -3003,7 +3003,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.fail("TODO implement @fieldParentPtr codegen for unions", .{});
}
- const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod));
+ const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(extra.field_index, mod)));
switch (field_ptr) {
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off + struct_field_offset };
@@ -3364,7 +3364,7 @@ fn binOpImmediate(
=> .{ .rr_shift = .{
.rd = dest_reg,
.rm = lhs_reg,
- .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs_immediate)),
+ .shift_amount = Instruction.ShiftAmount.imm(@as(u5, @intCast(rhs_immediate))),
} },
else => unreachable,
};
@@ -3895,7 +3895,7 @@ fn ptrArithmetic(
.One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
else => ptr_ty.childType(mod),
};
- const elem_size = @intCast(u32, elem_ty.abiSize(mod));
+ const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
const base_tag: Air.Inst.Tag = switch (tag) {
.ptr_add => .add,
@@ -4022,7 +4022,7 @@ fn genInlineMemcpy(
_ = try self.addInst(.{
.tag = .b,
.cond = .ge,
- .data = .{ .inst = @intCast(u32, self.mir_instructions.len + 5) },
+ .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len + 5)) },
});
// ldrb tmp, [src, count]
@@ -4058,7 +4058,7 @@ fn genInlineMemcpy(
// b loop
_ = try self.addInst(.{
.tag = .b,
- .data = .{ .inst = @intCast(u32, self.mir_instructions.len - 5) },
+ .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len - 5)) },
});
// end:
@@ -4126,7 +4126,7 @@ fn genInlineMemsetCode(
_ = try self.addInst(.{
.tag = .b,
.cond = .ge,
- .data = .{ .inst = @intCast(u32, self.mir_instructions.len + 4) },
+ .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len + 4)) },
});
// strb val, [src, count]
@@ -4152,7 +4152,7 @@ fn genInlineMemsetCode(
// b loop
_ = try self.addInst(.{
.tag = .b,
- .data = .{ .inst = @intCast(u32, self.mir_instructions.len - 4) },
+ .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len - 4)) },
});
// end:
@@ -4216,7 +4216,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const callee = pl_op.operand;
const extra = self.air.extraData(Air.Call, pl_op.payload);
- const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
+ const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]));
const ty = self.typeOf(callee);
const mod = self.bin_file.options.module.?;
@@ -4248,8 +4248,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: {
log.debug("airCall: return by reference", .{});
const ret_ty = fn_ty.fnReturnType(mod);
- const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod));
- const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod));
+ const ret_abi_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
+ const ret_abi_align = @as(u32, @intCast(ret_ty.abiAlignment(mod)));
const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
const ptr_ty = try mod.singleMutPtrType(ret_ty);
@@ -4294,7 +4294,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const atom = elf_file.getAtom(atom_index);
_ = try atom.getOrCreateOffsetTableEntry(elf_file);
- const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
+ const got_addr = @as(u32, @intCast(atom.getOffsetTableAddress(elf_file)));
try self.genSetReg(Type.usize, .lr, .{ .memory = got_addr });
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable; // unsupported architecture for MachO
@@ -4425,7 +4425,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
// location.
const op_inst = Air.refToIndex(un_op).?;
if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) {
- const abi_size = @intCast(u32, ret_ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
const abi_align = ret_ty.abiAlignment(mod);
const offset = try self.allocMem(abi_size, abi_align, null);
@@ -4651,7 +4651,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
if (self.liveness.operandDies(inst, 0)) {
const op_int = @intFromEnum(pl_op.operand);
if (op_int >= Air.ref_start_index) {
- const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
+ const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index));
self.processDeath(op_index);
}
}
@@ -4956,7 +4956,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[loop.end..][0..loop.data.body_len];
- const start_index = @intCast(Mir.Inst.Index, self.mir_instructions.len);
+ const start_index = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len));
try self.genBody(body);
try self.jump(start_index);
@@ -5021,7 +5021,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
var case_i: u32 = 0;
while (case_i < switch_br.data.cases_len) : (case_i += 1) {
const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
- const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
+ const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len]));
assert(items.len > 0);
const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
extra_index = case.end + items.len + case_body.len;
@@ -5139,7 +5139,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
fn performReloc(self: *Self, inst: Mir.Inst.Index) !void {
const tag = self.mir_instructions.items(.tag)[inst];
switch (tag) {
- .b => self.mir_instructions.items(.data)[inst].inst = @intCast(Air.Inst.Index, self.mir_instructions.len),
+ .b => self.mir_instructions.items(.data)[inst].inst = @as(Air.Inst.Index, @intCast(self.mir_instructions.len)),
else => unreachable,
}
}
@@ -5188,12 +5188,12 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void {
fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
- const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
- const clobbers_len = @truncate(u31, extra.data.flags);
+ const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
+ const clobbers_len = @as(u31, @truncate(extra.data.flags));
var extra_i: usize = extra.end;
- const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
+ const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]));
extra_i += outputs.len;
- const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
+ const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]));
extra_i += inputs.len;
const dead = !is_volatile and self.liveness.isUnused(inst);
@@ -5323,7 +5323,7 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
const mod = self.bin_file.options.module.?;
- const abi_size = @intCast(u32, ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
switch (mcv) {
.dead => unreachable,
.unreach, .none => return, // Nothing to do.
@@ -5376,7 +5376,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
},
2 => {
const offset = if (stack_offset <= math.maxInt(u8)) blk: {
- break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, stack_offset));
+ break :blk Instruction.ExtraLoadStoreOffset.imm(@as(u8, @intCast(stack_offset)));
} else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset }));
_ = try self.addInst(.{
@@ -5404,7 +5404,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg });
const overflow_bit_ty = ty.structFieldType(1, mod);
- const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
+ const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, mod)));
const cond_reg = try self.register_manager.allocReg(null, gp);
// C flag: movcs reg, #1
@@ -5457,7 +5457,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
// sub src_reg, fp, #off
try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = off });
},
- .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @intCast(u32, addr) }),
+ .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @as(u32, @intCast(addr)) }),
.stack_argument_offset => |off| {
_ = try self.addInst(.{
.tag = .ldr_ptr_stack_argument,
@@ -5554,7 +5554,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.tag = .movw,
.data = .{ .r_imm16 = .{
.rd = reg,
- .imm16 = @intCast(u16, x),
+ .imm16 = @as(u16, @intCast(x)),
} },
});
} else {
@@ -5562,7 +5562,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.tag = .mov,
.data = .{ .r_op_mov = .{
.rd = reg,
- .op = Instruction.Operand.imm(@truncate(u8, x), 0),
+ .op = Instruction.Operand.imm(@as(u8, @truncate(x)), 0),
} },
});
_ = try self.addInst(.{
@@ -5570,7 +5570,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.data = .{ .rr_op = .{
.rd = reg,
.rn = reg,
- .op = Instruction.Operand.imm(@truncate(u8, x >> 8), 12),
+ .op = Instruction.Operand.imm(@as(u8, @truncate(x >> 8)), 12),
} },
});
}
@@ -5585,14 +5585,14 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.tag = .movw,
.data = .{ .r_imm16 = .{
.rd = reg,
- .imm16 = @truncate(u16, x),
+ .imm16 = @as(u16, @truncate(x)),
} },
});
_ = try self.addInst(.{
.tag = .movt,
.data = .{ .r_imm16 = .{
.rd = reg,
- .imm16 = @truncate(u16, x >> 16),
+ .imm16 = @as(u16, @truncate(x >> 16)),
} },
});
} else {
@@ -5605,7 +5605,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.tag = .mov,
.data = .{ .r_op_mov = .{
.rd = reg,
- .op = Instruction.Operand.imm(@truncate(u8, x), 0),
+ .op = Instruction.Operand.imm(@as(u8, @truncate(x)), 0),
} },
});
_ = try self.addInst(.{
@@ -5613,7 +5613,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.data = .{ .rr_op = .{
.rd = reg,
.rn = reg,
- .op = Instruction.Operand.imm(@truncate(u8, x >> 8), 12),
+ .op = Instruction.Operand.imm(@as(u8, @truncate(x >> 8)), 12),
} },
});
_ = try self.addInst(.{
@@ -5621,7 +5621,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.data = .{ .rr_op = .{
.rd = reg,
.rn = reg,
- .op = Instruction.Operand.imm(@truncate(u8, x >> 16), 8),
+ .op = Instruction.Operand.imm(@as(u8, @truncate(x >> 16)), 8),
} },
});
_ = try self.addInst(.{
@@ -5629,7 +5629,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.data = .{ .rr_op = .{
.rd = reg,
.rn = reg,
- .op = Instruction.Operand.imm(@truncate(u8, x >> 24), 4),
+ .op = Instruction.Operand.imm(@as(u8, @truncate(x >> 24)), 4),
} },
});
}
@@ -5654,12 +5654,12 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.memory => |addr| {
// The value is in memory at a hard-coded address.
// If the type is a pointer, it means the pointer address is at this memory location.
- try self.genSetReg(ty, reg, .{ .immediate = @intCast(u32, addr) });
+ try self.genSetReg(ty, reg, .{ .immediate = @as(u32, @intCast(addr)) });
try self.genLdrRegister(reg, reg, ty);
},
.stack_offset => |off| {
// TODO: maybe addressing from sp instead of fp
- const abi_size = @intCast(u32, ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb,
@@ -5677,7 +5677,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
if (extra_offset) {
const offset = if (off <= math.maxInt(u8)) blk: {
- break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, off));
+ break :blk Instruction.ExtraLoadStoreOffset.imm(@as(u8, @intCast(off)));
} else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.usize, MCValue{ .immediate = off }));
_ = try self.addInst(.{
@@ -5693,7 +5693,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
});
} else {
const offset = if (off <= math.maxInt(u12)) blk: {
- break :blk Instruction.Offset.imm(@intCast(u12, off));
+ break :blk Instruction.Offset.imm(@as(u12, @intCast(off)));
} else Instruction.Offset.reg(try self.copyToTmpRegister(Type.usize, MCValue{ .immediate = off }), .none);
_ = try self.addInst(.{
@@ -5732,7 +5732,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
const mod = self.bin_file.options.module.?;
- const abi_size = @intCast(u32, ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
switch (mcv) {
.dead => unreachable,
.none, .unreach => return,
@@ -5771,7 +5771,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
},
2 => {
const offset = if (stack_offset <= math.maxInt(u8)) blk: {
- break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, stack_offset));
+ break :blk Instruction.ExtraLoadStoreOffset.imm(@as(u8, @intCast(stack_offset)));
} else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset }));
_ = try self.addInst(.{
@@ -5814,7 +5814,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
// sub src_reg, fp, #off
try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = off });
},
- .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @intCast(u32, addr) }),
+ .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @as(u32, @intCast(addr)) }),
.stack_argument_offset => |off| {
_ = try self.addInst(.{
.tag = .ldr_ptr_stack_argument,
@@ -5893,7 +5893,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
const ptr_ty = self.typeOf(ty_op.operand);
const ptr = try self.resolveInst(ty_op.operand);
const array_ty = ptr_ty.childType(mod);
- const array_len = @intCast(u32, array_ty.arrayLen(mod));
+ const array_len = @as(u32, @intCast(array_ty.arrayLen(mod)));
const stack_offset = try self.allocMem(8, 8, inst);
try self.genSetStack(ptr_ty, stack_offset, ptr);
@@ -6010,7 +6010,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const vector_ty = self.typeOfIndex(inst);
const len = vector_ty.vectorLen(mod);
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
- const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
+ const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len]));
const result: MCValue = res: {
if (self.liveness.isUnused(inst)) break :res MCValue.dead;
return self.fail("TODO implement airAggregateInit for arm", .{});
@@ -6058,7 +6058,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
const error_union_ty = self.typeOf(pl_op.operand);
const mod = self.bin_file.options.module.?;
- const error_union_size = @intCast(u32, error_union_ty.abiSize(mod));
+ const error_union_size = @as(u32, @intCast(error_union_ty.abiSize(mod)));
const error_union_align = error_union_ty.abiAlignment(mod);
// The error union will die in the body. However, we need the
@@ -6141,7 +6141,7 @@ fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
.none => .none,
.undef => .undef,
.load_got, .load_direct, .load_tlv => unreachable, // TODO
- .immediate => |imm| .{ .immediate = @truncate(u32, imm) },
+ .immediate => |imm| .{ .immediate = @as(u32, @truncate(imm)) },
.memory => |addr| .{ .memory = addr },
},
.fail => |msg| {
@@ -6198,7 +6198,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
} else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
result.return_value = .{ .none = {} };
} else {
- const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
+ const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
// TODO handle cases where multiple registers are used
if (ret_ty_size <= 4) {
result.return_value = .{ .register = c_abi_int_return_regs[0] };
@@ -6216,7 +6216,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
if (ty.toType().abiAlignment(mod) == 8)
ncrn = std.mem.alignForward(usize, ncrn, 2);
- const param_size = @intCast(u32, ty.toType().abiSize(mod));
+ const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) {
if (param_size <= 4) {
result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] };
@@ -6245,7 +6245,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
} else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
result.return_value = .{ .none = {} };
} else {
- const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
+ const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
if (ret_ty_size == 0) {
assert(ret_ty.isError(mod));
result.return_value = .{ .immediate = 0 };
@@ -6264,7 +6264,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
for (fn_info.param_types, 0..) |ty, i| {
if (ty.toType().abiSize(mod) > 0) {
- const param_size = @intCast(u32, ty.toType().abiSize(mod));
+ const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
const param_alignment = ty.toType().abiAlignment(mod);
stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment);
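The finishAir hunk earlier in this file tests the low bit of a tomb bitset and shifts, once per operand. A small sketch of that idiom in the new cast style (values invented for illustration):

    const std = @import("std");

    test "walking tomb bits one operand at a time" {
        var tomb_bits: u4 = 0b0101;
        var dies: [4]bool = undefined;
        for (&dies) |*d| {
            // The low bit says whether the current operand dies here.
            d.* = @as(u1, @truncate(tomb_bits)) != 0;
            tomb_bits >>= 1;
        }
        try std.testing.expect(dies[0] and !dies[1] and dies[2] and !dies[3]);
    }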
diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig
index 17415318de..54062d00a7 100644
--- a/src/arch/arm/Emit.zig
+++ b/src/arch/arm/Emit.zig
@@ -78,7 +78,7 @@ pub fn emitMir(
// Emit machine code
for (mir_tags, 0..) |tag, index| {
- const inst = @intCast(u32, index);
+ const inst = @as(u32, @intCast(index));
switch (tag) {
.add => try emit.mirDataProcessing(inst),
.adds => try emit.mirDataProcessing(inst),
@@ -241,7 +241,7 @@ fn lowerBranches(emit: *Emit) !void {
// TODO optimization opportunity: do this in codegen while
// generating MIR
for (mir_tags, 0..) |tag, index| {
- const inst = @intCast(u32, index);
+ const inst = @as(u32, @intCast(index));
if (isBranch(tag)) {
const target_inst = emit.branchTarget(inst);
@@ -286,7 +286,7 @@ fn lowerBranches(emit: *Emit) !void {
var current_code_offset: usize = 0;
for (mir_tags, 0..) |tag, index| {
- const inst = @intCast(u32, index);
+ const inst = @as(u32, @intCast(index));
// If this instruction is contained in the code offset
// mapping (when it is a target of a branch or if it is a
@@ -301,7 +301,7 @@ fn lowerBranches(emit: *Emit) !void {
const target_inst = emit.branchTarget(inst);
if (target_inst < inst) {
const target_offset = emit.code_offset_mapping.get(target_inst).?;
- const offset = @intCast(i64, target_offset) - @intCast(i64, current_code_offset + 8);
+ const offset = @as(i64, @intCast(target_offset)) - @as(i64, @intCast(current_code_offset + 8));
const branch_type = emit.branch_types.getPtr(inst).?;
const optimal_branch_type = try emit.optimalBranchType(tag, offset);
if (branch_type.* != optimal_branch_type) {
@@ -320,7 +320,7 @@ fn lowerBranches(emit: *Emit) !void {
for (origin_list.items) |forward_branch_inst| {
const branch_tag = emit.mir.instructions.items(.tag)[forward_branch_inst];
const forward_branch_inst_offset = emit.code_offset_mapping.get(forward_branch_inst).?;
- const offset = @intCast(i64, current_code_offset) - @intCast(i64, forward_branch_inst_offset + 8);
+ const offset = @as(i64, @intCast(current_code_offset)) - @as(i64, @intCast(forward_branch_inst_offset + 8));
const branch_type = emit.branch_types.getPtr(forward_branch_inst).?;
const optimal_branch_type = try emit.optimalBranchType(branch_tag, offset);
if (branch_type.* != optimal_branch_type) {
@@ -351,7 +351,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
}
fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
- const delta_line = @intCast(i32, line) - @intCast(i32, self.prev_di_line);
+ const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(self.prev_di_line));
const delta_pc: usize = self.code.items.len - self.prev_di_pc;
switch (self.debug_output) {
.dwarf => |dw| {
@@ -368,13 +368,13 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
// increasing the line number
try @import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line);
// increasing the pc
- const d_pc_p9 = @intCast(i64, delta_pc) - quant;
+ const d_pc_p9 = @as(i64, @intCast(delta_pc)) - quant;
if (d_pc_p9 > 0) {
// minus one because if it's the last one, we want to leave space to change the line, which is one quant
- try dbg_out.dbg_line.append(@intCast(u8, @divExact(d_pc_p9, quant) + 128) - quant);
+ try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, quant) + 128)) - quant);
if (dbg_out.pcop_change_index.*) |pci|
dbg_out.dbg_line.items[pci] += 1;
- dbg_out.pcop_change_index.* = @intCast(u32, dbg_out.dbg_line.items.len - 1);
+ dbg_out.pcop_change_index.* = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1));
} else if (d_pc_p9 == 0) {
// we don't need to do anything, because adding the quant does it for us
} else unreachable;
@@ -448,13 +448,13 @@ fn mirSubStackPointer(emit: *Emit, inst: Mir.Inst.Index) !void {
const scratch: Register = .r4;
if (Target.arm.featureSetHas(emit.target.cpu.features, .has_v7)) {
- try emit.writeInstruction(Instruction.movw(cond, scratch, @truncate(u16, imm32)));
- try emit.writeInstruction(Instruction.movt(cond, scratch, @truncate(u16, imm32 >> 16)));
+ try emit.writeInstruction(Instruction.movw(cond, scratch, @as(u16, @truncate(imm32))));
+ try emit.writeInstruction(Instruction.movt(cond, scratch, @as(u16, @truncate(imm32 >> 16))));
} else {
- try emit.writeInstruction(Instruction.mov(cond, scratch, Instruction.Operand.imm(@truncate(u8, imm32), 0)));
- try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@truncate(u8, imm32 >> 8), 12)));
- try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@truncate(u8, imm32 >> 16), 8)));
- try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@truncate(u8, imm32 >> 24), 4)));
+ try emit.writeInstruction(Instruction.mov(cond, scratch, Instruction.Operand.imm(@as(u8, @truncate(imm32)), 0)));
+ try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@as(u8, @truncate(imm32 >> 8)), 12)));
+ try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@as(u8, @truncate(imm32 >> 16)), 8)));
+ try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@as(u8, @truncate(imm32 >> 24)), 4)));
}
break :blk Instruction.Operand.reg(scratch, Instruction.Operand.Shift.none);
@@ -484,12 +484,12 @@ fn mirBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
const cond = emit.mir.instructions.items(.cond)[inst];
const target_inst = emit.mir.instructions.items(.data)[inst].inst;
- const offset = @intCast(i64, emit.code_offset_mapping.get(target_inst).?) - @intCast(i64, emit.code.items.len + 8);
+ const offset = @as(i64, @intCast(emit.code_offset_mapping.get(target_inst).?)) - @as(i64, @intCast(emit.code.items.len + 8));
const branch_type = emit.branch_types.get(inst).?;
switch (branch_type) {
.b => switch (tag) {
- .b => try emit.writeInstruction(Instruction.b(cond, @intCast(i26, offset))),
+ .b => try emit.writeInstruction(Instruction.b(cond, @as(i26, @intCast(offset)))),
else => unreachable,
},
}
@@ -585,7 +585,7 @@ fn mirLoadStackArgument(emit: *Emit, inst: Mir.Inst.Index) !void {
.ldrb_stack_argument,
=> {
const offset = if (raw_offset <= math.maxInt(u12)) blk: {
- break :blk Instruction.Offset.imm(@intCast(u12, raw_offset));
+ break :blk Instruction.Offset.imm(@as(u12, @intCast(raw_offset)));
} else return emit.fail("TODO mirLoadStack larger offsets", .{});
switch (tag) {
@@ -599,7 +599,7 @@ fn mirLoadStackArgument(emit: *Emit, inst: Mir.Inst.Index) !void {
.ldrsh_stack_argument,
=> {
const offset = if (raw_offset <= math.maxInt(u8)) blk: {
- break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, raw_offset));
+ break :blk Instruction.ExtraLoadStoreOffset.imm(@as(u8, @intCast(raw_offset)));
} else return emit.fail("TODO mirLoadStack larger offsets", .{});
switch (tag) {
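Both branch-lowering hunks above compute a signed pc-relative distance from two unsigned code offsets, widening through i64 before subtracting. A worked sketch of the arithmetic (offsets invented; the + 8 reflects ARM branches being relative to pc + 8):

    const std = @import("std");

    test "pc-relative branch offset" {
        const target_offset: usize = 16;
        const current_code_offset: usize = 64;
        // Widen both operands to i64 so a backward branch yields a
        // negative offset instead of an unsigned underflow.
        const offset = @as(i64, @intCast(target_offset)) -
            @as(i64, @intCast(current_code_offset + 8));
        try std.testing.expectEqual(@as(i64, -56), offset);
    }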
diff --git a/src/arch/arm/Mir.zig b/src/arch/arm/Mir.zig
index 736d0574bb..e890aaf29c 100644
--- a/src/arch/arm/Mir.zig
+++ b/src/arch/arm/Mir.zig
@@ -287,7 +287,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end
inline for (fields) |field| {
@field(result, field.name) = switch (field.type) {
u32 => mir.extra[i],
- i32 => @bitCast(i32, mir.extra[i]),
+ i32 => @as(i32, @bitCast(mir.extra[i])),
else => @compileError("bad field type"),
};
i += 1;
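This extraData decoder is the inverse of the addExtra encoder shown in CodeGen.zig above: typed payloads are flattened into a []u32, with i32 fields crossing via @bitCast. A simplified round-trip sketch (Payload is a made-up record type):

    const std = @import("std");

    const Payload = struct { base: u32, offset: i32 };

    fn addExtra(extra: *std.ArrayList(u32), payload: anytype) !u32 {
        const index = @as(u32, @intCast(extra.items.len));
        inline for (std.meta.fields(@TypeOf(payload))) |field| {
            try extra.append(switch (field.type) {
                u32 => @field(payload, field.name),
                i32 => @as(u32, @bitCast(@field(payload, field.name))),
                else => @compileError("bad field type"),
            });
        }
        return index;
    }

    test "extra data round-trips through []u32" {
        var extra = std.ArrayList(u32).init(std.testing.allocator);
        defer extra.deinit();

        const index = try addExtra(&extra, Payload{ .base = 7, .offset = -2 });
        try std.testing.expectEqual(@as(u32, 7), extra.items[index]);
        try std.testing.expectEqual(
            @as(i32, -2),
            @as(i32, @bitCast(extra.items[index + 1])),
        );
    }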
diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig
index a4a4fe472b..2e1e26d220 100644
--- a/src/arch/arm/abi.zig
+++ b/src/arch/arm/abi.zig
@@ -13,7 +13,7 @@ pub const Class = union(enum) {
i64_array: u8,
fn arrSize(total_size: u64, arr_size: u64) Class {
- const count = @intCast(u8, std.mem.alignForward(u64, total_size, arr_size) / arr_size);
+ const count = @as(u8, @intCast(std.mem.alignForward(u64, total_size, arr_size) / arr_size));
if (arr_size == 32) {
return .{ .i32_array = count };
} else {
diff --git a/src/arch/arm/bits.zig b/src/arch/arm/bits.zig
index 1de40a7059..6c33f3e82a 100644
--- a/src/arch/arm/bits.zig
+++ b/src/arch/arm/bits.zig
@@ -159,7 +159,7 @@ pub const Register = enum(u5) {
/// Returns the unique 4-bit ID of this register which is used in
/// the machine code
pub fn id(self: Register) u4 {
- return @truncate(u4, @intFromEnum(self));
+ return @as(u4, @truncate(@intFromEnum(self)));
}
pub fn dwarfLocOp(self: Register) u8 {
@@ -399,8 +399,8 @@ pub const Instruction = union(enum) {
pub fn toU8(self: Shift) u8 {
return switch (self) {
- .register => |v| @bitCast(u8, v),
- .immediate => |v| @bitCast(u8, v),
+ .register => |v| @as(u8, @bitCast(v)),
+ .immediate => |v| @as(u8, @bitCast(v)),
};
}
@@ -425,8 +425,8 @@ pub const Instruction = union(enum) {
pub fn toU12(self: Operand) u12 {
return switch (self) {
- .register => |v| @bitCast(u12, v),
- .immediate => |v| @bitCast(u12, v),
+ .register => |v| @as(u12, @bitCast(v)),
+ .immediate => |v| @as(u12, @bitCast(v)),
};
}
@@ -463,8 +463,8 @@ pub const Instruction = union(enum) {
if (x & mask == x) {
break Operand{
.immediate = .{
- .imm = @intCast(u8, std.math.rotl(u32, x, 2 * i)),
- .rotate = @intCast(u4, i),
+ .imm = @as(u8, @intCast(std.math.rotl(u32, x, 2 * i))),
+ .rotate = @as(u4, @intCast(i)),
},
};
}
@@ -522,7 +522,7 @@ pub const Instruction = union(enum) {
pub fn toU12(self: Offset) u12 {
return switch (self) {
- .register => |v| @bitCast(u12, v),
+ .register => |v| @as(u12, @bitCast(v)),
.immediate => |v| v,
};
}
@@ -604,20 +604,20 @@ pub const Instruction = union(enum) {
pub fn toU32(self: Instruction) u32 {
return switch (self) {
- .data_processing => |v| @bitCast(u32, v),
- .multiply => |v| @bitCast(u32, v),
- .multiply_long => |v| @bitCast(u32, v),
- .signed_multiply_halfwords => |v| @bitCast(u32, v),
- .integer_saturating_arithmetic => |v| @bitCast(u32, v),
- .bit_field_extract => |v| @bitCast(u32, v),
- .single_data_transfer => |v| @bitCast(u32, v),
- .extra_load_store => |v| @bitCast(u32, v),
- .block_data_transfer => |v| @bitCast(u32, v),
- .branch => |v| @bitCast(u32, v),
- .branch_exchange => |v| @bitCast(u32, v),
- .supervisor_call => |v| @bitCast(u32, v),
+ .data_processing => |v| @as(u32, @bitCast(v)),
+ .multiply => |v| @as(u32, @bitCast(v)),
+ .multiply_long => |v| @as(u32, @bitCast(v)),
+ .signed_multiply_halfwords => |v| @as(u32, @bitCast(v)),
+ .integer_saturating_arithmetic => |v| @as(u32, @bitCast(v)),
+ .bit_field_extract => |v| @as(u32, @bitCast(v)),
+ .single_data_transfer => |v| @as(u32, @bitCast(v)),
+ .extra_load_store => |v| @as(u32, @bitCast(v)),
+ .block_data_transfer => |v| @as(u32, @bitCast(v)),
+ .branch => |v| @as(u32, @bitCast(v)),
+ .branch_exchange => |v| @as(u32, @bitCast(v)),
+ .supervisor_call => |v| @as(u32, @bitCast(v)),
.undefined_instruction => |v| v.imm32,
- .breakpoint => |v| @intCast(u32, v.imm4) | (@intCast(u32, v.fixed_1) << 4) | (@intCast(u32, v.imm12) << 8) | (@intCast(u32, v.fixed_2_and_cond) << 20),
+ .breakpoint => |v| @as(u32, @intCast(v.imm4)) | (@as(u32, @intCast(v.fixed_1)) << 4) | (@as(u32, @intCast(v.imm12)) << 8) | (@as(u32, @intCast(v.fixed_2_and_cond)) << 20),
};
}
@@ -656,9 +656,9 @@ pub const Instruction = union(enum) {
.i = 1,
.opcode = if (top) 0b1010 else 0b1000,
.s = 0,
- .rn = @truncate(u4, imm >> 12),
+ .rn = @as(u4, @truncate(imm >> 12)),
.rd = rd.id(),
- .op2 = @truncate(u12, imm),
+ .op2 = @as(u12, @truncate(imm)),
},
};
}
@@ -760,7 +760,7 @@ pub const Instruction = union(enum) {
.rn = rn.id(),
.lsb = lsb,
.rd = rd.id(),
- .widthm1 = @intCast(u5, width - 1),
+ .widthm1 = @as(u5, @intCast(width - 1)),
.unsigned = unsigned,
.cond = @intFromEnum(cond),
},
@@ -810,11 +810,11 @@ pub const Instruction = union(enum) {
offset: ExtraLoadStoreOffset,
) Instruction {
const imm4l: u4 = switch (offset) {
- .immediate => |imm| @truncate(u4, imm),
+ .immediate => |imm| @as(u4, @truncate(imm)),
.register => |reg| reg,
};
const imm4h: u4 = switch (offset) {
- .immediate => |imm| @truncate(u4, imm >> 4),
+ .immediate => |imm| @as(u4, @truncate(imm >> 4)),
.register => 0b0000,
};
@@ -853,7 +853,7 @@ pub const Instruction = union(enum) {
) Instruction {
return Instruction{
.block_data_transfer = .{
- .register_list = @bitCast(u16, reg_list),
+ .register_list = @as(u16, @bitCast(reg_list)),
.rn = rn.id(),
.load_store = load_store,
.write_back = @intFromBool(write_back),
@@ -870,7 +870,7 @@ pub const Instruction = union(enum) {
.branch = .{
.cond = @intFromEnum(cond),
.link = link,
- .offset = @bitCast(u24, @intCast(i24, offset >> 2)),
+ .offset = @as(u24, @bitCast(@as(i24, @intCast(offset >> 2)))),
},
};
}
@@ -904,8 +904,8 @@ pub const Instruction = union(enum) {
fn breakpoint(imm: u16) Instruction {
return Instruction{
.breakpoint = .{
- .imm12 = @truncate(u12, imm >> 4),
- .imm4 = @truncate(u4, imm),
+ .imm12 = @as(u12, @truncate(imm >> 4)),
+ .imm4 = @as(u4, @truncate(imm)),
},
};
}
@@ -1319,7 +1319,7 @@ pub const Instruction = union(enum) {
const reg = @as(Register, arg);
register_list |= @as(u16, 1) << reg.id();
}
- return ldm(cond, .sp, true, @bitCast(RegisterList, register_list));
+ return ldm(cond, .sp, true, @as(RegisterList, @bitCast(register_list)));
}
}
@@ -1343,7 +1343,7 @@ pub const Instruction = union(enum) {
const reg = @as(Register, arg);
register_list |= @as(u16, 1) << reg.id();
}
- return stmdb(cond, .sp, true, @bitCast(RegisterList, register_list));
+ return stmdb(cond, .sp, true, @as(RegisterList, @bitCast(register_list)));
}
}
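The RegisterList hunks just above bit-cast between a packed struct of register flags and the raw u16 instruction field. A cut-down sketch of that round trip (Flags and its fields are invented stand-ins):

    const std = @import("std");

    const Flags = packed struct(u16) {
        r0: bool = false,
        r1: bool = false,
        _unused: u13 = 0, // bits 2..14 elided in this sketch
        pc: bool = false,
    };

    test "packed struct round-trips through its backing integer" {
        var bits: u16 = 0;
        bits |= 1 << 0; // set r0
        bits |= 1 << 15; // set pc

        const flags = @as(Flags, @bitCast(bits));
        try std.testing.expect(flags.r0 and flags.pc);
        try std.testing.expectEqual(bits, @as(u16, @bitCast(flags)));
    }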
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index cba1de92c1..d6bb9f8200 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -323,7 +323,7 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
try self.mir_instructions.ensureUnusedCapacity(gpa, 1);
- const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len);
+ const result_index = @as(Air.Inst.Index, @intCast(self.mir_instructions.len));
self.mir_instructions.appendAssumeCapacity(inst);
return result_index;
}
@@ -336,11 +336,11 @@ pub fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 {
pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
const fields = std.meta.fields(@TypeOf(extra));
- const result = @intCast(u32, self.mir_extra.items.len);
+ const result = @as(u32, @intCast(self.mir_extra.items.len));
inline for (fields) |field| {
self.mir_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
- i32 => @bitCast(u32, @field(extra, field.name)),
+ i32 => @as(u32, @bitCast(@field(extra, field.name))),
else => @compileError("bad field type"),
});
}
@@ -752,15 +752,15 @@ fn finishAirBookkeeping(self: *Self) void {
fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void {
var tomb_bits = self.liveness.getTombBits(inst);
for (operands) |op| {
- const dies = @truncate(u1, tomb_bits) != 0;
+ const dies = @as(u1, @truncate(tomb_bits)) != 0;
tomb_bits >>= 1;
if (!dies) continue;
const op_int = @intFromEnum(op);
if (op_int < Air.ref_start_index) continue;
- const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
+ const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index));
self.processDeath(op_index);
}
- const is_used = @truncate(u1, tomb_bits) == 0;
+ const is_used = @as(u1, @truncate(tomb_bits)) == 0;
if (is_used) {
log.debug("%{d} => {}", .{ inst, result });
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@@ -1709,7 +1709,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const fn_ty = self.typeOf(pl_op.operand);
const callee = pl_op.operand;
const extra = self.air.extraData(Air.Call, pl_op.payload);
- const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
+ const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]));
var info = try self.resolveCallingConventionValues(fn_ty);
defer info.deinit(self);
@@ -1747,7 +1747,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const atom = elf_file.getAtom(atom_index);
_ = try atom.getOrCreateOffsetTableEntry(elf_file);
- const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
+ const got_addr = @as(u32, @intCast(atom.getOffsetTableAddress(elf_file)));
try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr });
_ = try self.addInst(.{
.tag = .jalr,
@@ -2139,12 +2139,12 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void {
fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
- const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
- const clobbers_len = @truncate(u31, extra.data.flags);
+ const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
+ const clobbers_len = @as(u31, @truncate(extra.data.flags));
var extra_i: usize = extra.end;
- const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
+ const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]));
extra_i += outputs.len;
- const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
+ const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]));
extra_i += inputs.len;
const dead = !is_volatile and self.liveness.isUnused(inst);
@@ -2289,20 +2289,20 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa });
},
.immediate => |unsigned_x| {
- const x = @bitCast(i64, unsigned_x);
+ const x = @as(i64, @bitCast(unsigned_x));
if (math.minInt(i12) <= x and x <= math.maxInt(i12)) {
_ = try self.addInst(.{
.tag = .addi,
.data = .{ .i_type = .{
.rd = reg,
.rs1 = .zero,
- .imm12 = @intCast(i12, x),
+ .imm12 = @as(i12, @intCast(x)),
} },
});
} else if (math.minInt(i32) <= x and x <= math.maxInt(i32)) {
- const lo12 = @truncate(i12, x);
+ const lo12 = @as(i12, @truncate(x));
const carry: i32 = if (lo12 < 0) 1 else 0;
- const hi20 = @truncate(i20, (x >> 12) +% carry);
+ const hi20 = @as(i20, @truncate((x >> 12) +% carry));
// TODO: add test case for 32-bit immediate
_ = try self.addInst(.{
@@ -2501,7 +2501,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const vector_ty = self.typeOfIndex(inst);
const len = vector_ty.vectorLen(mod);
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
- const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
+ const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len]));
const result: MCValue = res: {
if (self.liveness.isUnused(inst)) break :res MCValue.dead;
return self.fail("TODO implement airAggregateInit for riscv64", .{});
@@ -2653,7 +2653,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 };
for (fn_info.param_types, 0..) |ty, i| {
- const param_size = @intCast(u32, ty.toType().abiSize(mod));
+ const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
if (param_size <= 8) {
if (next_register < argument_registers.len) {
result.args[i] = .{ .register = argument_registers[next_register] };
@@ -2690,7 +2690,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
} else switch (cc) {
.Naked => unreachable,
.Unspecified, .C => {
- const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
+ const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
if (ret_ty_size <= 8) {
result.return_value = .{ .register = .a0 };
} else if (ret_ty_size <= 16) {
diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig
index 3b330cbd3f..20f2c40ba4 100644
--- a/src/arch/riscv64/Emit.zig
+++ b/src/arch/riscv64/Emit.zig
@@ -39,7 +39,7 @@ pub fn emitMir(
// Emit machine code
for (mir_tags, 0..) |tag, index| {
- const inst = @intCast(u32, index);
+ const inst = @as(u32, @intCast(index));
switch (tag) {
.add => try emit.mirRType(inst),
.sub => try emit.mirRType(inst),
@@ -85,7 +85,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
}
fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
- const delta_line = @intCast(i32, line) - @intCast(i32, self.prev_di_line);
+ const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(self.prev_di_line));
const delta_pc: usize = self.code.items.len - self.prev_di_pc;
switch (self.debug_output) {
.dwarf => |dw| {
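
Both casts here exist so the subtraction happens in signed arithmetic; source lines can move backwards, and a u32 difference would underflow. A toy check with made-up line numbers:

    const std = @import("std");

    test "line deltas can be negative" {
        const line: u32 = 10;
        const prev_di_line: u32 = 42;
        // Casting before subtracting keeps the difference signed instead of
        // underflowing unsigned arithmetic when the line number moves backwards.
        const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(prev_di_line));
        try std.testing.expectEqual(@as(i32, -32), delta_line);
    }
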
@@ -102,13 +102,13 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
// increasing the line number
try @import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line);
// increasing the pc
- const d_pc_p9 = @intCast(i64, delta_pc) - quant;
+ const d_pc_p9 = @as(i64, @intCast(delta_pc)) - quant;
if (d_pc_p9 > 0) {
// minus one because if it's the last one, we want to leave space to change the line, which is one quantum
- try dbg_out.dbg_line.append(@intCast(u8, @divExact(d_pc_p9, quant) + 128) - quant);
+ try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, quant) + 128)) - quant);
if (dbg_out.pcop_change_index.*) |pci|
dbg_out.dbg_line.items[pci] += 1;
- dbg_out.pcop_change_index.* = @intCast(u32, dbg_out.dbg_line.items.len - 1);
+ dbg_out.pcop_change_index.* = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1));
} else if (d_pc_p9 == 0) {
// we don't need to do anything, because adding the quant does it for us
} else unreachable;
diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig
index 8905b24c3c..da62a68941 100644
--- a/src/arch/riscv64/Mir.zig
+++ b/src/arch/riscv64/Mir.zig
@@ -135,7 +135,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end
inline for (fields) |field| {
@field(result, field.name) = switch (field.type) {
u32 => mir.extra[i],
- i32 => @bitCast(i32, mir.extra[i]),
+ i32 => @as(i32, @bitCast(mir.extra[i])),
else => @compileError("bad field type"),
};
i += 1;
diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig
index 5db3bf4f05..2239bd49f8 100644
--- a/src/arch/riscv64/bits.zig
+++ b/src/arch/riscv64/bits.zig
@@ -56,12 +56,12 @@ pub const Instruction = union(enum) {
// TODO: once packed structs work we can remove this monstrosity.
pub fn toU32(self: Instruction) u32 {
return switch (self) {
- .R => |v| @bitCast(u32, v),
- .I => |v| @bitCast(u32, v),
- .S => |v| @bitCast(u32, v),
- .B => |v| @intCast(u32, v.opcode) + (@intCast(u32, v.imm11) << 7) + (@intCast(u32, v.imm1_4) << 8) + (@intCast(u32, v.funct3) << 12) + (@intCast(u32, v.rs1) << 15) + (@intCast(u32, v.rs2) << 20) + (@intCast(u32, v.imm5_10) << 25) + (@intCast(u32, v.imm12) << 31),
- .U => |v| @bitCast(u32, v),
- .J => |v| @bitCast(u32, v),
+ .R => |v| @as(u32, @bitCast(v)),
+ .I => |v| @as(u32, @bitCast(v)),
+ .S => |v| @as(u32, @bitCast(v)),
+ .B => |v| @as(u32, @intCast(v.opcode)) + (@as(u32, @intCast(v.imm11)) << 7) + (@as(u32, @intCast(v.imm1_4)) << 8) + (@as(u32, @intCast(v.funct3)) << 12) + (@as(u32, @intCast(v.rs1)) << 15) + (@as(u32, @intCast(v.rs2)) << 20) + (@as(u32, @intCast(v.imm5_10)) << 25) + (@as(u32, @intCast(v.imm12)) << 31),
+ .U => |v| @as(u32, @bitCast(v)),
+ .J => |v| @as(u32, @bitCast(v)),
};
}
@@ -80,7 +80,7 @@ pub const Instruction = union(enum) {
// RISC-V is all signed all the time -- convert immediates to unsigned for processing
fn iType(op: u7, fn3: u3, rd: Register, r1: Register, imm: i12) Instruction {
- const umm = @bitCast(u12, imm);
+ const umm = @as(u12, @bitCast(imm));
return Instruction{
.I = .{
@@ -94,7 +94,7 @@ pub const Instruction = union(enum) {
}
fn sType(op: u7, fn3: u3, r1: Register, r2: Register, imm: i12) Instruction {
- const umm = @bitCast(u12, imm);
+ const umm = @as(u12, @bitCast(imm));
return Instruction{
.S = .{
@@ -102,8 +102,8 @@ pub const Instruction = union(enum) {
.funct3 = fn3,
.rs1 = r1.id(),
.rs2 = r2.id(),
- .imm0_4 = @truncate(u5, umm),
- .imm5_11 = @truncate(u7, umm >> 5),
+ .imm0_4 = @as(u5, @truncate(umm)),
+ .imm5_11 = @as(u7, @truncate(umm >> 5)),
},
};
}
@@ -111,7 +111,7 @@ pub const Instruction = union(enum) {
// Use significance value rather than bit value, same for J-type
// -- less burden on callsite, bonus semantic checking
fn bType(op: u7, fn3: u3, r1: Register, r2: Register, imm: i13) Instruction {
- const umm = @bitCast(u13, imm);
+ const umm = @as(u13, @bitCast(imm));
assert(umm % 2 == 0); // misaligned branch target
return Instruction{
@@ -120,17 +120,17 @@ pub const Instruction = union(enum) {
.funct3 = fn3,
.rs1 = r1.id(),
.rs2 = r2.id(),
- .imm1_4 = @truncate(u4, umm >> 1),
- .imm5_10 = @truncate(u6, umm >> 5),
- .imm11 = @truncate(u1, umm >> 11),
- .imm12 = @truncate(u1, umm >> 12),
+ .imm1_4 = @as(u4, @truncate(umm >> 1)),
+ .imm5_10 = @as(u6, @truncate(umm >> 5)),
+ .imm11 = @as(u1, @truncate(umm >> 11)),
+ .imm12 = @as(u1, @truncate(umm >> 12)),
},
};
}
// We have to extract the 20 bits anyway -- let's not make it more painful
fn uType(op: u7, rd: Register, imm: i20) Instruction {
- const umm = @bitCast(u20, imm);
+ const umm = @as(u20, @bitCast(imm));
return Instruction{
.U = .{
@@ -142,17 +142,17 @@ pub const Instruction = union(enum) {
}
fn jType(op: u7, rd: Register, imm: i21) Instruction {
- const umm = @bitCast(u21, imm);
+ const umm = @as(u21, @bitCast(imm));
assert(umm % 2 == 0); // misaligned jump target
return Instruction{
.J = .{
.opcode = op,
.rd = rd.id(),
- .imm1_10 = @truncate(u10, umm >> 1),
- .imm11 = @truncate(u1, umm >> 11),
- .imm12_19 = @truncate(u8, umm >> 12),
- .imm20 = @truncate(u1, umm >> 20),
+ .imm1_10 = @as(u10, @truncate(umm >> 1)),
+ .imm11 = @as(u1, @truncate(umm >> 11)),
+ .imm12_19 = @as(u8, @truncate(umm >> 12)),
+ .imm20 = @as(u1, @truncate(umm >> 20)),
},
};
}
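
For reference, the split fields built by jType above reassemble to the original displacement; a round-trip sketch with an arbitrary even immediate:

    const std = @import("std");

    test "J-type immediate split round-trips" {
        const imm: i21 = 0x12344; // even: jump targets are 2-byte aligned
        const umm = @as(u21, @bitCast(imm));
        const imm1_10 = @as(u10, @truncate(umm >> 1));
        const imm11 = @as(u1, @truncate(umm >> 11));
        const imm12_19 = @as(u8, @truncate(umm >> 12));
        const imm20 = @as(u1, @truncate(umm >> 20));
        const rebuilt = (@as(u21, imm1_10) << 1) | (@as(u21, imm11) << 11) |
            (@as(u21, imm12_19) << 12) | (@as(u21, imm20) << 20);
        try std.testing.expectEqual(umm, rebuilt);
    }
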
@@ -258,7 +258,7 @@ pub const Instruction = union(enum) {
}
pub fn sltiu(rd: Register, r1: Register, imm: u12) Instruction {
- return iType(0b0010011, 0b011, rd, r1, @bitCast(i12, imm));
+ return iType(0b0010011, 0b011, rd, r1, @as(i12, @bitCast(imm)));
}
// Arithmetic/Logical, Register-Immediate (32-bit)
@@ -407,7 +407,7 @@ pub const Register = enum(u6) {
/// Returns the unique 4-bit ID of this register which is used in
/// the machine code
pub fn id(self: Register) u5 {
- return @truncate(u5, @intFromEnum(self));
+ return @as(u5, @truncate(@intFromEnum(self)));
}
pub fn dwarfLocOp(reg: Register) u8 {
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index f210f8e144..9975cda5cb 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -415,7 +415,7 @@ fn gen(self: *Self) !void {
.branch_predict_int = .{
.ccr = .xcc,
.cond = .al,
- .inst = @intCast(u32, self.mir_instructions.len),
+ .inst = @as(u32, @intCast(self.mir_instructions.len)),
},
},
});
@@ -840,7 +840,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const vector_ty = self.typeOfIndex(inst);
const len = vector_ty.vectorLen(mod);
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
- const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
+ const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len]));
const result: MCValue = res: {
if (self.liveness.isUnused(inst)) break :res MCValue.dead;
return self.fail("TODO implement airAggregateInit for {}", .{self.target.cpu.arch});
@@ -876,7 +876,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
const ptr_ty = self.typeOf(ty_op.operand);
const ptr = try self.resolveInst(ty_op.operand);
const array_ty = ptr_ty.childType(mod);
- const array_len = @intCast(u32, array_ty.arrayLen(mod));
+ const array_len = @as(u32, @intCast(array_ty.arrayLen(mod)));
const ptr_bits = self.target.ptrBitWidth();
const ptr_bytes = @divExact(ptr_bits, 8);
@@ -893,11 +893,11 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
const is_volatile = (extra.data.flags & 0x80000000) != 0;
- const clobbers_len = @truncate(u31, extra.data.flags);
+ const clobbers_len = @as(u31, @truncate(extra.data.flags));
var extra_i: usize = extra.end;
- const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i .. extra_i + extra.data.outputs_len]);
+ const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i .. extra_i + extra.data.outputs_len]));
extra_i += outputs.len;
- const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i .. extra_i + extra.data.inputs_len]);
+ const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i .. extra_i + extra.data.inputs_len]));
extra_i += inputs.len;
const dead = !is_volatile and self.liveness.isUnused(inst);
@@ -1237,13 +1237,13 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
switch (operand) {
.immediate => |imm| {
const swapped = switch (int_info.bits) {
- 16 => @byteSwap(@intCast(u16, imm)),
- 24 => @byteSwap(@intCast(u24, imm)),
- 32 => @byteSwap(@intCast(u32, imm)),
- 40 => @byteSwap(@intCast(u40, imm)),
- 48 => @byteSwap(@intCast(u48, imm)),
- 56 => @byteSwap(@intCast(u56, imm)),
- 64 => @byteSwap(@intCast(u64, imm)),
+ 16 => @byteSwap(@as(u16, @intCast(imm))),
+ 24 => @byteSwap(@as(u24, @intCast(imm))),
+ 32 => @byteSwap(@as(u32, @intCast(imm))),
+ 40 => @byteSwap(@as(u40, @intCast(imm))),
+ 48 => @byteSwap(@as(u48, @intCast(imm))),
+ 56 => @byteSwap(@as(u56, @intCast(imm))),
+ 64 => @byteSwap(@as(u64, @intCast(imm))),
else => return self.fail("TODO synthesize SPARCv9 byteswap for other integer sizes", .{}),
};
break :result .{ .immediate = swapped };
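
The width-specific casts in this switch matter because @byteSwap swaps within the operand's own bit width; a brief illustration with an arbitrary immediate:

    const std = @import("std");

    test "byte swap respects the operand width" {
        const imm: u64 = 0x1122;
        // Swapping within 16 bits:
        try std.testing.expectEqual(@as(u16, 0x2211), @byteSwap(@as(u16, @intCast(imm))));
        // The same value as a 24-bit operand reverses three bytes:
        try std.testing.expectEqual(@as(u24, 0x221100), @byteSwap(@as(u24, @intCast(imm))));
    }
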
@@ -1295,7 +1295,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const callee = pl_op.operand;
const extra = self.air.extraData(Air.Call, pl_op.payload);
- const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end .. extra.end + extra.data.args_len]);
+ const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end .. extra.end + extra.data.args_len]));
const ty = self.typeOf(callee);
const mod = self.bin_file.options.module.?;
const fn_ty = switch (ty.zigTypeTag(mod)) {
@@ -1348,7 +1348,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const atom = elf_file.getAtom(atom_index);
_ = try atom.getOrCreateOffsetTableEntry(elf_file);
- break :blk @intCast(u32, atom.getOffsetTableAddress(elf_file));
+ break :blk @as(u32, @intCast(atom.getOffsetTableAddress(elf_file)));
} else unreachable;
try self.genSetReg(Type.usize, .o7, .{ .memory = got_addr });
@@ -1515,7 +1515,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
if (self.liveness.operandDies(inst, 0)) {
const op_int = @intFromEnum(pl_op.operand);
if (op_int >= Air.ref_start_index) {
- const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
+ const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index));
self.processDeath(op_index);
}
}
@@ -1851,7 +1851,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[loop.end .. loop.end + loop.data.body_len];
- const start = @intCast(u32, self.mir_instructions.len);
+ const start = @as(u32, @intCast(self.mir_instructions.len));
try self.genBody(body);
try self.jump(start);
@@ -2574,7 +2574,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const mod = self.bin_file.options.module.?;
const mcv = try self.resolveInst(operand);
const struct_ty = self.typeOf(operand);
- const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
+ const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod)));
switch (mcv) {
.dead, .unreach => unreachable,
@@ -2772,7 +2772,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
const gpa = self.gpa;
try self.mir_instructions.ensureUnusedCapacity(gpa, 1);
- const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len);
+ const result_index = @as(Air.Inst.Index, @intCast(self.mir_instructions.len));
self.mir_instructions.appendAssumeCapacity(inst);
return result_index;
}
@@ -3207,7 +3207,7 @@ fn binOpImmediate(
.is_imm = true,
.rd = dest_reg,
.rs1 = lhs_reg,
- .rs2_or_imm = .{ .imm = @intCast(u12, rhs.immediate) },
+ .rs2_or_imm = .{ .imm = @as(u12, @intCast(rhs.immediate)) },
},
},
.sll,
@@ -3218,7 +3218,7 @@ fn binOpImmediate(
.is_imm = true,
.rd = dest_reg,
.rs1 = lhs_reg,
- .rs2_or_imm = .{ .imm = @intCast(u5, rhs.immediate) },
+ .rs2_or_imm = .{ .imm = @as(u5, @intCast(rhs.immediate)) },
},
},
.sllx,
@@ -3229,14 +3229,14 @@ fn binOpImmediate(
.is_imm = true,
.rd = dest_reg,
.rs1 = lhs_reg,
- .rs2_or_imm = .{ .imm = @intCast(u6, rhs.immediate) },
+ .rs2_or_imm = .{ .imm = @as(u6, @intCast(rhs.immediate)) },
},
},
.cmp => .{
.arithmetic_2op = .{
.is_imm = true,
.rs1 = lhs_reg,
- .rs2_or_imm = .{ .imm = @intCast(u12, rhs.immediate) },
+ .rs2_or_imm = .{ .imm = @as(u12, @intCast(rhs.immediate)) },
},
},
else => unreachable,
@@ -3535,7 +3535,7 @@ fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type)
return MCValue.none;
}
- const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod));
+ const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod)));
switch (error_union_mcv) {
.register => return self.fail("TODO errUnionPayload for registers", .{}),
.stack_offset => |off| {
@@ -3565,15 +3565,15 @@ fn finishAirBookkeeping(self: *Self) void {
fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void {
var tomb_bits = self.liveness.getTombBits(inst);
for (operands) |op| {
- const dies = @truncate(u1, tomb_bits) != 0;
+ const dies = @as(u1, @truncate(tomb_bits)) != 0;
tomb_bits >>= 1;
if (!dies) continue;
const op_int = @intFromEnum(op);
if (op_int < Air.ref_start_index) continue;
- const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
+ const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index));
self.processDeath(op_index);
}
- const is_used = @truncate(u1, tomb_bits) == 0;
+ const is_used = @as(u1, @truncate(tomb_bits)) == 0;
if (is_used) {
log.debug("%{d} => {}", .{ inst, result });
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
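
A sketch of the tomb-bit walk above, using a hypothetical 4-bit pattern rather than the real Liveness layout: the low bits are consumed one per operand, and the remaining bit reports whether the result is unused.

    const std = @import("std");

    test "walking tomb bits" {
        // Hypothetical pattern: operands 0 and 2 die, the result is used.
        var tomb_bits: u4 = 0b0101;
        var deaths: [3]bool = undefined;
        for (&deaths) |*dies| {
            dies.* = @as(u1, @truncate(tomb_bits)) != 0;
            tomb_bits >>= 1;
        }
        try std.testing.expectEqual([3]bool{ true, false, true }, deaths);
        // The leftover bit reports whether the result itself is unused.
        const is_used = @as(u1, @truncate(tomb_bits)) == 0;
        try std.testing.expect(is_used);
    }
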
@@ -3663,7 +3663,7 @@ fn genInlineMemcpy(
.data = .{ .branch_predict_reg = .{
.cond = .ne_zero,
.rs1 = len,
- .inst = @intCast(u32, self.mir_instructions.len - 2),
+ .inst = @as(u32, @intCast(self.mir_instructions.len - 2)),
} },
});
@@ -3838,7 +3838,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.arithmetic_2op = .{
.is_imm = true,
.rs1 = reg,
- .rs2_or_imm = .{ .imm = @truncate(u12, x) },
+ .rs2_or_imm = .{ .imm = @as(u12, @truncate(x)) },
},
},
});
@@ -3848,7 +3848,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.data = .{
.sethi = .{
.rd = reg,
- .imm = @truncate(u22, x >> 10),
+ .imm = @as(u22, @truncate(x >> 10)),
},
},
});
@@ -3860,12 +3860,12 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.is_imm = true,
.rd = reg,
.rs1 = reg,
- .rs2_or_imm = .{ .imm = @truncate(u10, x) },
+ .rs2_or_imm = .{ .imm = @as(u10, @truncate(x)) },
},
},
});
} else if (x <= math.maxInt(u44)) {
- try self.genSetReg(ty, reg, .{ .immediate = @truncate(u32, x >> 12) });
+ try self.genSetReg(ty, reg, .{ .immediate = @as(u32, @truncate(x >> 12)) });
_ = try self.addInst(.{
.tag = .sllx,
@@ -3886,7 +3886,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.is_imm = true,
.rd = reg,
.rs1 = reg,
- .rs2_or_imm = .{ .imm = @truncate(u12, x) },
+ .rs2_or_imm = .{ .imm = @as(u12, @truncate(x)) },
},
},
});
@@ -3894,8 +3894,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
// Need to allocate a temporary register to load 64-bit immediates.
const tmp_reg = try self.register_manager.allocReg(null, gp);
- try self.genSetReg(ty, tmp_reg, .{ .immediate = @truncate(u32, x) });
- try self.genSetReg(ty, reg, .{ .immediate = @truncate(u32, x >> 32) });
+ try self.genSetReg(ty, tmp_reg, .{ .immediate = @as(u32, @truncate(x)) });
+ try self.genSetReg(ty, reg, .{ .immediate = @as(u32, @truncate(x >> 32)) });
_ = try self.addInst(.{
.tag = .sllx,
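
The sethi/or pair used in the 32-bit path of genSetReg splits the constant at bit 10; a quick round-trip check with an arbitrary value:

    const std = @import("std");

    test "sethi/or splits a 32-bit immediate at bit 10" {
        const x: u32 = 0xDEADBEEF;
        const hi = @as(u22, @truncate(x >> 10));
        const lo = @as(u10, @truncate(x));
        // sethi places its 22-bit immediate in the upper bits; or fills in the rest.
        const rebuilt = (@as(u32, hi) << 10) | lo;
        try std.testing.expectEqual(x, rebuilt);
    }
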
@@ -3994,7 +3994,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });
const overflow_bit_ty = ty.structFieldType(1, mod);
- const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
+ const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, mod)));
const cond_reg = try self.register_manager.allocReg(null, gp);
// TODO handle floating point CCRs
@@ -4412,8 +4412,8 @@ fn parseRegName(name: []const u8) ?Register {
fn performReloc(self: *Self, inst: Mir.Inst.Index) !void {
const tag = self.mir_instructions.items(.tag)[inst];
switch (tag) {
- .bpcc => self.mir_instructions.items(.data)[inst].branch_predict_int.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len),
- .bpr => self.mir_instructions.items(.data)[inst].branch_predict_reg.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len),
+ .bpcc => self.mir_instructions.items(.data)[inst].branch_predict_int.inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)),
+ .bpr => self.mir_instructions.items(.data)[inst].branch_predict_reg.inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)),
else => unreachable,
}
}
@@ -4490,7 +4490,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
};
for (fn_info.param_types, 0..) |ty, i| {
- const param_size = @intCast(u32, ty.toType().abiSize(mod));
+ const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
if (param_size <= 8) {
if (next_register < argument_registers.len) {
result.args[i] = .{ .register = argument_registers[next_register] };
@@ -4522,7 +4522,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
} else if (!ret_ty.hasRuntimeBits(mod)) {
result.return_value = .{ .none = {} };
} else {
- const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
+ const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
// The callee puts the return values in %i0-%i3, which become %o0-%o3 inside the caller.
if (ret_ty_size <= 8) {
result.return_value = switch (role) {
@@ -4721,7 +4721,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
const mcv = try self.resolveInst(operand);
const ptr_ty = self.typeOf(operand);
const struct_ty = ptr_ty.childType(mod);
- const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
+ const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod)));
switch (mcv) {
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
@@ -4816,7 +4816,7 @@ fn truncRegister(
.is_imm = true,
.rd = dest_reg,
.rs1 = operand_reg,
- .rs2_or_imm = .{ .imm = @intCast(u6, 64 - int_bits) },
+ .rs2_or_imm = .{ .imm = @as(u6, @intCast(64 - int_bits)) },
},
},
});
@@ -4830,7 +4830,7 @@ fn truncRegister(
.is_imm = true,
.rd = dest_reg,
.rs1 = dest_reg,
- .rs2_or_imm = .{ .imm = @intCast(u6, int_bits) },
+ .rs2_or_imm = .{ .imm = @as(u6, @intCast(int_bits)) },
},
},
});
diff --git a/src/arch/sparc64/Emit.zig b/src/arch/sparc64/Emit.zig
index 7d16105348..2c39c70269 100644
--- a/src/arch/sparc64/Emit.zig
+++ b/src/arch/sparc64/Emit.zig
@@ -70,7 +70,7 @@ pub fn emitMir(
// Emit machine code
for (mir_tags, 0..) |tag, index| {
- const inst = @intCast(u32, index);
+ const inst = @as(u32, @intCast(index));
switch (tag) {
.dbg_line => try emit.mirDbgLine(inst),
.dbg_prologue_end => try emit.mirDebugPrologueEnd(),
@@ -294,7 +294,7 @@ fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
.bpcc => switch (tag) {
.bpcc => {
const branch_predict_int = emit.mir.instructions.items(.data)[inst].branch_predict_int;
- const offset = @intCast(i64, emit.code_offset_mapping.get(branch_predict_int.inst).?) - @intCast(i64, emit.code.items.len);
+ const offset = @as(i64, @intCast(emit.code_offset_mapping.get(branch_predict_int.inst).?)) - @as(i64, @intCast(emit.code.items.len));
log.debug("mirConditionalBranch: {} offset={}", .{ inst, offset });
try emit.writeInstruction(
@@ -303,7 +303,7 @@ fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
branch_predict_int.annul,
branch_predict_int.pt,
branch_predict_int.ccr,
- @intCast(i21, offset),
+ @as(i21, @intCast(offset)),
),
);
},
@@ -312,7 +312,7 @@ fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
.bpr => switch (tag) {
.bpr => {
const branch_predict_reg = emit.mir.instructions.items(.data)[inst].branch_predict_reg;
- const offset = @intCast(i64, emit.code_offset_mapping.get(branch_predict_reg.inst).?) - @intCast(i64, emit.code.items.len);
+ const offset = @as(i64, @intCast(emit.code_offset_mapping.get(branch_predict_reg.inst).?)) - @as(i64, @intCast(emit.code.items.len));
log.debug("mirConditionalBranch: {} offset={}", .{ inst, offset });
try emit.writeInstruction(
@@ -321,7 +321,7 @@ fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
branch_predict_reg.annul,
branch_predict_reg.pt,
branch_predict_reg.rs1,
- @intCast(i18, offset),
+ @as(i18, @intCast(offset)),
),
);
},
@@ -437,9 +437,9 @@ fn mirShift(emit: *Emit, inst: Mir.Inst.Index) !void {
if (data.is_imm) {
const imm = data.rs2_or_imm.imm;
switch (tag) {
- .sll => try emit.writeInstruction(Instruction.sll(u5, rs1, @truncate(u5, imm), rd)),
- .srl => try emit.writeInstruction(Instruction.srl(u5, rs1, @truncate(u5, imm), rd)),
- .sra => try emit.writeInstruction(Instruction.sra(u5, rs1, @truncate(u5, imm), rd)),
+ .sll => try emit.writeInstruction(Instruction.sll(u5, rs1, @as(u5, @truncate(imm)), rd)),
+ .srl => try emit.writeInstruction(Instruction.srl(u5, rs1, @as(u5, @truncate(imm)), rd)),
+ .sra => try emit.writeInstruction(Instruction.sra(u5, rs1, @as(u5, @truncate(imm)), rd)),
.sllx => try emit.writeInstruction(Instruction.sllx(u6, rs1, imm, rd)),
.srlx => try emit.writeInstruction(Instruction.srlx(u6, rs1, imm, rd)),
.srax => try emit.writeInstruction(Instruction.srax(u6, rs1, imm, rd)),
@@ -495,7 +495,7 @@ fn branchTarget(emit: *Emit, inst: Mir.Inst.Index) Mir.Inst.Index {
}
fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) !void {
- const delta_line = @intCast(i32, line) - @intCast(i32, emit.prev_di_line);
+ const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line));
const delta_pc: usize = emit.code.items.len - emit.prev_di_pc;
switch (emit.debug_output) {
.dwarf => |dbg_out| {
@@ -547,7 +547,7 @@ fn lowerBranches(emit: *Emit) !void {
// TODO optimization opportunity: do this in codegen while
// generating MIR
for (mir_tags, 0..) |tag, index| {
- const inst = @intCast(u32, index);
+ const inst = @as(u32, @intCast(index));
if (isBranch(tag)) {
const target_inst = emit.branchTarget(inst);
@@ -592,7 +592,7 @@ fn lowerBranches(emit: *Emit) !void {
var current_code_offset: usize = 0;
for (mir_tags, 0..) |tag, index| {
- const inst = @intCast(u32, index);
+ const inst = @as(u32, @intCast(index));
// If this instruction is contained in the code offset
// mapping (when it is a target of a branch or if it is a
@@ -607,7 +607,7 @@ fn lowerBranches(emit: *Emit) !void {
const target_inst = emit.branchTarget(inst);
if (target_inst < inst) {
const target_offset = emit.code_offset_mapping.get(target_inst).?;
- const offset = @intCast(i64, target_offset) - @intCast(i64, current_code_offset);
+ const offset = @as(i64, @intCast(target_offset)) - @as(i64, @intCast(current_code_offset));
const branch_type = emit.branch_types.getPtr(inst).?;
const optimal_branch_type = try emit.optimalBranchType(tag, offset);
if (branch_type.* != optimal_branch_type) {
@@ -626,7 +626,7 @@ fn lowerBranches(emit: *Emit) !void {
for (origin_list.items) |forward_branch_inst| {
const branch_tag = emit.mir.instructions.items(.tag)[forward_branch_inst];
const forward_branch_inst_offset = emit.code_offset_mapping.get(forward_branch_inst).?;
- const offset = @intCast(i64, current_code_offset) - @intCast(i64, forward_branch_inst_offset);
+ const offset = @as(i64, @intCast(current_code_offset)) - @as(i64, @intCast(forward_branch_inst_offset));
const branch_type = emit.branch_types.getPtr(forward_branch_inst).?;
const optimal_branch_type = try emit.optimalBranchType(branch_tag, offset);
if (branch_type.* != optimal_branch_type) {
diff --git a/src/arch/sparc64/Mir.zig b/src/arch/sparc64/Mir.zig
index f9a4056705..31ea4e23c8 100644
--- a/src/arch/sparc64/Mir.zig
+++ b/src/arch/sparc64/Mir.zig
@@ -379,7 +379,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end
inline for (fields) |field| {
@field(result, field.name) = switch (field.type) {
u32 => mir.extra[i],
- i32 => @bitCast(i32, mir.extra[i]),
+ i32 => @as(i32, @bitCast(mir.extra[i])),
else => @compileError("bad field type"),
};
i += 1;
diff --git a/src/arch/sparc64/bits.zig b/src/arch/sparc64/bits.zig
index 81656b422b..04da91ca74 100644
--- a/src/arch/sparc64/bits.zig
+++ b/src/arch/sparc64/bits.zig
@@ -16,7 +16,7 @@ pub const Register = enum(u6) {
// zig fmt: on
pub fn id(self: Register) u5 {
- return @truncate(u5, @intFromEnum(self));
+ return @as(u5, @truncate(@intFromEnum(self)));
}
pub fn enc(self: Register) u5 {
@@ -96,9 +96,9 @@ pub const FloatingPointRegister = enum(u7) {
pub fn id(self: FloatingPointRegister) u6 {
return switch (self.size()) {
- 32 => @truncate(u6, @intFromEnum(self)),
- 64 => @truncate(u6, (@intFromEnum(self) - 32) * 2),
- 128 => @truncate(u6, (@intFromEnum(self) - 64) * 4),
+ 32 => @as(u6, @truncate(@intFromEnum(self))),
+ 64 => @as(u6, @truncate((@intFromEnum(self) - 32) * 2)),
+ 128 => @as(u6, @truncate((@intFromEnum(self) - 64) * 4)),
else => unreachable,
};
}
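
A minimal mirror of the 64-bit arm of id() above, with an illustrative enum value; per the V9 numbering referenced here, the double-precision file aliases pairs of single-precision slots:

    const std = @import("std");

    test "64-bit float registers map to even single-precision ids" {
        // Illustrative only: 33 stands in for the enum value of the second
        // 64-bit register, mirroring the `64 =>` arm of id() above.
        const enum_value: u7 = 33;
        const id = @as(u6, @truncate((enum_value - 32) * 2));
        try std.testing.expectEqual(@as(u6, 2), id);
    }
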
@@ -109,7 +109,7 @@ pub const FloatingPointRegister = enum(u7) {
// (See section 5.1.4.1 of SPARCv9 ISA specification)
const reg_id = self.id();
- return @truncate(u5, reg_id | (reg_id >> 5));
+ return @as(u5, @truncate(reg_id | (reg_id >> 5)));
}
/// Returns the bit-width of the register.
@@ -752,13 +752,13 @@ pub const Instruction = union(enum) {
// See section 6.2 of the SPARCv9 ISA manual.
fn format1(disp: i32) Instruction {
- const udisp = @bitCast(u32, disp);
+ const udisp = @as(u32, @bitCast(disp));
// In SPARC, branch target needs to be aligned to 4 bytes.
assert(udisp % 4 == 0);
// Discard the last two bits since those are implicitly zero.
- const udisp_truncated = @truncate(u30, udisp >> 2);
+ const udisp_truncated = @as(u30, @truncate(udisp >> 2));
return Instruction{
.format_1 = .{
.disp30 = udisp_truncated,
@@ -777,13 +777,13 @@ pub const Instruction = union(enum) {
}
fn format2b(op2: u3, cond: Condition, annul: bool, disp: i24) Instruction {
- const udisp = @bitCast(u24, disp);
+ const udisp = @as(u24, @bitCast(disp));
// In SPARC, branch target needs to be aligned to 4 bytes.
assert(udisp % 4 == 0);
// Discard the last two bits since those are implicitly zero.
- const udisp_truncated = @truncate(u22, udisp >> 2);
+ const udisp_truncated = @as(u22, @truncate(udisp >> 2));
return Instruction{
.format_2b = .{
.a = @intFromBool(annul),
@@ -795,16 +795,16 @@ pub const Instruction = union(enum) {
}
fn format2c(op2: u3, cond: Condition, annul: bool, pt: bool, ccr: CCR, disp: i21) Instruction {
- const udisp = @bitCast(u21, disp);
+ const udisp = @as(u21, @bitCast(disp));
// In SPARC, branch target needs to be aligned to 4 bytes.
assert(udisp % 4 == 0);
// Discard the last two bits since those are implicitly zero.
- const udisp_truncated = @truncate(u19, udisp >> 2);
+ const udisp_truncated = @as(u19, @truncate(udisp >> 2));
- const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1);
- const ccr_cc0 = @truncate(u1, @intFromEnum(ccr));
+ const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1));
+ const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr)));
return Instruction{
.format_2c = .{
.a = @intFromBool(annul),
@@ -819,16 +819,16 @@ pub const Instruction = union(enum) {
}
fn format2d(op2: u3, rcond: RCondition, annul: bool, pt: bool, rs1: Register, disp: i18) Instruction {
- const udisp = @bitCast(u18, disp);
+ const udisp = @as(u18, @bitCast(disp));
// In SPARC, branch target needs to be aligned to 4 bytes.
assert(udisp % 4 == 0);
// Discard the last two bits since those are implicitly zero,
// and split it into low and high parts.
- const udisp_truncated = @truncate(u16, udisp >> 2);
- const udisp_hi = @truncate(u2, (udisp_truncated & 0b1100_0000_0000_0000) >> 14);
- const udisp_lo = @truncate(u14, udisp_truncated & 0b0011_1111_1111_1111);
+ const udisp_truncated = @as(u16, @truncate(udisp >> 2));
+ const udisp_hi = @as(u2, @truncate((udisp_truncated & 0b1100_0000_0000_0000) >> 14));
+ const udisp_lo = @as(u14, @truncate(udisp_truncated & 0b0011_1111_1111_1111));
return Instruction{
.format_2d = .{
.a = @intFromBool(annul),
@@ -860,7 +860,7 @@ pub const Instruction = union(enum) {
.rd = rd.enc(),
.op3 = op3,
.rs1 = rs1.enc(),
- .simm13 = @bitCast(u13, imm),
+ .simm13 = @as(u13, @bitCast(imm)),
},
};
}
@@ -880,7 +880,7 @@ pub const Instruction = union(enum) {
.op = op,
.op3 = op3,
.rs1 = rs1.enc(),
- .simm13 = @bitCast(u13, imm),
+ .simm13 = @as(u13, @bitCast(imm)),
},
};
}
@@ -904,7 +904,7 @@ pub const Instruction = union(enum) {
.op3 = op3,
.rs1 = rs1.enc(),
.rcond = @intFromEnum(rcond),
- .simm10 = @bitCast(u10, imm),
+ .simm10 = @as(u10, @bitCast(imm)),
},
};
}
@@ -922,8 +922,8 @@ pub const Instruction = union(enum) {
fn format3h(cmask: MemCompletionConstraint, mmask: MemOrderingConstraint) Instruction {
return Instruction{
.format_3h = .{
- .cmask = @bitCast(u3, cmask),
- .mmask = @bitCast(u4, mmask),
+ .cmask = @as(u3, @bitCast(cmask)),
+ .mmask = @as(u4, @bitCast(mmask)),
},
};
}
@@ -995,8 +995,8 @@ pub const Instruction = union(enum) {
};
}
fn format3o(op: u2, op3: u6, opf: u9, ccr: CCR, rs1: Register, rs2: Register) Instruction {
- const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1);
- const ccr_cc0 = @truncate(u1, @intFromEnum(ccr));
+ const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1));
+ const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr)));
return Instruction{
.format_3o = .{
.op = op,
@@ -1051,8 +1051,8 @@ pub const Instruction = union(enum) {
}
fn format4a(op3: u6, ccr: CCR, rs1: Register, rs2: Register, rd: Register) Instruction {
- const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1);
- const ccr_cc0 = @truncate(u1, @intFromEnum(ccr));
+ const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1));
+ const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr)));
return Instruction{
.format_4a = .{
.rd = rd.enc(),
@@ -1066,8 +1066,8 @@ pub const Instruction = union(enum) {
}
fn format4b(op3: u6, ccr: CCR, rs1: Register, imm: i11, rd: Register) Instruction {
- const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1);
- const ccr_cc0 = @truncate(u1, @intFromEnum(ccr));
+ const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1));
+ const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr)));
return Instruction{
.format_4b = .{
.rd = rd.enc(),
@@ -1075,15 +1075,15 @@ pub const Instruction = union(enum) {
.rs1 = rs1.enc(),
.cc1 = ccr_cc1,
.cc0 = ccr_cc0,
- .simm11 = @bitCast(u11, imm),
+ .simm11 = @as(u11, @bitCast(imm)),
},
};
}
fn format4c(op3: u6, cond: Condition, ccr: CCR, rs2: Register, rd: Register) Instruction {
- const ccr_cc2 = @truncate(u1, @intFromEnum(ccr) >> 2);
- const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1);
- const ccr_cc0 = @truncate(u1, @intFromEnum(ccr));
+ const ccr_cc2 = @as(u1, @truncate(@intFromEnum(ccr) >> 2));
+ const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1));
+ const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr)));
return Instruction{
.format_4c = .{
.rd = rd.enc(),
@@ -1098,9 +1098,9 @@ pub const Instruction = union(enum) {
}
fn format4d(op3: u6, cond: Condition, ccr: CCR, imm: i11, rd: Register) Instruction {
- const ccr_cc2 = @truncate(u1, @intFromEnum(ccr) >> 2);
- const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1);
- const ccr_cc0 = @truncate(u1, @intFromEnum(ccr));
+ const ccr_cc2 = @as(u1, @truncate(@intFromEnum(ccr) >> 2));
+ const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1));
+ const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr)));
return Instruction{
.format_4d = .{
.rd = rd.enc(),
@@ -1109,14 +1109,14 @@ pub const Instruction = union(enum) {
.cond = cond.enc(),
.cc1 = ccr_cc1,
.cc0 = ccr_cc0,
- .simm11 = @bitCast(u11, imm),
+ .simm11 = @as(u11, @bitCast(imm)),
},
};
}
fn format4e(op3: u6, ccr: CCR, rs1: Register, rd: Register, sw_trap: u7) Instruction {
- const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1);
- const ccr_cc0 = @truncate(u1, @intFromEnum(ccr));
+ const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1));
+ const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr)));
return Instruction{
.format_4e = .{
.rd = rd.enc(),
@@ -1468,8 +1468,8 @@ pub const Instruction = union(enum) {
pub fn trap(comptime s2: type, cond: ICondition, ccr: CCR, rs1: Register, rs2: s2) Instruction {
// Tcc instructions abuse the rd field to store the conditionals.
return switch (s2) {
- Register => format4a(0b11_1010, ccr, rs1, rs2, @enumFromInt(Register, @intFromEnum(cond))),
- u7 => format4e(0b11_1010, ccr, rs1, @enumFromInt(Register, @intFromEnum(cond)), rs2),
+ Register => format4a(0b11_1010, ccr, rs1, rs2, @as(Register, @enumFromInt(@intFromEnum(cond)))),
+ u7 => format4e(0b11_1010, ccr, rs1, @as(Register, @enumFromInt(@intFromEnum(cond))), rs2),
else => unreachable,
};
}
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index f9e5eed626..3a50fc9824 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -120,7 +120,7 @@ const WValue = union(enum) {
if (local_value < reserved + 2) return; // reserved locals may never be re-used. Also accounts for 2 stack locals.
const index = local_value - reserved;
- const valtype = @enumFromInt(wasm.Valtype, gen.locals.items[index]);
+ const valtype = @as(wasm.Valtype, @enumFromInt(gen.locals.items[index]));
switch (valtype) {
.i32 => gen.free_locals_i32.append(gen.gpa, local_value) catch return, // It's ok to fail any of those, a new local can be allocated instead
.i64 => gen.free_locals_i64.append(gen.gpa, local_value) catch return,
@@ -817,7 +817,7 @@ fn finishAir(func: *CodeGen, inst: Air.Inst.Index, result: WValue, operands: []c
assert(operands.len <= Liveness.bpi - 1);
var tomb_bits = func.liveness.getTombBits(inst);
for (operands) |operand| {
- const dies = @truncate(u1, tomb_bits) != 0;
+ const dies = @as(u1, @truncate(tomb_bits)) != 0;
tomb_bits >>= 1;
if (!dies) continue;
processDeath(func, operand);
@@ -910,7 +910,7 @@ fn addTag(func: *CodeGen, tag: Mir.Inst.Tag) error{OutOfMemory}!void {
}
fn addExtended(func: *CodeGen, opcode: wasm.MiscOpcode) error{OutOfMemory}!void {
- const extra_index = @intCast(u32, func.mir_extra.items.len);
+ const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
try func.mir_extra.append(func.gpa, @intFromEnum(opcode));
try func.addInst(.{ .tag = .misc_prefix, .data = .{ .payload = extra_index } });
}
@@ -934,11 +934,11 @@ fn addImm64(func: *CodeGen, imm: u64) error{OutOfMemory}!void {
/// Accepts the index into the list of 128bit-immediates
fn addImm128(func: *CodeGen, index: u32) error{OutOfMemory}!void {
const simd_values = func.simd_immediates.items[index];
- const extra_index = @intCast(u32, func.mir_extra.items.len);
+ const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
// tag + 128bit value
try func.mir_extra.ensureUnusedCapacity(func.gpa, 5);
func.mir_extra.appendAssumeCapacity(std.wasm.simdOpcode(.v128_const));
- func.mir_extra.appendSliceAssumeCapacity(@alignCast(4, mem.bytesAsSlice(u32, &simd_values)));
+ func.mir_extra.appendSliceAssumeCapacity(@alignCast(mem.bytesAsSlice(u32, &simd_values)));
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
}
@@ -979,7 +979,7 @@ fn addExtra(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 {
/// Returns the index into `mir_extra`
fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 {
const fields = std.meta.fields(@TypeOf(extra));
- const result = @intCast(u32, func.mir_extra.items.len);
+ const result = @as(u32, @intCast(func.mir_extra.items.len));
inline for (fields) |field| {
func.mir_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
@@ -1020,7 +1020,7 @@ fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype {
},
.Union => switch (ty.containerLayout(mod)) {
.Packed => {
- const int_ty = mod.intType(.unsigned, @intCast(u16, ty.bitSize(mod))) catch @panic("out of memory");
+ const int_ty = mod.intType(.unsigned, @as(u16, @intCast(ty.bitSize(mod)))) catch @panic("out of memory");
return typeToValtype(int_ty, mod);
},
else => wasm.Valtype.i32,
@@ -1050,7 +1050,7 @@ fn emitWValue(func: *CodeGen, value: WValue) InnerError!void {
.dead => unreachable, // reference to free'd `WValue` (missing reuseOperand?)
.none, .stack => {}, // no-op
.local => |idx| try func.addLabel(.local_get, idx.value),
- .imm32 => |val| try func.addImm32(@bitCast(i32, val)),
+ .imm32 => |val| try func.addImm32(@as(i32, @bitCast(val))),
.imm64 => |val| try func.addImm64(val),
.imm128 => |val| try func.addImm128(val),
.float32 => |val| try func.addInst(.{ .tag = .f32_const, .data = .{ .float32 = val } }),
@@ -1264,7 +1264,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
// In case we have a return value but the last instruction is noreturn (such as a while loop),
// we emit an unreachable instruction to tell the stack validator that part will never be reached.
if (func_type.returns.len != 0 and func.air.instructions.len > 0) {
- const inst = @intCast(u32, func.air.instructions.len - 1);
+ const inst = @as(u32, @intCast(func.air.instructions.len - 1));
const last_inst_ty = func.typeOfIndex(inst);
if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(mod) or last_inst_ty.isNoReturn(mod)) {
try func.addTag(.@"unreachable");
@@ -1287,11 +1287,11 @@ fn genFunc(func: *CodeGen) InnerError!void {
try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.initial_stack_value.local.value } });
// get the total stack size
const aligned_stack = std.mem.alignForward(u32, func.stack_size, func.stack_alignment);
- try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, aligned_stack) } });
+ try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(aligned_stack)) } });
// subtract it from the current stack pointer
try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } });
// Get the negative stack alignment
- try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, func.stack_alignment) * -1 } });
+ try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(func.stack_alignment)) * -1 } });
// Bitwise-and the value to get the new stack pointer to ensure the pointers are aligned with the abi alignment
try prologue.append(.{ .tag = .i32_and, .data = .{ .tag = {} } });
// store the current stack pointer as the bottom, which will be used to calculate all stack pointer offsets
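
The prologue's mask trick, under the usual power-of-two assumption: negating the alignment yields a mask of ones above the low bits, so the bitwise-and rounds the freshly bumped stack pointer down. With throwaway numbers:

    const std = @import("std");

    test "masking rounds the stack pointer down to the alignment" {
        const sp: i32 = 1000;
        const aligned_stack: i32 = 64; // stack size already rounded up
        const alignment: i32 = 16; // assumed to be a power of two
        // -16 is ...1111_0000 in two's complement, so the and clears the low bits.
        const bottom = (sp - aligned_stack) & (alignment * -1);
        try std.testing.expectEqual(@as(i32, 928), bottom);
    }
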
@@ -1432,7 +1432,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
if (value != .imm32 and value != .imm64) {
const opcode = buildOpcode(.{
.op = .load,
- .width = @intCast(u8, abi_size),
+ .width = @as(u8, @intCast(abi_size)),
.signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned,
.valtype1 = typeToValtype(scalar_type, mod),
});
@@ -1468,7 +1468,7 @@ fn lowerToStack(func: *CodeGen, value: WValue) !void {
if (offset.value > 0) {
switch (func.arch()) {
.wasm32 => {
- try func.addImm32(@bitCast(i32, offset.value));
+ try func.addImm32(@as(i32, @bitCast(offset.value)));
try func.addTag(.i32_add);
},
.wasm64 => {
@@ -1815,7 +1815,7 @@ fn buildPointerOffset(func: *CodeGen, ptr_value: WValue, offset: u64, action: en
if (offset + ptr_value.offset() > 0) {
switch (func.arch()) {
.wasm32 => {
- try func.addImm32(@bitCast(i32, @intCast(u32, offset + ptr_value.offset())));
+ try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(offset + ptr_value.offset())))));
try func.addTag(.i32_add);
},
.wasm64 => {
@@ -2111,7 +2111,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(operand);
const opcode = buildOpcode(.{
.op = .load,
- .width = @intCast(u8, scalar_type.abiSize(mod) * 8),
+ .width = @as(u8, @intCast(scalar_type.abiSize(mod) * 8)),
.signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned,
.valtype1 = typeToValtype(scalar_type, mod),
});
@@ -2180,7 +2180,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
if (modifier == .always_tail) return func.fail("TODO implement tail calls for wasm", .{});
const pl_op = func.air.instructions.items(.data)[inst].pl_op;
const extra = func.air.extraData(Air.Call, pl_op.payload);
- const args = @ptrCast([]const Air.Inst.Ref, func.air.extra[extra.end..][0..extra.data.args_len]);
+ const args = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[extra.end..][0..extra.data.args_len]));
const ty = func.typeOf(pl_op.operand);
const mod = func.bin_file.base.options.module.?;
@@ -2319,15 +2319,15 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{});
}
- var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(mod))) - 1);
- mask <<= @intCast(u6, ptr_info.packed_offset.bit_offset);
+ var mask = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(ty.bitSize(mod)))) - 1));
+ mask <<= @as(u6, @intCast(ptr_info.packed_offset.bit_offset));
mask ^= ~@as(u64, 0);
const shift_val = if (ptr_info.packed_offset.host_size <= 4)
WValue{ .imm32 = ptr_info.packed_offset.bit_offset }
else
WValue{ .imm64 = ptr_info.packed_offset.bit_offset };
const mask_val = if (ptr_info.packed_offset.host_size <= 4)
- WValue{ .imm32 = @truncate(u32, mask) }
+ WValue{ .imm32 = @as(u32, @truncate(mask)) }
else
WValue{ .imm64 = mask };
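
The mask built in this hunk, shown standalone with hypothetical field sizes: a run of ones the width of the field, shifted to its bit offset, then inverted so the and clears only that field before the new value is or'ed in.

    const std = @import("std");

    test "mask clears only the packed field" {
        const bit_size: u7 = 5; // hypothetical field width
        const bit_offset: u6 = 3; // hypothetical offset within the host integer
        var mask = @as(u64, @intCast((@as(u65, 1) << bit_size) - 1));
        mask <<= bit_offset;
        mask ^= ~@as(u64, 0); // invert: zeroes over the field, ones elsewhere
        const host: u64 = ~@as(u64, 0);
        try std.testing.expectEqual(~(@as(u64, 0b11111) << 3), host & mask);
    }
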
@@ -2357,7 +2357,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
return func.store(lhs, rhs, Type.anyerror, 0);
}
- const len = @intCast(u32, abi_size);
+ const len = @as(u32, @intCast(abi_size));
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
.Optional => {
@@ -2372,23 +2372,23 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
return func.store(lhs, rhs, Type.anyerror, 0);
}
- const len = @intCast(u32, abi_size);
+ const len = @as(u32, @intCast(abi_size));
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
.Struct, .Array, .Union => if (isByRef(ty, mod)) {
- const len = @intCast(u32, abi_size);
+ const len = @as(u32, @intCast(abi_size));
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
.Vector => switch (determineSimdStoreStrategy(ty, mod)) {
.unrolled => {
- const len = @intCast(u32, abi_size);
+ const len = @as(u32, @intCast(abi_size));
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
.direct => {
try func.emitWValue(lhs);
try func.lowerToStack(rhs);
// TODO: Add helper functions for simd opcodes
- const extra_index = @intCast(u32, func.mir_extra.items.len);
+ const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
// stores as := opcode, offset, alignment (opcode::memarg)
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_store),
@@ -2423,7 +2423,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
try func.store(.{ .stack = {} }, msb, Type.u64, 8 + lhs.offset());
return;
} else if (abi_size > 16) {
- try func.memcpy(lhs, rhs, .{ .imm32 = @intCast(u32, ty.abiSize(mod)) });
+ try func.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(mod))) });
},
else => if (abi_size > 8) {
return func.fail("TODO: `store` for type `{}` with abisize `{d}`", .{
@@ -2440,7 +2440,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
const valtype = typeToValtype(ty, mod);
const opcode = buildOpcode(.{
.valtype1 = valtype,
- .width = @intCast(u8, abi_size * 8),
+ .width = @as(u8, @intCast(abi_size * 8)),
.op = .store,
});
@@ -2501,7 +2501,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
if (ty.zigTypeTag(mod) == .Vector) {
// TODO: Add helper functions for simd opcodes
- const extra_index = @intCast(u32, func.mir_extra.items.len);
+ const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
// stores as := opcode, offset, alignment (opcode::memarg)
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_load),
@@ -2512,7 +2512,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
return WValue{ .stack = {} };
}
- const abi_size = @intCast(u8, ty.abiSize(mod));
+ const abi_size = @as(u8, @intCast(ty.abiSize(mod)));
const opcode = buildOpcode(.{
.valtype1 = typeToValtype(ty, mod),
.width = abi_size * 8,
@@ -2589,10 +2589,10 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
// For big integers we can ignore this as we will call into compiler-rt which handles this.
const result = switch (op) {
.shr, .shl => res: {
- const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse {
+ const lhs_wasm_bits = toWasmBits(@as(u16, @intCast(lhs_ty.bitSize(mod)))) orelse {
return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
};
- const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?;
+ const rhs_wasm_bits = toWasmBits(@as(u16, @intCast(rhs_ty.bitSize(mod)))).?;
const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: {
const tmp = try func.intcast(rhs, rhs_ty, lhs_ty);
break :blk try tmp.toLocal(func, lhs_ty);
@@ -2868,10 +2868,10 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
// For big integers we can ignore this as we will call into compiler-rt which handles this.
const result = switch (op) {
.shr, .shl => res: {
- const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse {
+ const lhs_wasm_bits = toWasmBits(@as(u16, @intCast(lhs_ty.bitSize(mod)))) orelse {
return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
};
- const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?;
+ const rhs_wasm_bits = toWasmBits(@as(u16, @intCast(rhs_ty.bitSize(mod)))).?;
const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: {
const tmp = try func.intcast(rhs, rhs_ty, lhs_ty);
break :blk try tmp.toLocal(func, lhs_ty);
@@ -2902,7 +2902,7 @@ fn wrapBinOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr
fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
const mod = func.bin_file.base.options.module.?;
assert(ty.abiSize(mod) <= 16);
- const bitsize = @intCast(u16, ty.bitSize(mod));
+ const bitsize = @as(u16, @intCast(ty.bitSize(mod)));
const wasm_bits = toWasmBits(bitsize) orelse {
return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{bitsize});
};
@@ -2916,7 +2916,7 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
const result_ptr = try func.allocStack(ty);
try func.emitWValue(result_ptr);
try func.store(.{ .stack = {} }, lsb, Type.u64, 8 + result_ptr.offset());
- const result = (@as(u64, 1) << @intCast(u6, 64 - (wasm_bits - bitsize))) - 1;
+ const result = (@as(u64, 1) << @as(u6, @intCast(64 - (wasm_bits - bitsize)))) - 1;
try func.emitWValue(result_ptr);
_ = try func.load(operand, Type.u64, 0);
try func.addImm64(result);
@@ -2925,10 +2925,10 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
return result_ptr;
}
- const result = (@as(u64, 1) << @intCast(u6, bitsize)) - 1;
+ const result = (@as(u64, 1) << @as(u6, @intCast(bitsize))) - 1;
try func.emitWValue(operand);
if (bitsize <= 32) {
- try func.addImm32(@bitCast(i32, @intCast(u32, result)));
+ try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(result)))));
try func.addTag(.i32_and);
} else if (bitsize <= 64) {
try func.addImm64(result);
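
The non-by-ref wrap is a plain mask of the low bitsize bits; a toy example with an over-wide intermediate result:

    const std = @import("std");

    test "wrapping truncates an over-wide result" {
        const bitsize: u16 = 5; // e.g. a u5 stored in a 32-bit wasm local
        const mask = (@as(u64, 1) << @as(u6, @intCast(bitsize))) - 1;
        const over_wide: u64 = 0b1110_0111; // unwrapped arithmetic result
        try std.testing.expectEqual(@as(u64, 0b111), over_wide & mask);
    }
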
@@ -2957,15 +2957,15 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
const index = elem.index;
const elem_type = mod.intern_pool.typeOf(elem.base).toType().elemType2(mod);
const elem_offset = index * elem_type.abiSize(mod);
- return func.lowerParentPtr(elem.base.toValue(), @intCast(u32, elem_offset + offset));
+ return func.lowerParentPtr(elem.base.toValue(), @as(u32, @intCast(elem_offset + offset)));
},
.field => |field| {
const parent_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod);
const field_offset = switch (parent_ty.zigTypeTag(mod)) {
.Struct => switch (parent_ty.containerLayout(mod)) {
- .Packed => parent_ty.packedStructFieldByteOffset(@intCast(usize, field.index), mod),
- else => parent_ty.structFieldOffset(@intCast(usize, field.index), mod),
+ .Packed => parent_ty.packedStructFieldByteOffset(@as(usize, @intCast(field.index)), mod),
+ else => parent_ty.structFieldOffset(@as(usize, @intCast(field.index)), mod),
},
.Union => switch (parent_ty.containerLayout(mod)) {
.Packed => 0,
@@ -2975,7 +2975,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
if (layout.payload_align > layout.tag_align) break :blk 0;
// tag is stored first so calculate offset from where payload starts
- break :blk @intCast(u32, std.mem.alignForward(u64, layout.tag_size, layout.tag_align));
+ break :blk @as(u32, @intCast(std.mem.alignForward(u64, layout.tag_size, layout.tag_align)));
},
},
.Pointer => switch (parent_ty.ptrSize(mod)) {
@@ -2988,7 +2988,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
},
else => unreachable,
};
- return func.lowerParentPtr(field.base.toValue(), @intCast(u32, offset + field_offset));
+ return func.lowerParentPtr(field.base.toValue(), @as(u32, @intCast(offset + field_offset)));
},
}
}
@@ -3045,11 +3045,11 @@ fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo(
comptime assert(@typeInfo(T).Int.signedness == .signed);
assert(bits <= 64);
const WantedT = std.meta.Int(.unsigned, @typeInfo(T).Int.bits);
- if (value >= 0) return @bitCast(WantedT, value);
- const max_value = @intCast(u64, (@as(u65, 1) << bits) - 1);
- const flipped = @intCast(T, (~-@as(i65, value)) + 1);
- const result = @bitCast(WantedT, flipped) & max_value;
- return @intCast(WantedT, result);
+ if (value >= 0) return @as(WantedT, @bitCast(value));
+ const max_value = @as(u64, @intCast((@as(u65, 1) << bits) - 1));
+ const flipped = @as(T, @intCast((~-@as(i65, value)) + 1));
+ const result = @as(WantedT, @bitCast(flipped)) & max_value;
+ return @as(WantedT, @intCast(result));
}
fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
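
The arithmetic in toTwosComplement, exercised once with a concrete value (the same expressions as above, inlined for a single case):

    const std = @import("std");

    test "two's complement bit pattern of a negative immediate" {
        const value: i32 = -1;
        const bits: u7 = 8;
        const max_value = @as(u64, @intCast((@as(u65, 1) << bits) - 1));
        const flipped = @as(i32, @intCast((~-@as(i65, value)) + 1));
        const result = @as(u32, @bitCast(flipped)) & max_value;
        // -1 in 8 bits is 0xFF, the raw pattern an i32.const immediate carries.
        try std.testing.expectEqual(@as(u64, 0xFF), result);
    }
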
@@ -3150,18 +3150,18 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
const int_info = ty.intInfo(mod);
switch (int_info.signedness) {
.signed => switch (int_info.bits) {
- 0...32 => return WValue{ .imm32 = @intCast(u32, toTwosComplement(
+ 0...32 => return WValue{ .imm32 = @as(u32, @intCast(toTwosComplement(
val.toSignedInt(mod),
- @intCast(u6, int_info.bits),
- )) },
+ @as(u6, @intCast(int_info.bits)),
+ ))) },
33...64 => return WValue{ .imm64 = toTwosComplement(
val.toSignedInt(mod),
- @intCast(u7, int_info.bits),
+ @as(u7, @intCast(int_info.bits)),
) },
else => unreachable,
},
.unsigned => switch (int_info.bits) {
- 0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) },
+ 0...32 => return WValue{ .imm32 = @as(u32, @intCast(val.toUnsignedInt(mod))) },
33...64 => return WValue{ .imm64 = val.toUnsignedInt(mod) },
else => unreachable,
},
@@ -3198,7 +3198,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
return func.lowerConstant(enum_tag.int.toValue(), int_tag_ty.toType());
},
.float => |float| switch (float.storage) {
- .f16 => |f16_val| return WValue{ .imm32 = @bitCast(u16, f16_val) },
+ .f16 => |f16_val| return WValue{ .imm32 = @as(u16, @bitCast(f16_val)) },
.f32 => |f32_val| return WValue{ .float32 = f32_val },
.f64 => |f64_val| return WValue{ .float64 = f64_val },
else => unreachable,
@@ -3254,7 +3254,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
/// Stores the value as a 128bit-immediate value by storing it inside
/// the list and returning the index into this list as `WValue`.
fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue {
- const index = @intCast(u32, func.simd_immediates.items.len);
+ const index = @as(u32, @intCast(func.simd_immediates.items.len));
try func.simd_immediates.append(func.gpa, value);
return WValue{ .imm128 = index };
}
@@ -3270,8 +3270,8 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
},
.Float => switch (ty.floatBits(func.target)) {
16 => return WValue{ .imm32 = 0xaaaaaaaa },
- 32 => return WValue{ .float32 = @bitCast(f32, @as(u32, 0xaaaaaaaa)) },
- 64 => return WValue{ .float64 = @bitCast(f64, @as(u64, 0xaaaaaaaaaaaaaaaa)) },
+ 32 => return WValue{ .float32 = @as(f32, @bitCast(@as(u32, 0xaaaaaaaa))) },
+ 64 => return WValue{ .float64 = @as(f64, @bitCast(@as(u64, 0xaaaaaaaaaaaaaaaa))) },
else => unreachable,
},
.Pointer => switch (func.arch()) {
@@ -3312,13 +3312,13 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 {
.enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, mod),
.int => |int| intStorageAsI32(int.storage, mod),
.ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int, mod),
- .err => |err| @bitCast(i32, @intCast(Module.ErrorInt, mod.global_error_set.getIndex(err.name).?)),
+ .err => |err| @as(i32, @bitCast(@as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(err.name).?)))),
else => unreachable,
},
}
return switch (ty.zigTypeTag(mod)) {
- .ErrorSet => @bitCast(i32, val.getErrorInt(mod)),
+ .ErrorSet => @as(i32, @bitCast(val.getErrorInt(mod))),
else => unreachable, // Programmer called this function for an illegal type
};
}
@@ -3329,11 +3329,11 @@ fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, mod: *Module) i32
fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Module) i32 {
return switch (storage) {
- .i64 => |x| @intCast(i32, x),
- .u64 => |x| @bitCast(i32, @intCast(u32, x)),
+ .i64 => |x| @as(i32, @intCast(x)),
+ .u64 => |x| @as(i32, @bitCast(@as(u32, @intCast(x)))),
.big_int => unreachable,
- .lazy_align => |ty| @bitCast(i32, ty.toType().abiAlignment(mod)),
- .lazy_size => |ty| @bitCast(i32, @intCast(u32, ty.toType().abiSize(mod))),
+ .lazy_align => |ty| @as(i32, @bitCast(ty.toType().abiAlignment(mod))),
+ .lazy_size => |ty| @as(i32, @bitCast(@as(u32, @intCast(ty.toType().abiSize(mod))))),
};
}
@@ -3421,7 +3421,7 @@ fn airCondBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.branches.ensureUnusedCapacity(func.gpa, 2);
{
func.branches.appendAssumeCapacity(.{});
- try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.else_deaths.len));
+ try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @as(u32, @intCast(liveness_condbr.else_deaths.len)));
defer {
var else_stack = func.branches.pop();
else_stack.deinit(func.gpa);
@@ -3433,7 +3433,7 @@ fn airCondBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// Outer block that matches the condition
{
func.branches.appendAssumeCapacity(.{});
- try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.then_deaths.len));
+ try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @as(u32, @intCast(liveness_condbr.then_deaths.len)));
defer {
var then_stack = func.branches.pop();
then_stack.deinit(func.gpa);
@@ -3715,7 +3715,7 @@ fn structFieldPtr(
}
switch (struct_ptr) {
.stack_offset => |stack_offset| {
- return WValue{ .stack_offset = .{ .value = stack_offset.value + @intCast(u32, offset), .references = 1 } };
+ return WValue{ .stack_offset = .{ .value = stack_offset.value + @as(u32, @intCast(offset)), .references = 1 } };
},
else => return func.buildPointerOffset(struct_ptr, offset, .new),
}
@@ -3755,7 +3755,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.binOp(operand, const_wvalue, backing_ty, .shr);
if (field_ty.zigTypeTag(mod) == .Float) {
- const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod)));
+ const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod))));
const truncated = try func.trunc(shifted_value, int_type, backing_ty);
const bitcasted = try func.bitcast(field_ty, int_type, truncated);
break :result try bitcasted.toLocal(func, field_ty);
@@ -3764,7 +3764,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// we can simply reuse the operand.
break :result func.reuseOperand(struct_field.struct_operand, operand);
} else if (field_ty.isPtrAtRuntime(mod)) {
- const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod)));
+ const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod))));
const truncated = try func.trunc(shifted_value, int_type, backing_ty);
break :result try truncated.toLocal(func, field_ty);
}
@@ -3783,14 +3783,14 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
}
- const union_int_type = try mod.intType(.unsigned, @intCast(u16, struct_ty.bitSize(mod)));
+ const union_int_type = try mod.intType(.unsigned, @as(u16, @intCast(struct_ty.bitSize(mod))));
if (field_ty.zigTypeTag(mod) == .Float) {
- const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod)));
+ const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod))));
const truncated = try func.trunc(operand, int_type, union_int_type);
const bitcasted = try func.bitcast(field_ty, int_type, truncated);
break :result try bitcasted.toLocal(func, field_ty);
} else if (field_ty.isPtrAtRuntime(mod)) {
- const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod)));
+ const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod))));
const truncated = try func.trunc(operand, int_type, union_int_type);
break :result try truncated.toLocal(func, field_ty);
}
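
Both packed-field paths above use the same recipe: shift the backing integer, truncate to an integer of the field's exact bit width, then bitcast to the field type. A minimal sketch with an illustrative packed struct (the compiler operates on `Type` values instead):

const std = @import("std");

test "extract a float field from a packed backing integer" {
    const Packed = packed struct { flag: u1, value: f32, pad: u31 };
    const p = Packed{ .flag = 1, .value = 2.5, .pad = 0 };

    const backing = @as(u64, @bitCast(p)); // 64-bit backing integer
    const shifted = backing >> 1; // shift `value` down past `flag`
    const as_int = @as(u32, @truncate(shifted)); // exact field width
    const field = @as(f32, @bitCast(as_int)); // reinterpret as field type
    try std.testing.expectEqual(@as(f32, 2.5), field);
}
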
@@ -3847,7 +3847,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var highest_maybe: ?i32 = null;
while (case_i < switch_br.data.cases_len) : (case_i += 1) {
const case = func.air.extraData(Air.SwitchBr.Case, extra_index);
- const items = @ptrCast([]const Air.Inst.Ref, func.air.extra[case.end..][0..case.data.items_len]);
+ const items = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[case.end..][0..case.data.items_len]));
const case_body = func.air.extra[case.end + items.len ..][0..case.data.body_len];
extra_index = case.end + items.len + case_body.len;
const values = try func.gpa.alloc(CaseValue, items.len);
@@ -3904,7 +3904,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
    // Account for the default branch, so always add '1'
- const depth = @intCast(u32, highest - lowest + @intFromBool(has_else_body)) + 1;
+ const depth = @as(u32, @intCast(highest - lowest + @intFromBool(has_else_body))) + 1;
const jump_table: Mir.JumpTable = .{ .length = depth };
const table_extra_index = try func.addExtra(jump_table);
try func.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } });
@@ -3915,7 +3915,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const idx = blk: {
for (case_list.items, 0..) |case, idx| {
for (case.values) |case_value| {
- if (case_value.integer == value) break :blk @intCast(u32, idx);
+ if (case_value.integer == value) break :blk @as(u32, @intCast(idx));
}
}
            // error sets are almost always sparse, so we use the default case
@@ -4018,7 +4018,7 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
try func.emitWValue(operand);
if (pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try func.addMemArg(.i32_load16_u, .{
- .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, mod)),
+ .offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod))),
.alignment = Type.anyerror.abiAlignment(mod),
});
}
@@ -4051,7 +4051,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo
break :result WValue{ .none = {} };
}
- const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod));
+ const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod)));
if (op_is_ptr or isByRef(payload_ty, mod)) {
break :result try func.buildPointerOffset(operand, pl_offset, .new);
}
@@ -4080,7 +4080,7 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool)
break :result func.reuseOperand(ty_op.operand, operand);
}
- const error_val = try func.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, mod)));
+ const error_val = try func.load(operand, Type.anyerror, @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod))));
break :result try error_val.toLocal(func, Type.anyerror);
};
func.finishAir(inst, result, &.{ty_op.operand});
@@ -4100,13 +4100,13 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void
}
const err_union = try func.allocStack(err_ty);
- const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new);
+ const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, mod))), .new);
try func.store(payload_ptr, operand, pl_ty, 0);
        // ensure we also write '0' to the error part, so any stale stack value there is overwritten.
try func.emitWValue(err_union);
try func.addImm32(0);
- const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod));
+ const err_val_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod)));
try func.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 });
break :result err_union;
};
@@ -4128,11 +4128,11 @@ fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const err_union = try func.allocStack(err_ty);
// store error value
- try func.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, mod)));
+ try func.store(err_union, operand, Type.anyerror, @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod))));
// write 'undefined' to the payload
- const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new);
- const len = @intCast(u32, err_ty.errorUnionPayload(mod).abiSize(mod));
+ const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, mod))), .new);
+ const len = @as(u32, @intCast(err_ty.errorUnionPayload(mod).abiSize(mod)));
try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa });
break :result err_union;
@@ -4154,8 +4154,8 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
return func.fail("todo Wasm intcast for bitsize > 128", .{});
}
- const op_bits = toWasmBits(@intCast(u16, operand_ty.bitSize(mod))).?;
- const wanted_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?;
+ const op_bits = toWasmBits(@as(u16, @intCast(operand_ty.bitSize(mod)))).?;
+ const wanted_bits = toWasmBits(@as(u16, @intCast(ty.bitSize(mod)))).?;
const result = if (op_bits == wanted_bits)
func.reuseOperand(ty_op.operand, operand)
else
@@ -4170,8 +4170,8 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// NOTE: May leave the result on the top of the stack.
fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
const mod = func.bin_file.base.options.module.?;
- const given_bitsize = @intCast(u16, given.bitSize(mod));
- const wanted_bitsize = @intCast(u16, wanted.bitSize(mod));
+ const given_bitsize = @as(u16, @intCast(given.bitSize(mod)));
+ const wanted_bitsize = @as(u16, @intCast(wanted.bitSize(mod)));
assert(given_bitsize <= 128);
assert(wanted_bitsize <= 128);
@@ -4396,7 +4396,7 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// calculate index into slice
try func.emitWValue(index);
- try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
+ try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size)))));
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
@@ -4426,7 +4426,7 @@ fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// calculate index into slice
try func.emitWValue(index);
- try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
+ try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size)))));
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
@@ -4466,13 +4466,13 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// NOTE: Resulting value is left on the stack.
fn trunc(func: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue {
const mod = func.bin_file.base.options.module.?;
- const given_bits = @intCast(u16, given_ty.bitSize(mod));
+ const given_bits = @as(u16, @intCast(given_ty.bitSize(mod)));
if (toWasmBits(given_bits) == null) {
return func.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits});
}
var result = try func.intcast(operand, given_ty, wanted_ty);
- const wanted_bits = @intCast(u16, wanted_ty.bitSize(mod));
+ const wanted_bits = @as(u16, @intCast(wanted_ty.bitSize(mod)));
const wasm_bits = toWasmBits(wanted_bits).?;
if (wasm_bits != wanted_bits) {
result = try func.wrapOperand(result, wanted_ty);
@@ -4505,7 +4505,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
// store the length of the array in the slice
- const len = WValue{ .imm32 = @intCast(u32, array_ty.arrayLen(mod)) };
+ const len = WValue{ .imm32 = @as(u32, @intCast(array_ty.arrayLen(mod))) };
try func.store(slice_local, len, Type.usize, func.ptrSize());
func.finishAir(inst, slice_local, &.{ty_op.operand});
@@ -4545,7 +4545,7 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// calculate index into slice
try func.emitWValue(index);
- try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
+ try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size)))));
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
@@ -4584,7 +4584,7 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// calculate index into ptr
try func.emitWValue(index);
- try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
+ try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size)))));
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
@@ -4612,7 +4612,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
try func.lowerToStack(ptr);
try func.emitWValue(offset);
- try func.addImm32(@bitCast(i32, @intCast(u32, pointee_ty.abiSize(mod))));
+ try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(pointee_ty.abiSize(mod))))));
try func.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode));
try func.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode));
@@ -4635,7 +4635,7 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
const value = try func.resolveInst(bin_op.rhs);
const len = switch (ptr_ty.ptrSize(mod)) {
.Slice => try func.sliceLen(ptr),
- .One => @as(WValue, .{ .imm32 = @intCast(u32, ptr_ty.childType(mod).arrayLen(mod)) }),
+ .One => @as(WValue, .{ .imm32 = @as(u32, @intCast(ptr_ty.childType(mod).arrayLen(mod))) }),
.C, .Many => unreachable,
};
@@ -4656,7 +4656,7 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
/// we implement it manually.
fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void {
const mod = func.bin_file.base.options.module.?;
- const abi_size = @intCast(u32, elem_ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
// When bulk_memory is enabled, we lower it to wasm's memset instruction.
// If not, we lower it ourselves.
@@ -4756,7 +4756,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (isByRef(array_ty, mod)) {
try func.lowerToStack(array);
try func.emitWValue(index);
- try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
+ try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size)))));
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
} else {
@@ -4772,11 +4772,11 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => unreachable,
};
- var operands = [_]u32{ std.wasm.simdOpcode(opcode), @intCast(u8, lane) };
+ var operands = [_]u32{ std.wasm.simdOpcode(opcode), @as(u8, @intCast(lane)) };
try func.emitWValue(array);
- const extra_index = @intCast(u32, func.mir_extra.items.len);
+ const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
try func.mir_extra.appendSlice(func.gpa, &operands);
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
@@ -4789,7 +4789,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// Is a non-unrolled vector (v128)
try func.lowerToStack(stack_vec);
try func.emitWValue(index);
- try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
+ try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size)))));
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
},
@@ -4886,7 +4886,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result = try func.allocLocal(ty);
try func.emitWValue(operand);
// TODO: Add helper functions for simd opcodes
- const extra_index = @intCast(u32, func.mir_extra.items.len);
+ const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
        // stored as: opcode, offset, alignment (opcode::memarg)
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
opcode,
@@ -4907,7 +4907,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
};
const result = try func.allocLocal(ty);
try func.emitWValue(operand);
- const extra_index = @intCast(u32, func.mir_extra.items.len);
+ const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
try func.mir_extra.append(func.gpa, opcode);
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
try func.addLabel(.local_set, result.local.value);
@@ -4917,13 +4917,13 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
}
const elem_size = elem_ty.bitSize(mod);
- const vector_len = @intCast(usize, ty.vectorLen(mod));
+ const vector_len = @as(usize, @intCast(ty.vectorLen(mod)));
if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) {
return func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size});
}
const result = try func.allocStack(ty);
- const elem_byte_size = @intCast(u32, elem_ty.abiSize(mod));
+ const elem_byte_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
var index: usize = 0;
var offset: u32 = 0;
while (index < vector_len) : (index += 1) {
@@ -4966,11 +4966,11 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(result);
const loaded = if (value >= 0)
- try func.load(a, child_ty, @intCast(u32, @intCast(i64, elem_size) * value))
+ try func.load(a, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * value)))
else
- try func.load(b, child_ty, @intCast(u32, @intCast(i64, elem_size) * ~value));
+ try func.load(b, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * ~value)));
- try func.store(.stack, loaded, child_ty, result.stack_offset.value + @intCast(u32, elem_size) * @intCast(u32, index));
+ try func.store(.stack, loaded, child_ty, result.stack_offset.value + @as(u32, @intCast(elem_size)) * @as(u32, @intCast(index)));
}
return func.finishAir(inst, result, &.{ extra.a, extra.b });
@@ -4980,22 +4980,22 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
} ++ [1]u32{undefined} ** 4;
var lanes = std.mem.asBytes(operands[1..]);
- for (0..@intCast(usize, mask_len)) |index| {
+ for (0..@as(usize, @intCast(mask_len))) |index| {
const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod);
const base_index = if (mask_elem >= 0)
- @intCast(u8, @intCast(i64, elem_size) * mask_elem)
+ @as(u8, @intCast(@as(i64, @intCast(elem_size)) * mask_elem))
else
- 16 + @intCast(u8, @intCast(i64, elem_size) * ~mask_elem);
+ 16 + @as(u8, @intCast(@as(i64, @intCast(elem_size)) * ~mask_elem));
- for (0..@intCast(usize, elem_size)) |byte_offset| {
- lanes[index * @intCast(usize, elem_size) + byte_offset] = base_index + @intCast(u8, byte_offset);
+ for (0..@as(usize, @intCast(elem_size))) |byte_offset| {
+ lanes[index * @as(usize, @intCast(elem_size)) + byte_offset] = base_index + @as(u8, @intCast(byte_offset));
}
}
try func.emitWValue(a);
try func.emitWValue(b);
- const extra_index = @intCast(u32, func.mir_extra.items.len);
+ const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
try func.mir_extra.appendSlice(func.gpa, &operands);
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
@@ -5015,15 +5015,15 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const result_ty = func.typeOfIndex(inst);
- const len = @intCast(usize, result_ty.arrayLen(mod));
- const elements = @ptrCast([]const Air.Inst.Ref, func.air.extra[ty_pl.payload..][0..len]);
+ const len = @as(usize, @intCast(result_ty.arrayLen(mod)));
+ const elements = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[ty_pl.payload..][0..len]));
const result: WValue = result_value: {
switch (result_ty.zigTypeTag(mod)) {
.Array => {
const result = try func.allocStack(result_ty);
const elem_ty = result_ty.childType(mod);
- const elem_size = @intCast(u32, elem_ty.abiSize(mod));
+ const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
const sentinel = if (result_ty.sentinel(mod)) |sent| blk: {
break :blk try func.lowerConstant(sent, elem_ty);
} else null;
@@ -5087,7 +5087,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
WValue{ .imm64 = current_bit };
const value = try func.resolveInst(elem);
- const value_bit_size = @intCast(u16, field.ty.bitSize(mod));
+ const value_bit_size = @as(u16, @intCast(field.ty.bitSize(mod)));
const int_ty = try mod.intType(.unsigned, value_bit_size);
            // load our current result onto the stack so we can perform all transformations
@@ -5113,7 +5113,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if ((try result_ty.structFieldValueComptime(mod, elem_index)) != null) continue;
const elem_ty = result_ty.structFieldType(elem_index, mod);
- const elem_size = @intCast(u32, elem_ty.abiSize(mod));
+ const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
const value = try func.resolveInst(elem);
try func.store(offset, value, elem_ty, 0);
@@ -5174,7 +5174,7 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new);
try func.store(payload_ptr, payload, field.ty, 0);
} else {
- try func.store(result_ptr, payload, field.ty, @intCast(u32, layout.tag_size));
+ try func.store(result_ptr, payload, field.ty, @as(u32, @intCast(layout.tag_size)));
}
if (layout.tag_size > 0) {
@@ -5187,21 +5187,21 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
result_ptr,
tag_int,
union_obj.tag_ty,
- @intCast(u32, layout.payload_size),
+ @as(u32, @intCast(layout.payload_size)),
);
}
}
break :result result_ptr;
} else {
const operand = try func.resolveInst(extra.init);
- const union_int_type = try mod.intType(.unsigned, @intCast(u16, union_ty.bitSize(mod)));
+ const union_int_type = try mod.intType(.unsigned, @as(u16, @intCast(union_ty.bitSize(mod))));
if (field.ty.zigTypeTag(mod) == .Float) {
- const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod)));
+ const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field.ty.bitSize(mod))));
const bitcasted = try func.bitcast(field.ty, int_type, operand);
const casted = try func.trunc(bitcasted, int_type, union_int_type);
break :result try casted.toLocal(func, field.ty);
} else if (field.ty.isPtrAtRuntime(mod)) {
- const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod)));
+ const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field.ty.bitSize(mod))));
const casted = try func.intcast(operand, int_type, union_int_type);
break :result try casted.toLocal(func, field.ty);
}
@@ -5334,7 +5334,7 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
    // when the tag alignment is smaller than the payload alignment, the tag
    // is stored after the payload.
const offset = if (layout.tag_align < layout.payload_align) blk: {
- break :blk @intCast(u32, layout.payload_size);
+ break :blk @as(u32, @intCast(layout.payload_size));
} else @as(u32, 0);
try func.store(union_ptr, new_tag, tag_ty, offset);
func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
@@ -5353,7 +5353,7 @@ fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
    // when the tag alignment is smaller than the payload alignment, the tag
    // is stored after the payload.
const offset = if (layout.tag_align < layout.payload_align) blk: {
- break :blk @intCast(u32, layout.payload_size);
+ break :blk @as(u32, @intCast(layout.payload_size));
} else @as(u32, 0);
const tag = try func.load(operand, tag_ty, offset);
const result = try tag.toLocal(func, tag_ty);
@@ -5458,7 +5458,7 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
operand,
.{ .imm32 = 0 },
Type.anyerror,
- @intCast(u32, errUnionErrorOffset(payload_ty, mod)),
+ @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod))),
);
const result = result: {
@@ -5466,7 +5466,7 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
break :result func.reuseOperand(ty_op.operand, operand);
}
- break :result try func.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, mod)), .new);
+ break :result try func.buildPointerOffset(operand, @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod))), .new);
};
func.finishAir(inst, result, &.{ty_op.operand});
}
@@ -5483,7 +5483,7 @@ fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result = if (field_offset != 0) result: {
const base = try func.buildPointerOffset(field_ptr, 0, .new);
try func.addLabel(.local_get, base.local.value);
- try func.addImm32(@bitCast(i32, @intCast(u32, field_offset)));
+ try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(field_offset)))));
try func.addTag(.i32_sub);
try func.addLabel(.local_set, base.local.value);
break :result base;
@@ -5514,14 +5514,14 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const slice_len = try func.sliceLen(dst);
if (ptr_elem_ty.abiSize(mod) != 1) {
try func.emitWValue(slice_len);
- try func.emitWValue(.{ .imm32 = @intCast(u32, ptr_elem_ty.abiSize(mod)) });
+ try func.emitWValue(.{ .imm32 = @as(u32, @intCast(ptr_elem_ty.abiSize(mod))) });
try func.addTag(.i32_mul);
try func.addLabel(.local_set, slice_len.local.value);
}
break :blk slice_len;
},
.One => @as(WValue, .{
- .imm32 = @intCast(u32, ptr_elem_ty.arrayLen(mod) * ptr_elem_ty.childType(mod).abiSize(mod)),
+ .imm32 = @as(u32, @intCast(ptr_elem_ty.arrayLen(mod) * ptr_elem_ty.childType(mod).abiSize(mod))),
}),
.C, .Many => unreachable,
};
@@ -5611,7 +5611,7 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(operand);
switch (func.arch()) {
.wasm32 => {
- try func.addImm32(@bitCast(i32, @intCast(u32, abi_size)));
+ try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(abi_size)))));
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
},
@@ -5708,7 +5708,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
const result_ptr = try func.allocStack(func.typeOfIndex(inst));
try func.store(result_ptr, result, lhs_ty, 0);
- const offset = @intCast(u32, lhs_ty.abiSize(mod));
+ const offset = @as(u32, @intCast(lhs_ty.abiSize(mod)));
try func.store(result_ptr, overflow_local, Type.u1, offset);
func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
@@ -5830,7 +5830,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result_ptr = try func.allocStack(func.typeOfIndex(inst));
try func.store(result_ptr, result, lhs_ty, 0);
- const offset = @intCast(u32, lhs_ty.abiSize(mod));
+ const offset = @as(u32, @intCast(lhs_ty.abiSize(mod)));
try func.store(result_ptr, overflow_local, Type.u1, offset);
func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
@@ -6005,7 +6005,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result_ptr = try func.allocStack(func.typeOfIndex(inst));
try func.store(result_ptr, bin_op_local, lhs_ty, 0);
- const offset = @intCast(u32, lhs_ty.abiSize(mod));
+ const offset = @as(u32, @intCast(lhs_ty.abiSize(mod)));
try func.store(result_ptr, overflow_bit, Type.u1, offset);
func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
@@ -6149,7 +6149,7 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
switch (wasm_bits) {
32 => {
if (wasm_bits != int_info.bits) {
- const val: u32 = @as(u32, 1) << @intCast(u5, int_info.bits);
+ const val: u32 = @as(u32, 1) << @as(u5, @intCast(int_info.bits));
// leave value on the stack
_ = try func.binOp(operand, .{ .imm32 = val }, ty, .@"or");
} else try func.emitWValue(operand);
@@ -6157,7 +6157,7 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
64 => {
if (wasm_bits != int_info.bits) {
- const val: u64 = @as(u64, 1) << @intCast(u6, int_info.bits);
+ const val: u64 = @as(u64, 1) << @as(u6, @intCast(int_info.bits));
// leave value on the stack
_ = try func.binOp(operand, .{ .imm64 = val }, ty, .@"or");
} else try func.emitWValue(operand);
@@ -6172,7 +6172,7 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addTag(.i64_ctz);
_ = try func.load(operand, Type.u64, 8);
if (wasm_bits != int_info.bits) {
- try func.addImm64(@as(u64, 1) << @intCast(u6, int_info.bits - 64));
+ try func.addImm64(@as(u64, 1) << @as(u6, @intCast(int_info.bits - 64)));
try func.addTag(.i64_or);
}
try func.addTag(.i64_ctz);
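
The width-mismatch branches above OR in a guard bit at position `bits`, so that a zero operand reports the narrow type's width rather than the full wasm word's. A standalone sketch of that trick, assuming a sub-32-bit integer stored in a 32-bit word:

const std = @import("std");

// Count trailing zeros of a narrow integer stored in a u32: the guard
// bit at position `bits` caps ctz(0) at `bits` instead of 32.
fn ctzNarrow(comptime bits: u5, x: u32) u32 {
    const guard = @as(u32, 1) << bits;
    return @ctz(x | guard);
}

test "guard bit caps ctz at the narrow width" {
    try std.testing.expectEqual(@as(u32, 24), ctzNarrow(24, 0));
    try std.testing.expectEqual(@as(u32, 3), ctzNarrow(24, 0b1000));
}
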
@@ -6275,7 +6275,7 @@ fn lowerTry(
// check if the error tag is set for the error union.
try func.emitWValue(err_union);
if (pl_has_bits) {
- const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod));
+ const err_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod)));
try func.addMemArg(.i32_load16_u, .{
.offset = err_union.offset() + err_offset,
.alignment = Type.anyerror.abiAlignment(mod),
@@ -6300,7 +6300,7 @@ fn lowerTry(
return WValue{ .none = {} };
}
- const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, mod));
+ const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(pl_ty, mod)));
if (isByRef(pl_ty, mod)) {
return buildPointerOffset(func, err_union, pl_offset, .new);
}
@@ -6590,9 +6590,9 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
var bin_result = try (try func.binOp(lhs, rhs, ty, op)).toLocal(func, ty);
defer bin_result.free(func);
if (wasm_bits != int_info.bits and op == .add) {
- const val: u64 = @intCast(u64, (@as(u65, 1) << @intCast(u7, int_info.bits)) - 1);
+ const val: u64 = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(int_info.bits))) - 1));
const imm_val = switch (wasm_bits) {
- 32 => WValue{ .imm32 = @intCast(u32, val) },
+ 32 => WValue{ .imm32 = @as(u32, @intCast(val)) },
64 => WValue{ .imm64 = val },
else => unreachable,
};
@@ -6603,7 +6603,7 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
} else {
switch (wasm_bits) {
32 => try func.addImm32(if (op == .add) @as(i32, -1) else 0),
- 64 => try func.addImm64(if (op == .add) @bitCast(u64, @as(i64, -1)) else 0),
+ 64 => try func.addImm64(if (op == .add) @as(u64, @bitCast(@as(i64, -1))) else 0),
else => unreachable,
}
try func.emitWValue(bin_result);
@@ -6629,16 +6629,16 @@ fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type,
break :rhs try (try func.signAbsValue(rhs_operand, ty)).toLocal(func, ty);
} else rhs_operand;
- const max_val: u64 = @intCast(u64, (@as(u65, 1) << @intCast(u7, int_info.bits - 1)) - 1);
- const min_val: i64 = (-@intCast(i64, @intCast(u63, max_val))) - 1;
+ const max_val: u64 = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(int_info.bits - 1))) - 1));
+ const min_val: i64 = (-@as(i64, @intCast(@as(u63, @intCast(max_val))))) - 1;
const max_wvalue = switch (wasm_bits) {
- 32 => WValue{ .imm32 = @truncate(u32, max_val) },
+ 32 => WValue{ .imm32 = @as(u32, @truncate(max_val)) },
64 => WValue{ .imm64 = max_val },
else => unreachable,
};
const min_wvalue = switch (wasm_bits) {
- 32 => WValue{ .imm32 = @bitCast(u32, @truncate(i32, min_val)) },
- 64 => WValue{ .imm64 = @bitCast(u64, min_val) },
+ 32 => WValue{ .imm32 = @as(u32, @bitCast(@as(i32, @truncate(min_val)))) },
+ 64 => WValue{ .imm64 = @as(u64, @bitCast(min_val)) },
else => unreachable,
};
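
The bounds above go through a `u65` intermediate so that `1 << bits` cannot overflow even for `bits == 64`, and the minimum is derived as `-max - 1`. A standalone sketch (the signature and names are illustrative):

const std = @import("std");

fn satBounds(bits: u7) struct { max: u64, min: i64 } {
    // u65 gives headroom for a 1 in bit 64, so bits == 64 is fine.
    const max = @as(u64, @intCast((@as(u65, 1) << (bits - 1)) - 1));
    // u63 makes the value provably in i64 range before negating.
    const min = (-@as(i64, @intCast(@as(u63, @intCast(max))))) - 1;
    return .{ .max = max, .min = min };
}

test "saturation bounds for i8 and i64" {
    try std.testing.expectEqual(@as(u64, 127), satBounds(8).max);
    try std.testing.expectEqual(@as(i64, -128), satBounds(8).min);
    try std.testing.expectEqual(@as(u64, std.math.maxInt(i64)), satBounds(64).max);
    try std.testing.expectEqual(@as(i64, std.math.minInt(i64)), satBounds(64).min);
}
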
@@ -6715,11 +6715,11 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
64 => blk: {
if (!is_signed) {
- try func.addImm64(@bitCast(u64, @as(i64, -1)));
+ try func.addImm64(@as(u64, @bitCast(@as(i64, -1))));
break :blk;
}
- try func.addImm64(@bitCast(u64, @as(i64, std.math.minInt(i64))));
- try func.addImm64(@bitCast(u64, @as(i64, std.math.maxInt(i64))));
+ try func.addImm64(@as(u64, @bitCast(@as(i64, std.math.minInt(i64)))));
+ try func.addImm64(@as(u64, @bitCast(@as(i64, std.math.maxInt(i64)))));
_ = try func.cmp(lhs, .{ .imm64 = 0 }, ty, .lt);
try func.addTag(.select);
},
@@ -6759,12 +6759,12 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
64 => blk: {
if (!is_signed) {
- try func.addImm64(@bitCast(u64, @as(i64, -1)));
+ try func.addImm64(@as(u64, @bitCast(@as(i64, -1))));
break :blk;
}
- try func.addImm64(@bitCast(u64, @as(i64, std.math.minInt(i64))));
- try func.addImm64(@bitCast(u64, @as(i64, std.math.maxInt(i64))));
+ try func.addImm64(@as(u64, @bitCast(@as(i64, std.math.minInt(i64)))));
+ try func.addImm64(@as(u64, @bitCast(@as(i64, std.math.maxInt(i64)))));
_ = try func.cmp(shl_res, .{ .imm64 = 0 }, ty, .lt);
try func.addTag(.select);
},
@@ -6894,7 +6894,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
// TODO: Make switch implementation generic so we can use a jump table for this when the tags are not sparse.
    // generate an if-else chain for each tag value and its name constant.
for (enum_ty.enumFields(mod), 0..) |tag_name_ip, field_index_usize| {
- const field_index = @intCast(u32, field_index_usize);
+ const field_index = @as(u32, @intCast(field_index_usize));
const tag_name = mod.intern_pool.stringToSlice(tag_name_ip);
// for each tag name, create an unnamed const,
// and then get a pointer to its value.
@@ -6953,7 +6953,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
try writer.writeByte(std.wasm.opcode(.i32_const));
try relocs.append(.{
.relocation_type = .R_WASM_MEMORY_ADDR_LEB,
- .offset = @intCast(u32, body_list.items.len),
+ .offset = @as(u32, @intCast(body_list.items.len)),
.index = tag_sym_index,
});
try writer.writeAll(&[_]u8{0} ** 5); // will be relocated
@@ -6965,7 +6965,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
// store length
try writer.writeByte(std.wasm.opcode(.i32_const));
- try leb.writeULEB128(writer, @intCast(u32, tag_name.len));
+ try leb.writeULEB128(writer, @as(u32, @intCast(tag_name.len)));
try writer.writeByte(std.wasm.opcode(.i32_store));
try leb.writeULEB128(writer, encoded_alignment);
try leb.writeULEB128(writer, @as(u32, 4));
@@ -6974,7 +6974,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
try writer.writeByte(std.wasm.opcode(.i64_const));
try relocs.append(.{
.relocation_type = .R_WASM_MEMORY_ADDR_LEB64,
- .offset = @intCast(u32, body_list.items.len),
+ .offset = @as(u32, @intCast(body_list.items.len)),
.index = tag_sym_index,
});
try writer.writeAll(&[_]u8{0} ** 10); // will be relocated
@@ -6986,7 +6986,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
// store length
try writer.writeByte(std.wasm.opcode(.i64_const));
- try leb.writeULEB128(writer, @intCast(u64, tag_name.len));
+ try leb.writeULEB128(writer, @as(u64, @intCast(tag_name.len)));
try writer.writeByte(std.wasm.opcode(.i64_store));
try leb.writeULEB128(writer, encoded_alignment);
try leb.writeULEB128(writer, @as(u32, 8));
@@ -7026,7 +7026,7 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var lowest: ?u32 = null;
var highest: ?u32 = null;
for (names) |name| {
- const err_int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?);
+ const err_int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(name).?));
if (lowest) |*l| {
if (err_int < l.*) {
l.* = err_int;
@@ -7054,11 +7054,11 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// lower operand to determine jump table target
try func.emitWValue(operand);
- try func.addImm32(@intCast(i32, lowest.?));
+ try func.addImm32(@as(i32, @intCast(lowest.?)));
try func.addTag(.i32_sub);
    // Account for the default branch, so always add '1'
- const depth = @intCast(u32, highest.? - lowest.? + 1);
+ const depth = @as(u32, @intCast(highest.? - lowest.? + 1));
const jump_table: Mir.JumpTable = .{ .length = depth };
const table_extra_index = try func.addExtra(jump_table);
try func.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } });
@@ -7155,7 +7155,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addTag(.i32_and);
const and_result = try WValue.toLocal(.stack, func, Type.bool);
const result_ptr = try func.allocStack(result_ty);
- try func.store(result_ptr, and_result, Type.bool, @intCast(u32, ty.abiSize(mod)));
+ try func.store(result_ptr, and_result, Type.bool, @as(u32, @intCast(ty.abiSize(mod))));
try func.store(result_ptr, ptr_val, ty, 0);
break :val result_ptr;
} else val: {
@@ -7221,13 +7221,13 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(ptr);
try func.emitWValue(value);
if (op == .Nand) {
- const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?;
+ const wasm_bits = toWasmBits(@as(u16, @intCast(ty.bitSize(mod)))).?;
const and_res = try func.binOp(value, operand, ty, .@"and");
if (wasm_bits == 32)
try func.addImm32(-1)
else if (wasm_bits == 64)
- try func.addImm64(@bitCast(u64, @as(i64, -1)))
+ try func.addImm64(@as(u64, @bitCast(@as(i64, -1))))
else
return func.fail("TODO: `@atomicRmw` with operator `Nand` for types larger than 64 bits", .{});
_ = try func.binOp(and_res, .stack, ty, .xor);
@@ -7352,14 +7352,14 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.store(.stack, .stack, ty, ptr.offset());
},
.Nand => {
- const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?;
+ const wasm_bits = toWasmBits(@as(u16, @intCast(ty.bitSize(mod)))).?;
try func.emitWValue(ptr);
const and_res = try func.binOp(result, operand, ty, .@"and");
if (wasm_bits == 32)
try func.addImm32(-1)
else if (wasm_bits == 64)
- try func.addImm64(@bitCast(u64, @as(i64, -1)))
+ try func.addImm64(@as(u64, @bitCast(@as(i64, -1))))
else
return func.fail("TODO: `@atomicRmw` with operator `Nand` for types larger than 64 bits", .{});
_ = try func.binOp(and_res, .stack, ty, .xor);
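
Wasm has no native nand, so both sites above synthesize it as `(a & b) ^ ~0`, building the all-ones mask by bitcasting `-1` (the idiom the migration rewrites throughout this file). A quick standalone check of the identity:

const std = @import("std");

test "nand lowered as and + xor with all-ones" {
    const a: u64 = 0xF0F0_F0F0_F0F0_F0F0;
    const b: u64 = 0xFF00_FF00_FF00_FF00;
    // The all-ones mask, built the same way as the immediate above.
    const all_ones = @as(u64, @bitCast(@as(i64, -1)));
    try std.testing.expectEqual(~(a & b), (a & b) ^ all_ones);
}
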
diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig
index 3314f4d993..3b1911b895 100644
--- a/src/arch/wasm/Emit.zig
+++ b/src/arch/wasm/Emit.zig
@@ -45,7 +45,7 @@ pub fn emitMir(emit: *Emit) InnerError!void {
try emit.emitLocals();
for (mir_tags, 0..) |tag, index| {
- const inst = @intCast(u32, index);
+ const inst = @as(u32, @intCast(index));
switch (tag) {
// block instructions
.block => try emit.emitBlock(tag, inst),
@@ -247,7 +247,7 @@ pub fn emitMir(emit: *Emit) InnerError!void {
}
fn offset(self: Emit) u32 {
- return @intCast(u32, self.code.items.len);
+ return @as(u32, @intCast(self.code.items.len));
}
fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
@@ -260,7 +260,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
fn emitLocals(emit: *Emit) !void {
const writer = emit.code.writer();
- try leb128.writeULEB128(writer, @intCast(u32, emit.locals.len));
+ try leb128.writeULEB128(writer, @as(u32, @intCast(emit.locals.len)));
    // then emit each local: a count of one followed by its type
for (emit.locals) |local| {
try leb128.writeULEB128(writer, @as(u32, 1));
@@ -324,13 +324,13 @@ fn emitImm64(emit: *Emit, inst: Mir.Inst.Index) !void {
const extra_index = emit.mir.instructions.items(.data)[inst].payload;
const value = emit.mir.extraData(Mir.Imm64, extra_index);
try emit.code.append(std.wasm.opcode(.i64_const));
- try leb128.writeILEB128(emit.code.writer(), @bitCast(i64, value.data.toU64()));
+ try leb128.writeILEB128(emit.code.writer(), @as(i64, @bitCast(value.data.toU64())));
}
fn emitFloat32(emit: *Emit, inst: Mir.Inst.Index) !void {
const value: f32 = emit.mir.instructions.items(.data)[inst].float32;
try emit.code.append(std.wasm.opcode(.f32_const));
- try emit.code.writer().writeIntLittle(u32, @bitCast(u32, value));
+ try emit.code.writer().writeIntLittle(u32, @as(u32, @bitCast(value)));
}
fn emitFloat64(emit: *Emit, inst: Mir.Inst.Index) !void {
@@ -425,7 +425,7 @@ fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
.offset = mem_offset,
.index = mem.pointer,
.relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_LEB else .R_WASM_MEMORY_ADDR_LEB64,
- .addend = @intCast(i32, mem.offset),
+ .addend = @as(i32, @intCast(mem.offset)),
});
}
}
@@ -436,7 +436,7 @@ fn emitExtended(emit: *Emit, inst: Mir.Inst.Index) !void {
const writer = emit.code.writer();
try emit.code.append(std.wasm.opcode(.misc_prefix));
try leb128.writeULEB128(writer, opcode);
- switch (@enumFromInt(std.wasm.MiscOpcode, opcode)) {
+ switch (@as(std.wasm.MiscOpcode, @enumFromInt(opcode))) {
// bulk-memory opcodes
.data_drop => {
const segment = emit.mir.extra[extra_index + 1];
@@ -475,7 +475,7 @@ fn emitSimd(emit: *Emit, inst: Mir.Inst.Index) !void {
const writer = emit.code.writer();
try emit.code.append(std.wasm.opcode(.simd_prefix));
try leb128.writeULEB128(writer, opcode);
- switch (@enumFromInt(std.wasm.SimdOpcode, opcode)) {
+ switch (@as(std.wasm.SimdOpcode, @enumFromInt(opcode))) {
.v128_store,
.v128_load,
.v128_load8_splat,
@@ -507,7 +507,7 @@ fn emitSimd(emit: *Emit, inst: Mir.Inst.Index) !void {
.f64x2_extract_lane,
.f64x2_replace_lane,
=> {
- try writer.writeByte(@intCast(u8, emit.mir.extra[extra_index + 1]));
+ try writer.writeByte(@as(u8, @intCast(emit.mir.extra[extra_index + 1])));
},
.i8x16_splat,
.i16x8_splat,
@@ -526,7 +526,7 @@ fn emitAtomic(emit: *Emit, inst: Mir.Inst.Index) !void {
const writer = emit.code.writer();
try emit.code.append(std.wasm.opcode(.atomics_prefix));
try leb128.writeULEB128(writer, opcode);
- switch (@enumFromInt(std.wasm.AtomicsOpcode, opcode)) {
+ switch (@as(std.wasm.AtomicsOpcode, @enumFromInt(opcode))) {
.i32_atomic_load,
.i64_atomic_load,
.i32_atomic_load8_u,
@@ -623,7 +623,7 @@ fn emitDbgLine(emit: *Emit, inst: Mir.Inst.Index) !void {
fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) !void {
if (emit.dbg_output != .dwarf) return;
- const delta_line = @intCast(i32, line) - @intCast(i32, emit.prev_di_line);
+ const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line));
const delta_pc = emit.offset() - emit.prev_di_offset;
// TODO: This must emit a relocation to calculate the offset relative
// to the code section start.
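
The Emit.zig hunks apply the same rewrite to `@enumFromInt`: the enum type moves into an `@as` (or any other result-type context). A sketch with a toy stand-in for `std.wasm.SimdOpcode` (the real enum has many more tags):

const std = @import("std");

const Opcode = enum(u32) { v128_load = 0x00, v128_store = 0x0B };

test "enumFromInt infers the enum type" {
    const raw: u32 = 0x0B;

    // Old form: @enumFromInt(Opcode, raw)
    const tag = @as(Opcode, @enumFromInt(raw));
    try std.testing.expectEqual(Opcode.v128_store, tag);

    // A switch operand is itself a result-type context:
    switch (@as(Opcode, @enumFromInt(raw))) {
        .v128_store => {},
        .v128_load => unreachable,
    }
}
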
diff --git a/src/arch/wasm/Mir.zig b/src/arch/wasm/Mir.zig
index 6e93f0fb88..2d4f624b22 100644
--- a/src/arch/wasm/Mir.zig
+++ b/src/arch/wasm/Mir.zig
@@ -544,12 +544,12 @@ pub const Inst = struct {
/// From a given wasm opcode, returns a MIR tag.
pub fn fromOpcode(opcode: std.wasm.Opcode) Tag {
- return @enumFromInt(Tag, @intFromEnum(opcode)); // Given `Opcode` is not present as a tag for MIR yet
+ return @as(Tag, @enumFromInt(@intFromEnum(opcode))); // Given `Opcode` is not present as a tag for MIR yet
}
/// Returns a wasm opcode from a given MIR tag.
pub fn toOpcode(self: Tag) std.wasm.Opcode {
- return @enumFromInt(std.wasm.Opcode, @intFromEnum(self));
+ return @as(std.wasm.Opcode, @enumFromInt(@intFromEnum(self)));
}
};
@@ -621,8 +621,8 @@ pub const Imm64 = struct {
pub fn fromU64(imm: u64) Imm64 {
return .{
- .msb = @truncate(u32, imm >> 32),
- .lsb = @truncate(u32, imm),
+ .msb = @as(u32, @truncate(imm >> 32)),
+ .lsb = @as(u32, @truncate(imm)),
};
}
@@ -639,15 +639,15 @@ pub const Float64 = struct {
lsb: u32,
pub fn fromFloat64(float: f64) Float64 {
- const tmp = @bitCast(u64, float);
+ const tmp = @as(u64, @bitCast(float));
return .{
- .msb = @truncate(u32, tmp >> 32),
- .lsb = @truncate(u32, tmp),
+ .msb = @as(u32, @truncate(tmp >> 32)),
+ .lsb = @as(u32, @truncate(tmp)),
};
}
pub fn toF64(self: Float64) f64 {
- @bitCast(f64, self.toU64());
+        return @as(f64, @bitCast(self.toU64()));
}
pub fn toU64(self: Float64) u64 {
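
Imm64 and Float64 exist to squeeze a 64-bit payload into MIR's u32 extra array as two halves. A standalone analogue; the reassembly in `toU64` is an assumption here, since its body falls outside this hunk:

const std = @import("std");

const Split64 = struct {
    msb: u32,
    lsb: u32,

    fn fromU64(x: u64) Split64 {
        return .{
            .msb = @as(u32, @truncate(x >> 32)), // high half
            .lsb = @as(u32, @truncate(x)), // low half
        };
    }

    // Assumed reassembly; the original toU64 body is not shown here.
    fn toU64(self: Split64) u64 {
        return (@as(u64, self.msb) << 32) | self.lsb;
    }
};

test "splitting a 64-bit payload round-trips" {
    const bits = @as(u64, @bitCast(@as(f64, 3.14)));
    const split = Split64.fromU64(bits);
    try std.testing.expectEqual(bits, split.toU64());
}
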
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index edf84089b1..4993e3fe45 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -329,7 +329,7 @@ pub const MCValue = union(enum) {
.load_frame,
.reserved_frame,
=> unreachable, // not offsettable
- .immediate => |imm| .{ .immediate = @bitCast(u64, @bitCast(i64, imm) +% off) },
+ .immediate => |imm| .{ .immediate = @as(u64, @bitCast(@as(i64, @bitCast(imm)) +% off)) },
.register => |reg| .{ .register_offset = .{ .reg = reg, .off = off } },
.register_offset => |reg_off| .{
.register_offset = .{ .reg = reg_off.reg, .off = reg_off.off + off },
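
The `.immediate` arm above offsets an unsigned 64-bit immediate by a signed displacement: reinterpret as signed, add with wraparound (`+%`), and reinterpret back. A standalone sketch of the same arithmetic:

const std = @import("std");

fn offsetImmediate(imm: u64, off: i32) u64 {
    return @as(u64, @bitCast(@as(i64, @bitCast(imm)) +% off));
}

test "offsetImmediate wraps instead of trapping" {
    try std.testing.expectEqual(@as(u64, 8), offsetImmediate(16, -8));
    try std.testing.expectEqual(@as(u64, 0), offsetImmediate(std.math.maxInt(u64), 1));
}
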
@@ -360,7 +360,7 @@ pub const MCValue = union(enum) {
.lea_frame,
.reserved_frame,
=> unreachable,
- .memory => |addr| if (math.cast(i32, @bitCast(i64, addr))) |small_addr|
+ .memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr|
Memory.sib(ptr_size, .{ .base = .{ .reg = .ds }, .disp = small_addr })
else
Memory.moffs(.ds, addr),
@@ -606,7 +606,7 @@ const FrameAlloc = struct {
fn init(alloc_abi: struct { size: u64, alignment: u32 }) FrameAlloc {
assert(math.isPowerOfTwo(alloc_abi.alignment));
return .{
- .abi_size = @intCast(u31, alloc_abi.size),
+ .abi_size = @as(u31, @intCast(alloc_abi.size)),
.abi_align = math.log2_int(u32, alloc_abi.alignment),
.ref_count = 0,
};
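
FrameAlloc stores its alignment as a log2 exponent (`math.log2_int`), and later hunks in this diff recover the byte alignment with a shift (see getFrameAddrAlignment). A round-trip sketch, assuming a power-of-two alignment as the assert above requires:

const std = @import("std");

test "alignment stored as log2 round-trips" {
    const alignment: u32 = 16; // must be a power of two
    const log2a = std.math.log2_int(u32, alignment);
    try std.testing.expectEqual(@as(u32, 4), @as(u32, log2a));
    try std.testing.expectEqual(alignment, @as(u32, 1) << log2a);
}
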
@@ -694,7 +694,7 @@ pub fn generate(
FrameAlloc.init(.{
.size = 0,
.alignment = if (mod.align_stack_fns.get(module_fn_index)) |set_align_stack|
- @intCast(u32, set_align_stack.alignment.toByteUnitsOptional().?)
+ @as(u32, @intCast(set_align_stack.alignment.toByteUnitsOptional().?))
else
1,
}),
@@ -979,7 +979,7 @@ fn fmtTracking(self: *Self) std.fmt.Formatter(formatTracking) {
fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
const gpa = self.gpa;
try self.mir_instructions.ensureUnusedCapacity(gpa, 1);
- const result_index = @intCast(Mir.Inst.Index, self.mir_instructions.len);
+ const result_index = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len));
self.mir_instructions.appendAssumeCapacity(inst);
if (inst.tag != .pseudo or switch (inst.ops) {
else => true,
@@ -1000,11 +1000,11 @@ fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 {
fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
const fields = std.meta.fields(@TypeOf(extra));
- const result = @intCast(u32, self.mir_extra.items.len);
+ const result = @as(u32, @intCast(self.mir_extra.items.len));
inline for (fields) |field| {
self.mir_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
- i32 => @bitCast(u32, @field(extra, field.name)),
+ i32 => @as(u32, @bitCast(@field(extra, field.name))),
else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)),
});
}
@@ -1214,8 +1214,8 @@ fn asmImmediate(self: *Self, tag: Mir.Inst.FixedTag, imm: Immediate) !void {
.data = .{ .i = .{
.fixes = tag[0],
.i = switch (imm) {
- .signed => |s| @bitCast(u32, s),
- .unsigned => |u| @intCast(u32, u),
+ .signed => |s| @as(u32, @bitCast(s)),
+ .unsigned => |u| @as(u32, @intCast(u)),
},
} },
});
@@ -1246,8 +1246,8 @@ fn asmRegisterImmediate(self: *Self, tag: Mir.Inst.FixedTag, reg: Register, imm:
.fixes = tag[0],
.r1 = reg,
.i = switch (imm) {
- .signed => |s| @bitCast(u32, s),
- .unsigned => |u| @intCast(u32, u),
+ .signed => |s| @as(u32, @bitCast(s)),
+ .unsigned => |u| @as(u32, @intCast(u)),
},
} },
.ri64 => .{ .rx = .{
@@ -1316,7 +1316,7 @@ fn asmRegisterRegisterRegisterImmediate(
.r1 = reg1,
.r2 = reg2,
.r3 = reg3,
- .i = @intCast(u8, imm.unsigned),
+ .i = @as(u8, @intCast(imm.unsigned)),
} },
});
}
@@ -1339,8 +1339,8 @@ fn asmRegisterRegisterImmediate(
.r1 = reg1,
.r2 = reg2,
.i = switch (imm) {
- .signed => |s| @bitCast(u32, s),
- .unsigned => |u| @intCast(u32, u),
+ .signed => |s| @as(u32, @bitCast(s)),
+ .unsigned => |u| @as(u32, @intCast(u)),
},
} },
});
@@ -1429,7 +1429,7 @@ fn asmRegisterMemoryImmediate(
.data = .{ .rix = .{
.fixes = tag[0],
.r1 = reg,
- .i = @intCast(u8, imm.unsigned),
+ .i = @as(u8, @intCast(imm.unsigned)),
.payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
@@ -1458,7 +1458,7 @@ fn asmRegisterRegisterMemoryImmediate(
.fixes = tag[0],
.r1 = reg1,
.r2 = reg2,
- .i = @intCast(u8, imm.unsigned),
+ .i = @as(u8, @intCast(imm.unsigned)),
.payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
@@ -1490,8 +1490,8 @@ fn asmMemoryRegister(self: *Self, tag: Mir.Inst.FixedTag, m: Memory, reg: Regist
fn asmMemoryImmediate(self: *Self, tag: Mir.Inst.FixedTag, m: Memory, imm: Immediate) !void {
const payload = try self.addExtra(Mir.Imm32{ .imm = switch (imm) {
- .signed => |s| @bitCast(u32, s),
- .unsigned => |u| @intCast(u32, u),
+ .signed => |s| @as(u32, @bitCast(s)),
+ .unsigned => |u| @as(u32, @intCast(u)),
} });
assert(payload + 1 == switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
@@ -1562,7 +1562,7 @@ fn asmMemoryRegisterImmediate(
.data = .{ .rix = .{
.fixes = tag[0],
.r1 = reg,
- .i = @intCast(u8, imm.unsigned),
+ .i = @as(u8, @intCast(imm.unsigned)),
.payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
@@ -1617,7 +1617,7 @@ fn gen(self: *Self) InnerError!void {
// Eliding the reloc will cause a miscompilation in this case.
for (self.exitlude_jump_relocs.items) |jmp_reloc| {
self.mir_instructions.items(.data)[jmp_reloc].inst.inst =
- @intCast(u32, self.mir_instructions.len);
+ @as(u32, @intCast(self.mir_instructions.len));
}
try self.asmPseudo(.pseudo_dbg_epilogue_begin_none);
@@ -1739,7 +1739,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
for (body) |inst| {
if (builtin.mode == .Debug) {
- const mir_inst = @intCast(Mir.Inst.Index, self.mir_instructions.len);
+ const mir_inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len));
try self.mir_to_air_map.put(self.gpa, mir_inst, inst);
}
@@ -2032,7 +2032,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
var data_off: i32 = 0;
for (exitlude_jump_relocs, 0..) |*exitlude_jump_reloc, index_usize| {
- const index = @intCast(u32, index_usize);
+ const index = @as(u32, @intCast(index_usize));
const tag_name = mod.intern_pool.stringToSlice(enum_ty.enumFields(mod)[index_usize]);
const tag_val = try mod.enumValueFieldIndex(enum_ty, index);
const tag_mcv = try self.genTypedValue(.{ .ty = enum_ty, .val = tag_val });
@@ -2050,7 +2050,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
exitlude_jump_reloc.* = try self.asmJmpReloc(undefined);
try self.performReloc(skip_reloc);
- data_off += @intCast(i32, tag_name.len + 1);
+ data_off += @as(i32, @intCast(tag_name.len + 1));
}
try self.airTrap();
@@ -2126,7 +2126,7 @@ fn finishAirResult(self: *Self, inst: Air.Inst.Index, result: MCValue) void {
fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void {
var tomb_bits = self.liveness.getTombBits(inst);
for (operands) |op| {
- const dies = @truncate(u1, tomb_bits) != 0;
+ const dies = @as(u1, @truncate(tomb_bits)) != 0;
tomb_bits >>= 1;
if (!dies) continue;
self.processDeath(Air.refToIndexAllowNone(op) orelse continue);
@@ -2167,7 +2167,7 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
const frame_offset = self.frame_locs.items(.disp);
for (stack_frame_order, FrameIndex.named_count..) |*frame_order, frame_index|
- frame_order.* = @enumFromInt(FrameIndex, frame_index);
+ frame_order.* = @as(FrameIndex, @enumFromInt(frame_index));
{
const SortContext = struct {
frame_align: @TypeOf(frame_align),
@@ -2195,7 +2195,7 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
}
}
- var rbp_offset = @intCast(i32, save_reg_list.count() * 8);
+ var rbp_offset = @as(i32, @intCast(save_reg_list.count() * 8));
self.setFrameLoc(.base_ptr, .rbp, &rbp_offset, false);
self.setFrameLoc(.ret_addr, .rbp, &rbp_offset, false);
self.setFrameLoc(.args_frame, .rbp, &rbp_offset, false);
@@ -2210,22 +2210,22 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
rsp_offset = mem.alignForward(i32, rsp_offset, @as(i32, 1) << needed_align);
rsp_offset -= stack_frame_align_offset;
frame_size[@intFromEnum(FrameIndex.call_frame)] =
- @intCast(u31, rsp_offset - frame_offset[@intFromEnum(FrameIndex.stack_frame)]);
+ @as(u31, @intCast(rsp_offset - frame_offset[@intFromEnum(FrameIndex.stack_frame)]));
return .{
.stack_mask = @as(u32, math.maxInt(u32)) << (if (need_align_stack) needed_align else 0),
- .stack_adjust = @intCast(u32, rsp_offset - frame_offset[@intFromEnum(FrameIndex.call_frame)]),
+ .stack_adjust = @as(u32, @intCast(rsp_offset - frame_offset[@intFromEnum(FrameIndex.call_frame)])),
.save_reg_list = save_reg_list,
};
}
fn getFrameAddrAlignment(self: *Self, frame_addr: FrameAddr) u32 {
const alloc_align = @as(u32, 1) << self.frame_allocs.get(@intFromEnum(frame_addr.index)).abi_align;
- return @min(alloc_align, @bitCast(u32, frame_addr.off) & (alloc_align - 1));
+ return @min(alloc_align, @as(u32, @bitCast(frame_addr.off)) & (alloc_align - 1));
}
fn getFrameAddrSize(self: *Self, frame_addr: FrameAddr) u32 {
- return self.frame_allocs.get(@intFromEnum(frame_addr.index)).abi_size - @intCast(u31, frame_addr.off);
+ return self.frame_allocs.get(@intFromEnum(frame_addr.index)).abi_size - @as(u31, @intCast(frame_addr.off));
}
fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex {
@@ -2245,7 +2245,7 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex {
_ = self.free_frame_indices.swapRemoveAt(free_i);
return frame_index;
}
- const frame_index = @enumFromInt(FrameIndex, self.frame_allocs.len);
+ const frame_index = @as(FrameIndex, @enumFromInt(self.frame_allocs.len));
try self.frame_allocs.append(self.gpa, alloc);
return frame_index;
}
@@ -2321,7 +2321,7 @@ const State = struct {
fn initRetroactiveState(self: *Self) State {
var state: State = undefined;
- state.inst_tracking_len = @intCast(u32, self.inst_tracking.count());
+ state.inst_tracking_len = @as(u32, @intCast(self.inst_tracking.count()));
state.scope_generation = self.scope_generation;
return state;
}
@@ -2393,7 +2393,7 @@ fn restoreState(self: *Self, state: State, deaths: []const Air.Inst.Index, compt
}
{
const reg = RegisterManager.regAtTrackedIndex(
- @intCast(RegisterManager.RegisterBitSet.ShiftInt, index),
+ @as(RegisterManager.RegisterBitSet.ShiftInt, @intCast(index)),
);
self.register_manager.freeReg(reg);
self.register_manager.getRegAssumeFree(reg, target_maybe_inst);
@@ -2628,7 +2628,7 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
const dst_ty = self.typeOfIndex(inst);
const dst_int_info = dst_ty.intInfo(mod);
- const abi_size = @intCast(u32, dst_ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(dst_ty.abiSize(mod)));
const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty;
const extend = switch (src_int_info.signedness) {
@@ -2706,9 +2706,9 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const dst_ty = self.typeOfIndex(inst);
- const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
+ const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod)));
const src_ty = self.typeOf(ty_op.operand);
- const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
+ const src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod)));
const result = result: {
const src_mcv = try self.resolveInst(ty_op.operand);
@@ -2753,13 +2753,13 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
});
const elem_ty = src_ty.childType(mod);
- const mask_val = try mod.intValue(elem_ty, @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - dst_info.bits));
+ const mask_val = try mod.intValue(elem_ty, @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - dst_info.bits)));
const splat_ty = try mod.vectorType(.{
- .len = @intCast(u32, @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)),
+ .len = @as(u32, @intCast(@divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits))),
.child = elem_ty.ip_index,
});
- const splat_abi_size = @intCast(u32, splat_ty.abiSize(mod));
+ const splat_abi_size = @as(u32, @intCast(splat_ty.abiSize(mod)));
const splat_val = try mod.intern(.{ .aggregate = .{
.ty = splat_ty.ip_index,
@@ -2834,7 +2834,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(i32, ptr_ty.abiSize(mod)),
+ @as(i32, @intCast(ptr_ty.abiSize(mod))),
len_ty,
len,
);
@@ -2875,7 +2875,7 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
const src_val = air_data[inst].interned.toValue();
var space: Value.BigIntSpace = undefined;
const src_int = src_val.toBigInt(&space, mod);
- return @intCast(u16, src_int.bitCountTwosComp()) +
+ return @as(u16, @intCast(src_int.bitCountTwosComp())) +
@intFromBool(src_int.positive and dst_info.signedness == .signed);
},
.intcast => {
@@ -2964,7 +2964,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
try self.genSetReg(limit_reg, ty, dst_mcv);
try self.genShiftBinOpMir(.{ ._r, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{
- .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1,
+ .immediate = (@as(u64, 1) << @as(u6, @intCast(reg_bits - 1))) - 1,
});
if (reg_extra_bits > 0) {
const shifted_rhs_reg = try self.copyToTmpRegister(ty, rhs_mcv);
@@ -2983,7 +2983,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
break :cc .o;
} else cc: {
try self.genSetReg(limit_reg, ty, .{
- .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - ty.bitSize(mod)),
+ .immediate = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - ty.bitSize(mod))),
});
try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, rhs_mcv);
@@ -2994,7 +2994,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
break :cc .c;
};
- const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2);
+ const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2);
try self.asmCmovccRegisterRegister(
registerAlias(dst_reg, cmov_abi_size),
registerAlias(limit_reg, cmov_abi_size),
@@ -3043,7 +3043,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
try self.genSetReg(limit_reg, ty, dst_mcv);
try self.genShiftBinOpMir(.{ ._r, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{
- .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1,
+ .immediate = (@as(u64, 1) << @as(u6, @intCast(reg_bits - 1))) - 1,
});
if (reg_extra_bits > 0) {
const shifted_rhs_reg = try self.copyToTmpRegister(ty, rhs_mcv);
@@ -3066,7 +3066,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
break :cc .c;
};
- const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2);
+ const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2);
try self.asmCmovccRegisterRegister(
registerAlias(dst_reg, cmov_abi_size),
registerAlias(limit_reg, cmov_abi_size),
@@ -3114,18 +3114,18 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, rhs_mcv);
try self.genShiftBinOpMir(.{ ._, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{
- .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1,
+ .immediate = (@as(u64, 1) << @as(u6, @intCast(reg_bits - 1))) - 1,
});
break :cc .o;
} else cc: {
try self.genSetReg(limit_reg, ty, .{
- .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - reg_bits),
+ .immediate = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - reg_bits)),
});
break :cc .c;
};
const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, ty, lhs_mcv, rhs_mcv);
- const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2);
+ const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2);
try self.asmCmovccRegisterRegister(
registerAlias(dst_mcv.register, cmov_abi_size),
registerAlias(limit_reg, cmov_abi_size),
@@ -3172,13 +3172,13 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod));
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(i32, tuple_ty.structFieldOffset(1, mod)),
+ @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))),
Type.u1,
.{ .eflags = cc },
);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(i32, tuple_ty.structFieldOffset(0, mod)),
+ @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod))),
ty,
partial_mcv,
);
@@ -3245,13 +3245,13 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod));
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(i32, tuple_ty.structFieldOffset(1, mod)),
+ @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))),
tuple_ty.structFieldType(1, mod),
.{ .eflags = cc },
);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(i32, tuple_ty.structFieldOffset(0, mod)),
+ @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod))),
tuple_ty.structFieldType(0, mod),
partial_mcv,
);
@@ -3319,7 +3319,7 @@ fn genSetFrameTruncatedOverflowCompare(
);
}
- const payload_off = @intCast(i32, tuple_ty.structFieldOffset(0, mod));
+ const payload_off = @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod)));
if (hi_limb_off > 0) try self.genSetMem(.{ .frame = frame_index }, payload_off, rest_ty, src_mcv);
try self.genSetMem(
.{ .frame = frame_index },
@@ -3329,7 +3329,7 @@ fn genSetFrameTruncatedOverflowCompare(
);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(i32, tuple_ty.structFieldOffset(1, mod)),
+ @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))),
tuple_ty.structFieldType(1, mod),
if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne },
);
@@ -3386,13 +3386,13 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
if (dst_info.bits >= lhs_active_bits + rhs_active_bits) {
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(i32, tuple_ty.structFieldOffset(0, mod)),
+ @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod))),
tuple_ty.structFieldType(0, mod),
partial_mcv,
);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(i32, tuple_ty.structFieldOffset(1, mod)),
+ @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))),
tuple_ty.structFieldType(1, mod),
.{ .immediate = 0 }, // cc being set is impossible
);
@@ -3416,7 +3416,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
/// Quotient is saved in .rax and remainder in .rdx.
fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue, rhs: MCValue) !void {
const mod = self.bin_file.options.module.?;
- const abi_size = @intCast(u32, ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
if (abi_size > 8) {
return self.fail("TODO implement genIntMulDivOpMir for ABI size larger than 8", .{});
}
@@ -3456,7 +3456,7 @@ fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue
/// Clobbers .rax and .rdx registers.
fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCValue {
const mod = self.bin_file.options.module.?;
- const abi_size = @intCast(u32, ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
const int_info = ty.intInfo(mod);
const dividend: Register = switch (lhs) {
.register => |reg| reg,
@@ -3595,7 +3595,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
const pl_ty = dst_ty.childType(mod);
- const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod));
+ const pl_abi_size = @as(i32, @intCast(pl_ty.abiSize(mod)));
try self.genSetMem(.{ .reg = dst_mcv.getReg().? }, pl_abi_size, Type.bool, .{ .immediate = 1 });
break :result if (self.liveness.isUnused(inst)) .unreach else dst_mcv;
};
@@ -3628,7 +3628,7 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const result = try self.copyToRegisterWithInstTracking(inst, err_union_ty, operand);
if (err_off > 0) {
- const shift = @intCast(u6, err_off * 8);
+ const shift = @as(u6, @intCast(err_off * 8));
try self.genShiftBinOpMir(
.{ ._r, .sh },
err_union_ty,
@@ -3642,7 +3642,7 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
},
.load_frame => |frame_addr| break :result .{ .load_frame = .{
.index = frame_addr.index,
- .off = frame_addr.off + @intCast(i32, err_off),
+ .off = frame_addr.off + @as(i32, @intCast(err_off)),
} },
else => return self.fail("TODO implement unwrap_err_err for {}", .{operand}),
}
@@ -3674,7 +3674,7 @@ fn genUnwrapErrorUnionPayloadMir(
switch (err_union) {
.load_frame => |frame_addr| break :result .{ .load_frame = .{
.index = frame_addr.index,
- .off = frame_addr.off + @intCast(i32, payload_off),
+ .off = frame_addr.off + @as(i32, @intCast(payload_off)),
} },
.register => |reg| {
// TODO reuse operand
@@ -3686,7 +3686,7 @@ fn genUnwrapErrorUnionPayloadMir(
else
.{ .register = try self.copyToTmpRegister(err_union_ty, err_union) };
if (payload_off > 0) {
- const shift = @intCast(u6, payload_off * 8);
+ const shift = @as(u6, @intCast(payload_off * 8));
try self.genShiftBinOpMir(
.{ ._r, .sh },
err_union_ty,
@@ -3727,8 +3727,8 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
const eu_ty = src_ty.childType(mod);
const pl_ty = eu_ty.errorUnionPayload(mod);
const err_ty = eu_ty.errorUnionSet(mod);
- const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
- const err_abi_size = @intCast(u32, err_ty.abiSize(mod));
+ const err_off = @as(i32, @intCast(errUnionErrorOffset(pl_ty, mod)));
+ const err_abi_size = @as(u32, @intCast(err_ty.abiSize(mod)));
try self.asmRegisterMemory(
.{ ._, .mov },
registerAlias(dst_reg, err_abi_size),
@@ -3766,8 +3766,8 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
const eu_ty = src_ty.childType(mod);
const pl_ty = eu_ty.errorUnionPayload(mod);
- const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
- const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
+ const pl_off = @as(i32, @intCast(errUnionPayloadOffset(pl_ty, mod)));
+ const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod)));
try self.asmRegisterMemory(
.{ ._, .lea },
registerAlias(dst_reg, dst_abi_size),
@@ -3793,8 +3793,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
const eu_ty = src_ty.childType(mod);
const pl_ty = eu_ty.errorUnionPayload(mod);
const err_ty = eu_ty.errorUnionSet(mod);
- const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
- const err_abi_size = @intCast(u32, err_ty.abiSize(mod));
+ const err_off = @as(i32, @intCast(errUnionErrorOffset(pl_ty, mod)));
+ const err_abi_size = @as(u32, @intCast(err_ty.abiSize(mod)));
try self.asmMemoryImmediate(
.{ ._, .mov },
Memory.sib(Memory.PtrSize.fromSize(err_abi_size), .{
@@ -3814,8 +3814,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
- const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
+ const pl_off = @as(i32, @intCast(errUnionPayloadOffset(pl_ty, mod)));
+ const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod)));
try self.asmRegisterMemory(
.{ ._, .lea },
registerAlias(dst_reg, dst_abi_size),
@@ -3864,14 +3864,14 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
try self.genCopy(pl_ty, opt_mcv, pl_mcv);
if (!same_repr) {
- const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod));
+ const pl_abi_size = @as(i32, @intCast(pl_ty.abiSize(mod)));
switch (opt_mcv) {
else => unreachable,
.register => |opt_reg| try self.asmRegisterImmediate(
.{ ._s, .bt },
opt_reg,
- Immediate.u(@intCast(u6, pl_abi_size * 8)),
+ Immediate.u(@as(u6, @intCast(pl_abi_size * 8))),
),
.load_frame => |frame_addr| try self.asmMemoryImmediate(
@@ -3903,8 +3903,8 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .{ .immediate = 0 };
const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod));
- const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
- const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
+ const pl_off = @as(i32, @intCast(errUnionPayloadOffset(pl_ty, mod)));
+ const err_off = @as(i32, @intCast(errUnionErrorOffset(pl_ty, mod)));
try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand);
try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 });
break :result .{ .load_frame = .{ .index = frame_index } };
@@ -3925,8 +3925,8 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result try self.resolveInst(ty_op.operand);
const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod));
- const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
- const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
+ const pl_off = @as(i32, @intCast(errUnionPayloadOffset(pl_ty, mod)));
+ const err_off = @as(i32, @intCast(errUnionErrorOffset(pl_ty, mod)));
try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef);
const operand = try self.resolveInst(ty_op.operand);
try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand);
@@ -3988,7 +3988,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
+ const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod)));
try self.asmRegisterMemory(
.{ ._, .lea },
registerAlias(dst_reg, dst_abi_size),
@@ -4165,7 +4165,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
// additional `mov` is needed at the end to get the actual value
const elem_ty = ptr_ty.elemType2(mod);
- const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod));
+ const elem_abi_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
const index_ty = self.typeOf(bin_op.rhs);
const index_mcv = try self.resolveInst(bin_op.rhs);
const index_lock = switch (index_mcv) {
@@ -4305,7 +4305,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
.load_frame => |frame_addr| {
if (tag_abi_size <= 8) {
const off: i32 = if (layout.tag_align < layout.payload_align)
- @intCast(i32, layout.payload_size)
+ @as(i32, @intCast(layout.payload_size))
else
0;
break :blk try self.copyToRegisterWithInstTracking(inst, tag_ty, .{
@@ -4317,13 +4317,13 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
},
.register => {
const shift: u6 = if (layout.tag_align < layout.payload_align)
- @intCast(u6, layout.payload_size * 8)
+ @as(u6, @intCast(layout.payload_size * 8))
else
0;
const result = try self.copyToRegisterWithInstTracking(inst, union_ty, operand);
try self.genShiftBinOpMir(.{ ._r, .sh }, Type.usize, result, .{ .immediate = shift });
break :blk MCValue{
- .register = registerAlias(result.register, @intCast(u32, layout.tag_size)),
+ .register = registerAlias(result.register, @as(u32, @intCast(layout.tag_size))),
};
},
else => return self.fail("TODO implement get_union_tag for {}", .{operand}),
@@ -4420,7 +4420,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
try self.genBinOpMir(.{ ._, .bsr }, Type.u16, dst_mcv, .{ .register = wide_reg });
} else try self.genBinOpMir(.{ ._, .bsr }, src_ty, dst_mcv, mat_src_mcv);
- const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2);
+ const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(mod))), 2);
try self.asmCmovccRegisterRegister(
registerAlias(dst_reg, cmov_abi_size),
registerAlias(imm_reg, cmov_abi_size),
@@ -4430,7 +4430,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
try self.genBinOpMir(.{ ._, .xor }, dst_ty, dst_mcv, .{ .immediate = src_bits - 1 });
} else {
const imm_reg = try self.copyToTmpRegister(dst_ty, .{
- .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - self.regBitSize(dst_ty)),
+ .immediate = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - self.regBitSize(dst_ty))),
});
const imm_lock = self.register_manager.lockRegAssumeUnused(imm_reg);
defer self.register_manager.unlockReg(imm_lock);
@@ -4447,7 +4447,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
.{ .register = wide_reg },
);
- const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2);
+ const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(mod))), 2);
try self.asmCmovccRegisterRegister(
registerAlias(imm_reg, cmov_abi_size),
registerAlias(dst_reg, cmov_abi_size),
@@ -4501,8 +4501,8 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
.{ ._, .@"or" },
wide_ty,
tmp_mcv,
- .{ .immediate = (@as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - extra_bits)) <<
- @intCast(u6, src_bits) },
+ .{ .immediate = (@as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - extra_bits))) <<
+ @as(u6, @intCast(src_bits)) },
);
break :masked tmp_mcv;
} else mat_src_mcv;
@@ -4519,7 +4519,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
.{ ._, .@"or" },
Type.u64,
dst_mcv,
- .{ .immediate = @as(u64, math.maxInt(u64)) << @intCast(u6, src_bits - 64) },
+ .{ .immediate = @as(u64, math.maxInt(u64)) << @as(u6, @intCast(src_bits - 64)) },
);
break :masked dst_mcv;
} else mat_src_mcv.address().offset(8).deref();
@@ -4547,7 +4547,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
try self.genBinOpMir(.{ ._, .bsf }, Type.u16, dst_mcv, .{ .register = wide_reg });
} else try self.genBinOpMir(.{ ._, .bsf }, src_ty, dst_mcv, mat_src_mcv);
- const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2);
+ const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(mod))), 2);
try self.asmCmovccRegisterRegister(
registerAlias(dst_reg, cmov_abi_size),
registerAlias(width_reg, cmov_abi_size),
@@ -4563,7 +4563,7 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = result: {
const src_ty = self.typeOf(ty_op.operand);
- const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
+ const src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod)));
const src_mcv = try self.resolveInst(ty_op.operand);
if (self.hasFeature(.popcnt)) {
@@ -4588,7 +4588,7 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void {
break :result dst_mcv;
}
- const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - src_abi_size * 8);
+ const mask = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - src_abi_size * 8));
const imm_0_1 = Immediate.u(mask / 0b1_1);
const imm_00_11 = Immediate.u(mask / 0b01_01);
const imm_0000_1111 = Immediate.u(mask / 0b0001_0001);
@@ -4754,7 +4754,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const src_ty = self.typeOf(ty_op.operand);
- const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
+ const src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod)));
const src_mcv = try self.resolveInst(ty_op.operand);
const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, false);
@@ -4774,7 +4774,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
else
undefined;
- const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - src_abi_size * 8);
+ const mask = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - src_abi_size * 8));
const imm_0000_1111 = Immediate.u(mask / 0b0001_0001);
const imm_00_11 = Immediate.u(mask / 0b01_01);
const imm_0_1 = Immediate.u(mask / 0b1_1);
@@ -5017,7 +5017,7 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4
})) |tag| tag else return self.fail("TODO implement genRound for {}", .{
ty.fmt(self.bin_file.options.module.?),
});
- const abi_size = @intCast(u32, ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
const dst_alias = registerAlias(dst_reg, abi_size);
switch (mir_tag[0]) {
.v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate(
@@ -5057,7 +5057,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
const mod = self.bin_file.options.module.?;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const ty = self.typeOf(un_op);
- const abi_size = @intCast(u32, ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
const src_mcv = try self.resolveInst(un_op);
const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv))
@@ -5123,7 +5123,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
.{ .v_ps, .cvtph2 },
wide_reg,
src_mcv.mem(Memory.PtrSize.fromSize(
- @intCast(u32, @divExact(wide_reg.bitSize(), 16)),
+ @as(u32, @intCast(@divExact(wide_reg.bitSize(), 16))),
)),
) else try self.asmRegisterRegister(
.{ .v_ps, .cvtph2 },
@@ -5255,10 +5255,10 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn
const ptr_info = ptr_ty.ptrInfo(mod);
const val_ty = ptr_info.child.toType();
- const val_abi_size = @intCast(u32, val_ty.abiSize(mod));
+ const val_abi_size = @as(u32, @intCast(val_ty.abiSize(mod)));
const limb_abi_size: u32 = @min(val_abi_size, 8);
const limb_abi_bits = limb_abi_size * 8;
- const val_byte_off = @intCast(i32, ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size);
+ const val_byte_off = @as(i32, @intCast(ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size));
const val_bit_off = ptr_info.packed_offset.bit_offset % limb_abi_bits;
const val_extra_bits = self.regExtraBits(val_ty);
@@ -5404,7 +5404,7 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In
const limb_abi_bits = limb_abi_size * 8;
const src_bit_size = src_ty.bitSize(mod);
- const src_byte_off = @intCast(i32, ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size);
+ const src_byte_off = @as(i32, @intCast(ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size));
const src_bit_off = ptr_info.packed_offset.bit_offset % limb_abi_bits;
const ptr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv);
@@ -5421,13 +5421,13 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In
.disp = src_byte_off + limb_i * limb_abi_bits,
});
- const part_mask = (@as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - part_bit_size)) <<
- @intCast(u6, part_bit_off);
+ const part_mask = (@as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - part_bit_size))) <<
+ @as(u6, @intCast(part_bit_off));
const part_mask_not = part_mask ^
- (@as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_abi_bits));
+ (@as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - limb_abi_bits)));
if (limb_abi_size <= 4) {
try self.asmMemoryImmediate(.{ ._, .@"and" }, limb_mem, Immediate.u(part_mask_not));
- } else if (math.cast(i32, @bitCast(i64, part_mask_not))) |small| {
+ } else if (math.cast(i32, @as(i64, @bitCast(part_mask_not)))) |small| {
try self.asmMemoryImmediate(.{ ._, .@"and" }, limb_mem, Immediate.s(small));
} else {
const part_mask_reg = try self.register_manager.allocReg(null, gp);
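(For reference: the part_mask expression rewritten in the hunk above builds an all-ones mask of part_bit_size bits starting at bit part_bit_off. A standalone sketch of that computation, with invented names and an added bounds assertion — illustrative only, not code from the commit:)

    const std = @import("std");

    /// All-ones mask covering `size` bits starting at bit `off`.
    /// Assumes 1 <= size <= 64 and off + size <= 64.
    fn bitRangeMask(off: u6, size: u7) u64 {
        std.debug.assert(size >= 1 and size <= 64);
        // Same shape as the diff's part_mask: shift ones down to `size`
        // bits, then up to the field's starting offset.
        const ones = @as(u64, std.math.maxInt(u64)) >> @as(u6, @intCast(64 - size));
        return ones << off;
    }

    test "bitRangeMask" {
        try std.testing.expectEqual(@as(u64, 0b0111_0000), bitRangeMask(4, 3));
    }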
@@ -5542,14 +5542,14 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32
const ptr_field_ty = self.typeOfIndex(inst);
const ptr_container_ty = self.typeOf(operand);
const container_ty = ptr_container_ty.childType(mod);
- const field_offset = @intCast(i32, switch (container_ty.containerLayout(mod)) {
+ const field_offset = @as(i32, @intCast(switch (container_ty.containerLayout(mod)) {
.Auto, .Extern => container_ty.structFieldOffset(index, mod),
.Packed => if (container_ty.zigTypeTag(mod) == .Struct and
ptr_field_ty.ptrInfo(mod).packed_offset.host_size == 0)
container_ty.packedStructFieldByteOffset(index, mod)
else
0,
- });
+ }));
const src_mcv = try self.resolveInst(operand);
const dst_mcv = if (switch (src_mcv) {
@@ -5577,7 +5577,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const src_mcv = try self.resolveInst(operand);
const field_off = switch (container_ty.containerLayout(mod)) {
- .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, mod) * 8),
+ .Auto, .Extern => @as(u32, @intCast(container_ty.structFieldOffset(index, mod) * 8)),
.Packed => if (mod.typeToStruct(container_ty)) |struct_obj|
struct_obj.packedFieldBitOffset(mod, index)
else
@@ -5588,7 +5588,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
.load_frame => |frame_addr| {
if (field_off % 8 == 0) {
const off_mcv =
- src_mcv.address().offset(@intCast(i32, @divExact(field_off, 8))).deref();
+ src_mcv.address().offset(@as(i32, @intCast(@divExact(field_off, 8)))).deref();
if (self.reuseOperand(inst, operand, 0, src_mcv)) break :result off_mcv;
const dst_mcv = try self.allocRegOrMem(inst, true);
@@ -5596,10 +5596,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
break :result dst_mcv;
}
- const field_abi_size = @intCast(u32, field_ty.abiSize(mod));
+ const field_abi_size = @as(u32, @intCast(field_ty.abiSize(mod)));
const limb_abi_size: u32 = @min(field_abi_size, 8);
const limb_abi_bits = limb_abi_size * 8;
- const field_byte_off = @intCast(i32, field_off / limb_abi_bits * limb_abi_size);
+ const field_byte_off = @as(i32, @intCast(field_off / limb_abi_bits * limb_abi_size));
const field_bit_off = field_off % limb_abi_bits;
if (field_abi_size > 8) {
@@ -5643,7 +5643,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
tmp_reg,
Memory.sib(Memory.PtrSize.fromSize(field_abi_size), .{
.base = .{ .frame = frame_addr.index },
- .disp = frame_addr.off + field_byte_off + @intCast(i32, limb_abi_size),
+ .disp = frame_addr.off + field_byte_off + @as(i32, @intCast(limb_abi_size)),
}),
);
try self.asmRegisterRegisterImmediate(
@@ -5724,7 +5724,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
const inst_ty = self.typeOfIndex(inst);
const parent_ty = inst_ty.childType(mod);
- const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, mod));
+ const field_offset = @as(i32, @intCast(parent_ty.structFieldOffset(extra.field_index, mod)));
const src_mcv = try self.resolveInst(extra.field_ptr);
const dst_mcv = if (src_mcv.isRegisterOffset() and
@@ -5773,14 +5773,14 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:
switch (tag) {
.not => {
- const limb_abi_size = @intCast(u16, @min(src_ty.abiSize(mod), 8));
+ const limb_abi_size = @as(u16, @intCast(@min(src_ty.abiSize(mod), 8)));
const int_info = if (src_ty.ip_index == .bool_type)
std.builtin.Type.Int{ .signedness = .unsigned, .bits = 1 }
else
src_ty.intInfo(mod);
var byte_off: i32 = 0;
while (byte_off * 8 < int_info.bits) : (byte_off += limb_abi_size) {
- const limb_bits = @intCast(u16, @min(int_info.bits - byte_off * 8, limb_abi_size * 8));
+ const limb_bits = @as(u16, @intCast(@min(int_info.bits - byte_off * 8, limb_abi_size * 8)));
const limb_ty = try mod.intType(int_info.signedness, limb_bits);
const limb_mcv = switch (byte_off) {
0 => dst_mcv,
@@ -5788,7 +5788,7 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:
};
if (int_info.signedness == .unsigned and self.regExtraBits(limb_ty) > 0) {
- const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_bits);
+ const mask = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - limb_bits));
try self.genBinOpMir(.{ ._, .xor }, limb_ty, limb_mcv, .{ .immediate = mask });
} else try self.genUnOpMir(.{ ._, .not }, limb_ty, limb_mcv);
}
@@ -5801,7 +5801,7 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:
fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: MCValue) !void {
const mod = self.bin_file.options.module.?;
- const abi_size = @intCast(u32, dst_ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(dst_ty.abiSize(mod)));
if (abi_size > 8) return self.fail("TODO implement {} for {}", .{
mir_tag,
dst_ty.fmt(self.bin_file.options.module.?),
@@ -5863,7 +5863,7 @@ fn genShiftBinOpMir(
break :rhs .{ .register = .rcx };
};
- const abi_size = @intCast(u32, ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
if (abi_size <= 8) {
switch (lhs_mcv) {
.register => |lhs_reg| switch (rhs_mcv) {
@@ -5886,7 +5886,7 @@ fn genShiftBinOpMir(
const lhs_mem = Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (lhs_mcv) {
.memory => |addr| .{
.base = .{ .reg = .ds },
- .disp = math.cast(i32, @bitCast(i64, addr)) orelse
+ .disp = math.cast(i32, @as(i64, @bitCast(addr))) orelse
return self.fail("TODO genShiftBinOpMir between {s} and {s}", .{
@tagName(lhs_mcv),
@tagName(rhs_mcv),
@@ -6151,8 +6151,8 @@ fn genMulDivBinOp(
if (dst_ty.zigTypeTag(mod) == .Vector or dst_ty.zigTypeTag(mod) == .Float) {
return self.fail("TODO implement genMulDivBinOp for {}", .{dst_ty.fmtDebug()});
}
- const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
- const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
+ const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod)));
+ const src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod)));
if (switch (tag) {
else => unreachable,
.mul, .mulwrap => dst_abi_size != src_abi_size and dst_abi_size != src_abi_size * 2,
@@ -6326,7 +6326,7 @@ fn genBinOp(
const mod = self.bin_file.options.module.?;
const lhs_ty = self.typeOf(lhs_air);
const rhs_ty = self.typeOf(rhs_air);
- const abi_size = @intCast(u32, lhs_ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(lhs_ty.abiSize(mod)));
const maybe_mask_reg = switch (air_tag) {
else => null,
@@ -6481,7 +6481,7 @@ fn genBinOp(
.lea_tlv,
.lea_frame,
=> true,
- .memory => |addr| math.cast(i32, @bitCast(i64, addr)) == null,
+ .memory => |addr| math.cast(i32, @as(i64, @bitCast(addr))) == null,
else => false,
}) .{ .register = try self.copyToTmpRegister(rhs_ty, src_mcv) } else src_mcv;
const mat_mcv_lock = switch (mat_src_mcv) {
@@ -6506,7 +6506,7 @@ fn genBinOp(
},
};
- const cmov_abi_size = @max(@intCast(u32, lhs_ty.abiSize(mod)), 2);
+ const cmov_abi_size = @max(@as(u32, @intCast(lhs_ty.abiSize(mod))), 2);
const tmp_reg = switch (dst_mcv) {
.register => |reg| reg,
else => try self.copyToTmpRegister(lhs_ty, dst_mcv),
@@ -6541,7 +6541,7 @@ fn genBinOp(
Memory.sib(Memory.PtrSize.fromSize(cmov_abi_size), switch (mat_src_mcv) {
.memory => |addr| .{
.base = .{ .reg = .ds },
- .disp = @intCast(i32, @bitCast(i64, addr)),
+ .disp = @as(i32, @intCast(@as(i64, @bitCast(addr)))),
},
.indirect => |reg_off| .{
.base = .{ .reg = reg_off.reg },
@@ -7429,7 +7429,7 @@ fn genBinOpMir(
src_mcv: MCValue,
) !void {
const mod = self.bin_file.options.module.?;
- const abi_size = @intCast(u32, ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
switch (dst_mcv) {
.none,
.unreach,
@@ -7465,28 +7465,28 @@ fn genBinOpMir(
8 => try self.asmRegisterImmediate(
mir_tag,
dst_alias,
- if (math.cast(i8, @bitCast(i64, imm))) |small|
+ if (math.cast(i8, @as(i64, @bitCast(imm)))) |small|
Immediate.s(small)
else
- Immediate.u(@intCast(u8, imm)),
+ Immediate.u(@as(u8, @intCast(imm))),
),
16 => try self.asmRegisterImmediate(
mir_tag,
dst_alias,
- if (math.cast(i16, @bitCast(i64, imm))) |small|
+ if (math.cast(i16, @as(i64, @bitCast(imm)))) |small|
Immediate.s(small)
else
- Immediate.u(@intCast(u16, imm)),
+ Immediate.u(@as(u16, @intCast(imm))),
),
32 => try self.asmRegisterImmediate(
mir_tag,
dst_alias,
- if (math.cast(i32, @bitCast(i64, imm))) |small|
+ if (math.cast(i32, @as(i64, @bitCast(imm)))) |small|
Immediate.s(small)
else
- Immediate.u(@intCast(u32, imm)),
+ Immediate.u(@as(u32, @intCast(imm))),
),
- 64 => if (math.cast(i32, @bitCast(i64, imm))) |small|
+ 64 => if (math.cast(i32, @as(i64, @bitCast(imm)))) |small|
try self.asmRegisterImmediate(mir_tag, dst_alias, Immediate.s(small))
else
try self.asmRegisterRegister(mir_tag, dst_alias, registerAlias(
@@ -7602,8 +7602,8 @@ fn genBinOpMir(
=> null,
.memory, .load_got, .load_direct, .load_tlv => src: {
switch (src_mcv) {
- .memory => |addr| if (math.cast(i32, @bitCast(i64, addr)) != null and
- math.cast(i32, @bitCast(i64, addr) + abi_size - limb_abi_size) != null)
+ .memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr))) != null and
+ math.cast(i32, @as(i64, @bitCast(addr)) + abi_size - limb_abi_size) != null)
break :src null,
.load_got, .load_direct, .load_tlv => {},
else => unreachable,
@@ -7680,7 +7680,7 @@ fn genBinOpMir(
const imm = switch (off) {
0 => src_imm,
else => switch (ty_signedness) {
- .signed => @bitCast(u64, @bitCast(i64, src_imm) >> 63),
+ .signed => @as(u64, @bitCast(@as(i64, @bitCast(src_imm)) >> 63)),
.unsigned => 0,
},
};
@@ -7688,28 +7688,28 @@ fn genBinOpMir(
8 => try self.asmMemoryImmediate(
mir_limb_tag,
dst_limb_mem,
- if (math.cast(i8, @bitCast(i64, imm))) |small|
+ if (math.cast(i8, @as(i64, @bitCast(imm)))) |small|
Immediate.s(small)
else
- Immediate.u(@intCast(u8, imm)),
+ Immediate.u(@as(u8, @intCast(imm))),
),
16 => try self.asmMemoryImmediate(
mir_limb_tag,
dst_limb_mem,
- if (math.cast(i16, @bitCast(i64, imm))) |small|
+ if (math.cast(i16, @as(i64, @bitCast(imm)))) |small|
Immediate.s(small)
else
- Immediate.u(@intCast(u16, imm)),
+ Immediate.u(@as(u16, @intCast(imm))),
),
32 => try self.asmMemoryImmediate(
mir_limb_tag,
dst_limb_mem,
- if (math.cast(i32, @bitCast(i64, imm))) |small|
+ if (math.cast(i32, @as(i64, @bitCast(imm)))) |small|
Immediate.s(small)
else
- Immediate.u(@intCast(u32, imm)),
+ Immediate.u(@as(u32, @intCast(imm))),
),
- 64 => if (math.cast(i32, @bitCast(i64, imm))) |small|
+ 64 => if (math.cast(i32, @as(i64, @bitCast(imm)))) |small|
try self.asmMemoryImmediate(
mir_limb_tag,
dst_limb_mem,
@@ -7753,7 +7753,7 @@ fn genBinOpMir(
0 => src_mcv,
else => .{ .immediate = 0 },
},
- .memory => |addr| .{ .memory = @bitCast(u64, @bitCast(i64, addr) + off) },
+ .memory => |addr| .{ .memory = @as(u64, @bitCast(@as(i64, @bitCast(addr)) + off)) },
.indirect => |reg_off| .{ .indirect = .{
.reg = reg_off.reg,
.off = reg_off.off + off,
@@ -7780,7 +7780,7 @@ fn genBinOpMir(
/// Does not support byte-size operands.
fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError!void {
const mod = self.bin_file.options.module.?;
- const abi_size = @intCast(u32, dst_ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(dst_ty.abiSize(mod)));
switch (dst_mcv) {
.none,
.unreach,
@@ -7847,7 +7847,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (src_mcv) {
.memory => |addr| .{
.base = .{ .reg = .ds },
- .disp = math.cast(i32, @bitCast(i64, addr)) orelse
+ .disp = math.cast(i32, @as(i64, @bitCast(addr))) orelse
return self.asmRegisterRegister(
.{ .i_, .mul },
dst_alias,
@@ -8014,7 +8014,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const callee = pl_op.operand;
const extra = self.air.extraData(Air.Call, pl_op.payload);
- const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
+ const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]));
const ty = self.typeOf(callee);
const fn_ty = switch (ty.zigTypeTag(mod)) {
@@ -8107,7 +8107,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const got_addr = atom.getOffsetTableAddress(elf_file);
try self.asmMemory(.{ ._, .call }, Memory.sib(.qword, .{
.base = .{ .reg = .ds },
- .disp = @intCast(i32, got_addr),
+ .disp = @as(i32, @intCast(got_addr)),
}));
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const atom = try coff_file.getOrCreateAtomForDecl(owner_decl);
@@ -8124,7 +8124,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const atom = p9.getAtom(atom_index);
try self.asmMemory(.{ ._, .call }, Memory.sib(.qword, .{
.base = .{ .reg = .ds },
- .disp = @intCast(i32, atom.getOffsetTableAddress(p9)),
+ .disp = @as(i32, @intCast(atom.getOffsetTableAddress(p9))),
}));
} else unreachable;
} else if (func_value.getExternFunc(mod)) |extern_func| {
@@ -8244,7 +8244,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const result = MCValue{
.eflags = switch (ty.zigTypeTag(mod)) {
else => result: {
- const abi_size = @intCast(u16, ty.abiSize(mod));
+ const abi_size = @as(u16, @intCast(ty.abiSize(mod)));
const may_flip: enum {
may_flip,
must_flip,
@@ -8441,7 +8441,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
self.eflags_inst = inst;
const op_ty = self.typeOf(un_op);
- const op_abi_size = @intCast(u32, op_ty.abiSize(mod));
+ const op_abi_size = @as(u32, @intCast(op_ty.abiSize(mod)));
const op_mcv = try self.resolveInst(un_op);
const dst_reg = switch (op_mcv) {
.register => |reg| reg,
@@ -8650,7 +8650,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod))
.{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty }
else
- .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool };
+ .{ .off = @as(i32, @intCast(pl_ty.abiSize(mod))), .ty = Type.bool };
switch (opt_mcv) {
.none,
@@ -8670,18 +8670,18 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
.register => |opt_reg| {
if (some_info.off == 0) {
- const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod));
+ const some_abi_size = @as(u32, @intCast(some_info.ty.abiSize(mod)));
const alias_reg = registerAlias(opt_reg, some_abi_size);
assert(some_abi_size * 8 == alias_reg.bitSize());
try self.asmRegisterRegister(.{ ._, .@"test" }, alias_reg, alias_reg);
return .{ .eflags = .z };
}
assert(some_info.ty.ip_index == .bool_type);
- const opt_abi_size = @intCast(u32, opt_ty.abiSize(mod));
+ const opt_abi_size = @as(u32, @intCast(opt_ty.abiSize(mod)));
try self.asmRegisterImmediate(
.{ ._, .bt },
registerAlias(opt_reg, opt_abi_size),
- Immediate.u(@intCast(u6, some_info.off * 8)),
+ Immediate.u(@as(u6, @intCast(some_info.off * 8))),
);
return .{ .eflags = .nc };
},
@@ -8696,7 +8696,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
defer self.register_manager.unlockReg(addr_reg_lock);
try self.genSetReg(addr_reg, Type.usize, opt_mcv.address());
- const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod));
+ const some_abi_size = @as(u32, @intCast(some_info.ty.abiSize(mod)));
try self.asmMemoryImmediate(
.{ ._, .cmp },
Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{
@@ -8709,7 +8709,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
},
.indirect, .load_frame => {
- const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod));
+ const some_abi_size = @as(u32, @intCast(some_info.ty.abiSize(mod)));
try self.asmMemoryImmediate(
.{ ._, .cmp },
Memory.sib(Memory.PtrSize.fromSize(some_abi_size), switch (opt_mcv) {
@@ -8741,7 +8741,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue)
const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod))
.{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty }
else
- .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool };
+ .{ .off = @as(i32, @intCast(pl_ty.abiSize(mod))), .ty = Type.bool };
const ptr_reg = switch (ptr_mcv) {
.register => |reg| reg,
@@ -8750,7 +8750,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue)
const ptr_lock = self.register_manager.lockReg(ptr_reg);
defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
- const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod));
+ const some_abi_size = @as(u32, @intCast(some_info.ty.abiSize(mod)));
try self.asmMemoryImmediate(
.{ ._, .cmp },
Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{
@@ -8783,7 +8783,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) !
const tmp_reg = try self.copyToTmpRegister(ty, operand);
if (err_off > 0) {
- const shift = @intCast(u6, err_off * 8);
+ const shift = @as(u6, @intCast(err_off * 8));
try self.genShiftBinOpMir(
.{ ._r, .sh },
ty,
@@ -8805,7 +8805,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) !
Type.anyerror,
.{ .load_frame = .{
.index = frame_addr.index,
- .off = frame_addr.off + @intCast(i32, err_off),
+ .off = frame_addr.off + @as(i32, @intCast(err_off)),
} },
.{ .immediate = 0 },
),
@@ -8943,7 +8943,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[loop.end..][0..loop.data.body_len];
- const jmp_target = @intCast(u32, self.mir_instructions.len);
+ const jmp_target = @as(u32, @intCast(self.mir_instructions.len));
self.scope_generation += 1;
const state = try self.saveState();
@@ -9015,9 +9015,9 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void {
while (case_i < switch_br.data.cases_len) : (case_i += 1) {
const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
- const items = @ptrCast(
+ const items = @as(
[]const Air.Inst.Ref,
- self.air.extra[case.end..][0..case.data.items_len],
+ @ptrCast(self.air.extra[case.end..][0..case.data.items_len]),
);
const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
extra_index = case.end + items.len + case_body.len;
@@ -9066,7 +9066,7 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void {
}
fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void {
- const next_inst = @intCast(u32, self.mir_instructions.len);
+ const next_inst = @as(u32, @intCast(self.mir_instructions.len));
switch (self.mir_instructions.items(.tag)[reloc]) {
.j, .jmp => {},
.pseudo => switch (self.mir_instructions.items(.ops)[reloc]) {
@@ -9141,11 +9141,11 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
- const clobbers_len = @truncate(u31, extra.data.flags);
+ const clobbers_len = @as(u31, @truncate(extra.data.flags));
var extra_i: usize = extra.end;
- const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
+ const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]));
extra_i += outputs.len;
- const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
+ const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]));
extra_i += inputs.len;
var result: MCValue = .none;
@@ -9281,7 +9281,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
if (std.fmt.parseInt(i32, op_str["$".len..], 0)) |s| {
if (mnem_size) |size| {
const max = @as(u64, math.maxInt(u64)) >>
- @intCast(u6, 64 - (size.bitSize() - 1));
+ @as(u6, @intCast(64 - (size.bitSize() - 1)));
if ((if (s < 0) ~s else s) > max)
return self.fail("Invalid immediate size: '{s}'", .{op_str});
}
@@ -9289,7 +9289,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
} else |_| if (std.fmt.parseInt(u64, op_str["$".len..], 0)) |u| {
if (mnem_size) |size| {
const max = @as(u64, math.maxInt(u64)) >>
- @intCast(u6, 64 - size.bitSize());
+ @as(u6, @intCast(64 - size.bitSize()));
if (u > max)
return self.fail("Invalid immediate size: '{s}'", .{op_str});
}
@@ -9618,7 +9618,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError
.indirect => |reg_off| try self.genSetMem(.{ .reg = reg_off.reg }, reg_off.off, ty, src_mcv),
.memory, .load_direct, .load_got, .load_tlv => {
switch (dst_mcv) {
- .memory => |addr| if (math.cast(i32, @bitCast(i64, addr))) |small_addr|
+ .memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr|
return self.genSetMem(.{ .reg = .ds }, small_addr, ty, src_mcv),
.load_direct, .load_got, .load_tlv => {},
else => unreachable,
@@ -9641,7 +9641,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError
fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerError!void {
const mod = self.bin_file.options.module.?;
- const abi_size = @intCast(u32, ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
if (abi_size * 8 > dst_reg.bitSize())
return self.fail("genSetReg called with a value larger than dst_reg", .{});
switch (src_mcv) {
@@ -9662,11 +9662,11 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
} else if (abi_size > 4 and math.cast(u32, imm) != null) {
// 32-bit moves zero-extend to 64-bit.
try self.asmRegisterImmediate(.{ ._, .mov }, dst_reg.to32(), Immediate.u(imm));
- } else if (abi_size <= 4 and @bitCast(i64, imm) < 0) {
+ } else if (abi_size <= 4 and @as(i64, @bitCast(imm)) < 0) {
try self.asmRegisterImmediate(
.{ ._, .mov },
registerAlias(dst_reg, abi_size),
- Immediate.s(@intCast(i32, @bitCast(i64, imm))),
+ Immediate.s(@as(i32, @intCast(@as(i64, @bitCast(imm))))),
);
} else {
try self.asmRegisterImmediate(
@@ -9806,7 +9806,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
},
.memory, .load_direct, .load_got, .load_tlv => {
switch (src_mcv) {
- .memory => |addr| if (math.cast(i32, @bitCast(i64, addr))) |small_addr| {
+ .memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr| {
const dst_alias = registerAlias(dst_reg, abi_size);
const src_mem = Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
.base = .{ .reg = .ds },
@@ -9814,7 +9814,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
});
switch (try self.moveStrategy(ty, mem.isAlignedGeneric(
u32,
- @bitCast(u32, small_addr),
+ @as(u32, @bitCast(small_addr)),
ty.abiAlignment(mod),
))) {
.move => |tag| try self.asmRegisterMemory(tag, dst_alias, src_mem),
@@ -9928,9 +9928,9 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCValue) InnerError!void {
const mod = self.bin_file.options.module.?;
- const abi_size = @intCast(u32, ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
const dst_ptr_mcv: MCValue = switch (base) {
- .none => .{ .immediate = @bitCast(u64, @as(i64, disp)) },
+ .none => .{ .immediate = @as(u64, @bitCast(@as(i64, disp))) },
.reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } },
.frame => |base_frame_index| .{ .lea_frame = .{ .index = base_frame_index, .off = disp } },
};
@@ -9941,9 +9941,9 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
.immediate => |imm| switch (abi_size) {
1, 2, 4 => {
const immediate = if (ty.isSignedInt(mod))
- Immediate.s(@truncate(i32, @bitCast(i64, imm)))
+ Immediate.s(@as(i32, @truncate(@as(i64, @bitCast(imm)))))
else
- Immediate.u(@intCast(u32, imm));
+ Immediate.u(@as(u32, @intCast(imm)));
try self.asmMemoryImmediate(
.{ ._, .mov },
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = base, .disp = disp }),
@@ -9951,7 +9951,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
);
},
3, 5...7 => unreachable,
- else => if (math.cast(i32, @bitCast(i64, imm))) |small| {
+ else => if (math.cast(i32, @as(i64, @bitCast(imm)))) |small| {
try self.asmMemoryImmediate(
.{ ._, .mov },
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = base, .disp = disp }),
@@ -9963,14 +9963,14 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
.{ ._, .mov },
Memory.sib(.dword, .{ .base = base, .disp = disp + offset }),
if (ty.isSignedInt(mod))
- Immediate.s(@truncate(
+ Immediate.s(@as(
i32,
- @bitCast(i64, imm) >> (math.cast(u6, offset * 8) orelse 63),
+ @truncate(@as(i64, @bitCast(imm)) >> (math.cast(u6, offset * 8) orelse 63)),
))
else
- Immediate.u(@truncate(
+ Immediate.u(@as(
u32,
- if (math.cast(u6, offset * 8)) |shift| imm >> shift else 0,
+ @truncate(if (math.cast(u6, offset * 8)) |shift| imm >> shift else 0),
)),
);
},
@@ -9985,13 +9985,13 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
switch (try self.moveStrategy(ty, switch (base) {
.none => mem.isAlignedGeneric(
u32,
- @bitCast(u32, disp),
+ @as(u32, @bitCast(disp)),
ty.abiAlignment(mod),
),
.reg => |reg| switch (reg) {
.es, .cs, .ss, .ds => mem.isAlignedGeneric(
u32,
- @bitCast(u32, disp),
+ @as(u32, @bitCast(disp)),
ty.abiAlignment(mod),
),
else => false,
@@ -10012,13 +10012,13 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
.register_overflow => |ro| {
try self.genSetMem(
base,
- disp + @intCast(i32, ty.structFieldOffset(0, mod)),
+ disp + @as(i32, @intCast(ty.structFieldOffset(0, mod))),
ty.structFieldType(0, mod),
.{ .register = ro.reg },
);
try self.genSetMem(
base,
- disp + @intCast(i32, ty.structFieldOffset(1, mod)),
+ disp + @as(i32, @intCast(ty.structFieldOffset(1, mod))),
ty.structFieldType(1, mod),
.{ .eflags = ro.eflags },
);
@@ -10077,7 +10077,7 @@ fn genLazySymbolRef(
_ = try atom.getOrCreateOffsetTableEntry(elf_file);
const got_addr = atom.getOffsetTableAddress(elf_file);
const got_mem =
- Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) });
+ Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @as(i32, @intCast(got_addr)) });
switch (tag) {
.lea, .mov => try self.asmRegisterMemory(.{ ._, .mov }, reg.to64(), got_mem),
.call => try self.asmMemory(.{ ._, .call }, got_mem),
@@ -10099,7 +10099,7 @@ fn genLazySymbolRef(
_ = atom.getOrCreateOffsetTableEntry(p9_file);
const got_addr = atom.getOffsetTableAddress(p9_file);
const got_mem =
- Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) });
+ Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @as(i32, @intCast(got_addr)) });
switch (tag) {
.lea, .mov => try self.asmRegisterMemory(.{ ._, .mov }, reg.to64(), got_mem),
.call => try self.asmMemory(.{ ._, .call }, got_mem),
@@ -10195,8 +10195,8 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned;
if (dst_signedness == src_signedness) break :result dst_mcv;
- const abi_size = @intCast(u16, dst_ty.abiSize(mod));
- const bit_size = @intCast(u16, dst_ty.bitSize(mod));
+ const abi_size = @as(u16, @intCast(dst_ty.abiSize(mod)));
+ const bit_size = @as(u16, @intCast(dst_ty.bitSize(mod)));
if (abi_size * 8 <= bit_size) break :result dst_mcv;
const dst_limbs_len = math.divCeil(i32, bit_size, 64) catch unreachable;
@@ -10237,7 +10237,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(i32, ptr_ty.abiSize(mod)),
+ @as(i32, @intCast(ptr_ty.abiSize(mod))),
Type.usize,
.{ .immediate = array_len },
);
@@ -10251,7 +10251,7 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const src_ty = self.typeOf(ty_op.operand);
- const src_bits = @intCast(u32, src_ty.bitSize(mod));
+ const src_bits = @as(u32, @intCast(src_ty.bitSize(mod)));
const src_signedness =
if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned;
const dst_ty = self.typeOfIndex(inst);
@@ -10306,7 +10306,7 @@ fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void {
const src_ty = self.typeOf(ty_op.operand);
const dst_ty = self.typeOfIndex(inst);
- const dst_bits = @intCast(u32, dst_ty.bitSize(mod));
+ const dst_bits = @as(u32, @intCast(dst_ty.bitSize(mod)));
const dst_signedness =
if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned;
@@ -10359,7 +10359,7 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
const ptr_ty = self.typeOf(extra.ptr);
const val_ty = self.typeOf(extra.expected_value);
- const val_abi_size = @intCast(u32, val_ty.abiSize(mod));
+ const val_abi_size = @as(u32, @intCast(val_ty.abiSize(mod)));
try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx });
const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx });
@@ -10461,7 +10461,7 @@ fn atomicOp(
};
defer if (val_lock) |lock| self.register_manager.unlockReg(lock);
- const val_abi_size = @intCast(u32, val_ty.abiSize(mod));
+ const val_abi_size = @as(u32, @intCast(val_ty.abiSize(mod)));
const ptr_size = Memory.PtrSize.fromSize(val_abi_size);
const ptr_mem = switch (ptr_mcv) {
.immediate, .register, .register_offset, .lea_frame => ptr_mcv.deref().mem(ptr_size),
@@ -10539,7 +10539,7 @@ fn atomicOp(
defer self.register_manager.unlockReg(tmp_lock);
try self.asmRegisterMemory(.{ ._, .mov }, registerAlias(.rax, val_abi_size), ptr_mem);
- const loop = @intCast(u32, self.mir_instructions.len);
+ const loop = @as(u32, @intCast(self.mir_instructions.len));
if (rmw_op != std.builtin.AtomicRmwOp.Xchg) {
try self.genSetReg(tmp_reg, val_ty, .{ .register = .rax });
}
@@ -10613,7 +10613,7 @@ fn atomicOp(
.scale_index = ptr_mem.scaleIndex(),
.disp = ptr_mem.sib.disp + 8,
}));
- const loop = @intCast(u32, self.mir_instructions.len);
+ const loop = @as(u32, @intCast(self.mir_instructions.len));
const val_mem_mcv: MCValue = switch (val_mcv) {
.memory, .indirect, .load_frame => val_mcv,
else => .{ .indirect = .{
@@ -10769,7 +10769,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
};
defer if (src_val_lock) |lock| self.register_manager.unlockReg(lock);
- const elem_abi_size = @intCast(u31, elem_ty.abiSize(mod));
+ const elem_abi_size = @as(u31, @intCast(elem_ty.abiSize(mod)));
if (elem_abi_size == 1) {
const ptr: MCValue = switch (dst_ptr_ty.ptrSize(mod)) {
@@ -11249,9 +11249,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const mod = self.bin_file.options.module.?;
const result_ty = self.typeOfIndex(inst);
- const len = @intCast(usize, result_ty.arrayLen(mod));
+ const len = @as(usize, @intCast(result_ty.arrayLen(mod)));
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
- const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
+ const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len]));
const result: MCValue = result: {
switch (result_ty.zigTypeTag(mod)) {
.Struct => {
@@ -11268,17 +11268,17 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue;
const elem_ty = result_ty.structFieldType(elem_i, mod);
- const elem_bit_size = @intCast(u32, elem_ty.bitSize(mod));
+ const elem_bit_size = @as(u32, @intCast(elem_ty.bitSize(mod)));
if (elem_bit_size > 64) {
return self.fail(
"TODO airAggregateInit implement packed structs with large fields",
.{},
);
}
- const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod));
+ const elem_abi_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
const elem_abi_bits = elem_abi_size * 8;
const elem_off = struct_obj.packedFieldBitOffset(mod, elem_i);
- const elem_byte_off = @intCast(i32, elem_off / elem_abi_bits * elem_abi_size);
+ const elem_byte_off = @as(i32, @intCast(elem_off / elem_abi_bits * elem_abi_size));
const elem_bit_off = elem_off % elem_abi_bits;
const elem_mcv = try self.resolveInst(elem);
const mat_elem_mcv = switch (elem_mcv) {
@@ -11330,7 +11330,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
elem_ty,
.{ .load_frame = .{
.index = frame_index,
- .off = elem_byte_off + @intCast(i32, elem_abi_size),
+ .off = elem_byte_off + @as(i32, @intCast(elem_abi_size)),
} },
.{ .register = reg },
);
@@ -11340,7 +11340,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue;
const elem_ty = result_ty.structFieldType(elem_i, mod);
- const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, mod));
+ const elem_off = @as(i32, @intCast(result_ty.structFieldOffset(elem_i, mod)));
const elem_mcv = try self.resolveInst(elem);
const mat_elem_mcv = switch (elem_mcv) {
.load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index },
@@ -11354,7 +11354,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const frame_index =
try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod));
const elem_ty = result_ty.childType(mod);
- const elem_size = @intCast(u32, elem_ty.abiSize(mod));
+ const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
for (elements, 0..) |elem, elem_i| {
const elem_mcv = try self.resolveInst(elem);
@@ -11362,12 +11362,12 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
.load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index },
else => elem_mcv,
};
- const elem_off = @intCast(i32, elem_size * elem_i);
+ const elem_off = @as(i32, @intCast(elem_size * elem_i));
try self.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, mat_elem_mcv);
}
if (result_ty.sentinel(mod)) |sentinel| try self.genSetMem(
.{ .frame = frame_index },
- @intCast(i32, elem_size * elements.len),
+ @as(i32, @intCast(elem_size * elements.len)),
elem_ty,
try self.genTypedValue(.{ .ty = elem_ty, .val = sentinel }),
);
@@ -11416,7 +11416,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
const tag_int_val = try tag_val.intFromEnum(tag_ty, mod);
const tag_int = tag_int_val.toUnsignedInt(mod);
const tag_off = if (layout.tag_align < layout.payload_align)
- @intCast(i32, layout.payload_size)
+ @as(i32, @intCast(layout.payload_size))
else
0;
try self.genCopy(tag_ty, dst_mcv.address().offset(tag_off).deref(), .{ .immediate = tag_int });
@@ -11424,7 +11424,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
const pl_off = if (layout.tag_align < layout.payload_align)
0
else
- @intCast(i32, layout.tag_size);
+ @as(i32, @intCast(layout.tag_size));
try self.genCopy(src_ty, dst_mcv.address().offset(pl_off).deref(), src_mcv);
break :result dst_mcv;
@@ -11454,7 +11454,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
var order = [1]u2{0} ** 3;
var unused = std.StaticBitSet(3).initFull();
for (ops, &mcvs, &locks, 0..) |op, *mcv, *lock, op_i| {
- const op_index = @intCast(u2, op_i);
+ const op_index = @as(u2, @intCast(op_i));
mcv.* = try self.resolveInst(op);
if (unused.isSet(0) and mcv.isRegister() and self.reuseOperand(inst, op, op_index, mcv.*)) {
order[op_index] = 1;
@@ -11470,7 +11470,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
}
for (&order, &mcvs, &locks) |*mop_index, *mcv, *lock| {
if (mop_index.* != 0) continue;
- mop_index.* = 1 + @intCast(u2, unused.toggleFirstSet().?);
+ mop_index.* = 1 + @as(u2, @intCast(unused.toggleFirstSet().?));
if (mop_index.* > 1 and mcv.isRegister()) continue;
const reg = try self.copyToTmpRegister(ty, mcv.*);
mcv.* = .{ .register = reg };
@@ -11570,7 +11570,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
var mops: [3]MCValue = undefined;
for (order, mcvs) |mop_index, mcv| mops[mop_index - 1] = mcv;
- const abi_size = @intCast(u32, ty.abiSize(mod));
+ const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
const mop1_reg = registerAlias(mops[0].getReg().?, abi_size);
const mop2_reg = registerAlias(mops[1].getReg().?, abi_size);
if (mops[2].isRegister()) try self.asmRegisterRegisterRegister(
@@ -11723,7 +11723,7 @@ fn resolveCallingConventionValues(
switch (self.target.os.tag) {
.windows => {
// Align the stack to 16bytes before allocating shadow stack space (if any).
- result.stack_byte_count += @intCast(u31, 4 * Type.usize.abiSize(mod));
+ result.stack_byte_count += @as(u31, @intCast(4 * Type.usize.abiSize(mod)));
},
else => {},
}
@@ -11746,7 +11746,7 @@ fn resolveCallingConventionValues(
result.return_value = switch (classes[0]) {
.integer => InstTracking.init(.{ .register = registerAlias(
ret_reg,
- @intCast(u32, ret_ty.abiSize(mod)),
+ @as(u32, @intCast(ret_ty.abiSize(mod))),
) }),
.float, .sse => InstTracking.init(.{ .register = .xmm0 }),
.memory => ret: {
@@ -11782,17 +11782,17 @@ fn resolveCallingConventionValues(
},
.float, .sse => switch (self.target.os.tag) {
.windows => if (param_reg_i < 4) {
- arg.* = .{ .register = @enumFromInt(
+ arg.* = .{ .register = @as(
Register,
- @intFromEnum(Register.xmm0) + param_reg_i,
+ @enumFromInt(@intFromEnum(Register.xmm0) + param_reg_i),
) };
param_reg_i += 1;
continue;
},
else => if (param_sse_reg_i < 8) {
- arg.* = .{ .register = @enumFromInt(
+ arg.* = .{ .register = @as(
Register,
- @intFromEnum(Register.xmm0) + param_sse_reg_i,
+ @enumFromInt(@intFromEnum(Register.xmm0) + param_sse_reg_i),
) };
param_sse_reg_i += 1;
continue;
@@ -11804,8 +11804,8 @@ fn resolveCallingConventionValues(
}),
}
- const param_size = @intCast(u31, ty.abiSize(mod));
- const param_align = @intCast(u31, ty.abiAlignment(mod));
+ const param_size = @as(u31, @intCast(ty.abiSize(mod)));
+ const param_align = @as(u31, @intCast(ty.abiAlignment(mod)));
result.stack_byte_count =
mem.alignForward(u31, result.stack_byte_count, param_align);
arg.* = .{ .load_frame = .{
@@ -11825,7 +11825,7 @@ fn resolveCallingConventionValues(
result.return_value = InstTracking.init(.none);
} else {
const ret_reg = abi.getCAbiIntReturnRegs(self.target.*)[0];
- const ret_ty_size = @intCast(u31, ret_ty.abiSize(mod));
+ const ret_ty_size = @as(u31, @intCast(ret_ty.abiSize(mod)));
if (ret_ty_size <= 8 and !ret_ty.isRuntimeFloat()) {
const aliased_reg = registerAlias(ret_reg, ret_ty_size);
result.return_value = .{ .short = .{ .register = aliased_reg }, .long = .none };
@@ -11844,8 +11844,8 @@ fn resolveCallingConventionValues(
arg.* = .none;
continue;
}
- const param_size = @intCast(u31, ty.abiSize(mod));
- const param_align = @intCast(u31, ty.abiAlignment(mod));
+ const param_size = @as(u31, @intCast(ty.abiSize(mod)));
+ const param_align = @as(u31, @intCast(ty.abiAlignment(mod)));
result.stack_byte_count =
mem.alignForward(u31, result.stack_byte_count, param_align);
arg.* = .{ .load_frame = .{
@@ -11932,12 +11932,12 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void {
const mod = self.bin_file.options.module.?;
const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{
.signedness = .unsigned,
- .bits = @intCast(u16, ty.bitSize(mod)),
+ .bits = @as(u16, @intCast(ty.bitSize(mod))),
};
const max_reg_bit_width = Register.rax.bitSize();
switch (int_info.signedness) {
.signed => {
- const shift = @intCast(u6, max_reg_bit_width - int_info.bits);
+ const shift = @as(u6, @intCast(max_reg_bit_width - int_info.bits));
try self.genShiftBinOpMir(
.{ ._l, .sa },
Type.isize,
@@ -11952,7 +11952,7 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void {
);
},
.unsigned => {
- const shift = @intCast(u6, max_reg_bit_width - int_info.bits);
+ const shift = @as(u6, @intCast(max_reg_bit_width - int_info.bits));
const mask = (~@as(u64, 0)) >> shift;
if (int_info.bits <= 32) {
try self.genBinOpMir(
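The CodeGen.zig hunks above apply one mechanical rewrite over and over: the old two-argument cast builtins become single-argument builtins wrapped in @as. A standalone sketch of the pattern, using made-up values rather than code from this commit; where a result type is already present, the @as wrapper can be dropped entirely, which is what the destination-type inference enables:

    const std = @import("std");

    test "cast builtins now take one operand and infer the destination type" {
        const abi_size: u64 = 176;
        // Old two-argument form: @intCast(u32, abi_size)
        const wrapped = @as(u32, @intCast(abi_size));
        // With a result type in scope, @as becomes unnecessary:
        const inferred: u32 = @intCast(abi_size);
        try std.testing.expectEqual(wrapped, inferred);
    }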
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index 78ff918715..9c9aadbd13 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -19,18 +19,18 @@ pub const Error = Lower.Error || error{
pub fn emitMir(emit: *Emit) Error!void {
for (0..emit.lower.mir.instructions.len) |mir_i| {
- const mir_index = @intCast(Mir.Inst.Index, mir_i);
+ const mir_index = @as(Mir.Inst.Index, @intCast(mir_i));
try emit.code_offset_mapping.putNoClobber(
emit.lower.allocator,
mir_index,
- @intCast(u32, emit.code.items.len),
+ @as(u32, @intCast(emit.code.items.len)),
);
const lowered = try emit.lower.lowerMir(mir_index);
var lowered_relocs = lowered.relocs;
for (lowered.insts, 0..) |lowered_inst, lowered_index| {
- const start_offset = @intCast(u32, emit.code.items.len);
+ const start_offset = @as(u32, @intCast(emit.code.items.len));
try lowered_inst.encode(emit.code.writer(), .{});
- const end_offset = @intCast(u32, emit.code.items.len);
+ const end_offset = @as(u32, @intCast(emit.code.items.len));
while (lowered_relocs.len > 0 and
lowered_relocs[0].lowered_inst_index == lowered_index) : ({
lowered_relocs = lowered_relocs[1..];
@@ -39,7 +39,7 @@ pub fn emitMir(emit: *Emit) Error!void {
.source = start_offset,
.target = target,
.offset = end_offset - 4,
- .length = @intCast(u5, end_offset - start_offset),
+ .length = @as(u5, @intCast(end_offset - start_offset)),
}),
.linker_extern_fn => |symbol| if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl.
@@ -89,7 +89,7 @@ pub fn emitMir(emit: *Emit) Error!void {
else => unreachable,
},
.target = .{ .sym_index = symbol.sym_index, .file = null },
- .offset = @intCast(u32, end_offset - 4),
+ .offset = @as(u32, @intCast(end_offset - 4)),
.addend = 0,
.pcrel = true,
.length = 2,
@@ -113,7 +113,7 @@ pub fn emitMir(emit: *Emit) Error!void {
.linker_import => coff_file.getGlobalByIndex(symbol.sym_index),
else => unreachable,
},
- .offset = @intCast(u32, end_offset - 4),
+ .offset = @as(u32, @intCast(end_offset - 4)),
.addend = 0,
.pcrel = true,
.length = 2,
@@ -122,7 +122,7 @@ pub fn emitMir(emit: *Emit) Error!void {
const atom_index = symbol.atom_index;
try p9_file.addReloc(atom_index, .{ // TODO we may need to add a .type field to the relocs if they are .linker_got instead of just .linker_direct
.target = symbol.sym_index, // we set sym_index to just be the atom index
- .offset = @intCast(u32, end_offset - 4),
+ .offset = @as(u32, @intCast(end_offset - 4)),
.addend = 0,
.pcrel = true,
});
@@ -209,13 +209,13 @@ fn fixupRelocs(emit: *Emit) Error!void {
for (emit.relocs.items) |reloc| {
const target = emit.code_offset_mapping.get(reloc.target) orelse
return emit.fail("JMP/CALL relocation target not found!", .{});
- const disp = @intCast(i32, @intCast(i64, target) - @intCast(i64, reloc.source + reloc.length));
+ const disp = @as(i32, @intCast(@as(i64, @intCast(target)) - @as(i64, @intCast(reloc.source + reloc.length))));
mem.writeIntLittle(i32, emit.code.items[reloc.offset..][0..4], disp);
}
}
fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) Error!void {
- const delta_line = @intCast(i32, line) - @intCast(i32, emit.prev_di_line);
+ const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line));
const delta_pc: usize = emit.code.items.len - emit.prev_di_pc;
log.debug(" (advance pc={d} and line={d})", .{ delta_line, delta_pc });
switch (emit.debug_output) {
@@ -233,22 +233,22 @@ fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) Error!void {
// increasing the line number
try @import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line);
// increasing the pc
- const d_pc_p9 = @intCast(i64, delta_pc) - quant;
+ const d_pc_p9 = @as(i64, @intCast(delta_pc)) - quant;
if (d_pc_p9 > 0) {
// minus one because if it's the last one, we want to leave space to change the line, which is one quantum
var diff = @divExact(d_pc_p9, quant) - quant;
while (diff > 0) {
if (diff < 64) {
- try dbg_out.dbg_line.append(@intCast(u8, diff + 128));
+ try dbg_out.dbg_line.append(@as(u8, @intCast(diff + 128)));
diff = 0;
} else {
- try dbg_out.dbg_line.append(@intCast(u8, 64 + 128));
+ try dbg_out.dbg_line.append(@as(u8, @intCast(64 + 128)));
diff -= 64;
}
}
if (dbg_out.pcop_change_index.*) |pci|
dbg_out.dbg_line.items[pci] += 1;
- dbg_out.pcop_change_index.* = @intCast(u32, dbg_out.dbg_line.items.len - 1);
+ dbg_out.pcop_change_index.* = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1));
} else if (d_pc_p9 == 0) {
// we don't need to do anything, because adding the quant does it for us
} else unreachable;
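The fixupRelocs hunk shows the same rewrite applied to a nested cast chain. A minimal sketch of that displacement computation with made-up offsets, not values from this diff: widen both unsigned offsets to i64 so the subtraction cannot overflow, then narrow the result to i32:

    const std = @import("std");

    test "relocation displacement: widen, subtract, narrow" {
        const target: u32 = 0x40;
        const source_end: u32 = 0x90; // stands in for reloc.source + reloc.length
        const disp = @as(i32, @intCast(@as(i64, @intCast(target)) - @as(i64, @intCast(source_end))));
        try std.testing.expectEqual(@as(i32, -0x50), disp);
    }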
diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig
index a3963ca149..ca260f5ec4 100644
--- a/src/arch/x86_64/Encoding.zig
+++ b/src/arch/x86_64/Encoding.zig
@@ -85,7 +85,7 @@ pub fn findByOpcode(opc: []const u8, prefixes: struct {
rex: Rex,
}, modrm_ext: ?u3) ?Encoding {
for (mnemonic_to_encodings_map, 0..) |encs, mnemonic_int| for (encs) |data| {
- const enc = Encoding{ .mnemonic = @enumFromInt(Mnemonic, mnemonic_int), .data = data };
+ const enc = Encoding{ .mnemonic = @as(Mnemonic, @enumFromInt(mnemonic_int)), .data = data };
if (modrm_ext) |ext| if (ext != data.modrm_ext) continue;
if (!std.mem.eql(u8, opc, enc.opcode())) continue;
if (prefixes.rex.w) {
@@ -763,7 +763,7 @@ fn estimateInstructionLength(prefix: Prefix, encoding: Encoding, ops: []const Op
var cwriter = std.io.countingWriter(std.io.null_writer);
inst.encode(cwriter.writer(), .{ .allow_frame_loc = true }) catch unreachable; // Not allowed to fail here unless OOM.
- return @intCast(usize, cwriter.bytes_written);
+ return @as(usize, @intCast(cwriter.bytes_written));
}
const mnemonic_to_encodings_map = init: {
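@enumFromInt changed shape the same way: the destination enum moves out of the builtin and into @as, or disappears when a result type exists. A sketch using a small hypothetical enum, not the repository's Mnemonic type:

    const std = @import("std");

    test "enumFromInt infers its destination enum" {
        const Op = enum(u8) { mov, add, sub };
        const mnemonic_int: usize = 2;
        // Old form: @enumFromInt(Op, mnemonic_int)
        const op = @as(Op, @enumFromInt(mnemonic_int));
        try std.testing.expectEqual(Op.sub, op);
    }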
diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig
index d77ddf3050..53aa182957 100644
--- a/src/arch/x86_64/Lower.zig
+++ b/src/arch/x86_64/Lower.zig
@@ -188,7 +188,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
.pseudo_probe_align_ri_s => {
try lower.emit(.none, .@"test", &.{
.{ .reg = inst.data.ri.r1 },
- .{ .imm = Immediate.s(@bitCast(i32, inst.data.ri.i)) },
+ .{ .imm = Immediate.s(@as(i32, @bitCast(inst.data.ri.i))) },
});
try lower.emit(.none, .jz, &.{
.{ .imm = lower.reloc(.{ .inst = index + 1 }) },
@@ -213,7 +213,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
},
.pseudo_probe_adjust_unrolled_ri_s => {
var offset = page_size;
- while (offset < @bitCast(i32, inst.data.ri.i)) : (offset += page_size) {
+ while (offset < @as(i32, @bitCast(inst.data.ri.i))) : (offset += page_size) {
try lower.emit(.none, .@"test", &.{
.{ .mem = Memory.sib(.dword, .{
.base = .{ .reg = inst.data.ri.r1 },
@@ -224,14 +224,14 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
}
try lower.emit(.none, .sub, &.{
.{ .reg = inst.data.ri.r1 },
- .{ .imm = Immediate.s(@bitCast(i32, inst.data.ri.i)) },
+ .{ .imm = Immediate.s(@as(i32, @bitCast(inst.data.ri.i))) },
});
assert(lower.result_insts_len <= pseudo_probe_adjust_unrolled_max_insts);
},
.pseudo_probe_adjust_setup_rri_s => {
try lower.emit(.none, .mov, &.{
.{ .reg = inst.data.rri.r2.to32() },
- .{ .imm = Immediate.s(@bitCast(i32, inst.data.rri.i)) },
+ .{ .imm = Immediate.s(@as(i32, @bitCast(inst.data.rri.i))) },
});
try lower.emit(.none, .sub, &.{
.{ .reg = inst.data.rri.r1 },
@@ -289,7 +289,7 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate {
.i_s,
.mi_sib_s,
.mi_rip_s,
- => Immediate.s(@bitCast(i32, i)),
+ => Immediate.s(@as(i32, @bitCast(i))),
.rrri,
.rri_u,
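Lower.zig stores immediates as u32 in Mir and reinterprets them as signed at lowering time; the hunks above only change the spelling of that reinterpretation. A sketch with an arbitrary bit pattern:

    const std = @import("std");

    test "reinterpreting a stored u32 immediate as signed" {
        const stored: u32 = 0xFFFF_FFF0; // two's-complement -16
        // Old form: @bitCast(i32, stored)
        const signed = @as(i32, @bitCast(stored));
        try std.testing.expectEqual(@as(i32, -16), signed);
    }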
diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig
index 36eacf4db9..7753104b96 100644
--- a/src/arch/x86_64/Mir.zig
+++ b/src/arch/x86_64/Mir.zig
@@ -989,7 +989,7 @@ pub const RegisterList = struct {
fn getIndexForReg(registers: []const Register, reg: Register) BitSet.MaskInt {
for (registers, 0..) |cpreg, i| {
- if (reg.id() == cpreg.id()) return @intCast(u32, i);
+ if (reg.id() == cpreg.id()) return @as(u32, @intCast(i));
}
unreachable; // register not in input register list!
}
@@ -1009,7 +1009,7 @@ pub const RegisterList = struct {
}
pub fn count(self: Self) u32 {
- return @intCast(u32, self.bitset.count());
+ return @as(u32, @intCast(self.bitset.count()));
}
};
@@ -1023,15 +1023,15 @@ pub const Imm64 = struct {
pub fn encode(v: u64) Imm64 {
return .{
- .msb = @truncate(u32, v >> 32),
- .lsb = @truncate(u32, v),
+ .msb = @as(u32, @truncate(v >> 32)),
+ .lsb = @as(u32, @truncate(v)),
};
}
pub fn decode(imm: Imm64) u64 {
var res: u64 = 0;
- res |= (@intCast(u64, imm.msb) << 32);
- res |= @intCast(u64, imm.lsb);
+ res |= (@as(u64, @intCast(imm.msb)) << 32);
+ res |= @as(u64, @intCast(imm.lsb));
return res;
}
};
@@ -1070,18 +1070,18 @@ pub const MemorySib = struct {
}
pub fn decode(msib: MemorySib) Memory {
- const scale = @truncate(u4, msib.scale_index);
+ const scale = @as(u4, @truncate(msib.scale_index));
assert(scale == 0 or std.math.isPowerOfTwo(scale));
return .{ .sib = .{
- .ptr_size = @enumFromInt(Memory.PtrSize, msib.ptr_size),
- .base = switch (@enumFromInt(Memory.Base.Tag, msib.base_tag)) {
+ .ptr_size = @as(Memory.PtrSize, @enumFromInt(msib.ptr_size)),
+ .base = switch (@as(Memory.Base.Tag, @enumFromInt(msib.base_tag))) {
.none => .none,
- .reg => .{ .reg = @enumFromInt(Register, msib.base) },
- .frame => .{ .frame = @enumFromInt(bits.FrameIndex, msib.base) },
+ .reg => .{ .reg = @as(Register, @enumFromInt(msib.base)) },
+ .frame => .{ .frame = @as(bits.FrameIndex, @enumFromInt(msib.base)) },
},
.scale_index = .{
.scale = scale,
- .index = if (scale > 0) @enumFromInt(Register, msib.scale_index >> 4) else undefined,
+ .index = if (scale > 0) @as(Register, @enumFromInt(msib.scale_index >> 4)) else undefined,
},
.disp = msib.disp,
} };
@@ -1103,7 +1103,7 @@ pub const MemoryRip = struct {
pub fn decode(mrip: MemoryRip) Memory {
return .{ .rip = .{
- .ptr_size = @enumFromInt(Memory.PtrSize, mrip.ptr_size),
+ .ptr_size = @as(Memory.PtrSize, @enumFromInt(mrip.ptr_size)),
.disp = mrip.disp,
} };
}
@@ -1120,14 +1120,14 @@ pub const MemoryMoffs = struct {
pub fn encode(seg: Register, offset: u64) MemoryMoffs {
return .{
.seg = @intFromEnum(seg),
- .msb = @truncate(u32, offset >> 32),
- .lsb = @truncate(u32, offset >> 0),
+ .msb = @as(u32, @truncate(offset >> 32)),
+ .lsb = @as(u32, @truncate(offset >> 0)),
};
}
pub fn decode(moffs: MemoryMoffs) Memory {
return .{ .moffs = .{
- .seg = @enumFromInt(Register, moffs.seg),
+ .seg = @as(Register, @enumFromInt(moffs.seg)),
.offset = @as(u64, moffs.msb) << 32 | @as(u64, moffs.lsb) << 0,
} };
}
@@ -1147,7 +1147,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: u32) struct { data: T, end:
inline for (fields) |field| {
@field(result, field.name) = switch (field.type) {
u32 => mir.extra[i],
- i32 => @bitCast(i32, mir.extra[i]),
+ i32 => @as(i32, @bitCast(mir.extra[i])),
else => @compileError("bad field type"),
};
i += 1;
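Mir.zig's Imm64 and MemoryMoffs split 64-bit values into two u32 words; the diff swaps @truncate's spelling without changing that packing. A standalone round-trip check mirroring the encode/decode pair:

    const std = @import("std");

    test "u64 split into msb/lsb words round-trips" {
        const v: u64 = 0x1234_5678_9ABC_DEF0;
        const msb = @as(u32, @truncate(v >> 32));
        const lsb = @as(u32, @truncate(v));
        const round_trip = @as(u64, msb) << 32 | @as(u64, lsb);
        try std.testing.expectEqual(v, round_trip);
    }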
diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig
index b4e175f33d..f1ce3ebeb8 100644
--- a/src/arch/x86_64/abi.zig
+++ b/src/arch/x86_64/abi.zig
@@ -278,7 +278,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
// "Otherwise class SSE is used."
result[result_i] = .sse;
}
- byte_i += @intCast(usize, field_size);
+ byte_i += @as(usize, @intCast(field_size));
if (byte_i == 8) {
byte_i = 0;
result_i += 1;
@@ -293,7 +293,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
result_i += field_class.len;
// If there are any bytes leftover, we have to try to combine
// the next field with them.
- byte_i = @intCast(usize, field_size % 8);
+ byte_i = @as(usize, @intCast(field_size % 8));
if (byte_i != 0) result_i -= 1;
}
}
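Function arguments are one of the result-type positions the new builtins can infer from, which is why many @as wrappers introduced by this diff are only transitional. A sketch under that assumption; takesU31 is a hypothetical helper, not an API of this codebase:

    const std = @import("std");

    fn takesU31(x: u31) u31 {
        return x;
    }

    test "a concrete parameter type drives the cast" {
        const field_size: u64 = 24;
        // Old form: takesU31(@intCast(u31, field_size))
        const r = takesU31(@intCast(field_size));
        try std.testing.expectEqual(@as(u31, 24), r);
    }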
diff --git a/src/arch/x86_64/bits.zig b/src/arch/x86_64/bits.zig
index e232a2db05..04b21b9e21 100644
--- a/src/arch/x86_64/bits.zig
+++ b/src/arch/x86_64/bits.zig
@@ -232,7 +232,7 @@ pub const Register = enum(u7) {
else => unreachable,
// zig fmt: on
};
- return @intCast(u6, @intFromEnum(reg) - base);
+ return @as(u6, @intCast(@intFromEnum(reg) - base));
}
pub fn bitSize(reg: Register) u64 {
@@ -291,11 +291,11 @@ pub const Register = enum(u7) {
else => unreachable,
// zig fmt: on
};
- return @truncate(u4, @intFromEnum(reg) - base);
+ return @as(u4, @truncate(@intFromEnum(reg) - base));
}
pub fn lowEnc(reg: Register) u3 {
- return @truncate(u3, reg.enc());
+ return @as(u3, @truncate(reg.enc()));
}
pub fn toBitSize(reg: Register, bit_size: u64) Register {
@@ -325,19 +325,19 @@ pub const Register = enum(u7) {
}
pub fn to64(reg: Register) Register {
- return @enumFromInt(Register, @intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.rax));
+ return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.rax)));
}
pub fn to32(reg: Register) Register {
- return @enumFromInt(Register, @intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.eax));
+ return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.eax)));
}
pub fn to16(reg: Register) Register {
- return @enumFromInt(Register, @intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.ax));
+ return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.ax)));
}
pub fn to8(reg: Register) Register {
- return @enumFromInt(Register, @intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.al));
+ return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.al)));
}
fn sseBase(reg: Register) u7 {
@@ -350,11 +350,11 @@ pub const Register = enum(u7) {
}
pub fn to256(reg: Register) Register {
- return @enumFromInt(Register, @intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.ymm0));
+ return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.ymm0)));
}
pub fn to128(reg: Register) Register {
- return @enumFromInt(Register, @intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.xmm0));
+ return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.xmm0)));
}
/// DWARF register encoding
@@ -363,7 +363,7 @@ pub const Register = enum(u7) {
.general_purpose => if (reg.isExtended())
reg.enc()
else
- @truncate(u3, @as(u24, 0o54673120) >> @as(u5, reg.enc()) * 3),
+ @as(u3, @truncate(@as(u24, 0o54673120) >> @as(u5, reg.enc()) * 3)),
.sse => 17 + @as(u6, reg.enc()),
.x87 => 33 + @as(u6, reg.enc()),
.mmx => 41 + @as(u6, reg.enc()),
@@ -610,15 +610,15 @@ pub const Immediate = union(enum) {
pub fn asUnsigned(imm: Immediate, bit_size: u64) u64 {
return switch (imm) {
.signed => |x| switch (bit_size) {
- 1, 8 => @bitCast(u8, @intCast(i8, x)),
- 16 => @bitCast(u16, @intCast(i16, x)),
- 32, 64 => @bitCast(u32, x),
+ 1, 8 => @as(u8, @bitCast(@as(i8, @intCast(x)))),
+ 16 => @as(u16, @bitCast(@as(i16, @intCast(x)))),
+ 32, 64 => @as(u32, @bitCast(x)),
else => unreachable,
},
.unsigned => |x| switch (bit_size) {
- 1, 8 => @intCast(u8, x),
- 16 => @intCast(u16, x),
- 32 => @intCast(u32, x),
+ 1, 8 => @as(u8, @intCast(x)),
+ 16 => @as(u16, @intCast(x)),
+ 32 => @as(u32, @intCast(x)),
64 => x,
else => unreachable,
},
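bits.zig's Immediate.asUnsigned narrows a signed immediate to the operand width and then reinterprets the bits. A sketch of the 8-bit case with an arbitrary value:

    const std = @import("std");

    test "narrow to i8, then reinterpret as u8" {
        const x: i32 = -1;
        // Old form: @bitCast(u8, @intCast(i8, x))
        const byte = @as(u8, @bitCast(@as(i8, @intCast(x))));
        try std.testing.expectEqual(@as(u8, 0xFF), byte);
    }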
diff --git a/src/arch/x86_64/encoder.zig b/src/arch/x86_64/encoder.zig
index d953a9410d..bc4c59dc86 100644
--- a/src/arch/x86_64/encoder.zig
+++ b/src/arch/x86_64/encoder.zig
@@ -471,7 +471,7 @@ pub const Instruction = struct {
} else {
try encoder.sib_baseDisp8(dst);
}
- try encoder.disp8(@truncate(i8, sib.disp));
+ try encoder.disp8(@as(i8, @truncate(sib.disp)));
} else {
try encoder.modRm_SIBDisp32(src);
if (mem.scaleIndex()) |si| {
@@ -487,7 +487,7 @@ pub const Instruction = struct {
try encoder.modRm_indirectDisp0(src, dst);
} else if (math.cast(i8, sib.disp)) |_| {
try encoder.modRm_indirectDisp8(src, dst);
- try encoder.disp8(@truncate(i8, sib.disp));
+ try encoder.disp8(@as(i8, @truncate(sib.disp)));
} else {
try encoder.modRm_indirectDisp32(src, dst);
try encoder.disp32(sib.disp);
@@ -509,9 +509,9 @@ pub const Instruction = struct {
fn encodeImm(imm: Immediate, kind: Encoding.Op, encoder: anytype) !void {
const raw = imm.asUnsigned(kind.immBitSize());
switch (kind.immBitSize()) {
- 8 => try encoder.imm8(@intCast(u8, raw)),
- 16 => try encoder.imm16(@intCast(u16, raw)),
- 32 => try encoder.imm32(@intCast(u32, raw)),
+ 8 => try encoder.imm8(@as(u8, @intCast(raw))),
+ 16 => try encoder.imm16(@as(u16, @intCast(raw))),
+ 32 => try encoder.imm32(@as(u32, @intCast(raw))),
64 => try encoder.imm64(raw),
else => unreachable,
}
@@ -581,7 +581,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
/// Encodes legacy prefixes
pub fn legacyPrefixes(self: Self, prefixes: LegacyPrefixes) !void {
- if (@bitCast(u16, prefixes) != 0) {
+ if (@as(u16, @bitCast(prefixes)) != 0) {
// Hopefully this path isn't taken very often, so we'll do it the slow way for now
// LOCK
@@ -891,7 +891,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
///
/// It is sign-extended to 64 bits by the CPU.
pub fn disp8(self: Self, disp: i8) !void {
- try self.writer.writeByte(@bitCast(u8, disp));
+ try self.writer.writeByte(@as(u8, @bitCast(disp)));
}
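encoder.zig's disp8 writes the displacement's raw byte to the output stream. A self-contained sketch of that write, using a fixed buffer in place of the encoder's writer:

    const std = @import("std");

    test "an i8 displacement is emitted as its raw byte" {
        const disp: i8 = -8;
        var buf: [1]u8 = undefined;
        var fbs = std.io.fixedBufferStream(&buf);
        // Old form: @bitCast(u8, disp)
        try fbs.writer().writeByte(@as(u8, @bitCast(disp)));
        try std.testing.expectEqual(@as(u8, 0xF8), buf[0]);
    }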
/// Encode a 32-bit displacement